pci.c 85 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
727782779278027812782278327842785278627872788278927902791279227932794279527962797279827992800280128022803280428052806280728082809281028112812281328142815281628172818
  1. /*
  2. * QEMU PCI bus manager
  3. *
  4. * Copyright (c) 2004 Fabrice Bellard
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining a copy
  7. * of this software and associated documentation files (the "Software"), to deal
  8. * in the Software without restriction, including without limitation the rights
  9. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  10. * copies of the Software, and to permit persons to whom the Software is
  11. * furnished to do so, subject to the following conditions:
  12. *
  13. * The above copyright notice and this permission notice shall be included in
  14. * all copies or substantial portions of the Software.
  15. *
  16. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  19. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  20. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  21. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  22. * THE SOFTWARE.
  23. */
  24. #include "qemu/osdep.h"
  25. #include "qemu/datadir.h"
  26. #include "qemu/units.h"
  27. #include "hw/irq.h"
  28. #include "hw/pci/pci.h"
  29. #include "hw/pci/pci_bridge.h"
  30. #include "hw/pci/pci_bus.h"
  31. #include "hw/pci/pci_host.h"
  32. #include "hw/qdev-properties.h"
  33. #include "hw/qdev-properties-system.h"
  34. #include "migration/qemu-file-types.h"
  35. #include "migration/vmstate.h"
  36. #include "net/net.h"
  37. #include "sysemu/numa.h"
  38. #include "sysemu/sysemu.h"
  39. #include "hw/loader.h"
  40. #include "qemu/error-report.h"
  41. #include "qemu/range.h"
  42. #include "trace.h"
  43. #include "hw/pci/msi.h"
  44. #include "hw/pci/msix.h"
  45. #include "hw/hotplug.h"
  46. #include "hw/boards.h"
  47. #include "qapi/error.h"
  48. #include "qemu/cutils.h"
  49. #include "pci-internal.h"
  50. #include "hw/xen/xen.h"
  51. #include "hw/i386/kvm/xen_evtchn.h"
  52. //#define DEBUG_PCI
  53. #ifdef DEBUG_PCI
  54. # define PCI_DPRINTF(format, ...) printf(format, ## __VA_ARGS__)
  55. #else
  56. # define PCI_DPRINTF(format, ...) do { } while (0)
  57. #endif
  58. bool pci_available = true;
  59. static char *pcibus_get_dev_path(DeviceState *dev);
  60. static char *pcibus_get_fw_dev_path(DeviceState *dev);
  61. static void pcibus_reset(BusState *qbus);
/* qdev properties shared by every PCI device. */
static Property pci_props[] = {
    /* Requested slot/function; -1 selects the first free devfn. */
    DEFINE_PROP_PCI_DEVFN("addr", PCIDevice, devfn, -1),
    /* Option ROM image file; romsize -1 means "derive from the image". */
    DEFINE_PROP_STRING("romfile", PCIDevice, romfile),
    DEFINE_PROP_UINT32("romsize", PCIDevice, romsize, -1),
    DEFINE_PROP_UINT32("rombar", PCIDevice, rom_bar, 1),
    DEFINE_PROP_BIT("multifunction", PCIDevice, cap_present,
                    QEMU_PCI_CAP_MULTIFUNCTION_BITNR, false),
    DEFINE_PROP_BIT("x-pcie-lnksta-dllla", PCIDevice, cap_present,
                    QEMU_PCIE_LNKSTA_DLLLA_BITNR, true),
    DEFINE_PROP_BIT("x-pcie-extcap-init", PCIDevice, cap_present,
                    QEMU_PCIE_EXTCAP_INIT_BITNR, true),
    /* Pairs this device with a failover partner (net failover). */
    DEFINE_PROP_STRING("failover_pair_id", PCIDevice,
                       failover_pair_id),
    /* 0 means "no acpi-index assigned"; used values are tracked globally. */
    DEFINE_PROP_UINT32("acpi-index", PCIDevice, acpi_index, 0),
    DEFINE_PROP_END_OF_LIST()
};
/*
 * Migration state of a PCIBus: the per-line INTx assertion counters.
 * nirq must match on source and destination (INT32_EQUAL); irq_count is
 * streamed as a variable-length array of nirq int32 values.
 */
static const VMStateDescription vmstate_pcibus = {
    .name = "PCIBUS",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_EQUAL(nirq, PCIBus, NULL),
        VMSTATE_VARRAY_INT32(irq_count, PCIBus,
                             nirq, 0, vmstate_info_int32,
                             int32_t),
        VMSTATE_END_OF_LIST()
    }
};
  90. static gint g_cmp_uint32(gconstpointer a, gconstpointer b, gpointer user_data)
  91. {
  92. return a - b;
  93. }
  94. static GSequence *pci_acpi_index_list(void)
  95. {
  96. static GSequence *used_acpi_index_list;
  97. if (!used_acpi_index_list) {
  98. used_acpi_index_list = g_sequence_new(NULL);
  99. }
  100. return used_acpi_index_list;
  101. }
/*
 * Create the "bus master" alias of the device's DMA address space.
 * The alias starts out disabled; it is enabled when the guest sets
 * PCI_COMMAND_MASTER (see get_pci_config_device() for the migration path).
 */
static void pci_init_bus_master(PCIDevice *pci_dev)
{
    AddressSpace *dma_as = pci_device_iommu_address_space(pci_dev);

    memory_region_init_alias(&pci_dev->bus_master_enable_region,
                             OBJECT(pci_dev), "bus master",
                             dma_as->root, 0, memory_region_size(dma_as->root));
    memory_region_set_enabled(&pci_dev->bus_master_enable_region, false);
    memory_region_add_subregion(&pci_dev->bus_master_container_region, 0,
                                &pci_dev->bus_master_enable_region);
}
  112. static void pcibus_machine_done(Notifier *notifier, void *data)
  113. {
  114. PCIBus *bus = container_of(notifier, PCIBus, machine_done);
  115. int i;
  116. for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
  117. if (bus->devices[i]) {
  118. pci_init_bus_master(bus->devices[i]);
  119. }
  120. }
  121. }
/*
 * Realize a PCI bus: defer bus-master setup to machine-init-done and
 * register the bus's INTx counters for migration.
 */
static void pci_bus_realize(BusState *qbus, Error **errp)
{
    PCIBus *bus = PCI_BUS(qbus);

    bus->machine_done.notify = pcibus_machine_done;
    qemu_add_machine_init_done_notifier(&bus->machine_done);

    vmstate_register(NULL, VMSTATE_INSTANCE_ID_ANY, &vmstate_pcibus, bus);
}
  129. static void pcie_bus_realize(BusState *qbus, Error **errp)
  130. {
  131. PCIBus *bus = PCI_BUS(qbus);
  132. Error *local_err = NULL;
  133. pci_bus_realize(qbus, &local_err);
  134. if (local_err) {
  135. error_propagate(errp, local_err);
  136. return;
  137. }
  138. /*
  139. * A PCI-E bus can support extended config space if it's the root
  140. * bus, or if the bus/bridge above it does as well
  141. */
  142. if (pci_bus_is_root(bus)) {
  143. bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE;
  144. } else {
  145. PCIBus *parent_bus = pci_get_bus(bus->parent_dev);
  146. if (pci_bus_allows_extended_config_space(parent_bus)) {
  147. bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE;
  148. }
  149. }
  150. }
/* Counterpart of pci_bus_realize(): drop the notifier and vmstate. */
static void pci_bus_unrealize(BusState *qbus)
{
    PCIBus *bus = PCI_BUS(qbus);

    qemu_remove_machine_init_done_notifier(&bus->machine_done);

    vmstate_unregister(NULL, &vmstate_pcibus, bus);
}
  157. static int pcibus_num(PCIBus *bus)
  158. {
  159. if (pci_bus_is_root(bus)) {
  160. return 0; /* pci host bridge */
  161. }
  162. return bus->parent_dev->config[PCI_SECONDARY_BUS];
  163. }
/* Default PCIBusClass::numa_node hook: no NUMA affinity assigned. */
static uint16_t pcibus_numa_node(PCIBus *bus)
{
    return NUMA_NODE_UNASSIGNED;
}
/* Wire up the BusClass and PCIBusClass hooks for the base PCI bus type. */
static void pci_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *k = BUS_CLASS(klass);
    PCIBusClass *pbc = PCI_BUS_CLASS(klass);

    k->print_dev = pcibus_dev_print;
    k->get_dev_path = pcibus_get_dev_path;
    k->get_fw_dev_path = pcibus_get_fw_dev_path;
    k->realize = pci_bus_realize;
    k->unrealize = pci_bus_unrealize;
    k->reset = pcibus_reset;

    pbc->bus_num = pcibus_num;
    pbc->numa_node = pcibus_numa_node;
}
/* QOM type registration for the PCI bus flavours and device interfaces. */
static const TypeInfo pci_bus_info = {
    .name = TYPE_PCI_BUS,
    .parent = TYPE_BUS,
    .instance_size = sizeof(PCIBus),
    .class_size = sizeof(PCIBusClass),
    .class_init = pci_bus_class_init,
};

/* Marker interfaces implemented by devices to declare their bus flavour. */
static const TypeInfo cxl_interface_info = {
    .name = INTERFACE_CXL_DEVICE,
    .parent = TYPE_INTERFACE,
};

static const TypeInfo pcie_interface_info = {
    .name = INTERFACE_PCIE_DEVICE,
    .parent = TYPE_INTERFACE,
};

static const TypeInfo conventional_pci_interface_info = {
    .name = INTERFACE_CONVENTIONAL_PCI_DEVICE,
    .parent = TYPE_INTERFACE,
};

/* PCIe buses only override realize (extended config space handling). */
static void pcie_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *k = BUS_CLASS(klass);

    k->realize = pcie_bus_realize;
}

static const TypeInfo pcie_bus_info = {
    .name = TYPE_PCIE_BUS,
    .parent = TYPE_PCI_BUS,
    .class_init = pcie_bus_class_init,
};

/* A CXL bus behaves like a PCIe bus at this level. */
static const TypeInfo cxl_bus_info = {
    .name = TYPE_CXL_BUS,
    .parent = TYPE_PCIE_BUS,
    .class_init = pcie_bus_class_init,
};
  215. static void pci_update_mappings(PCIDevice *d);
  216. static void pci_irq_handler(void *opaque, int irq_num, int level);
  217. static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom, Error **);
  218. static void pci_del_option_rom(PCIDevice *pdev);
  219. static uint16_t pci_default_sub_vendor_id = PCI_SUBVENDOR_ID_REDHAT_QUMRANET;
  220. static uint16_t pci_default_sub_device_id = PCI_SUBDEVICE_ID_QEMU;
  221. PCIHostStateList pci_host_bridges;
  222. int pci_bar(PCIDevice *d, int reg)
  223. {
  224. uint8_t type;
  225. /* PCIe virtual functions do not have their own BARs */
  226. assert(!pci_is_vf(d));
  227. if (reg != PCI_ROM_SLOT)
  228. return PCI_BASE_ADDRESS_0 + reg * 4;
  229. type = d->config[PCI_HEADER_TYPE] & ~PCI_HEADER_TYPE_MULTI_FUNCTION;
  230. return type == PCI_HEADER_TYPE_BRIDGE ? PCI_ROM_ADDRESS1 : PCI_ROM_ADDRESS;
  231. }
/* Return the current level (0 or 1) of INTx pin irq_num. */
static inline int pci_irq_state(PCIDevice *d, int irq_num)
{
    return (d->irq_state >> irq_num) & 0x1;
}
  236. static inline void pci_set_irq_state(PCIDevice *d, int irq_num, int level)
  237. {
  238. d->irq_state &= ~(0x1 << irq_num);
  239. d->irq_state |= level << irq_num;
  240. }
/*
 * Adjust the shared-line counter for irq_num by @change and forward the
 * resulting level (asserted while any device holds the line) to the
 * platform's set_irq hook.
 */
static void pci_bus_change_irq_level(PCIBus *bus, int irq_num, int change)
{
    assert(irq_num >= 0);
    assert(irq_num < bus->nirq);
    bus->irq_count[irq_num] += change;
    bus->set_irq(bus->irq_opaque, irq_num, bus->irq_count[irq_num] != 0);
}
  248. static void pci_change_irq_level(PCIDevice *pci_dev, int irq_num, int change)
  249. {
  250. PCIBus *bus;
  251. for (;;) {
  252. int dev_irq = irq_num;
  253. bus = pci_get_bus(pci_dev);
  254. assert(bus->map_irq);
  255. irq_num = bus->map_irq(pci_dev, irq_num);
  256. trace_pci_route_irq(dev_irq, DEVICE(pci_dev)->canonical_path, irq_num,
  257. pci_bus_is_root(bus) ? "root-complex"
  258. : DEVICE(bus->parent_dev)->canonical_path);
  259. if (bus->set_irq)
  260. break;
  261. pci_dev = bus->parent_dev;
  262. }
  263. pci_bus_change_irq_level(bus, irq_num, change);
  264. }
/* Return 1 if bus-level interrupt line irq_num is currently asserted. */
int pci_bus_get_irq_level(PCIBus *bus, int irq_num)
{
    assert(irq_num >= 0);
    assert(irq_num < bus->nirq);
    return !!bus->irq_count[irq_num];
}
  271. /* Update interrupt status bit in config space on interrupt
  272. * state change. */
  273. static void pci_update_irq_status(PCIDevice *dev)
  274. {
  275. if (dev->irq_state) {
  276. dev->config[PCI_STATUS] |= PCI_STATUS_INTERRUPT;
  277. } else {
  278. dev->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT;
  279. }
  280. }
  281. void pci_device_deassert_intx(PCIDevice *dev)
  282. {
  283. int i;
  284. for (i = 0; i < PCI_NUM_PINS; ++i) {
  285. pci_irq_handler(dev, i, 0);
  286. }
  287. }
/*
 * Deliver an MSI: write msg.data to msg.address through the device's
 * bus-master address space, tagged with the device's requester ID.
 */
static void pci_msi_trigger(PCIDevice *dev, MSIMessage msg)
{
    MemTxAttrs attrs = {};

    /*
     * Xen uses the high bits of the address to contain some of the bits
     * of the PIRQ#. Therefore we can't just send the write cycle and
     * trust that it's caught by the APIC at 0xfee00000 because the
     * target of the write might be e.g. 0x0x1000fee46000 for PIRQ#4166.
     * So we intercept the delivery here instead of in kvm_send_msi().
     */
    if (xen_mode == XEN_EMULATE &&
        xen_evtchn_deliver_pirq_msi(msg.address, msg.data)) {
        return;
    }
    attrs.requester_id = pci_requester_id(dev);
    address_space_stl_le(&dev->bus_master_as, msg.address, msg.data,
                         attrs, NULL);
}
  306. static void pci_reset_regions(PCIDevice *dev)
  307. {
  308. int r;
  309. if (pci_is_vf(dev)) {
  310. return;
  311. }
  312. for (r = 0; r < PCI_NUM_REGIONS; ++r) {
  313. PCIIORegion *region = &dev->io_regions[r];
  314. if (!region->size) {
  315. continue;
  316. }
  317. if (!(region->type & PCI_BASE_ADDRESS_SPACE_IO) &&
  318. region->type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
  319. pci_set_quad(dev->config + pci_bar(dev, r), region->type);
  320. } else {
  321. pci_set_long(dev->config + pci_bar(dev, r), region->type);
  322. }
  323. }
  324. }
/*
 * Reset the software-visible PCI state of a device: deassert INTx,
 * clear all guest-writable command/status/interrupt-line bits, reset
 * BARs to their type bits, refresh the mappings, and reset MSI/MSI-X.
 */
static void pci_do_device_reset(PCIDevice *dev)
{
    pci_device_deassert_intx(dev);
    assert(dev->irq_state == 0);

    /* Clear all writable bits */
    pci_word_test_and_clear_mask(dev->config + PCI_COMMAND,
                                 pci_get_word(dev->wmask + PCI_COMMAND) |
                                 pci_get_word(dev->w1cmask + PCI_COMMAND));
    pci_word_test_and_clear_mask(dev->config + PCI_STATUS,
                                 pci_get_word(dev->wmask + PCI_STATUS) |
                                 pci_get_word(dev->w1cmask + PCI_STATUS));
    /* Some devices make bits of PCI_INTERRUPT_LINE read only */
    pci_byte_test_and_clear_mask(dev->config + PCI_INTERRUPT_LINE,
                                 pci_get_word(dev->wmask + PCI_INTERRUPT_LINE) |
                                 pci_get_word(dev->w1cmask + PCI_INTERRUPT_LINE));
    dev->config[PCI_CACHE_LINE_SIZE] = 0x0;
    /* BARs back to type bits, then re-derive the memory region mappings. */
    pci_reset_regions(dev);
    pci_update_mappings(dev);

    msi_reset(dev);
    msix_reset(dev);
}
/*
 * This function is called on #RST and FLR.
 * FLR if PCI_EXP_DEVCTL_BCR_FLR is set
 */
void pci_device_reset(PCIDevice *dev)
{
    /* Cold-reset the qdev state first, then the PCI config-space state. */
    device_cold_reset(&dev->qdev);
    pci_do_device_reset(dev);
}
  355. /*
  356. * Trigger pci bus reset under a given bus.
  357. * Called via bus_cold_reset on RST# assert, after the devices
  358. * have been reset device_cold_reset-ed already.
  359. */
  360. static void pcibus_reset(BusState *qbus)
  361. {
  362. PCIBus *bus = DO_UPCAST(PCIBus, qbus, qbus);
  363. int i;
  364. for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
  365. if (bus->devices[i]) {
  366. pci_do_device_reset(bus->devices[i]);
  367. }
  368. }
  369. for (i = 0; i < bus->nirq; i++) {
  370. assert(bus->irq_count[i] == 0);
  371. }
  372. }
/* Add a host bridge to the global pci_host_bridges list. */
static void pci_host_bus_register(DeviceState *host)
{
    PCIHostState *host_bridge = PCI_HOST_BRIDGE(host);

    QLIST_INSERT_HEAD(&pci_host_bridges, host_bridge, next);
}
/* Remove a host bridge from the global pci_host_bridges list. */
static void pci_host_bus_unregister(DeviceState *host)
{
    PCIHostState *host_bridge = PCI_HOST_BRIDGE(host);

    QLIST_REMOVE(host_bridge, next);
}
/*
 * Walk up through parent bridges until the root bus of the hierarchy
 * containing @d is found.
 */
PCIBus *pci_device_root_bus(const PCIDevice *d)
{
    PCIBus *bus = pci_get_bus(d);

    while (!pci_bus_is_root(bus)) {
        d = bus->parent_dev;
        assert(d != NULL);

        bus = pci_get_bus(d);
    }

    return bus;
}
  393. const char *pci_root_bus_path(PCIDevice *dev)
  394. {
  395. PCIBus *rootbus = pci_device_root_bus(dev);
  396. PCIHostState *host_bridge = PCI_HOST_BRIDGE(rootbus->qbus.parent);
  397. PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_GET_CLASS(host_bridge);
  398. assert(host_bridge->bus == rootbus);
  399. if (hc->root_bus_path) {
  400. return (*hc->root_bus_path)(host_bridge, rootbus);
  401. }
  402. return rootbus->qbus.name;
  403. }
  404. bool pci_bus_bypass_iommu(PCIBus *bus)
  405. {
  406. PCIBus *rootbus = bus;
  407. PCIHostState *host_bridge;
  408. if (!pci_bus_is_root(bus)) {
  409. rootbus = pci_device_root_bus(bus->parent_dev);
  410. }
  411. host_bridge = PCI_HOST_BRIDGE(rootbus->qbus.parent);
  412. assert(host_bridge->bus == rootbus);
  413. return host_bridge->bypass_iommu;
  414. }
/*
 * Common setup for a root PCI bus: record the memory/IO address spaces
 * and the first allowed devfn, mark the bus as root, and register its
 * host bridge.  devfn_min must address function 0 of a slot.
 */
static void pci_root_bus_internal_init(PCIBus *bus, DeviceState *parent,
                                       MemoryRegion *address_space_mem,
                                       MemoryRegion *address_space_io,
                                       uint8_t devfn_min)
{
    assert(PCI_FUNC(devfn_min) == 0);
    bus->devfn_min = devfn_min;
    bus->slot_reserved_mask = 0x0;
    bus->address_space_mem = address_space_mem;
    bus->address_space_io = address_space_io;
    bus->flags |= PCI_BUS_IS_ROOT;

    /* host bridge */
    QLIST_INIT(&bus->child);

    pci_host_bus_register(parent);
}
/* Counterpart of pci_root_bus_internal_init(): unregister the bridge. */
static void pci_bus_uninit(PCIBus *bus)
{
    pci_host_bus_unregister(BUS(bus)->parent);
}
  434. bool pci_bus_is_express(const PCIBus *bus)
  435. {
  436. return object_dynamic_cast(OBJECT(bus), TYPE_PCIE_BUS);
  437. }
/* In-place (embedded) initialization of a root PCI bus. */
void pci_root_bus_init(PCIBus *bus, size_t bus_size, DeviceState *parent,
                       const char *name,
                       MemoryRegion *address_space_mem,
                       MemoryRegion *address_space_io,
                       uint8_t devfn_min, const char *typename)
{
    qbus_init(bus, bus_size, typename, parent, name);
    pci_root_bus_internal_init(bus, parent, address_space_mem,
                               address_space_io, devfn_min);
}
/* Heap-allocating variant of pci_root_bus_init(). */
PCIBus *pci_root_bus_new(DeviceState *parent, const char *name,
                         MemoryRegion *address_space_mem,
                         MemoryRegion *address_space_io,
                         uint8_t devfn_min, const char *typename)
{
    PCIBus *bus;

    bus = PCI_BUS(qbus_new(typename, parent, name));
    pci_root_bus_internal_init(bus, parent, address_space_mem,
                               address_space_io, devfn_min);
    return bus;
}
/* Tear down a root bus created by pci_root_bus_new()/pci_root_bus_init(). */
void pci_root_bus_cleanup(PCIBus *bus)
{
    pci_bus_uninit(bus);
    /* the caller of the unplug hotplug handler will delete this device */
    qbus_unrealize(BUS(bus));
}
/*
 * Attach the platform interrupt delivery hook to @bus and allocate the
 * zeroed per-line assertion counters for @nirq INTx lines.
 */
void pci_bus_irqs(PCIBus *bus, pci_set_irq_fn set_irq,
                  void *irq_opaque, int nirq)
{
    bus->set_irq = set_irq;
    bus->irq_opaque = irq_opaque;
    bus->nirq = nirq;
    bus->irq_count = g_malloc0(nirq * sizeof(bus->irq_count[0]));
}
/* Install the devfn/pin -> bus-line mapping hook for @bus. */
void pci_bus_map_irqs(PCIBus *bus, pci_map_irq_fn map_irq)
{
    bus->map_irq = map_irq;
}
  477. void pci_bus_irqs_cleanup(PCIBus *bus)
  478. {
  479. bus->set_irq = NULL;
  480. bus->map_irq = NULL;
  481. bus->irq_opaque = NULL;
  482. bus->nirq = 0;
  483. g_free(bus->irq_count);
  484. }
/* Convenience: create a root bus and wire up its IRQ hooks in one call. */
PCIBus *pci_register_root_bus(DeviceState *parent, const char *name,
                              pci_set_irq_fn set_irq, pci_map_irq_fn map_irq,
                              void *irq_opaque,
                              MemoryRegion *address_space_mem,
                              MemoryRegion *address_space_io,
                              uint8_t devfn_min, int nirq,
                              const char *typename)
{
    PCIBus *bus;

    bus = pci_root_bus_new(parent, name, address_space_mem,
                           address_space_io, devfn_min, typename);
    pci_bus_irqs(bus, set_irq, irq_opaque, nirq);
    pci_bus_map_irqs(bus, map_irq);
    return bus;
}
/* Counterpart of pci_register_root_bus(). */
void pci_unregister_root_bus(PCIBus *bus)
{
    pci_bus_irqs_cleanup(bus);
    pci_root_bus_cleanup(bus);
}
/* Bus number, via the PCIBusClass::bus_num hook (see pcibus_num()). */
int pci_bus_num(PCIBus *s)
{
    return PCI_BUS_GET_CLASS(s)->bus_num(s);
}
  509. /* Returns the min and max bus numbers of a PCI bus hierarchy */
  510. void pci_bus_range(PCIBus *bus, int *min_bus, int *max_bus)
  511. {
  512. int i;
  513. *min_bus = *max_bus = pci_bus_num(bus);
  514. for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
  515. PCIDevice *dev = bus->devices[i];
  516. if (dev && IS_PCI_BRIDGE(dev)) {
  517. *min_bus = MIN(*min_bus, dev->config[PCI_SECONDARY_BUS]);
  518. *max_bus = MAX(*max_bus, dev->config[PCI_SUBORDINATE_BUS]);
  519. }
  520. }
  521. }
/* NUMA node, via the PCIBusClass::numa_node hook (see pcibus_numa_node()). */
int pci_bus_numa_node(PCIBus *bus)
{
    return PCI_BUS_GET_CLASS(bus)->numa_node(bus);
}
/*
 * VMState .get handler for the raw config space buffer.  Incoming bytes
 * are validated against cmask (bits that must match) restricted to bits
 * the guest cannot change (~wmask & ~w1cmask); on success the buffer is
 * committed and the derived state (BAR mappings, bridge windows, bus
 * master enable) is refreshed.  Returns 0, or -EINVAL on a mismatch.
 */
static int get_pci_config_device(QEMUFile *f, void *pv, size_t size,
                                 const VMStateField *field)
{
    PCIDevice *s = container_of(pv, PCIDevice, config);
    uint8_t *config;
    int i;

    assert(size == pci_config_size(s));
    config = g_malloc(size);

    qemu_get_buffer(f, config, size);
    for (i = 0; i < size; ++i) {
        if ((config[i] ^ s->config[i]) &
            s->cmask[i] & ~s->wmask[i] & ~s->w1cmask[i]) {
            error_report("%s: Bad config data: i=0x%x read: %x device: %x "
                         "cmask: %x wmask: %x w1cmask:%x", __func__,
                         i, config[i], s->config[i],
                         s->cmask[i], s->wmask[i], s->w1cmask[i]);
            g_free(config);
            return -EINVAL;
        }
    }
    memcpy(s->config, config, size);

    pci_update_mappings(s);
    if (IS_PCI_BRIDGE(s)) {
        pci_bridge_update_mappings(PCI_BRIDGE(s));
    }

    /* Bus mastering follows the migrated PCI_COMMAND_MASTER bit. */
    memory_region_set_enabled(&s->bus_master_enable_region,
                              pci_get_word(s->config + PCI_COMMAND)
                              & PCI_COMMAND_MASTER);

    g_free(config);
    return 0;
}
/* just put buffer */
/* VMState .put handler: the config space is streamed as raw bytes. */
static int put_pci_config_device(QEMUFile *f, void *pv, size_t size,
                                 const VMStateField *field, JSONWriter *vmdesc)
{
    const uint8_t **v = pv;

    assert(size == pci_config_size(container_of(pv, PCIDevice, config)));
    qemu_put_buffer(f, *v, size);
    return 0;
}
/* Custom VMStateInfo pairing the config-space get/put handlers above. */
static VMStateInfo vmstate_info_pci_config = {
    .name = "pci config",
    .get = get_pci_config_device,
    .put = put_pci_config_device,
};
  571. static int get_pci_irq_state(QEMUFile *f, void *pv, size_t size,
  572. const VMStateField *field)
  573. {
  574. PCIDevice *s = container_of(pv, PCIDevice, irq_state);
  575. uint32_t irq_state[PCI_NUM_PINS];
  576. int i;
  577. for (i = 0; i < PCI_NUM_PINS; ++i) {
  578. irq_state[i] = qemu_get_be32(f);
  579. if (irq_state[i] != 0x1 && irq_state[i] != 0) {
  580. fprintf(stderr, "irq state %d: must be 0 or 1.\n",
  581. irq_state[i]);
  582. return -EINVAL;
  583. }
  584. }
  585. for (i = 0; i < PCI_NUM_PINS; ++i) {
  586. pci_set_irq_state(s, i, irq_state[i]);
  587. }
  588. return 0;
  589. }
  590. static int put_pci_irq_state(QEMUFile *f, void *pv, size_t size,
  591. const VMStateField *field, JSONWriter *vmdesc)
  592. {
  593. int i;
  594. PCIDevice *s = container_of(pv, PCIDevice, irq_state);
  595. for (i = 0; i < PCI_NUM_PINS; ++i) {
  596. qemu_put_be32(f, pci_irq_state(s, i));
  597. }
  598. return 0;
  599. }
/* Custom VMStateInfo pairing the INTx-state get/put handlers above. */
static VMStateInfo vmstate_info_pci_irq_state = {
    .name = "pci irq state",
    .get = get_pci_irq_state,
    .put = put_pci_irq_state,
};
  605. static bool migrate_is_pcie(void *opaque, int version_id)
  606. {
  607. return pci_is_express((PCIDevice *)opaque);
  608. }
  609. static bool migrate_is_not_pcie(void *opaque, int version_id)
  610. {
  611. return !pci_is_express((PCIDevice *)opaque);
  612. }
/*
 * Migration description for a generic PCI device: the version word,
 * the config space buffer (legacy or express size — exactly one of the
 * two entries matches, selected by the migrate_is_*pcie predicates),
 * and the four INTx pin levels.
 */
const VMStateDescription vmstate_pci_device = {
    .name = "PCIDevice",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_POSITIVE_LE(version_id, PCIDevice),
        VMSTATE_BUFFER_UNSAFE_INFO_TEST(config, PCIDevice,
                                        migrate_is_not_pcie,
                                        0, vmstate_info_pci_config,
                                        PCI_CONFIG_SPACE_SIZE),
        VMSTATE_BUFFER_UNSAFE_INFO_TEST(config, PCIDevice,
                                        migrate_is_pcie,
                                        0, vmstate_info_pci_config,
                                        PCIE_CONFIG_SPACE_SIZE),
        VMSTATE_BUFFER_UNSAFE_INFO(irq_state, PCIDevice, 2,
                                   vmstate_info_pci_irq_state,
                                   PCI_NUM_PINS * sizeof(int32_t)),
        VMSTATE_END_OF_LIST()
    }
};
/* Save @s's migratable state (config space and INTx pins) to @f. */
void pci_device_save(PCIDevice *s, QEMUFile *f)
{
    /* Clear interrupt status bit: it is implicit
     * in irq_state which we are saving.
     * This makes us compatible with old devices
     * which never set or clear this bit. */
    s->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT;
    vmstate_save_state(f, &vmstate_pci_device, s, NULL);
    /* Restore the interrupt status bit. */
    pci_update_irq_status(s);
}
  644. int pci_device_load(PCIDevice *s, QEMUFile *f)
  645. {
  646. int ret;
  647. ret = vmstate_load_state(f, &vmstate_pci_device, s, s->version_id);
  648. /* Restore the interrupt status bit. */
  649. pci_update_irq_status(s);
  650. return ret;
  651. }
  652. static void pci_set_default_subsystem_id(PCIDevice *pci_dev)
  653. {
  654. pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID,
  655. pci_default_sub_vendor_id);
  656. pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID,
  657. pci_default_sub_device_id);
  658. }
  659. /*
  660. * Parse [[<domain>:]<bus>:]<slot>, return -1 on error if funcp == NULL
  661. * [[<domain>:]<bus>:]<slot>.<func>, return -1 on error
  662. */
  663. static int pci_parse_devaddr(const char *addr, int *domp, int *busp,
  664. unsigned int *slotp, unsigned int *funcp)
  665. {
  666. const char *p;
  667. char *e;
  668. unsigned long val;
  669. unsigned long dom = 0, bus = 0;
  670. unsigned int slot = 0;
  671. unsigned int func = 0;
  672. p = addr;
  673. val = strtoul(p, &e, 16);
  674. if (e == p)
  675. return -1;
  676. if (*e == ':') {
  677. bus = val;
  678. p = e + 1;
  679. val = strtoul(p, &e, 16);
  680. if (e == p)
  681. return -1;
  682. if (*e == ':') {
  683. dom = bus;
  684. bus = val;
  685. p = e + 1;
  686. val = strtoul(p, &e, 16);
  687. if (e == p)
  688. return -1;
  689. }
  690. }
  691. slot = val;
  692. if (funcp != NULL) {
  693. if (*e != '.')
  694. return -1;
  695. p = e + 1;
  696. val = strtoul(p, &e, 16);
  697. if (e == p)
  698. return -1;
  699. func = val;
  700. }
  701. /* if funcp == NULL func is 0 */
  702. if (dom > 0xffff || bus > 0xff || slot > 0x1f || func > 7)
  703. return -1;
  704. if (*e)
  705. return -1;
  706. *domp = dom;
  707. *busp = bus;
  708. *slotp = slot;
  709. if (funcp != NULL)
  710. *funcp = func;
  711. return 0;
  712. }
  713. static void pci_init_cmask(PCIDevice *dev)
  714. {
  715. pci_set_word(dev->cmask + PCI_VENDOR_ID, 0xffff);
  716. pci_set_word(dev->cmask + PCI_DEVICE_ID, 0xffff);
  717. dev->cmask[PCI_STATUS] = PCI_STATUS_CAP_LIST;
  718. dev->cmask[PCI_REVISION_ID] = 0xff;
  719. dev->cmask[PCI_CLASS_PROG] = 0xff;
  720. pci_set_word(dev->cmask + PCI_CLASS_DEVICE, 0xffff);
  721. dev->cmask[PCI_HEADER_TYPE] = 0xff;
  722. dev->cmask[PCI_CAPABILITY_LIST] = 0xff;
  723. }
  724. static void pci_init_wmask(PCIDevice *dev)
  725. {
  726. int config_size = pci_config_size(dev);
  727. dev->wmask[PCI_CACHE_LINE_SIZE] = 0xff;
  728. dev->wmask[PCI_INTERRUPT_LINE] = 0xff;
  729. pci_set_word(dev->wmask + PCI_COMMAND,
  730. PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
  731. PCI_COMMAND_INTX_DISABLE);
  732. pci_word_test_and_set_mask(dev->wmask + PCI_COMMAND, PCI_COMMAND_SERR);
  733. memset(dev->wmask + PCI_CONFIG_HEADER_SIZE, 0xff,
  734. config_size - PCI_CONFIG_HEADER_SIZE);
  735. }
  736. static void pci_init_w1cmask(PCIDevice *dev)
  737. {
  738. /*
  739. * Note: It's okay to set w1cmask even for readonly bits as
  740. * long as their value is hardwired to 0.
  741. */
  742. pci_set_word(dev->w1cmask + PCI_STATUS,
  743. PCI_STATUS_PARITY | PCI_STATUS_SIG_TARGET_ABORT |
  744. PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_REC_MASTER_ABORT |
  745. PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_DETECTED_PARITY);
  746. }
/* Set up the writable/w1c/checked masks that are specific to a
 * type 1 (PCI-to-PCI bridge) configuration header. */
static void pci_init_mask_bridge(PCIDevice *d)
{
    /* PCI_PRIMARY_BUS, PCI_SECONDARY_BUS, PCI_SUBORDINATE_BUS and
       PCI_SEC_LATENCY_TIMER */
    memset(d->wmask + PCI_PRIMARY_BUS, 0xff, 4);

    /* base and limit */
    d->wmask[PCI_IO_BASE] = PCI_IO_RANGE_MASK & 0xff;
    d->wmask[PCI_IO_LIMIT] = PCI_IO_RANGE_MASK & 0xff;
    pci_set_word(d->wmask + PCI_MEMORY_BASE,
                 PCI_MEMORY_RANGE_MASK & 0xffff);
    pci_set_word(d->wmask + PCI_MEMORY_LIMIT,
                 PCI_MEMORY_RANGE_MASK & 0xffff);
    pci_set_word(d->wmask + PCI_PREF_MEMORY_BASE,
                 PCI_PREF_RANGE_MASK & 0xffff);
    pci_set_word(d->wmask + PCI_PREF_MEMORY_LIMIT,
                 PCI_PREF_RANGE_MASK & 0xffff);

    /* PCI_PREF_BASE_UPPER32 and PCI_PREF_LIMIT_UPPER32 */
    memset(d->wmask + PCI_PREF_BASE_UPPER32, 0xff, 8);

    /* Supported memory and i/o types */
    d->config[PCI_IO_BASE] |= PCI_IO_RANGE_TYPE_16;
    d->config[PCI_IO_LIMIT] |= PCI_IO_RANGE_TYPE_16;
    pci_word_test_and_set_mask(d->config + PCI_PREF_MEMORY_BASE,
                               PCI_PREF_RANGE_TYPE_64);
    pci_word_test_and_set_mask(d->config + PCI_PREF_MEMORY_LIMIT,
                               PCI_PREF_RANGE_TYPE_64);

    /*
     * TODO: Bridges default to 10-bit VGA decoding but we currently only
     * implement 16-bit decoding (no alias support).
     */
    pci_set_word(d->wmask + PCI_BRIDGE_CONTROL,
                 PCI_BRIDGE_CTL_PARITY |
                 PCI_BRIDGE_CTL_SERR |
                 PCI_BRIDGE_CTL_ISA |
                 PCI_BRIDGE_CTL_VGA |
                 PCI_BRIDGE_CTL_VGA_16BIT |
                 PCI_BRIDGE_CTL_MASTER_ABORT |
                 PCI_BRIDGE_CTL_BUS_RESET |
                 PCI_BRIDGE_CTL_FAST_BACK |
                 PCI_BRIDGE_CTL_DISCARD |
                 PCI_BRIDGE_CTL_SEC_DISCARD |
                 PCI_BRIDGE_CTL_DISCARD_SERR);

    /* Below does not do anything as we never set this bit, put here for
     * completeness. */
    pci_set_word(d->w1cmask + PCI_BRIDGE_CONTROL,
                 PCI_BRIDGE_CTL_DISCARD_STATUS);

    /* The range-type bits are read-only and checked on migration. */
    d->cmask[PCI_IO_BASE] |= PCI_IO_RANGE_TYPE_MASK;
    d->cmask[PCI_IO_LIMIT] |= PCI_IO_RANGE_TYPE_MASK;
    pci_word_test_and_set_mask(d->cmask + PCI_PREF_MEMORY_BASE,
                               PCI_PREF_RANGE_TYPE_MASK);
    pci_word_test_and_set_mask(d->cmask + PCI_PREF_MEMORY_LIMIT,
                               PCI_PREF_RANGE_TYPE_MASK);
}
/* Set the multifunction header bit for @dev when requested and verify
 * that the slot's function-0 convention is respected; reports
 * violations via @errp. */
static void pci_init_multifunction(PCIBus *bus, PCIDevice *dev, Error **errp)
{
    uint8_t slot = PCI_SLOT(dev->devfn);
    uint8_t func;

    if (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
        dev->config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
    }

    /*
     * With SR/IOV and ARI, a device at function 0 need not be a multifunction
     * device, as it may just be a VF that ended up with function 0 in
     * the legacy PCI interpretation. Avoid failing in such cases:
     */
    if (pci_is_vf(dev) &&
        dev->exp.sriov_vf.pf->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
        return;
    }

    /*
     * multifunction bit is interpreted in two ways as follows.
     *   - all functions must set the bit to 1.
     *     Example: Intel X53
     *   - function 0 must set the bit, but the rest function (> 0)
     *     is allowed to leave the bit to 0.
     *     Example: PIIX3(also in qemu), PIIX4(also in qemu), ICH10,
     *
     * So OS (at least Linux) checks the bit of only function 0,
     * and doesn't see the bit of function > 0.
     *
     * The below check allows both interpretation.
     */
    if (PCI_FUNC(dev->devfn)) {
        PCIDevice *f0 = bus->devices[PCI_DEVFN(slot, 0)];
        if (f0 && !(f0->cap_present & QEMU_PCI_CAP_MULTIFUNCTION)) {
            /* function 0 should set multifunction bit */
            error_setg(errp, "PCI: single function device can't be populated "
                       "in function %x.%x", slot, PCI_FUNC(dev->devfn));
            return;
        }
        return;
    }

    /* From here on @dev is function 0 itself. */
    if (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
        return;
    }
    /* function 0 indicates single function, so function > 0 must be NULL */
    for (func = 1; func < PCI_FUNC_MAX; ++func) {
        if (bus->devices[PCI_DEVFN(slot, func)]) {
            error_setg(errp, "PCI: %x.0 indicates single function, "
                       "but %x.%x is already populated.",
                       slot, slot, func);
            return;
        }
    }
}
  851. static void pci_config_alloc(PCIDevice *pci_dev)
  852. {
  853. int config_size = pci_config_size(pci_dev);
  854. pci_dev->config = g_malloc0(config_size);
  855. pci_dev->cmask = g_malloc0(config_size);
  856. pci_dev->wmask = g_malloc0(config_size);
  857. pci_dev->w1cmask = g_malloc0(config_size);
  858. pci_dev->used = g_malloc0(config_size);
  859. }
  860. static void pci_config_free(PCIDevice *pci_dev)
  861. {
  862. g_free(pci_dev->config);
  863. g_free(pci_dev->cmask);
  864. g_free(pci_dev->wmask);
  865. g_free(pci_dev->w1cmask);
  866. g_free(pci_dev->used);
  867. }
/* Detach @pci_dev from its bus and release the per-device resources
 * set up by do_pci_register_device(). */
static void do_pci_unregister_device(PCIDevice *pci_dev)
{
    pci_get_bus(pci_dev)->devices[pci_dev->devfn] = NULL;
    pci_config_free(pci_dev);

    if (xen_mode == XEN_EMULATE) {
        xen_evtchn_remove_pci_device(pci_dev);
    }
    /* The enable region is only present once pci_init_bus_master ran;
     * remove it before destroying the address space built on top. */
    if (memory_region_is_mapped(&pci_dev->bus_master_enable_region)) {
        memory_region_del_subregion(&pci_dev->bus_master_container_region,
                                    &pci_dev->bus_master_enable_region);
    }
    address_space_destroy(&pci_dev->bus_master_as);
}
  881. /* Extract PCIReqIDCache into BDF format */
  882. static uint16_t pci_req_id_cache_extract(PCIReqIDCache *cache)
  883. {
  884. uint8_t bus_n;
  885. uint16_t result;
  886. switch (cache->type) {
  887. case PCI_REQ_ID_BDF:
  888. result = pci_get_bdf(cache->dev);
  889. break;
  890. case PCI_REQ_ID_SECONDARY_BUS:
  891. bus_n = pci_dev_bus_num(cache->dev);
  892. result = PCI_BUILD_BDF(bus_n, 0);
  893. break;
  894. default:
  895. error_report("Invalid PCI requester ID cache type: %d",
  896. cache->type);
  897. exit(1);
  898. break;
  899. }
  900. return result;
  901. }
/* Parse bridges up to the root complex and return requester ID
 * cache for specific device.  For full PCIe topology, the cache
 * result would be exactly the same as getting BDF of the device.
 * However, several tricks are required when system mixed up with
 * legacy PCI devices and PCIe-to-PCI bridges.
 *
 * Here we cache the proxy device (and type) not requester ID since
 * bus number might change from time to time.
 */
static PCIReqIDCache pci_req_id_cache_get(PCIDevice *dev)
{
    PCIDevice *parent;
    PCIReqIDCache cache = {
        .dev = dev,
        .type = PCI_REQ_ID_BDF,
    };

    /* Walk upwards one bridge per iteration until we hit a root bus. */
    while (!pci_bus_is_root(pci_get_bus(dev))) {
        /* We are under PCI/PCIe bridges */
        parent = pci_get_bus(dev)->parent_dev;
        if (pci_is_express(parent)) {
            if (pcie_cap_get_type(parent) == PCI_EXP_TYPE_PCI_BRIDGE) {
                /* When we pass through PCIe-to-PCI/PCIX bridges, we
                 * override the requester ID using secondary bus
                 * number of parent bridge with zeroed devfn
                 * (pcie-to-pci bridge spec chap 2.3).
                 * The child @dev is cached because its bus number is
                 * the bridge's secondary bus number. */
                cache.type = PCI_REQ_ID_SECONDARY_BUS;
                cache.dev = dev;
            }
        } else {
            /* Legacy PCI, override requester ID with the bridge's
             * BDF upstream.  When the root complex connects to
             * legacy PCI devices (including buses), it can only
             * obtain requester ID info from directly attached
             * devices.  If devices are attached under bridges, only
             * the requester ID of the bridge that is directly
             * attached to the root complex can be recognized. */
            cache.type = PCI_REQ_ID_BDF;
            cache.dev = parent;
        }
        dev = parent;
    }

    return cache;
}
  945. uint16_t pci_requester_id(PCIDevice *dev)
  946. {
  947. return pci_req_id_cache_extract(&dev->requester_id_cache);
  948. }
  949. static bool pci_bus_devfn_available(PCIBus *bus, int devfn)
  950. {
  951. return !(bus->devices[devfn]);
  952. }
  953. static bool pci_bus_devfn_reserved(PCIBus *bus, int devfn)
  954. {
  955. return bus->slot_reserved_mask & (1UL << PCI_SLOT(devfn));
  956. }
/* -1 for devfn means auto assign */
/* Attach @pci_dev to its bus under @name at @devfn (or the first free
 * slot when @devfn < 0), initialise its config space, masks and bus
 * master address space.  Returns @pci_dev on success, NULL (with @errp
 * set) on failure. */
static PCIDevice *do_pci_register_device(PCIDevice *pci_dev,
                                         const char *name, int devfn,
                                         Error **errp)
{
    PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);
    PCIConfigReadFunc *config_read = pc->config_read;
    PCIConfigWriteFunc *config_write = pc->config_write;
    Error *local_err = NULL;
    DeviceState *dev = DEVICE(pci_dev);
    PCIBus *bus = pci_get_bus(pci_dev);
    bool is_bridge = IS_PCI_BRIDGE(pci_dev);

    /* Only pci bridges can be attached to extra PCI root buses */
    if (pci_bus_is_root(bus) && bus->parent_dev && !is_bridge) {
        error_setg(errp,
                   "PCI: Only PCI/PCIe bridges can be plugged into %s",
                    bus->parent_dev->name);
        return NULL;
    }

    if (devfn < 0) {
        /* Auto-assign: scan function 0 of each slot for a free,
         * non-reserved devfn. */
        for(devfn = bus->devfn_min ; devfn < ARRAY_SIZE(bus->devices);
            devfn += PCI_FUNC_MAX) {
            if (pci_bus_devfn_available(bus, devfn) &&
                   !pci_bus_devfn_reserved(bus, devfn)) {
                goto found;
            }
        }
        error_setg(errp, "PCI: no slot/function available for %s, all in use "
                   "or reserved", name);
        return NULL;
    found: ;
    } else if (pci_bus_devfn_reserved(bus, devfn)) {
        error_setg(errp, "PCI: slot %d function %d not available for %s,"
                   " reserved",
                   PCI_SLOT(devfn), PCI_FUNC(devfn), name);
        return NULL;
    } else if (!pci_bus_devfn_available(bus, devfn)) {
        error_setg(errp, "PCI: slot %d function %d not available for %s,"
                   " in use by %s,id=%s",
                   PCI_SLOT(devfn), PCI_FUNC(devfn), name,
                   bus->devices[devfn]->name, bus->devices[devfn]->qdev.id);
        return NULL;
    } else if (dev->hotplugged &&
               !pci_is_vf(pci_dev) &&
               pci_get_function_0(pci_dev)) {
        /* Hotplugging a function > 0 after function 0 exists would never
         * be visible to the guest, which only scans on function 0. */
        error_setg(errp, "PCI: slot %d function 0 already occupied by %s,"
                   " new func %s cannot be exposed to guest.",
                   PCI_SLOT(pci_get_function_0(pci_dev)->devfn),
                   pci_get_function_0(pci_dev)->name,
                   name);
       return NULL;
    }

    pci_dev->devfn = devfn;
    pci_dev->requester_id_cache = pci_req_id_cache_get(pci_dev);
    pstrcpy(pci_dev->name, sizeof(pci_dev->name), name);

    memory_region_init(&pci_dev->bus_master_container_region, OBJECT(pci_dev),
                       "bus master container", UINT64_MAX);
    address_space_init(&pci_dev->bus_master_as,
                       &pci_dev->bus_master_container_region, pci_dev->name);

    /* Before machine-ready, bus mastering is wired up later in one go;
     * for hotplug it must happen here. */
    if (phase_check(PHASE_MACHINE_READY)) {
        pci_init_bus_master(pci_dev);
    }
    pci_dev->irq_state = 0;
    pci_config_alloc(pci_dev);

    pci_config_set_vendor_id(pci_dev->config, pc->vendor_id);
    pci_config_set_device_id(pci_dev->config, pc->device_id);
    pci_config_set_revision(pci_dev->config, pc->revision);
    pci_config_set_class(pci_dev->config, pc->class_id);

    if (!is_bridge) {
        if (pc->subsystem_vendor_id || pc->subsystem_id) {
            pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID,
                         pc->subsystem_vendor_id);
            pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID,
                         pc->subsystem_id);
        } else {
            pci_set_default_subsystem_id(pci_dev);
        }
    } else {
        /* subsystem_vendor_id/subsystem_id are only for header type 0 */
        assert(!pc->subsystem_vendor_id);
        assert(!pc->subsystem_id);
    }
    pci_init_cmask(pci_dev);
    pci_init_wmask(pci_dev);
    pci_init_w1cmask(pci_dev);
    if (is_bridge) {
        pci_init_mask_bridge(pci_dev);
    }
    pci_init_multifunction(bus, pci_dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        do_pci_unregister_device(pci_dev);
        return NULL;
    }

    if (!config_read)
        config_read = pci_default_read_config;
    if (!config_write)
        config_write = pci_default_write_config;
    pci_dev->config_read = config_read;
    pci_dev->config_write = config_write;
    bus->devices[devfn] = pci_dev;
    pci_dev->version_id = 2; /* Current pci device vmstate version */
    return pci_dev;
}
  1061. static void pci_unregister_io_regions(PCIDevice *pci_dev)
  1062. {
  1063. PCIIORegion *r;
  1064. int i;
  1065. for(i = 0; i < PCI_NUM_REGIONS; i++) {
  1066. r = &pci_dev->io_regions[i];
  1067. if (!r->size || r->addr == PCI_BAR_UNMAPPED)
  1068. continue;
  1069. memory_region_del_subregion(r->address_space, r->memory);
  1070. }
  1071. pci_unregister_vga(pci_dev);
  1072. }
/* qdev unrealize hook: undo everything realize set up, in reverse. */
static void pci_qdev_unrealize(DeviceState *dev)
{
    PCIDevice *pci_dev = PCI_DEVICE(dev);
    PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);

    pci_unregister_io_regions(pci_dev);
    pci_del_option_rom(pci_dev);

    /* Give the device model a chance to release its own resources. */
    if (pc->exit) {
        pc->exit(pci_dev);
    }

    pci_device_deassert_intx(pci_dev);
    do_pci_unregister_device(pci_dev);

    pci_dev->msi_trigger = NULL;

    /*
     * clean up acpi-index so it could reused by another device
     */
    if (pci_dev->acpi_index) {
        GSequence *used_indexes = pci_acpi_index_list();

        g_sequence_remove(g_sequence_lookup(used_indexes,
                          GINT_TO_POINTER(pci_dev->acpi_index),
                          g_cmp_uint32, NULL));
    }
}
/* Register @memory as BAR @region_num of @pci_dev with the given BAR
 * @type flags.  The BAR starts out unmapped; it is mapped once the
 * guest programs an address and enables decoding (pci_update_mappings). */
void pci_register_bar(PCIDevice *pci_dev, int region_num,
                      uint8_t type, MemoryRegion *memory)
{
    PCIIORegion *r;
    uint32_t addr; /* offset in pci config space */
    uint64_t wmask;
    pcibus_t size = memory_region_size(memory);
    uint8_t hdr_type;

    assert(!pci_is_vf(pci_dev)); /* VFs must use pcie_sriov_vf_register_bar */
    assert(region_num >= 0);
    assert(region_num < PCI_NUM_REGIONS);
    assert(is_power_of_2(size)); /* required by BAR sizing semantics */

    /* A PCI bridge device (with Type 1 header) may only have at most 2 BARs */
    hdr_type =
        pci_dev->config[PCI_HEADER_TYPE] & ~PCI_HEADER_TYPE_MULTI_FUNCTION;
    assert(hdr_type != PCI_HEADER_TYPE_BRIDGE || region_num < 2);

    r = &pci_dev->io_regions[region_num];
    r->addr = PCI_BAR_UNMAPPED;
    r->size = size;
    r->type = type;
    r->memory = memory;
    r->address_space = type & PCI_BASE_ADDRESS_SPACE_IO
                        ? pci_get_bus(pci_dev)->address_space_io
                        : pci_get_bus(pci_dev)->address_space_mem;

    /* Address bits below the BAR size are hardwired to zero. */
    wmask = ~(size - 1);
    if (region_num == PCI_ROM_SLOT) {
        /* ROM enable bit is writable */
        wmask |= PCI_ROM_ADDRESS_ENABLE;
    }

    addr = pci_bar(pci_dev, region_num);
    pci_set_long(pci_dev->config + addr, type);

    /* 64-bit memory BARs span two config dwords; set masks accordingly. */
    if (!(r->type & PCI_BASE_ADDRESS_SPACE_IO) &&
        r->type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
        pci_set_quad(pci_dev->wmask + addr, wmask);
        pci_set_quad(pci_dev->cmask + addr, ~0ULL);
    } else {
        pci_set_long(pci_dev->wmask + addr, wmask & 0xffffffff);
        pci_set_long(pci_dev->cmask + addr, 0xffffffff);
    }
}
  1135. static void pci_update_vga(PCIDevice *pci_dev)
  1136. {
  1137. uint16_t cmd;
  1138. if (!pci_dev->has_vga) {
  1139. return;
  1140. }
  1141. cmd = pci_get_word(pci_dev->config + PCI_COMMAND);
  1142. memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_MEM],
  1143. cmd & PCI_COMMAND_MEMORY);
  1144. memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO],
  1145. cmd & PCI_COMMAND_IO);
  1146. memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI],
  1147. cmd & PCI_COMMAND_IO);
  1148. }
/* Register the legacy VGA ranges (one memory window and two I/O
 * windows) for @pci_dev.  They are enabled/disabled as a group by
 * pci_update_vga() following the device's COMMAND register. */
void pci_register_vga(PCIDevice *pci_dev, MemoryRegion *mem,
                      MemoryRegion *io_lo, MemoryRegion *io_hi)
{
    PCIBus *bus = pci_get_bus(pci_dev);

    /* Only one set of VGA ranges per device. */
    assert(!pci_dev->has_vga);

    assert(memory_region_size(mem) == QEMU_PCI_VGA_MEM_SIZE);
    pci_dev->vga_regions[QEMU_PCI_VGA_MEM] = mem;
    memory_region_add_subregion_overlap(bus->address_space_mem,
                                        QEMU_PCI_VGA_MEM_BASE, mem, 1);

    assert(memory_region_size(io_lo) == QEMU_PCI_VGA_IO_LO_SIZE);
    pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO] = io_lo;
    memory_region_add_subregion_overlap(bus->address_space_io,
                                        QEMU_PCI_VGA_IO_LO_BASE, io_lo, 1);

    assert(memory_region_size(io_hi) == QEMU_PCI_VGA_IO_HI_SIZE);
    pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI] = io_hi;
    memory_region_add_subregion_overlap(bus->address_space_io,
                                        QEMU_PCI_VGA_IO_HI_BASE, io_hi, 1);
    pci_dev->has_vga = true;

    /* Apply the current COMMAND register state to the new regions. */
    pci_update_vga(pci_dev);
}
  1169. void pci_unregister_vga(PCIDevice *pci_dev)
  1170. {
  1171. PCIBus *bus = pci_get_bus(pci_dev);
  1172. if (!pci_dev->has_vga) {
  1173. return;
  1174. }
  1175. memory_region_del_subregion(bus->address_space_mem,
  1176. pci_dev->vga_regions[QEMU_PCI_VGA_MEM]);
  1177. memory_region_del_subregion(bus->address_space_io,
  1178. pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO]);
  1179. memory_region_del_subregion(bus->address_space_io,
  1180. pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI]);
  1181. pci_dev->has_vga = false;
  1182. }
  1183. pcibus_t pci_get_bar_addr(PCIDevice *pci_dev, int region_num)
  1184. {
  1185. return pci_dev->io_regions[region_num].addr;
  1186. }
/* Read the guest-programmed base address of BAR @reg from config
 * space.  For a VF the base comes from the PF's SR-IOV BAR plus
 * vf_num * @size. */
static pcibus_t pci_config_get_bar_addr(PCIDevice *d, int reg,
                                        uint8_t type, pcibus_t size)
{
    pcibus_t new_addr;
    if (!pci_is_vf(d)) {
        int bar = pci_bar(d, reg);
        if (type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
            new_addr = pci_get_quad(d->config + bar);
        } else {
            new_addr = pci_get_long(d->config + bar);
        }
    } else {
        PCIDevice *pf = d->exp.sriov_vf.pf;
        uint16_t sriov_cap = pf->exp.sriov_cap;
        int bar = sriov_cap + PCI_SRIOV_BAR + reg * 4;
        uint16_t vf_offset =
            pci_get_word(pf->config + sriov_cap + PCI_SRIOV_VF_OFFSET);
        uint16_t vf_stride =
            pci_get_word(pf->config + sriov_cap + PCI_SRIOV_VF_STRIDE);
        /* NOTE(review): assumes vf_stride != 0; a zero stride would
         * divide by zero here — confirm callers/spec guarantee this. */
        uint32_t vf_num = (d->devfn - (pf->devfn + vf_offset)) / vf_stride;

        if (type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
            new_addr = pci_get_quad(pf->config + bar);
        } else {
            new_addr = pci_get_long(pf->config + bar);
        }
        new_addr += vf_num * size;
    }
    /* The ROM slot has a specific enable bit, keep it intact */
    if (reg != PCI_ROM_SLOT) {
        new_addr &= ~(size - 1);
    }
    return new_addr;
}
/* Compute the guest address that BAR @reg currently decodes at, or
 * PCI_BAR_UNMAPPED when decoding is disabled in COMMAND or the
 * programmed value is unusable. */
pcibus_t pci_bar_address(PCIDevice *d,
                         int reg, uint8_t type, pcibus_t size)
{
    pcibus_t new_addr, last_addr;
    uint16_t cmd = pci_get_word(d->config + PCI_COMMAND);
    Object *machine = qdev_get_machine();
    ObjectClass *oc = object_get_class(machine);
    MachineClass *mc = MACHINE_CLASS(oc);
    /* Some machines permit BARs mapped at address 0. */
    bool allow_0_address = mc->pci_allow_0_address;

    if (type & PCI_BASE_ADDRESS_SPACE_IO) {
        if (!(cmd & PCI_COMMAND_IO)) {
            return PCI_BAR_UNMAPPED;
        }
        new_addr = pci_config_get_bar_addr(d, reg, type, size);
        last_addr = new_addr + size - 1;
        /* Check if 32 bit BAR wraps around explicitly.
         * TODO: make priorities correct and remove this work around.
         */
        if (last_addr <= new_addr || last_addr >= UINT32_MAX ||
            (!allow_0_address && new_addr == 0)) {
            return PCI_BAR_UNMAPPED;
        }
        return new_addr;
    }

    if (!(cmd & PCI_COMMAND_MEMORY)) {
        return PCI_BAR_UNMAPPED;
    }
    new_addr = pci_config_get_bar_addr(d, reg, type, size);
    /* the ROM slot has a specific enable bit */
    if (reg == PCI_ROM_SLOT && !(new_addr & PCI_ROM_ADDRESS_ENABLE)) {
        return PCI_BAR_UNMAPPED;
    }
    new_addr &= ~(size - 1);
    last_addr = new_addr + size - 1;
    /* NOTE: we do not support wrapping */
    /* XXX: as we cannot support really dynamic
       mappings, we handle specific values as invalid
       mappings. */
    if (last_addr <= new_addr || last_addr == PCI_BAR_UNMAPPED ||
        (!allow_0_address && new_addr == 0)) {
        return PCI_BAR_UNMAPPED;
    }

    /* Now pcibus_t is 64bit.
     * Check if 32 bit BAR wraps around explicitly.
     * Without this, PC ide doesn't work well.
     * TODO: remove this work around.
     */
    if (!(type & PCI_BASE_ADDRESS_MEM_TYPE_64) && last_addr >= UINT32_MAX) {
        return PCI_BAR_UNMAPPED;
    }

    /*
     * OS is allowed to set BAR beyond its addressable
     * bits. For example, 32 bit OS can set 64bit bar
     * to >4G. Check it. TODO: we might need to support
     * it in the future for e.g. PAE.
     */
    if (last_addr >= HWADDR_MAX) {
        return PCI_BAR_UNMAPPED;
    }

    return new_addr;
}
/* Re-evaluate every BAR of @d and (un)map its memory region wherever
 * the decoded address changed since the last update. */
static void pci_update_mappings(PCIDevice *d)
{
    PCIIORegion *r;
    int i;
    pcibus_t new_addr;

    for(i = 0; i < PCI_NUM_REGIONS; i++) {
        r = &d->io_regions[i];

        /* this region isn't registered */
        if (!r->size)
            continue;

        new_addr = pci_bar_address(d, i, r->type, r->size);
        if (!d->has_power) {
            /* A device without power decodes nothing. */
            new_addr = PCI_BAR_UNMAPPED;
        }

        /* This bar isn't changed */
        if (new_addr == r->addr)
            continue;

        /* now do the real mapping */
        if (r->addr != PCI_BAR_UNMAPPED) {
            trace_pci_update_mappings_del(d->name, pci_dev_bus_num(d),
                                          PCI_SLOT(d->devfn),
                                          PCI_FUNC(d->devfn),
                                          i, r->addr, r->size);
            memory_region_del_subregion(r->address_space, r->memory);
        }
        r->addr = new_addr;
        if (r->addr != PCI_BAR_UNMAPPED) {
            trace_pci_update_mappings_add(d->name, pci_dev_bus_num(d),
                                          PCI_SLOT(d->devfn),
                                          PCI_FUNC(d->devfn),
                                          i, r->addr, r->size);
            memory_region_add_subregion_overlap(r->address_space,
                                                r->addr, r->memory, 1);
        }
    }

    pci_update_vga(d);
}
  1318. static inline int pci_irq_disabled(PCIDevice *d)
  1319. {
  1320. return pci_get_word(d->config + PCI_COMMAND) & PCI_COMMAND_INTX_DISABLE;
  1321. }
  1322. /* Called after interrupt disabled field update in config space,
  1323. * assert/deassert interrupts if necessary.
  1324. * Gets original interrupt disable bit value (before update). */
  1325. static void pci_update_irq_disabled(PCIDevice *d, int was_irq_disabled)
  1326. {
  1327. int i, disabled = pci_irq_disabled(d);
  1328. if (disabled == was_irq_disabled)
  1329. return;
  1330. for (i = 0; i < PCI_NUM_PINS; ++i) {
  1331. int state = pci_irq_state(d, i);
  1332. pci_change_irq_level(d, i, disabled ? -state : state);
  1333. }
  1334. }
/* Default config space read: return @len bytes (1/2/4) at @address as
 * a host-endian value.  Express downstream ports refresh their link
 * status register first so the guest sees current state. */
uint32_t pci_default_read_config(PCIDevice *d,
                                 uint32_t address, int len)
{
    uint32_t val = 0;

    assert(address + len <= pci_config_size(d));

    if (pci_is_express_downstream_port(d) &&
        ranges_overlap(address, len, d->exp.exp_cap + PCI_EXP_LNKSTA, 2)) {
        pcie_sync_bridge_lnk(d);
    }
    /* Config space is stored little-endian; convert on the way out. */
    memcpy(&val, d->config + address, len);
    return le32_to_cpu(val);
}
/* Default config space write: apply @val_in byte-by-byte through the
 * per-byte write masks, then propagate side effects (BAR remapping,
 * INTx disable, bus mastering, MSI/MSI-X/SR-IOV). */
void pci_default_write_config(PCIDevice *d, uint32_t addr, uint32_t val_in, int l)
{
    int i, was_irq_disabled = pci_irq_disabled(d);
    uint32_t val = val_in;

    assert(addr + l <= pci_config_size(d));

    for (i = 0; i < l; val >>= 8, ++i) {
        uint8_t wmask = d->wmask[addr + i];
        uint8_t w1cmask = d->w1cmask[addr + i];
        /* A bit cannot be both plain-writable and write-1-to-clear. */
        assert(!(wmask & w1cmask));
        d->config[addr + i] = (d->config[addr + i] & ~wmask) | (val & wmask);
        d->config[addr + i] &= ~(val & w1cmask); /* W1C: Write 1 to Clear */
    }
    /* Remap BARs when any BAR register, ROM BAR or COMMAND was touched. */
    if (ranges_overlap(addr, l, PCI_BASE_ADDRESS_0, 24) ||
        ranges_overlap(addr, l, PCI_ROM_ADDRESS, 4) ||
        ranges_overlap(addr, l, PCI_ROM_ADDRESS1, 4) ||
        range_covers_byte(addr, l, PCI_COMMAND))
        pci_update_mappings(d);

    if (range_covers_byte(addr, l, PCI_COMMAND)) {
        pci_update_irq_disabled(d, was_irq_disabled);
        memory_region_set_enabled(&d->bus_master_enable_region,
                                  (pci_get_word(d->config + PCI_COMMAND)
                                   & PCI_COMMAND_MASTER) && d->has_power);
    }

    /* Capability-specific handlers get the unmasked value. */
    msi_write_config(d, addr, val_in, l);
    msix_write_config(d, addr, val_in, l);
    pcie_sriov_config_write(d, addr, val_in, l);
}
/***********************************************************/
/* generic PCI irq support */

/* 0 <= irq_num <= 3. level must be 0 or 1 */
/* Track the pin level in irq_state and forward the +1/-1 delta to the
 * bus unless INTx is disabled in the COMMAND register. */
static void pci_irq_handler(void *opaque, int irq_num, int level)
{
    PCIDevice *pci_dev = opaque;
    int change;

    assert(0 <= irq_num && irq_num < PCI_NUM_PINS);
    assert(level == 0 || level == 1);
    /* change is +1 (assert), -1 (deassert) or 0 (no transition). */
    change = level - pci_irq_state(pci_dev, irq_num);
    if (!change)
        return;

    pci_set_irq_state(pci_dev, irq_num, level);
    pci_update_irq_status(pci_dev);
    /* State is recorded even while disabled so re-enabling can replay it. */
    if (pci_irq_disabled(pci_dev))
        return;
    pci_change_irq_level(pci_dev, irq_num, change);
}
  1392. qemu_irq pci_allocate_irq(PCIDevice *pci_dev)
  1393. {
  1394. int intx = pci_intx(pci_dev);
  1395. assert(0 <= intx && intx < PCI_NUM_PINS);
  1396. return qemu_allocate_irq(pci_irq_handler, pci_dev, intx);
  1397. }
  1398. void pci_set_irq(PCIDevice *pci_dev, int level)
  1399. {
  1400. int intx = pci_intx(pci_dev);
  1401. pci_irq_handler(pci_dev, intx, level);
  1402. }
  1403. /* Special hooks used by device assignment */
  1404. void pci_bus_set_route_irq_fn(PCIBus *bus, pci_route_irq_fn route_intx_to_irq)
  1405. {
  1406. assert(pci_bus_is_root(bus));
  1407. bus->route_intx_to_irq = route_intx_to_irq;
  1408. }
/* Walk @dev's INTx @pin up through every bridge, applying each bus's
 * map_irq swizzle per hop, then ask the root bus to translate the
 * final pin into a platform IRQ route. */
PCIINTxRoute pci_device_route_intx_to_irq(PCIDevice *dev, int pin)
{
    PCIBus *bus;

    do {
        int dev_irq = pin;
        bus = pci_get_bus(dev);
        pin = bus->map_irq(dev, pin);
        trace_pci_route_irq(dev_irq, DEVICE(dev)->canonical_path, pin,
                            pci_bus_is_root(bus) ? "root-complex"
                            : DEVICE(bus->parent_dev)->canonical_path);
        dev = bus->parent_dev;
    } while (dev); /* parent_dev is NULL once we reach the root bus */

    if (!bus->route_intx_to_irq) {
        error_report("PCI: Bug - unimplemented PCI INTx routing (%s)",
                     object_get_typename(OBJECT(bus->qbus.parent)));
        return (PCIINTxRoute) { PCI_INTX_DISABLED, -1 };
    }

    return bus->route_intx_to_irq(bus->irq_opaque, pin);
}
  1428. bool pci_intx_route_changed(PCIINTxRoute *old, PCIINTxRoute *new)
  1429. {
  1430. return old->mode != new->mode || old->irq != new->irq;
  1431. }
  1432. void pci_bus_fire_intx_routing_notifier(PCIBus *bus)
  1433. {
  1434. PCIDevice *dev;
  1435. PCIBus *sec;
  1436. int i;
  1437. for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
  1438. dev = bus->devices[i];
  1439. if (dev && dev->intx_routing_notifier) {
  1440. dev->intx_routing_notifier(dev);
  1441. }
  1442. }
  1443. QLIST_FOREACH(sec, &bus->child, sibling) {
  1444. pci_bus_fire_intx_routing_notifier(sec);
  1445. }
  1446. }
/*
 * Register @notifier to run whenever @dev's INTx-to-IRQ routing may have
 * changed; fired via pci_bus_fire_intx_routing_notifier().
 */
void pci_device_set_intx_routing_notifier(PCIDevice *dev,
                                          PCIINTxRoutingNotifier notifier)
{
    dev->intx_routing_notifier = notifier;
}
/*
 * PCI-to-PCI bridge specification
 * 9.1: Interrupt routing. Table 9-1
 *
 * the PCI Express Base Specification, Revision 2.1
 * 2.2.8.1: INTx interrupt signaling - Rules
 * the Implementation Note
 * Table 2-20
 */

/*
 * 0 <= pin <= 3 0 = INTA, 1 = INTB, 2 = INTC, 3 = INTD
 * 0-origin unlike PCI interrupt pin register.
 */

/* Standard bridge swizzle: map_irq callback routing by slot and pin. */
int pci_swizzle_map_irq_fn(PCIDevice *pci_dev, int pin)
{
    return pci_swizzle(PCI_SLOT(pci_dev->devfn), pin);
}
/***********************************************************/
/* monitor info on PCI */

/*
 * Class-code lookup table used by the monitor and by firmware device
 * path generation.  Per entry: class code, human-readable description,
 * optional firmware (OpenFirmware) device name, and the class-code bits
 * to ignore when matching (fw_ign_bits).  Terminated by { 0, NULL }.
 */
static const pci_class_desc pci_class_descriptions[] =
{
    { 0x0001, "VGA controller", "display"},
    { 0x0100, "SCSI controller", "scsi"},
    { 0x0101, "IDE controller", "ide"},
    { 0x0102, "Floppy controller", "fdc"},
    { 0x0103, "IPI controller", "ipi"},
    { 0x0104, "RAID controller", "raid"},
    { 0x0106, "SATA controller"},
    { 0x0107, "SAS controller"},
    { 0x0180, "Storage controller"},
    { 0x0200, "Ethernet controller", "ethernet"},
    { 0x0201, "Token Ring controller", "token-ring"},
    { 0x0202, "FDDI controller", "fddi"},
    { 0x0203, "ATM controller", "atm"},
    { 0x0280, "Network controller"},
    { 0x0300, "VGA controller", "display", 0x00ff},
    { 0x0301, "XGA controller"},
    { 0x0302, "3D controller"},
    { 0x0380, "Display controller"},
    { 0x0400, "Video controller", "video"},
    { 0x0401, "Audio controller", "sound"},
    { 0x0402, "Phone"},
    { 0x0403, "Audio controller", "sound"},
    { 0x0480, "Multimedia controller"},
    { 0x0500, "RAM controller", "memory"},
    { 0x0501, "Flash controller", "flash"},
    { 0x0580, "Memory controller"},
    { 0x0600, "Host bridge", "host"},
    { 0x0601, "ISA bridge", "isa"},
    { 0x0602, "EISA bridge", "eisa"},
    { 0x0603, "MC bridge", "mca"},
    { 0x0604, "PCI bridge", "pci-bridge"},
    { 0x0605, "PCMCIA bridge", "pcmcia"},
    { 0x0606, "NUBUS bridge", "nubus"},
    { 0x0607, "CARDBUS bridge", "cardbus"},
    { 0x0608, "RACEWAY bridge"},
    { 0x0680, "Bridge"},
    { 0x0700, "Serial port", "serial"},
    { 0x0701, "Parallel port", "parallel"},
    { 0x0800, "Interrupt controller", "interrupt-controller"},
    { 0x0801, "DMA controller", "dma-controller"},
    { 0x0802, "Timer", "timer"},
    { 0x0803, "RTC", "rtc"},
    { 0x0900, "Keyboard", "keyboard"},
    { 0x0901, "Pen", "pen"},
    { 0x0902, "Mouse", "mouse"},
    { 0x0A00, "Dock station", "dock", 0x00ff},
    { 0x0B00, "i386 cpu", "cpu", 0x00ff},
    { 0x0c00, "Firewire controller", "firewire"},
    { 0x0c01, "Access bus controller", "access-bus"},
    { 0x0c02, "SSA controller", "ssa"},
    { 0x0c03, "USB controller", "usb"},
    { 0x0c04, "Fibre channel controller", "fibre-channel"},
    { 0x0c05, "SMBus"},
    { 0, NULL}
};
  1528. void pci_for_each_device_under_bus_reverse(PCIBus *bus,
  1529. pci_bus_dev_fn fn,
  1530. void *opaque)
  1531. {
  1532. PCIDevice *d;
  1533. int devfn;
  1534. for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) {
  1535. d = bus->devices[ARRAY_SIZE(bus->devices) - 1 - devfn];
  1536. if (d) {
  1537. fn(bus, d, opaque);
  1538. }
  1539. }
  1540. }
  1541. void pci_for_each_device_reverse(PCIBus *bus, int bus_num,
  1542. pci_bus_dev_fn fn, void *opaque)
  1543. {
  1544. bus = pci_find_bus_nr(bus, bus_num);
  1545. if (bus) {
  1546. pci_for_each_device_under_bus_reverse(bus, fn, opaque);
  1547. }
  1548. }
  1549. void pci_for_each_device_under_bus(PCIBus *bus,
  1550. pci_bus_dev_fn fn, void *opaque)
  1551. {
  1552. PCIDevice *d;
  1553. int devfn;
  1554. for(devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) {
  1555. d = bus->devices[devfn];
  1556. if (d) {
  1557. fn(bus, d, opaque);
  1558. }
  1559. }
  1560. }
  1561. void pci_for_each_device(PCIBus *bus, int bus_num,
  1562. pci_bus_dev_fn fn, void *opaque)
  1563. {
  1564. bus = pci_find_bus_nr(bus, bus_num);
  1565. if (bus) {
  1566. pci_for_each_device_under_bus(bus, fn, opaque);
  1567. }
  1568. }
  1569. const pci_class_desc *get_class_desc(int class)
  1570. {
  1571. const pci_class_desc *desc;
  1572. desc = pci_class_descriptions;
  1573. while (desc->desc && class != desc->class) {
  1574. desc++;
  1575. }
  1576. return desc;
  1577. }
  1578. /* Initialize a PCI NIC. */
  1579. PCIDevice *pci_nic_init_nofail(NICInfo *nd, PCIBus *rootbus,
  1580. const char *default_model,
  1581. const char *default_devaddr)
  1582. {
  1583. const char *devaddr = nd->devaddr ? nd->devaddr : default_devaddr;
  1584. GPtrArray *pci_nic_models;
  1585. PCIBus *bus;
  1586. PCIDevice *pci_dev;
  1587. DeviceState *dev;
  1588. int devfn;
  1589. int i;
  1590. int dom, busnr;
  1591. unsigned slot;
  1592. if (nd->model && !strcmp(nd->model, "virtio")) {
  1593. g_free(nd->model);
  1594. nd->model = g_strdup("virtio-net-pci");
  1595. }
  1596. pci_nic_models = qemu_get_nic_models(TYPE_PCI_DEVICE);
  1597. if (qemu_show_nic_models(nd->model, (const char **)pci_nic_models->pdata)) {
  1598. exit(0);
  1599. }
  1600. i = qemu_find_nic_model(nd, (const char **)pci_nic_models->pdata,
  1601. default_model);
  1602. if (i < 0) {
  1603. exit(1);
  1604. }
  1605. if (!rootbus) {
  1606. error_report("No primary PCI bus");
  1607. exit(1);
  1608. }
  1609. assert(!rootbus->parent_dev);
  1610. if (!devaddr) {
  1611. devfn = -1;
  1612. busnr = 0;
  1613. } else {
  1614. if (pci_parse_devaddr(devaddr, &dom, &busnr, &slot, NULL) < 0) {
  1615. error_report("Invalid PCI device address %s for device %s",
  1616. devaddr, nd->model);
  1617. exit(1);
  1618. }
  1619. if (dom != 0) {
  1620. error_report("No support for non-zero PCI domains");
  1621. exit(1);
  1622. }
  1623. devfn = PCI_DEVFN(slot, 0);
  1624. }
  1625. bus = pci_find_bus_nr(rootbus, busnr);
  1626. if (!bus) {
  1627. error_report("Invalid PCI device address %s for device %s",
  1628. devaddr, nd->model);
  1629. exit(1);
  1630. }
  1631. pci_dev = pci_new(devfn, nd->model);
  1632. dev = &pci_dev->qdev;
  1633. qdev_set_nic_properties(dev, nd);
  1634. pci_realize_and_unref(pci_dev, bus, &error_fatal);
  1635. g_ptr_array_free(pci_nic_models, true);
  1636. return pci_dev;
  1637. }
  1638. PCIDevice *pci_vga_init(PCIBus *bus)
  1639. {
  1640. vga_interface_created = true;
  1641. switch (vga_interface_type) {
  1642. case VGA_CIRRUS:
  1643. return pci_create_simple(bus, -1, "cirrus-vga");
  1644. case VGA_QXL:
  1645. return pci_create_simple(bus, -1, "qxl-vga");
  1646. case VGA_STD:
  1647. return pci_create_simple(bus, -1, "VGA");
  1648. case VGA_VMWARE:
  1649. return pci_create_simple(bus, -1, "vmware-svga");
  1650. case VGA_VIRTIO:
  1651. return pci_create_simple(bus, -1, "virtio-vga");
  1652. case VGA_NONE:
  1653. default: /* Other non-PCI types. Checking for unsupported types is already
  1654. done in vl.c. */
  1655. return NULL;
  1656. }
  1657. }
  1658. /* Whether a given bus number is in range of the secondary
  1659. * bus of the given bridge device. */
  1660. static bool pci_secondary_bus_in_range(PCIDevice *dev, int bus_num)
  1661. {
  1662. return !(pci_get_word(dev->config + PCI_BRIDGE_CONTROL) &
  1663. PCI_BRIDGE_CTL_BUS_RESET) /* Don't walk the bus if it's reset. */ &&
  1664. dev->config[PCI_SECONDARY_BUS] <= bus_num &&
  1665. bus_num <= dev->config[PCI_SUBORDINATE_BUS];
  1666. }
  1667. /* Whether a given bus number is in a range of a root bus */
  1668. static bool pci_root_bus_in_range(PCIBus *bus, int bus_num)
  1669. {
  1670. int i;
  1671. for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
  1672. PCIDevice *dev = bus->devices[i];
  1673. if (dev && IS_PCI_BRIDGE(dev)) {
  1674. if (pci_secondary_bus_in_range(dev, bus_num)) {
  1675. return true;
  1676. }
  1677. }
  1678. }
  1679. return false;
  1680. }
/*
 * Resolve @bus_num to a PCIBus reachable from @bus, descending through
 * bridges (and PXB root buses) whose secondary/subordinate range covers
 * the number.  Returns NULL when no such bus exists or is reachable.
 */
PCIBus *pci_find_bus_nr(PCIBus *bus, int bus_num)
{
    PCIBus *sec;

    if (!bus) {
        return NULL;
    }

    if (pci_bus_num(bus) == bus_num) {
        return bus;
    }

    /* Consider all bus numbers in range for the host pci bridge. */
    if (!pci_bus_is_root(bus) &&
        !pci_secondary_bus_in_range(bus->parent_dev, bus_num)) {
        return NULL;
    }

    /* try child bus */
    for (; bus; bus = sec) {
        /*
         * NOTE: breaking out of QLIST_FOREACH leaves 'sec' pointing at
         * the child to descend into; running off the end leaves 'sec'
         * NULL, which terminates the outer loop.
         */
        QLIST_FOREACH(sec, &bus->child, sibling) {
            if (pci_bus_num(sec) == bus_num) {
                return sec;
            }
            /* PXB buses assumed to be children of bus 0 */
            if (pci_bus_is_root(sec)) {
                if (pci_root_bus_in_range(sec, bus_num)) {
                    break;
                }
            } else {
                if (pci_secondary_bus_in_range(sec->parent_dev, bus_num)) {
                    break;
                }
            }
        }
    }

    return NULL;
}
/*
 * Depth-first traversal of @bus and all child buses.  @begin runs before
 * a bus's children and its return value becomes the state handed to
 * them; @end runs afterwards.  Either callback may be NULL, in which
 * case the parent state is simply forwarded.
 */
void pci_for_each_bus_depth_first(PCIBus *bus, pci_bus_ret_fn begin,
                                  pci_bus_fn end, void *parent_state)
{
    PCIBus *sec;
    void *state;

    if (!bus) {
        return;
    }

    if (begin) {
        state = begin(bus, parent_state);
    } else {
        state = parent_state;
    }

    QLIST_FOREACH(sec, &bus->child, sibling) {
        pci_for_each_bus_depth_first(sec, begin, end, state);
    }

    if (end) {
        end(bus, state);
    }
}
  1735. PCIDevice *pci_find_device(PCIBus *bus, int bus_num, uint8_t devfn)
  1736. {
  1737. bus = pci_find_bus_nr(bus, bus_num);
  1738. if (!bus)
  1739. return NULL;
  1740. return bus->devices[devfn];
  1741. }
#define ONBOARD_INDEX_MAX (16 * 1024 - 1)

/*
 * Common realize handler for all PCI devices (installed as
 * DeviceClass::realize by pci_device_class_init).
 *
 * In order: validate user properties (acpi-index, romsize), derive
 * cap_present from the implemented interfaces, register the function on
 * its bus, run the device-specific realize hook, enforce the failover
 * primary constraints, load the option ROM, and power the device on.
 * Every failure path unwinds whatever was set up and sets *errp.
 */
static void pci_qdev_realize(DeviceState *qdev, Error **errp)
{
    PCIDevice *pci_dev = (PCIDevice *)qdev;
    PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);
    ObjectClass *klass = OBJECT_CLASS(pc);
    Error *local_err = NULL;
    bool is_default_rom;
    uint16_t class_id;

    /*
     * capped by systemd (see: udev-builtin-net_id.c)
     * as it's the only known user honor it to avoid users
     * misconfigure QEMU and then wonder why acpi-index doesn't work
     */
    if (pci_dev->acpi_index > ONBOARD_INDEX_MAX) {
        error_setg(errp, "acpi-index should be less or equal to %u",
                   ONBOARD_INDEX_MAX);
        return;
    }

    /*
     * make sure that acpi-index is unique across all present PCI devices
     */
    if (pci_dev->acpi_index) {
        GSequence *used_indexes = pci_acpi_index_list();

        if (g_sequence_lookup(used_indexes,
                              GINT_TO_POINTER(pci_dev->acpi_index),
                              g_cmp_uint32, NULL)) {
            error_setg(errp, "a PCI device with acpi-index = %" PRIu32
                       " already exist", pci_dev->acpi_index);
            return;
        }
        g_sequence_insert_sorted(used_indexes,
                                 GINT_TO_POINTER(pci_dev->acpi_index),
                                 g_cmp_uint32, NULL);
    }

    /* An explicitly requested ROM BAR size must be a power of two. */
    if (pci_dev->romsize != -1 && !is_power_of_2(pci_dev->romsize)) {
        error_setg(errp, "ROM size %u is not a power of two", pci_dev->romsize);
        return;
    }

    /* initialize cap_present for pci_is_express() and pci_config_size(),
     * Note that hybrid PCIs are not set automatically and need to manage
     * QEMU_PCI_CAP_EXPRESS manually */
    if (object_class_dynamic_cast(klass, INTERFACE_PCIE_DEVICE) &&
        !object_class_dynamic_cast(klass, INTERFACE_CONVENTIONAL_PCI_DEVICE)) {
        pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
    }

    if (object_class_dynamic_cast(klass, INTERFACE_CXL_DEVICE)) {
        pci_dev->cap_present |= QEMU_PCIE_CAP_CXL;
    }

    /* Claim the devfn on the bus; fails e.g. on slot conflicts. */
    pci_dev = do_pci_register_device(pci_dev,
                                     object_get_typename(OBJECT(qdev)),
                                     pci_dev->devfn, errp);
    if (pci_dev == NULL)
        return;

    if (pc->realize) {
        pc->realize(pci_dev, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            do_pci_unregister_device(pci_dev);
            return;
        }
    }

    /*
     * A failover primary must be a PCIe Ethernet function occupying a
     * whole slot; anything else is rejected and unrealized.
     */
    if (pci_dev->failover_pair_id) {
        if (!pci_bus_is_express(pci_get_bus(pci_dev))) {
            error_setg(errp, "failover primary device must be on "
                             "PCIExpress bus");
            pci_qdev_unrealize(DEVICE(pci_dev));
            return;
        }
        class_id = pci_get_word(pci_dev->config + PCI_CLASS_DEVICE);
        if (class_id != PCI_CLASS_NETWORK_ETHERNET) {
            error_setg(errp, "failover primary device is not an "
                             "Ethernet device");
            pci_qdev_unrealize(DEVICE(pci_dev));
            return;
        }
        if ((pci_dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION)
            || (PCI_FUNC(pci_dev->devfn) != 0)) {
            error_setg(errp, "failover: primary device must be in its own "
                             "PCI slot");
            pci_qdev_unrealize(DEVICE(pci_dev));
            return;
        }
        qdev->allow_unplug_during_migration = true;
    }

    /* rom loading */
    is_default_rom = false;
    if (pci_dev->romfile == NULL && pc->romfile != NULL) {
        /* Fall back to the class-provided default ROM image. */
        pci_dev->romfile = g_strdup(pc->romfile);
        is_default_rom = true;
    }

    pci_add_option_rom(pci_dev, is_default_rom, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        pci_qdev_unrealize(DEVICE(pci_dev));
        return;
    }

    pci_set_power(pci_dev, true);

    pci_dev->msi_trigger = pci_msi_trigger;
}
  1842. PCIDevice *pci_new_multifunction(int devfn, bool multifunction,
  1843. const char *name)
  1844. {
  1845. DeviceState *dev;
  1846. dev = qdev_new(name);
  1847. qdev_prop_set_int32(dev, "addr", devfn);
  1848. qdev_prop_set_bit(dev, "multifunction", multifunction);
  1849. return PCI_DEVICE(dev);
  1850. }
/* Convenience wrapper: create a single-function PCI device at @devfn. */
PCIDevice *pci_new(int devfn, const char *name)
{
    return pci_new_multifunction(devfn, false, name);
}
/*
 * Realize @dev on @bus and drop the caller's creation reference;
 * returns false and sets *@errp on failure.
 */
bool pci_realize_and_unref(PCIDevice *dev, PCIBus *bus, Error **errp)
{
    return qdev_realize_and_unref(&dev->qdev, &bus->qbus, errp);
}
  1859. PCIDevice *pci_create_simple_multifunction(PCIBus *bus, int devfn,
  1860. bool multifunction,
  1861. const char *name)
  1862. {
  1863. PCIDevice *dev = pci_new_multifunction(devfn, multifunction, name);
  1864. pci_realize_and_unref(dev, bus, &error_fatal);
  1865. return dev;
  1866. }
/* Create and realize a single-function PCI device; failure is fatal. */
PCIDevice *pci_create_simple(PCIBus *bus, int devfn, const char *name)
{
    return pci_create_simple_multifunction(bus, devfn, false, name);
}
  1871. static uint8_t pci_find_space(PCIDevice *pdev, uint8_t size)
  1872. {
  1873. int offset = PCI_CONFIG_HEADER_SIZE;
  1874. int i;
  1875. for (i = PCI_CONFIG_HEADER_SIZE; i < PCI_CONFIG_SPACE_SIZE; ++i) {
  1876. if (pdev->used[i])
  1877. offset = i + 1;
  1878. else if (i - offset + 1 == size)
  1879. return offset;
  1880. }
  1881. return 0;
  1882. }
/*
 * Walk the standard capability list for @cap_id.  Returns the capability
 * offset, or 0 when absent.  If @prev_p is non-NULL it receives the
 * offset of the "next" pointer that links to the result — needed by
 * pci_del_capability() to unlink the entry.
 */
static uint8_t pci_find_capability_list(PCIDevice *pdev, uint8_t cap_id,
                                        uint8_t *prev_p)
{
    uint8_t next, prev;

    /* No capability list unless the status bit advertises one. */
    if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST))
        return 0;

    for (prev = PCI_CAPABILITY_LIST; (next = pdev->config[prev]);
         prev = next + PCI_CAP_LIST_NEXT)
        if (pdev->config[next + PCI_CAP_LIST_ID] == cap_id)
            break;

    if (prev_p)
        *prev_p = prev;
    return next;   /* 0 when the list ended without a match */
}
/*
 * Return the start offset of the capability whose allocation covers
 * config-space byte @offset, or 0 if that byte is unused.  Found by
 * taking the highest-starting list entry at or below @offset.
 */
static uint8_t pci_find_capability_at_offset(PCIDevice *pdev, uint8_t offset)
{
    uint8_t next, prev, found = 0;

    if (!(pdev->used[offset])) {
        return 0;
    }

    /* A used byte implies the capability list must be active. */
    assert(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST);

    for (prev = PCI_CAPABILITY_LIST; (next = pdev->config[prev]);
         prev = next + PCI_CAP_LIST_NEXT) {
        if (next <= offset && next > found) {
            found = next;
        }
    }
    return found;
}
/* Patch the PCI vendor and device ids in a PCI rom image if necessary.
   This is needed for an option rom which is used for more than one device. */
static void pci_patch_ids(PCIDevice *pdev, uint8_t *ptr, uint32_t size)
{
    uint16_t vendor_id;
    uint16_t device_id;
    uint16_t rom_vendor_id;
    uint16_t rom_device_id;
    uint16_t rom_magic;
    uint16_t pcir_offset;
    uint8_t checksum;

    /* Words in rom data are little endian (like in PCI configuration),
       so they can be read / written with pci_get_word / pci_set_word. */

    /* Only a valid rom will be patched. */
    rom_magic = pci_get_word(ptr);
    if (rom_magic != 0xaa55) {
        PCI_DPRINTF("Bad ROM magic %04x\n", rom_magic);
        return;
    }
    /* Offset 0x18 holds the pointer to the PCI Data Structure ("PCIR"). */
    pcir_offset = pci_get_word(ptr + 0x18);
    if (pcir_offset + 8 >= size || memcmp(ptr + pcir_offset, "PCIR", 4)) {
        PCI_DPRINTF("Bad PCIR offset 0x%x or signature\n", pcir_offset);
        return;
    }

    vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID);
    device_id = pci_get_word(pdev->config + PCI_DEVICE_ID);
    rom_vendor_id = pci_get_word(ptr + pcir_offset + 4);
    rom_device_id = pci_get_word(ptr + pcir_offset + 6);

    PCI_DPRINTF("%s: ROM id %04x%04x / PCI id %04x%04x\n", pdev->romfile,
                vendor_id, device_id, rom_vendor_id, rom_device_id);

    /* The checksum byte must keep the whole image summing to zero, so
     * adjust it by the difference between old and new id bytes. */
    checksum = ptr[6];

    if (vendor_id != rom_vendor_id) {
        /* Patch vendor id and checksum (at offset 6 for etherboot roms). */
        checksum += (uint8_t)rom_vendor_id + (uint8_t)(rom_vendor_id >> 8);
        checksum -= (uint8_t)vendor_id + (uint8_t)(vendor_id >> 8);
        PCI_DPRINTF("ROM checksum %02x / %02x\n", ptr[6], checksum);
        ptr[6] = checksum;
        pci_set_word(ptr + pcir_offset + 4, vendor_id);
    }

    if (device_id != rom_device_id) {
        /* Patch device id and checksum (at offset 6 for etherboot roms). */
        checksum += (uint8_t)rom_device_id + (uint8_t)(rom_device_id >> 8);
        checksum -= (uint8_t)device_id + (uint8_t)(device_id >> 8);
        PCI_DPRINTF("ROM checksum %02x / %02x\n", ptr[6], checksum);
        ptr[6] = checksum;
        pci_set_word(ptr + pcir_offset + 6, device_id);
    }
}
/* Add an option rom for the device */

/*
 * Load @pdev->romfile into a ROM memory region and register it as the
 * expansion-ROM BAR.  With rom_bar disabled the image is handed to
 * fw_cfg instead (pre-0.12 compatibility).  When @is_default_rom the
 * image's vendor/device ids are patched to match the device.  Failures
 * set *@errp.
 */
static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom,
                               Error **errp)
{
    int64_t size;
    char *path;
    void *ptr;
    char name[32];
    const VMStateDescription *vmsd;

    /* No ROM requested (unset or explicitly empty). */
    if (!pdev->romfile)
        return;
    if (strlen(pdev->romfile) == 0)
        return;

    if (!pdev->rom_bar) {
        /*
         * Load rom via fw_cfg instead of creating a rom bar,
         * for 0.11 compatibility.
         */
        int class = pci_get_word(pdev->config + PCI_CLASS_DEVICE);

        /*
         * Hot-plugged devices can't use the option ROM
         * if the rom bar is disabled.
         */
        if (DEVICE(pdev)->hotplugged) {
            error_setg(errp, "Hot-plugged device without ROM bar"
                       " can't have an option ROM");
            return;
        }

        if (class == 0x0300) {
            rom_add_vga(pdev->romfile);
        } else {
            rom_add_option(pdev->romfile, -1);
        }
        return;
    }

    /* Search the BIOS path first, fall back to the literal name. */
    path = qemu_find_file(QEMU_FILE_TYPE_BIOS, pdev->romfile);
    if (path == NULL) {
        path = g_strdup(pdev->romfile);
    }

    size = get_image_size(path);
    if (size < 0) {
        error_setg(errp, "failed to find romfile \"%s\"", pdev->romfile);
        g_free(path);
        return;
    } else if (size == 0) {
        error_setg(errp, "romfile \"%s\" is empty", pdev->romfile);
        g_free(path);
        return;
    } else if (size > 2 * GiB) {
        error_setg(errp, "romfile \"%s\" too large (size cannot exceed 2 GiB)",
                   pdev->romfile);
        g_free(path);
        return;
    }
    if (pdev->romsize != -1) {
        /* User-specified ROM BAR size must hold the image. */
        if (size > pdev->romsize) {
            error_setg(errp, "romfile \"%s\" (%u bytes) is too large for ROM size %u",
                       pdev->romfile, (uint32_t)size, pdev->romsize);
            g_free(path);
            return;
        }
    } else {
        /* BAR sizes must be powers of two. */
        pdev->romsize = pow2ceil(size);
    }

    /* Name the region after the migration section when there is one. */
    vmsd = qdev_get_vmsd(DEVICE(pdev));

    if (vmsd) {
        snprintf(name, sizeof(name), "%s.rom", vmsd->name);
    } else {
        snprintf(name, sizeof(name), "%s.rom", object_get_typename(OBJECT(pdev)));
    }
    pdev->has_rom = true;
    memory_region_init_rom(&pdev->rom, OBJECT(pdev), name, pdev->romsize, &error_fatal);
    ptr = memory_region_get_ram_ptr(&pdev->rom);
    if (load_image_size(path, ptr, size) < 0) {
        error_setg(errp, "failed to load romfile \"%s\"", pdev->romfile);
        g_free(path);
        return;
    }
    g_free(path);

    if (is_default_rom) {
        /* Only the default rom images will be patched (if needed). */
        pci_patch_ids(pdev, ptr, size);
    }

    pci_register_bar(pdev, PCI_ROM_SLOT, 0, &pdev->rom);
}
  2045. static void pci_del_option_rom(PCIDevice *pdev)
  2046. {
  2047. if (!pdev->has_rom)
  2048. return;
  2049. vmstate_unregister_ram(&pdev->rom, &pdev->qdev);
  2050. pdev->has_rom = false;
  2051. }
/*
 * On success, pci_add_capability() returns a positive value
 * that the offset of the pci capability.
 * On failure, it sets an error and returns a negative error
 * code.
 */

/*
 * Insert capability @cap_id of @size bytes at @offset (0 = auto-place)
 * and link it at the head of the standard capability list.  The new
 * range is marked used, read-only (wmask 0) and migration-checked
 * (cmask 0xFF) by default.
 */
int pci_add_capability(PCIDevice *pdev, uint8_t cap_id,
                       uint8_t offset, uint8_t size,
                       Error **errp)
{
    uint8_t *config;
    int i, overlapping_cap;

    if (!offset) {
        offset = pci_find_space(pdev, size);
        /* out of PCI config space is programming error */
        assert(offset);
    } else {
        /* Verify that capabilities don't overlap.  Note: device assignment
         * depends on this check to verify that the device is not broken.
         * Should never trigger for emulated devices, but it's helpful
         * for debugging these. */
        for (i = offset; i < offset + size; i++) {
            overlapping_cap = pci_find_capability_at_offset(pdev, i);
            if (overlapping_cap) {
                error_setg(errp, "%s:%02x:%02x.%x "
                           "Attempt to add PCI capability %x at offset "
                           "%x overlaps existing capability %x at offset %x",
                           pci_root_bus_path(pdev), pci_dev_bus_num(pdev),
                           PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
                           cap_id, offset, overlapping_cap, i);
                return -EINVAL;
            }
        }
    }

    /* Link the capability at the head of the list. */
    config = pdev->config + offset;
    config[PCI_CAP_LIST_ID] = cap_id;
    config[PCI_CAP_LIST_NEXT] = pdev->config[PCI_CAPABILITY_LIST];
    pdev->config[PCI_CAPABILITY_LIST] = offset;
    pdev->config[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
    memset(pdev->used + offset, 0xFF, QEMU_ALIGN_UP(size, 4));
    /* Make capability read-only by default */
    memset(pdev->wmask + offset, 0, size);
    /* Check capability by default */
    memset(pdev->cmask + offset, 0xFF, size);
    return offset;
}
/* Unlink capability from the pci config space. */
void pci_del_capability(PCIDevice *pdev, uint8_t cap_id, uint8_t size)
{
    uint8_t prev, offset = pci_find_capability_list(pdev, cap_id, &prev);

    /* Nothing to do when the capability isn't present. */
    if (!offset)
        return;
    /* Bypass the entry: point the previous "next" at our successor. */
    pdev->config[prev] = pdev->config[offset + PCI_CAP_LIST_NEXT];
    /* Make capability writable again */
    memset(pdev->wmask + offset, 0xff, size);
    memset(pdev->w1cmask + offset, 0, size);
    /* Clear cmask as device-specific registers can't be checked */
    memset(pdev->cmask + offset, 0, size);
    memset(pdev->used + offset, 0, QEMU_ALIGN_UP(size, 4));

    /* Drop the status bit once the list is empty. */
    if (!pdev->config[PCI_CAPABILITY_LIST])
        pdev->config[PCI_STATUS] &= ~PCI_STATUS_CAP_LIST;
}
/* Return the config-space offset of capability @cap_id, or 0 if absent. */
uint8_t pci_find_capability(PCIDevice *pdev, uint8_t cap_id)
{
    return pci_find_capability_list(pdev, cap_id, NULL);
}
  2118. static char *pci_dev_fw_name(DeviceState *dev, char *buf, int len)
  2119. {
  2120. PCIDevice *d = (PCIDevice *)dev;
  2121. const char *name = NULL;
  2122. const pci_class_desc *desc = pci_class_descriptions;
  2123. int class = pci_get_word(d->config + PCI_CLASS_DEVICE);
  2124. while (desc->desc &&
  2125. (class & ~desc->fw_ign_bits) !=
  2126. (desc->class & ~desc->fw_ign_bits)) {
  2127. desc++;
  2128. }
  2129. if (desc->desc) {
  2130. name = desc->fw_name;
  2131. }
  2132. if (name) {
  2133. pstrcpy(buf, len, name);
  2134. } else {
  2135. snprintf(buf, len, "pci%04x,%04x",
  2136. pci_get_word(d->config + PCI_VENDOR_ID),
  2137. pci_get_word(d->config + PCI_DEVICE_ID));
  2138. }
  2139. return buf;
  2140. }
/*
 * Build the firmware device-path component for @dev, e.g. "ethernet@3"
 * or "pci1234,5678@3,1".  The function number is appended only when
 * non-zero: "%.*x" takes @has_func as the precision, so with has_func 0
 * it prints no digits at all (and the "," is suppressed too).
 */
static char *pcibus_get_fw_dev_path(DeviceState *dev)
{
    PCIDevice *d = (PCIDevice *)dev;
    char name[33];
    int has_func = !!PCI_FUNC(d->devfn);

    return g_strdup_printf("%s@%x%s%.*x",
                           pci_dev_fw_name(dev, name, sizeof(name)),
                           PCI_SLOT(d->devfn),
                           has_func ? "," : "",
                           has_func,
                           PCI_FUNC(d->devfn));
}
static char *pcibus_get_dev_path(DeviceState *dev)
{
    PCIDevice *d = container_of(dev, PCIDevice, qdev);
    PCIDevice *t;
    int slot_depth;
    /* Path format: Domain:00:Slot.Function:Slot.Function....:Slot.Function.
     * 00 is added here to make this format compatible with
     * domain:Bus:Slot.Func for systems without nested PCI bridges.
     * Slot.Function list specifies the slot and function numbers for all
     * devices on the path from root to the specific device. */
    const char *root_bus_path;
    int root_bus_len;
    char slot[] = ":SS.F";
    int slot_len = sizeof slot - 1 /* For '\0' */;
    int path_len;
    char *path, *p;
    int s;

    root_bus_path = pci_root_bus_path(d);
    root_bus_len = strlen(root_bus_path);

    /* Calculate # of slots on path between device and root. */;
    slot_depth = 0;
    for (t = d; t; t = pci_get_bus(t)->parent_dev) {
        ++slot_depth;
    }

    path_len = root_bus_len + slot_len * slot_depth;

    /* Allocate memory, fill in the terminating null byte. */
    path = g_malloc(path_len + 1 /* For '\0' */);
    path[path_len] = '\0';

    /* First field is the root bus prefix computed above. */
    memcpy(path, root_bus_path, root_bus_len);

    /* Fill in slot numbers. We walk up from device to root, so need to print
     * them in the reverse order, last to first. */
    p = path + path_len;
    for (t = d; t; t = pci_get_bus(t)->parent_dev) {
        /* Back up one ":SS.F" cell and render into it. */
        p -= slot_len;
        s = snprintf(slot, sizeof slot, ":%02x.%x",
                     PCI_SLOT(t->devfn), PCI_FUNC(t->devfn));
        assert(s == slot_len);
        memcpy(p, slot, slot_len);
    }

    return path;
}
  2194. static int pci_qdev_find_recursive(PCIBus *bus,
  2195. const char *id, PCIDevice **pdev)
  2196. {
  2197. DeviceState *qdev = qdev_find_recursive(&bus->qbus, id);
  2198. if (!qdev) {
  2199. return -ENODEV;
  2200. }
  2201. /* roughly check if given qdev is pci device */
  2202. if (object_dynamic_cast(OBJECT(qdev), TYPE_PCI_DEVICE)) {
  2203. *pdev = PCI_DEVICE(qdev);
  2204. return 0;
  2205. }
  2206. return -EINVAL;
  2207. }
/*
 * Look up a PCI device by qdev @id across all registered host bridges.
 * Returns 0 with *@pdev set on success; -ENODEV when no bridge knows
 * the id; -EINVAL when the id exists but is not a PCI device.
 */
int pci_qdev_find_device(const char *id, PCIDevice **pdev)
{
    PCIHostState *host_bridge;
    int rc = -ENODEV;

    QLIST_FOREACH(host_bridge, &pci_host_bridges, next) {
        int tmp = pci_qdev_find_recursive(host_bridge->bus, id, pdev);
        if (!tmp) {
            rc = 0;
            break;
        }
        /* Prefer a specific error (-EINVAL) over plain not-found. */
        if (tmp != -ENODEV) {
            rc = tmp;
        }
    }
    return rc;
}
/* Return the memory address space seen by @dev's bus. */
MemoryRegion *pci_address_space(PCIDevice *dev)
{
    return pci_get_bus(dev)->address_space_mem;
}
/* Return the I/O address space seen by @dev's bus. */
MemoryRegion *pci_address_space_io(PCIDevice *dev)
{
    return pci_get_bus(dev)->address_space_io;
}
/*
 * TYPE_PCI_DEVICE class init: install the shared realize/unrealize
 * handlers, tie PCI devices to TYPE_PCI_BUS and register the common
 * properties (addr, romfile, multifunction, ...).
 */
static void pci_device_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *k = DEVICE_CLASS(klass);

    k->realize = pci_qdev_realize;
    k->unrealize = pci_qdev_unrealize;
    k->bus_type = TYPE_PCI_BUS;
    device_class_set_props(k, pci_props);
}
/*
 * Class base-init hook: every concrete (non-abstract) PCI device type
 * must implement at least one of the Conventional-PCI, PCIe or CXL
 * interfaces so generic code can classify it.
 */
static void pci_device_class_base_init(ObjectClass *klass, void *data)
{
    if (!object_class_is_abstract(klass)) {
        ObjectClass *conventional =
            object_class_dynamic_cast(klass, INTERFACE_CONVENTIONAL_PCI_DEVICE);
        ObjectClass *pcie =
            object_class_dynamic_cast(klass, INTERFACE_PCIE_DEVICE);
        ObjectClass *cxl =
            object_class_dynamic_cast(klass, INTERFACE_CXL_DEVICE);

        assert(conventional || pcie || cxl);
    }
}
/*
 * Return the DMA address space @dev should use: the one provided by the
 * nearest ancestor bus with an iommu_fn, after accounting for requester-ID
 * aliasing through conventional PCI bridges; falls back to the global
 * system memory address space.
 */
AddressSpace *pci_device_iommu_address_space(PCIDevice *dev)
{
    PCIBus *bus = pci_get_bus(dev);
    PCIBus *iommu_bus = bus;
    uint8_t devfn = dev->devfn;

    /* Climb toward the root until a bus provides an IOMMU hook. */
    while (iommu_bus && !iommu_bus->iommu_fn && iommu_bus->parent_dev) {
        PCIBus *parent_bus = pci_get_bus(iommu_bus->parent_dev);

        /*
         * The requester ID of the provided device may be aliased, as seen from
         * the IOMMU, due to topology limitations.  The IOMMU relies on a
         * requester ID to provide a unique AddressSpace for devices, but
         * conventional PCI buses pre-date such concepts.  Instead, the PCIe-
         * to-PCI bridge creates and accepts transactions on behalf of down-
         * stream devices.  When doing so, all downstream devices are masked
         * (aliased) behind a single requester ID.  The requester ID used
         * depends on the format of the bridge devices.  Proper PCIe-to-PCI
         * bridges, with a PCIe capability indicating such, follow the
         * guidelines of chapter 2.3 of the PCIe-to-PCI/X bridge specification,
         * where the bridge uses the secondary bus as the bridge portion of the
         * requester ID and devfn of 00.0.  For other bridges, typically those
         * found on the root complex such as the dmi-to-pci-bridge, we follow
         * the convention of typical bare-metal hardware, which uses the
         * requester ID of the bridge itself.  There are device specific
         * exceptions to these rules, but these are the defaults that the
         * Linux kernel uses when determining DMA aliases itself and believed
         * to be true for the bare metal equivalents of the devices emulated
         * in QEMU.
         */
        if (!pci_bus_is_express(iommu_bus)) {
            PCIDevice *parent = iommu_bus->parent_dev;

            if (pci_is_express(parent) &&
                pcie_cap_get_type(parent) == PCI_EXP_TYPE_PCI_BRIDGE) {
                devfn = PCI_DEVFN(0, 0);
                bus = iommu_bus;
            } else {
                devfn = parent->devfn;
                bus = parent_bus;
            }
        }

        iommu_bus = parent_bus;
    }
    if (!pci_bus_bypass_iommu(bus) && iommu_bus && iommu_bus->iommu_fn) {
        return iommu_bus->iommu_fn(bus, iommu_bus->iommu_opaque, devfn);
    }
    return &address_space_memory;
}
/*
 * Attach an IOMMU address-space lookup hook to @bus; consulted by
 * pci_device_iommu_address_space() for all devices below this bus.
 */
void pci_setup_iommu(PCIBus *bus, PCIIOMMUFunc fn, void *opaque)
{
    bus->iommu_fn = fn;
    bus->iommu_opaque = opaque;
}
/*
 * pci_for_each_device callback: extend *@opaque (a Range) to cover every
 * 64-bit memory window of @dev that lies above 4 GiB — both a bridge's
 * prefetchable window and the device's own 64-bit memory BARs.
 */
static void pci_dev_get_w64(PCIBus *b, PCIDevice *dev, void *opaque)
{
    Range *range = opaque;
    uint16_t cmd = pci_get_word(dev->config + PCI_COMMAND);
    int i;

    /* Regions are only decoded while memory space is enabled. */
    if (!(cmd & PCI_COMMAND_MEMORY)) {
        return;
    }

    if (IS_PCI_BRIDGE(dev)) {
        pcibus_t base = pci_bridge_get_base(dev, PCI_BASE_ADDRESS_MEM_PREFETCH);
        pcibus_t limit = pci_bridge_get_limit(dev, PCI_BASE_ADDRESS_MEM_PREFETCH);

        /* Only the portion above 4 GiB counts toward the 64-bit range. */
        base = MAX(base, 0x1ULL << 32);

        if (limit >= base) {
            Range pref_range;
            range_set_bounds(&pref_range, base, limit);
            range_extend(range, &pref_range);
        }
    }
    for (i = 0; i < PCI_NUM_REGIONS; ++i) {
        PCIIORegion *r = &dev->io_regions[i];
        pcibus_t lob, upb;
        Range region_range;

        /* Skip empty regions, I/O BARs and 32-bit-only memory BARs. */
        if (!r->size ||
            (r->type & PCI_BASE_ADDRESS_SPACE_IO) ||
            !(r->type & PCI_BASE_ADDRESS_MEM_TYPE_64)) {
            continue;
        }
        lob = pci_bar_address(dev, i, r->type, r->size);
        upb = lob + r->size - 1;
        if (lob == PCI_BAR_UNMAPPED) {
            continue;
        }

        lob = MAX(lob, 0x1ULL << 32);

        if (upb >= lob) {
            range_set_bounds(&region_range, lob, upb);
            range_extend(range, &region_range);
        }
    }
}
/*
 * Compute into @range the union of all 64-bit memory apertures above
 * 4 GiB used by the devices directly on @bus; @range is left empty if
 * no device contributes one.
 */
void pci_bus_get_w64_range(PCIBus *bus, Range *range)
{
    /* Start empty; pci_dev_get_w64() extends the range per device. */
    range_make_empty(range);
    pci_for_each_device_under_bus(bus, pci_dev_get_w64, range);
}
  2347. static bool pcie_has_upstream_port(PCIDevice *dev)
  2348. {
  2349. PCIDevice *parent_dev = pci_bridge_get_device(pci_get_bus(dev));
  2350. /* Device associated with an upstream port.
  2351. * As there are several types of these, it's easier to check the
  2352. * parent device: upstream ports are always connected to
  2353. * root or downstream ports.
  2354. */
  2355. return parent_dev &&
  2356. pci_is_express(parent_dev) &&
  2357. parent_dev->exp.exp_cap &&
  2358. (pcie_cap_get_type(parent_dev) == PCI_EXP_TYPE_ROOT_PORT ||
  2359. pcie_cap_get_type(parent_dev) == PCI_EXP_TYPE_DOWNSTREAM);
  2360. }
  2361. PCIDevice *pci_get_function_0(PCIDevice *pci_dev)
  2362. {
  2363. PCIBus *bus = pci_get_bus(pci_dev);
  2364. if(pcie_has_upstream_port(pci_dev)) {
  2365. /* With an upstream PCIe port, we only support 1 device at slot 0 */
  2366. return bus->devices[0];
  2367. } else {
  2368. /* Other bus types might support multiple devices at slots 0-31 */
  2369. return bus->devices[PCI_DEVFN(PCI_SLOT(pci_dev->devfn), 0)];
  2370. }
  2371. }
  2372. MSIMessage pci_get_msi_message(PCIDevice *dev, int vector)
  2373. {
  2374. MSIMessage msg;
  2375. if (msix_enabled(dev)) {
  2376. msg = msix_get_message(dev, vector);
  2377. } else if (msi_enabled(dev)) {
  2378. msg = msi_get_message(dev, vector);
  2379. } else {
  2380. /* Should never happen */
  2381. error_report("%s: unknown interrupt type", __func__);
  2382. abort();
  2383. }
  2384. return msg;
  2385. }
  2386. void pci_set_power(PCIDevice *d, bool state)
  2387. {
  2388. if (d->has_power == state) {
  2389. return;
  2390. }
  2391. d->has_power = state;
  2392. pci_update_mappings(d);
  2393. memory_region_set_enabled(&d->bus_master_enable_region,
  2394. (pci_get_word(d->config + PCI_COMMAND)
  2395. & PCI_COMMAND_MASTER) && d->has_power);
  2396. if (!d->has_power) {
  2397. pci_device_reset(d);
  2398. }
  2399. }
/*
 * QOM type for the abstract PCI device base class.  All concrete PCI
 * devices derive from this type; it cannot be instantiated directly.
 */
static const TypeInfo pci_device_type_info = {
    .name = TYPE_PCI_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(PCIDevice),
    .abstract = true,
    .class_size = sizeof(PCIDeviceClass),
    .class_init = pci_device_class_init,
    .class_base_init = pci_device_class_base_init,
};
  2409. static void pci_register_types(void)
  2410. {
  2411. type_register_static(&pci_bus_info);
  2412. type_register_static(&pcie_bus_info);
  2413. type_register_static(&cxl_bus_info);
  2414. type_register_static(&conventional_pci_interface_info);
  2415. type_register_static(&cxl_interface_info);
  2416. type_register_static(&pcie_interface_info);
  2417. type_register_static(&pci_device_type_info);
  2418. }
  2419. type_init(pci_register_types)