2
0

pci.c 87 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
727782779278027812782278327842785278627872788278927902791279227932794279527962797279827992800280128022803280428052806280728082809281028112812281328142815281628172818281928202821282228232824282528262827282828292830283128322833283428352836283728382839284028412842284328442845284628472848284928502851
  1. /*
  2. * QEMU PCI bus manager
  3. *
  4. * Copyright (c) 2004 Fabrice Bellard
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining a copy
  7. * of this software and associated documentation files (the "Software"), to deal
  8. * in the Software without restriction, including without limitation the rights
  9. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  10. * copies of the Software, and to permit persons to whom the Software is
  11. * furnished to do so, subject to the following conditions:
  12. *
  13. * The above copyright notice and this permission notice shall be included in
  14. * all copies or substantial portions of the Software.
  15. *
  16. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  19. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  20. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  21. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  22. * THE SOFTWARE.
  23. */
  24. #include "qemu/osdep.h"
  25. #include "qemu/datadir.h"
  26. #include "qemu/units.h"
  27. #include "hw/irq.h"
  28. #include "hw/pci/pci.h"
  29. #include "hw/pci/pci_bridge.h"
  30. #include "hw/pci/pci_bus.h"
  31. #include "hw/pci/pci_host.h"
  32. #include "hw/qdev-properties.h"
  33. #include "hw/qdev-properties-system.h"
  34. #include "migration/qemu-file-types.h"
  35. #include "migration/vmstate.h"
  36. #include "net/net.h"
  37. #include "sysemu/numa.h"
  38. #include "sysemu/runstate.h"
  39. #include "sysemu/sysemu.h"
  40. #include "hw/loader.h"
  41. #include "qemu/error-report.h"
  42. #include "qemu/range.h"
  43. #include "trace.h"
  44. #include "hw/pci/msi.h"
  45. #include "hw/pci/msix.h"
  46. #include "hw/hotplug.h"
  47. #include "hw/boards.h"
  48. #include "qapi/error.h"
  49. #include "qemu/cutils.h"
  50. #include "pci-internal.h"
  51. #include "hw/xen/xen.h"
  52. #include "hw/i386/kvm/xen_evtchn.h"
  53. //#define DEBUG_PCI
  54. #ifdef DEBUG_PCI
  55. # define PCI_DPRINTF(format, ...) printf(format, ## __VA_ARGS__)
  56. #else
  57. # define PCI_DPRINTF(format, ...) do { } while (0)
  58. #endif
  59. bool pci_available = true;
  60. static char *pcibus_get_dev_path(DeviceState *dev);
  61. static char *pcibus_get_fw_dev_path(DeviceState *dev);
  62. static void pcibus_reset_hold(Object *obj, ResetType type);
  63. static bool pcie_has_upstream_port(PCIDevice *dev);
/* qdev properties shared by every PCI device. */
static Property pci_props[] = {
    /* devfn defaults to -1 (no fixed slot requested). */
    DEFINE_PROP_PCI_DEVFN("addr", PCIDevice, devfn, -1),
    DEFINE_PROP_STRING("romfile", PCIDevice, romfile),
    DEFINE_PROP_UINT32("romsize", PCIDevice, romsize, -1),
    DEFINE_PROP_UINT32("rombar", PCIDevice, rom_bar, 1),
    DEFINE_PROP_BIT("multifunction", PCIDevice, cap_present,
                    QEMU_PCI_CAP_MULTIFUNCTION_BITNR, false),
    DEFINE_PROP_BIT("x-pcie-lnksta-dllla", PCIDevice, cap_present,
                    QEMU_PCIE_LNKSTA_DLLLA_BITNR, true),
    DEFINE_PROP_BIT("x-pcie-extcap-init", PCIDevice, cap_present,
                    QEMU_PCIE_EXTCAP_INIT_BITNR, true),
    DEFINE_PROP_STRING("failover_pair_id", PCIDevice,
                       failover_pair_id),
    DEFINE_PROP_UINT32("acpi-index", PCIDevice, acpi_index, 0),
    DEFINE_PROP_BIT("x-pcie-err-unc-mask", PCIDevice, cap_present,
                    QEMU_PCIE_ERR_UNC_MASK_BITNR, true),
    DEFINE_PROP_BIT("x-pcie-ari-nextfn-1", PCIDevice, cap_present,
                    QEMU_PCIE_ARI_NEXTFN_1_BITNR, false),
    DEFINE_PROP_END_OF_LIST()
};
/*
 * Migration state for a PCI bus: only the per-line INTx assertion
 * counters are transferred.  nirq must be identical on source and
 * destination (VMSTATE_INT32_EQUAL) or the load fails.
 */
static const VMStateDescription vmstate_pcibus = {
    .name = "PCIBUS",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_INT32_EQUAL(nirq, PCIBus, NULL),
        VMSTATE_VARRAY_INT32(irq_count, PCIBus,
                             nirq, 0, vmstate_info_int32,
                             int32_t),
        VMSTATE_END_OF_LIST()
    }
};
  96. static gint g_cmp_uint32(gconstpointer a, gconstpointer b, gpointer user_data)
  97. {
  98. return a - b;
  99. }
  100. static GSequence *pci_acpi_index_list(void)
  101. {
  102. static GSequence *used_acpi_index_list;
  103. if (!used_acpi_index_list) {
  104. used_acpi_index_list = g_sequence_new(NULL);
  105. }
  106. return used_acpi_index_list;
  107. }
/*
 * Set up the device's bus-master DMA window: an alias of the IOMMU (or
 * system) address space, initially disabled, placed inside the device's
 * bus_master_container_region.  It is enabled later when the guest sets
 * PCI_COMMAND_MASTER (see get_pci_config_device()/config writes).
 */
static void pci_init_bus_master(PCIDevice *pci_dev)
{
    AddressSpace *dma_as = pci_device_iommu_address_space(pci_dev);

    memory_region_init_alias(&pci_dev->bus_master_enable_region,
                             OBJECT(pci_dev), "bus master",
                             dma_as->root, 0, memory_region_size(dma_as->root));
    /* Disabled until the guest turns on bus mastering. */
    memory_region_set_enabled(&pci_dev->bus_master_enable_region, false);
    memory_region_add_subregion(&pci_dev->bus_master_container_region, 0,
                                &pci_dev->bus_master_enable_region);
}
  118. static void pcibus_machine_done(Notifier *notifier, void *data)
  119. {
  120. PCIBus *bus = container_of(notifier, PCIBus, machine_done);
  121. int i;
  122. for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
  123. if (bus->devices[i]) {
  124. pci_init_bus_master(bus->devices[i]);
  125. }
  126. }
  127. }
  128. static void pci_bus_realize(BusState *qbus, Error **errp)
  129. {
  130. PCIBus *bus = PCI_BUS(qbus);
  131. bus->machine_done.notify = pcibus_machine_done;
  132. qemu_add_machine_init_done_notifier(&bus->machine_done);
  133. vmstate_register_any(NULL, &vmstate_pcibus, bus);
  134. }
  135. static void pcie_bus_realize(BusState *qbus, Error **errp)
  136. {
  137. PCIBus *bus = PCI_BUS(qbus);
  138. Error *local_err = NULL;
  139. pci_bus_realize(qbus, &local_err);
  140. if (local_err) {
  141. error_propagate(errp, local_err);
  142. return;
  143. }
  144. /*
  145. * A PCI-E bus can support extended config space if it's the root
  146. * bus, or if the bus/bridge above it does as well
  147. */
  148. if (pci_bus_is_root(bus)) {
  149. bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE;
  150. } else {
  151. PCIBus *parent_bus = pci_get_bus(bus->parent_dev);
  152. if (pci_bus_allows_extended_config_space(parent_bus)) {
  153. bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE;
  154. }
  155. }
  156. }
/* Undo pci_bus_realize(): drop the notifier and the migration state. */
static void pci_bus_unrealize(BusState *qbus)
{
    PCIBus *bus = PCI_BUS(qbus);

    qemu_remove_machine_init_done_notifier(&bus->machine_done);

    vmstate_unregister(NULL, &vmstate_pcibus, bus);
}
  163. static int pcibus_num(PCIBus *bus)
  164. {
  165. if (pci_bus_is_root(bus)) {
  166. return 0; /* pci host bridge */
  167. }
  168. return bus->parent_dev->config[PCI_SECONDARY_BUS];
  169. }
/* Plain PCI buses carry no NUMA affinity information. */
static uint16_t pcibus_numa_node(PCIBus *bus)
{
    return NUMA_NODE_UNASSIGNED;
}
  174. static void pci_bus_class_init(ObjectClass *klass, void *data)
  175. {
  176. BusClass *k = BUS_CLASS(klass);
  177. PCIBusClass *pbc = PCI_BUS_CLASS(klass);
  178. ResettableClass *rc = RESETTABLE_CLASS(klass);
  179. k->print_dev = pcibus_dev_print;
  180. k->get_dev_path = pcibus_get_dev_path;
  181. k->get_fw_dev_path = pcibus_get_fw_dev_path;
  182. k->realize = pci_bus_realize;
  183. k->unrealize = pci_bus_unrealize;
  184. rc->phases.hold = pcibus_reset_hold;
  185. pbc->bus_num = pcibus_num;
  186. pbc->numa_node = pcibus_numa_node;
  187. }
/* QOM type registration for the conventional PCI bus. */
static const TypeInfo pci_bus_info = {
    .name = TYPE_PCI_BUS,
    .parent = TYPE_BUS,
    .instance_size = sizeof(PCIBus),
    .class_size = sizeof(PCIBusClass),
    .class_init = pci_bus_class_init,
};

/* Marker interfaces devices implement to declare their bus flavour. */
static const TypeInfo cxl_interface_info = {
    .name          = INTERFACE_CXL_DEVICE,
    .parent        = TYPE_INTERFACE,
};

static const TypeInfo pcie_interface_info = {
    .name          = INTERFACE_PCIE_DEVICE,
    .parent        = TYPE_INTERFACE,
};

static const TypeInfo conventional_pci_interface_info = {
    .name          = INTERFACE_CONVENTIONAL_PCI_DEVICE,
    .parent        = TYPE_INTERFACE,
};
/* PCIe buses only override realize; all else is inherited from PCI. */
static void pcie_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *k = BUS_CLASS(klass);

    k->realize = pcie_bus_realize;
}
/* PCIe bus type; CXL buses derive from it and share its class init. */
static const TypeInfo pcie_bus_info = {
    .name = TYPE_PCIE_BUS,
    .parent = TYPE_PCI_BUS,
    .class_init = pcie_bus_class_init,
};

static const TypeInfo cxl_bus_info = {
    .name = TYPE_CXL_BUS,
    .parent = TYPE_PCIE_BUS,
    .class_init = pcie_bus_class_init,
};
  222. static void pci_update_mappings(PCIDevice *d);
  223. static void pci_irq_handler(void *opaque, int irq_num, int level);
  224. static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom, Error **);
  225. static void pci_del_option_rom(PCIDevice *pdev);
  226. static uint16_t pci_default_sub_vendor_id = PCI_SUBVENDOR_ID_REDHAT_QUMRANET;
  227. static uint16_t pci_default_sub_device_id = PCI_SUBDEVICE_ID_QEMU;
  228. PCIHostStateList pci_host_bridges;
  229. int pci_bar(PCIDevice *d, int reg)
  230. {
  231. uint8_t type;
  232. /* PCIe virtual functions do not have their own BARs */
  233. assert(!pci_is_vf(d));
  234. if (reg != PCI_ROM_SLOT)
  235. return PCI_BASE_ADDRESS_0 + reg * 4;
  236. type = d->config[PCI_HEADER_TYPE] & ~PCI_HEADER_TYPE_MULTI_FUNCTION;
  237. return type == PCI_HEADER_TYPE_BRIDGE ? PCI_ROM_ADDRESS1 : PCI_ROM_ADDRESS;
  238. }
/* Current (0/1) assertion state of one INTx pin of @d. */
static inline int pci_irq_state(PCIDevice *d, int irq_num)
{
    return (d->irq_state >> irq_num) & 0x1;
}

/* Record the (0/1) assertion state of one INTx pin of @d. */
static inline void pci_set_irq_state(PCIDevice *d, int irq_num, int level)
{
    d->irq_state &= ~(0x1 << irq_num);
    d->irq_state |= level << irq_num;
}
/*
 * Apply @change (+1/-1) to the number of devices asserting bus line
 * @irq_num, and (re)drive the line: asserted while any device holds it.
 */
static void pci_bus_change_irq_level(PCIBus *bus, int irq_num, int change)
{
    assert(irq_num >= 0);
    assert(irq_num < bus->nirq);
    bus->irq_count[irq_num] += change;
    bus->set_irq(bus->irq_opaque, irq_num, bus->irq_count[irq_num] != 0);
}
  255. static void pci_change_irq_level(PCIDevice *pci_dev, int irq_num, int change)
  256. {
  257. PCIBus *bus;
  258. for (;;) {
  259. int dev_irq = irq_num;
  260. bus = pci_get_bus(pci_dev);
  261. assert(bus->map_irq);
  262. irq_num = bus->map_irq(pci_dev, irq_num);
  263. trace_pci_route_irq(dev_irq, DEVICE(pci_dev)->canonical_path, irq_num,
  264. pci_bus_is_root(bus) ? "root-complex"
  265. : DEVICE(bus->parent_dev)->canonical_path);
  266. if (bus->set_irq)
  267. break;
  268. pci_dev = bus->parent_dev;
  269. }
  270. pci_bus_change_irq_level(bus, irq_num, change);
  271. }
  272. int pci_bus_get_irq_level(PCIBus *bus, int irq_num)
  273. {
  274. assert(irq_num >= 0);
  275. assert(irq_num < bus->nirq);
  276. return !!bus->irq_count[irq_num];
  277. }
  278. /* Update interrupt status bit in config space on interrupt
  279. * state change. */
  280. static void pci_update_irq_status(PCIDevice *dev)
  281. {
  282. if (dev->irq_state) {
  283. dev->config[PCI_STATUS] |= PCI_STATUS_INTERRUPT;
  284. } else {
  285. dev->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT;
  286. }
  287. }
  288. void pci_device_deassert_intx(PCIDevice *dev)
  289. {
  290. int i;
  291. for (i = 0; i < PCI_NUM_PINS; ++i) {
  292. pci_irq_handler(dev, i, 0);
  293. }
  294. }
/*
 * Deliver an MSI/MSI-X message on behalf of @dev: a 32-bit write of
 * msg.data to msg.address through the device's bus-master address
 * space, tagged with the device's requester ID.
 */
static void pci_msi_trigger(PCIDevice *dev, MSIMessage msg)
{
    MemTxAttrs attrs = {};

    /*
     * Xen uses the high bits of the address to contain some of the bits
     * of the PIRQ#. Therefore we can't just send the write cycle and
     * trust that it's caught by the APIC at 0xfee00000 because the
     * target of the write might be e.g. 0x0x1000fee46000 for PIRQ#4166.
     * So we intercept the delivery here instead of in kvm_send_msi().
     */
    if (xen_mode == XEN_EMULATE &&
        xen_evtchn_deliver_pirq_msi(msg.address, msg.data)) {
        return;
    }
    attrs.requester_id = pci_requester_id(dev);
    address_space_stl_le(&dev->bus_master_as, msg.address, msg.data,
                         attrs, NULL);
}
  313. static void pci_reset_regions(PCIDevice *dev)
  314. {
  315. int r;
  316. if (pci_is_vf(dev)) {
  317. return;
  318. }
  319. for (r = 0; r < PCI_NUM_REGIONS; ++r) {
  320. PCIIORegion *region = &dev->io_regions[r];
  321. if (!region->size) {
  322. continue;
  323. }
  324. if (!(region->type & PCI_BASE_ADDRESS_SPACE_IO) &&
  325. region->type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
  326. pci_set_quad(dev->config + pci_bar(dev, r), region->type);
  327. } else {
  328. pci_set_long(dev->config + pci_bar(dev, r), region->type);
  329. }
  330. }
  331. }
/*
 * Reset the generic PCI state of @dev: deassert INTx, clear all
 * guest-writable config bits, restore BARs to power-on values and
 * resync mappings, then reset MSI/MSI-X and SR-IOV PF state.
 * Statement order matters: mappings are updated only after the config
 * space has been rewritten.
 */
static void pci_do_device_reset(PCIDevice *dev)
{
    pci_device_deassert_intx(dev);
    assert(dev->irq_state == 0);

    /* Clear all writable bits */
    pci_word_test_and_clear_mask(dev->config + PCI_COMMAND,
                                 pci_get_word(dev->wmask + PCI_COMMAND) |
                                 pci_get_word(dev->w1cmask + PCI_COMMAND));
    pci_word_test_and_clear_mask(dev->config + PCI_STATUS,
                                 pci_get_word(dev->wmask + PCI_STATUS) |
                                 pci_get_word(dev->w1cmask + PCI_STATUS));
    /* Some devices make bits of PCI_INTERRUPT_LINE read only */
    pci_byte_test_and_clear_mask(dev->config + PCI_INTERRUPT_LINE,
                              pci_get_word(dev->wmask + PCI_INTERRUPT_LINE) |
                              pci_get_word(dev->w1cmask + PCI_INTERRUPT_LINE));
    dev->config[PCI_CACHE_LINE_SIZE] = 0x0;
    pci_reset_regions(dev);
    pci_update_mappings(dev);

    msi_reset(dev);
    msix_reset(dev);
    pcie_sriov_pf_reset(dev);
}
/*
 * This function is called on #RST and FLR.
 * FLR if PCI_EXP_DEVCTL_BCR_FLR is set
 *
 * Cold-resets the qdev first, then the generic PCI state on top.
 */
void pci_device_reset(PCIDevice *dev)
{
    device_cold_reset(&dev->qdev);
    pci_do_device_reset(dev);
}
  363. /*
  364. * Trigger pci bus reset under a given bus.
  365. * Called via bus_cold_reset on RST# assert, after the devices
  366. * have been reset device_cold_reset-ed already.
  367. */
  368. static void pcibus_reset_hold(Object *obj, ResetType type)
  369. {
  370. PCIBus *bus = PCI_BUS(obj);
  371. int i;
  372. for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
  373. if (bus->devices[i]) {
  374. pci_do_device_reset(bus->devices[i]);
  375. }
  376. }
  377. for (i = 0; i < bus->nirq; i++) {
  378. assert(bus->irq_count[i] == 0);
  379. }
  380. }
/* Add @host to the global list of PCI host bridges. */
static void pci_host_bus_register(DeviceState *host)
{
    PCIHostState *host_bridge = PCI_HOST_BRIDGE(host);

    QLIST_INSERT_HEAD(&pci_host_bridges, host_bridge, next);
}

/* Remove @host from the global list of PCI host bridges. */
static void pci_host_bus_unregister(DeviceState *host)
{
    PCIHostState *host_bridge = PCI_HOST_BRIDGE(host);

    QLIST_REMOVE(host_bridge, next);
}
  391. PCIBus *pci_device_root_bus(const PCIDevice *d)
  392. {
  393. PCIBus *bus = pci_get_bus(d);
  394. while (!pci_bus_is_root(bus)) {
  395. d = bus->parent_dev;
  396. assert(d != NULL);
  397. bus = pci_get_bus(d);
  398. }
  399. return bus;
  400. }
  401. const char *pci_root_bus_path(PCIDevice *dev)
  402. {
  403. PCIBus *rootbus = pci_device_root_bus(dev);
  404. PCIHostState *host_bridge = PCI_HOST_BRIDGE(rootbus->qbus.parent);
  405. PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_GET_CLASS(host_bridge);
  406. assert(host_bridge->bus == rootbus);
  407. if (hc->root_bus_path) {
  408. return (*hc->root_bus_path)(host_bridge, rootbus);
  409. }
  410. return rootbus->qbus.name;
  411. }
  412. bool pci_bus_bypass_iommu(PCIBus *bus)
  413. {
  414. PCIBus *rootbus = bus;
  415. PCIHostState *host_bridge;
  416. if (!pci_bus_is_root(bus)) {
  417. rootbus = pci_device_root_bus(bus->parent_dev);
  418. }
  419. host_bridge = PCI_HOST_BRIDGE(rootbus->qbus.parent);
  420. assert(host_bridge->bus == rootbus);
  421. return host_bridge->bypass_iommu;
  422. }
/*
 * Initialisation shared by pci_root_bus_init() and pci_root_bus_new():
 * record the bus's address spaces, mark it as a root bus, and register
 * @parent as a PCI host bridge.
 */
static void pci_root_bus_internal_init(PCIBus *bus, DeviceState *parent,
                                       MemoryRegion *mem, MemoryRegion *io,
                                       uint8_t devfn_min)
{
    /* A root bus starts at function 0 of its minimum devfn. */
    assert(PCI_FUNC(devfn_min) == 0);
    bus->devfn_min = devfn_min;
    bus->slot_reserved_mask = 0x0;
    bus->address_space_mem = mem;
    bus->address_space_io = io;
    bus->flags |= PCI_BUS_IS_ROOT;

    /* host bridge */
    QLIST_INIT(&bus->child);

    pci_host_bus_register(parent);
}
/* Undo pci_host_bus_register() done in pci_root_bus_internal_init(). */
static void pci_bus_uninit(PCIBus *bus)
{
    pci_host_bus_unregister(BUS(bus)->parent);
}
  441. bool pci_bus_is_express(const PCIBus *bus)
  442. {
  443. return object_dynamic_cast(OBJECT(bus), TYPE_PCIE_BUS);
  444. }
/* In-place variant of pci_root_bus_new() for embedded PCIBus objects. */
void pci_root_bus_init(PCIBus *bus, size_t bus_size, DeviceState *parent,
                       const char *name,
                       MemoryRegion *mem, MemoryRegion *io,
                       uint8_t devfn_min, const char *typename)
{
    qbus_init(bus, bus_size, typename, parent, name);
    pci_root_bus_internal_init(bus, parent, mem, io, devfn_min);
}
  453. PCIBus *pci_root_bus_new(DeviceState *parent, const char *name,
  454. MemoryRegion *mem, MemoryRegion *io,
  455. uint8_t devfn_min, const char *typename)
  456. {
  457. PCIBus *bus;
  458. bus = PCI_BUS(qbus_new(typename, parent, name));
  459. pci_root_bus_internal_init(bus, parent, mem, io, devfn_min);
  460. return bus;
  461. }
/* Tear down a root bus created via pci_root_bus_new()/_init(). */
void pci_root_bus_cleanup(PCIBus *bus)
{
    pci_bus_uninit(bus);
    /* the caller of the unplug hotplug handler will delete this device */
    qbus_unrealize(BUS(bus));
}
/*
 * Attach the interrupt-delivery callback to @bus and (re)allocate the
 * zeroed per-line assertion counters.  May be called again to replace
 * an earlier setup: any previous counter table is freed first.
 */
void pci_bus_irqs(PCIBus *bus, pci_set_irq_fn set_irq,
                  void *irq_opaque, int nirq)
{
    bus->set_irq = set_irq;
    bus->irq_opaque = irq_opaque;
    bus->nirq = nirq;
    g_free(bus->irq_count);
    bus->irq_count = g_malloc0(nirq * sizeof(bus->irq_count[0]));
}
/* Install the pin-remapping function used by pci_change_irq_level(). */
void pci_bus_map_irqs(PCIBus *bus, pci_map_irq_fn map_irq)
{
    bus->map_irq = map_irq;
}
/* Undo pci_bus_irqs()/pci_bus_map_irqs(): clear hooks, free counters. */
void pci_bus_irqs_cleanup(PCIBus *bus)
{
    bus->set_irq = NULL;
    bus->map_irq = NULL;
    bus->irq_opaque = NULL;
    bus->nirq = 0;
    g_free(bus->irq_count);
    bus->irq_count = NULL;
}
  490. PCIBus *pci_register_root_bus(DeviceState *parent, const char *name,
  491. pci_set_irq_fn set_irq, pci_map_irq_fn map_irq,
  492. void *irq_opaque,
  493. MemoryRegion *mem, MemoryRegion *io,
  494. uint8_t devfn_min, int nirq,
  495. const char *typename)
  496. {
  497. PCIBus *bus;
  498. bus = pci_root_bus_new(parent, name, mem, io, devfn_min, typename);
  499. pci_bus_irqs(bus, set_irq, irq_opaque, nirq);
  500. pci_bus_map_irqs(bus, map_irq);
  501. return bus;
  502. }
/* Inverse of pci_register_root_bus(). */
void pci_unregister_root_bus(PCIBus *bus)
{
    pci_bus_irqs_cleanup(bus);
    pci_root_bus_cleanup(bus);
}
/* Bus number of @s, as computed by its PCIBusClass implementation. */
int pci_bus_num(PCIBus *s)
{
    return PCI_BUS_GET_CLASS(s)->bus_num(s);
}
  512. /* Returns the min and max bus numbers of a PCI bus hierarchy */
  513. void pci_bus_range(PCIBus *bus, int *min_bus, int *max_bus)
  514. {
  515. int i;
  516. *min_bus = *max_bus = pci_bus_num(bus);
  517. for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
  518. PCIDevice *dev = bus->devices[i];
  519. if (dev && IS_PCI_BRIDGE(dev)) {
  520. *min_bus = MIN(*min_bus, dev->config[PCI_SECONDARY_BUS]);
  521. *max_bus = MAX(*max_bus, dev->config[PCI_SUBORDINATE_BUS]);
  522. }
  523. }
  524. }
/* NUMA node of @bus, as reported by its PCIBusClass implementation. */
int pci_bus_numa_node(PCIBus *bus)
{
    return PCI_BUS_GET_CLASS(bus)->numa_node(bus);
}
/*
 * Incoming-migration handler for the "pci config" field.
 *
 * Reads the device's config space from the stream and validates it
 * against the running device: any bit that is checked (cmask) but
 * neither guest-writable (wmask) nor write-1-to-clear (w1cmask) must
 * match, otherwise the source and destination devices differ and the
 * load fails with -EINVAL.  On success the new config is installed and
 * the state derived from it (BAR mappings, bridge windows, bus-master
 * enable) is brought back in sync.
 */
static int get_pci_config_device(QEMUFile *f, void *pv, size_t size,
                                 const VMStateField *field)
{
    PCIDevice *s = container_of(pv, PCIDevice, config);
    uint8_t *config;
    int i;

    assert(size == pci_config_size(s));
    config = g_malloc(size);

    qemu_get_buffer(f, config, size);
    for (i = 0; i < size; ++i) {
        if ((config[i] ^ s->config[i]) &
            s->cmask[i] & ~s->wmask[i] & ~s->w1cmask[i]) {
            error_report("%s: Bad config data: i=0x%x read: %x device: %x "
                         "cmask: %x wmask: %x w1cmask:%x", __func__,
                         i, config[i], s->config[i],
                         s->cmask[i], s->wmask[i], s->w1cmask[i]);
            g_free(config);
            return -EINVAL;
        }
    }
    memcpy(s->config, config, size);

    pci_update_mappings(s);
    if (IS_PCI_BRIDGE(s)) {
        pci_bridge_update_mappings(PCI_BRIDGE(s));
    }

    /* Re-enable DMA only if the restored command register allows it. */
    memory_region_set_enabled(&s->bus_master_enable_region,
                              pci_get_word(s->config + PCI_COMMAND)
                              & PCI_COMMAND_MASTER);

    g_free(config);
    return 0;
}
  560. /* just put buffer */
  561. static int put_pci_config_device(QEMUFile *f, void *pv, size_t size,
  562. const VMStateField *field, JSONWriter *vmdesc)
  563. {
  564. const uint8_t **v = pv;
  565. assert(size == pci_config_size(container_of(pv, PCIDevice, config)));
  566. qemu_put_buffer(f, *v, size);
  567. return 0;
  568. }
/* Custom VMState codec for the validated PCI config-space buffer. */
static const VMStateInfo vmstate_info_pci_config = {
    .name = "pci config",
    .get  = get_pci_config_device,
    .put  = put_pci_config_device,
};
  574. static int get_pci_irq_state(QEMUFile *f, void *pv, size_t size,
  575. const VMStateField *field)
  576. {
  577. PCIDevice *s = container_of(pv, PCIDevice, irq_state);
  578. uint32_t irq_state[PCI_NUM_PINS];
  579. int i;
  580. for (i = 0; i < PCI_NUM_PINS; ++i) {
  581. irq_state[i] = qemu_get_be32(f);
  582. if (irq_state[i] != 0x1 && irq_state[i] != 0) {
  583. fprintf(stderr, "irq state %d: must be 0 or 1.\n",
  584. irq_state[i]);
  585. return -EINVAL;
  586. }
  587. }
  588. for (i = 0; i < PCI_NUM_PINS; ++i) {
  589. pci_set_irq_state(s, i, irq_state[i]);
  590. }
  591. return 0;
  592. }
/*
 * VMState .put handler for PCIDevice::irq_state: write the current level
 * of each INTx pin as a big-endian 32-bit word, mirroring the format
 * expected by get_pci_irq_state().
 */
static int put_pci_irq_state(QEMUFile *f, void *pv, size_t size,
                             const VMStateField *field, JSONWriter *vmdesc)
{
    int i;
    PCIDevice *s = container_of(pv, PCIDevice, irq_state);

    for (i = 0; i < PCI_NUM_PINS; ++i) {
        qemu_put_be32(f, pci_irq_state(s, i));
    }

    return 0;
}
/* VMStateInfo pairing the custom INTx pin level get/put handlers above. */
static const VMStateInfo vmstate_info_pci_irq_state = {
    .name = "pci irq state",
    .get = get_pci_irq_state,
    .put = put_pci_irq_state,
};
  608. static bool migrate_is_pcie(void *opaque, int version_id)
  609. {
  610. return pci_is_express((PCIDevice *)opaque);
  611. }
  612. static bool migrate_is_not_pcie(void *opaque, int version_id)
  613. {
  614. return !pci_is_express((PCIDevice *)opaque);
  615. }
/*
 * Migration description for the common PCIDevice state.
 *
 * Exactly one of the two "config" fields is active per device, selected by
 * the migrate_is_pcie()/migrate_is_not_pcie() predicates: conventional PCI
 * migrates 256 bytes of config space, PCI Express migrates 4 KiB.
 */
const VMStateDescription vmstate_pci_device = {
    .name = "PCIDevice",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_INT32_POSITIVE_LE(version_id, PCIDevice),
        VMSTATE_BUFFER_UNSAFE_INFO_TEST(config, PCIDevice,
                                        migrate_is_not_pcie,
                                        0, vmstate_info_pci_config,
                                        PCI_CONFIG_SPACE_SIZE),
        VMSTATE_BUFFER_UNSAFE_INFO_TEST(config, PCIDevice,
                                        migrate_is_pcie,
                                        0, vmstate_info_pci_config,
                                        PCIE_CONFIG_SPACE_SIZE),
        /* One 32-bit level per INTx pin, from version 2 onwards. */
        VMSTATE_BUFFER_UNSAFE_INFO(irq_state, PCIDevice, 2,
                                   vmstate_info_pci_irq_state,
                                   PCI_NUM_PINS * sizeof(int32_t)),
        VMSTATE_END_OF_LIST()
    }
};
/*
 * Serialize the common PCI device state of @s into @f.
 */
void pci_device_save(PCIDevice *s, QEMUFile *f)
{
    /* Clear interrupt status bit: it is implicit
     * in irq_state which we are saving.
     * This makes us compatible with old devices
     * which never set or clear this bit. */
    s->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT;
    vmstate_save_state(f, &vmstate_pci_device, s, NULL);
    /* Restore the interrupt status bit. */
    pci_update_irq_status(s);
}
/*
 * Restore the common PCI device state of @s from @f.
 * Returns the vmstate_load_state() result (0 on success, negative errno
 * on failure).
 */
int pci_device_load(PCIDevice *s, QEMUFile *f)
{
    int ret;

    ret = vmstate_load_state(f, &vmstate_pci_device, s, s->version_id);
    /* Restore the interrupt status bit, derived from irq_state. */
    pci_update_irq_status(s);
    return ret;
}
/*
 * Fill in the default subsystem vendor/device IDs for device models that
 * do not specify their own (see do_pci_register_device()).
 */
static void pci_set_default_subsystem_id(PCIDevice *pci_dev)
{
    pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID,
                 pci_default_sub_vendor_id);
    pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID,
                 pci_default_sub_device_id);
}
  662. /*
  663. * Parse [[<domain>:]<bus>:]<slot>, return -1 on error if funcp == NULL
  664. * [[<domain>:]<bus>:]<slot>.<func>, return -1 on error
  665. */
  666. static int pci_parse_devaddr(const char *addr, int *domp, int *busp,
  667. unsigned int *slotp, unsigned int *funcp)
  668. {
  669. const char *p;
  670. char *e;
  671. unsigned long val;
  672. unsigned long dom = 0, bus = 0;
  673. unsigned int slot = 0;
  674. unsigned int func = 0;
  675. p = addr;
  676. val = strtoul(p, &e, 16);
  677. if (e == p)
  678. return -1;
  679. if (*e == ':') {
  680. bus = val;
  681. p = e + 1;
  682. val = strtoul(p, &e, 16);
  683. if (e == p)
  684. return -1;
  685. if (*e == ':') {
  686. dom = bus;
  687. bus = val;
  688. p = e + 1;
  689. val = strtoul(p, &e, 16);
  690. if (e == p)
  691. return -1;
  692. }
  693. }
  694. slot = val;
  695. if (funcp != NULL) {
  696. if (*e != '.')
  697. return -1;
  698. p = e + 1;
  699. val = strtoul(p, &e, 16);
  700. if (e == p)
  701. return -1;
  702. func = val;
  703. }
  704. /* if funcp == NULL func is 0 */
  705. if (dom > 0xffff || bus > 0xff || slot > 0x1f || func > 7)
  706. return -1;
  707. if (*e)
  708. return -1;
  709. *domp = dom;
  710. *busp = bus;
  711. *slotp = slot;
  712. if (funcp != NULL)
  713. *funcp = func;
  714. return 0;
  715. }
/*
 * Initialize cmask: the config space bytes/bits that must match between
 * source and destination on migration. These are fields fixed by the
 * device model (IDs, class code, header type, capability pointer), so a
 * mismatch indicates incompatible devices.
 */
static void pci_init_cmask(PCIDevice *dev)
{
    pci_set_word(dev->cmask + PCI_VENDOR_ID, 0xffff);
    pci_set_word(dev->cmask + PCI_DEVICE_ID, 0xffff);
    /* Only the capability-list-present bit of STATUS is checked. */
    dev->cmask[PCI_STATUS] = PCI_STATUS_CAP_LIST;
    dev->cmask[PCI_REVISION_ID] = 0xff;
    dev->cmask[PCI_CLASS_PROG] = 0xff;
    pci_set_word(dev->cmask + PCI_CLASS_DEVICE, 0xffff);
    dev->cmask[PCI_HEADER_TYPE] = 0xff;
    dev->cmask[PCI_CAPABILITY_LIST] = 0xff;
}
/*
 * Initialize wmask: which config space bits the guest may write.
 * Header fields are mostly read-only except cache line size, interrupt
 * line and selected COMMAND bits; everything past the standard header is
 * writable by default until capabilities tighten it.
 */
static void pci_init_wmask(PCIDevice *dev)
{
    int config_size = pci_config_size(dev);

    dev->wmask[PCI_CACHE_LINE_SIZE] = 0xff;
    dev->wmask[PCI_INTERRUPT_LINE] = 0xff;
    pci_set_word(dev->wmask + PCI_COMMAND,
                 PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
                 PCI_COMMAND_INTX_DISABLE);
    pci_word_test_and_set_mask(dev->wmask + PCI_COMMAND, PCI_COMMAND_SERR);

    /* All bytes after the 64-byte standard header default to writable. */
    memset(dev->wmask + PCI_CONFIG_HEADER_SIZE, 0xff,
           config_size - PCI_CONFIG_HEADER_SIZE);
}
/*
 * Initialize w1cmask: STATUS error bits that the guest clears by writing
 * a 1 to them (write-1-to-clear semantics).
 */
static void pci_init_w1cmask(PCIDevice *dev)
{
    /*
     * Note: It's okay to set w1cmask even for readonly bits as
     * long as their value is hardwired to 0.
     */
    pci_set_word(dev->w1cmask + PCI_STATUS,
                 PCI_STATUS_PARITY | PCI_STATUS_SIG_TARGET_ABORT |
                 PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_REC_MASTER_ABORT |
                 PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_DETECTED_PARITY);
}
/*
 * Additional wmask/w1cmask/cmask setup for Type 1 (bridge) headers:
 * bus numbers, forwarding windows and bridge control become guest
 * writable, and the supported window types are advertised as fixed.
 */
static void pci_init_mask_bridge(PCIDevice *d)
{
    /* PCI_PRIMARY_BUS, PCI_SECONDARY_BUS, PCI_SUBORDINATE_BUS and
       PCI_SEC_LATENCY_TIMER */
    memset(d->wmask + PCI_PRIMARY_BUS, 0xff, 4);

    /* base and limit */
    d->wmask[PCI_IO_BASE] = PCI_IO_RANGE_MASK & 0xff;
    d->wmask[PCI_IO_LIMIT] = PCI_IO_RANGE_MASK & 0xff;
    pci_set_word(d->wmask + PCI_MEMORY_BASE,
                 PCI_MEMORY_RANGE_MASK & 0xffff);
    pci_set_word(d->wmask + PCI_MEMORY_LIMIT,
                 PCI_MEMORY_RANGE_MASK & 0xffff);
    pci_set_word(d->wmask + PCI_PREF_MEMORY_BASE,
                 PCI_PREF_RANGE_MASK & 0xffff);
    pci_set_word(d->wmask + PCI_PREF_MEMORY_LIMIT,
                 PCI_PREF_RANGE_MASK & 0xffff);

    /* PCI_PREF_BASE_UPPER32 and PCI_PREF_LIMIT_UPPER32 */
    memset(d->wmask + PCI_PREF_BASE_UPPER32, 0xff, 8);

    /* Supported memory and i/o types (read-only low bits of the windows) */
    d->config[PCI_IO_BASE] |= PCI_IO_RANGE_TYPE_16;
    d->config[PCI_IO_LIMIT] |= PCI_IO_RANGE_TYPE_16;
    pci_word_test_and_set_mask(d->config + PCI_PREF_MEMORY_BASE,
                               PCI_PREF_RANGE_TYPE_64);
    pci_word_test_and_set_mask(d->config + PCI_PREF_MEMORY_LIMIT,
                               PCI_PREF_RANGE_TYPE_64);

    /*
     * TODO: Bridges default to 10-bit VGA decoding but we currently only
     * implement 16-bit decoding (no alias support).
     */
    pci_set_word(d->wmask + PCI_BRIDGE_CONTROL,
                 PCI_BRIDGE_CTL_PARITY |
                 PCI_BRIDGE_CTL_SERR |
                 PCI_BRIDGE_CTL_ISA |
                 PCI_BRIDGE_CTL_VGA |
                 PCI_BRIDGE_CTL_VGA_16BIT |
                 PCI_BRIDGE_CTL_MASTER_ABORT |
                 PCI_BRIDGE_CTL_BUS_RESET |
                 PCI_BRIDGE_CTL_FAST_BACK |
                 PCI_BRIDGE_CTL_DISCARD |
                 PCI_BRIDGE_CTL_SEC_DISCARD |
                 PCI_BRIDGE_CTL_DISCARD_SERR);

    /* Below does not do anything as we never set this bit, put here for
     * completeness. */
    pci_set_word(d->w1cmask + PCI_BRIDGE_CONTROL,
                 PCI_BRIDGE_CTL_DISCARD_STATUS);

    /* The window type bits are model-fixed: check them on migration. */
    d->cmask[PCI_IO_BASE] |= PCI_IO_RANGE_TYPE_MASK;
    d->cmask[PCI_IO_LIMIT] |= PCI_IO_RANGE_TYPE_MASK;
    pci_word_test_and_set_mask(d->cmask + PCI_PREF_MEMORY_BASE,
                               PCI_PREF_RANGE_TYPE_MASK);
    pci_word_test_and_set_mask(d->cmask + PCI_PREF_MEMORY_LIMIT,
                               PCI_PREF_RANGE_TYPE_MASK);
}
/*
 * Set the multifunction header bit and validate the slot's multifunction
 * layout. On violation an error is set in @errp and the device must not
 * be plugged.
 */
static void pci_init_multifunction(PCIBus *bus, PCIDevice *dev, Error **errp)
{
    uint8_t slot = PCI_SLOT(dev->devfn);
    uint8_t func;

    if (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
        dev->config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
    }

    /*
     * With SR/IOV and ARI, a device at function 0 need not be a multifunction
     * device, as it may just be a VF that ended up with function 0 in
     * the legacy PCI interpretation. Avoid failing in such cases:
     */
    if (pci_is_vf(dev) &&
        dev->exp.sriov_vf.pf->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
        return;
    }

    /*
     * multifunction bit is interpreted in two ways as follows.
     *   - all functions must set the bit to 1.
     *     Example: Intel X53
     *   - function 0 must set the bit, but the rest function (> 0)
     *     is allowed to leave the bit to 0.
     *     Example: PIIX3(also in qemu), PIIX4(also in qemu), ICH10,
     *
     * So OS (at least Linux) checks the bit of only function 0,
     * and doesn't see the bit of function > 0.
     *
     * The below check allows both interpretation.
     */
    if (PCI_FUNC(dev->devfn)) {
        PCIDevice *f0 = bus->devices[PCI_DEVFN(slot, 0)];
        if (f0 && !(f0->cap_present & QEMU_PCI_CAP_MULTIFUNCTION)) {
            /* function 0 should set multifunction bit */
            error_setg(errp, "PCI: single function device can't be populated "
                       "in function %x.%x", slot, PCI_FUNC(dev->devfn));
            return;
        }
        return;
    }

    if (dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
        return;
    }
    /* function 0 indicates single function, so function > 0 must be NULL */
    for (func = 1; func < PCI_FUNC_MAX; ++func) {
        if (bus->devices[PCI_DEVFN(slot, func)]) {
            error_setg(errp, "PCI: %x.0 indicates single function, "
                       "but %x.%x is already populated.",
                       slot, slot, func);
            return;
        }
    }
}
  854. static void pci_config_alloc(PCIDevice *pci_dev)
  855. {
  856. int config_size = pci_config_size(pci_dev);
  857. pci_dev->config = g_malloc0(config_size);
  858. pci_dev->cmask = g_malloc0(config_size);
  859. pci_dev->wmask = g_malloc0(config_size);
  860. pci_dev->w1cmask = g_malloc0(config_size);
  861. pci_dev->used = g_malloc0(config_size);
  862. }
/* Release the buffers allocated by pci_config_alloc(). */
static void pci_config_free(PCIDevice *pci_dev)
{
    g_free(pci_dev->config);
    g_free(pci_dev->cmask);
    g_free(pci_dev->wmask);
    g_free(pci_dev->w1cmask);
    g_free(pci_dev->used);
}
/*
 * Undo do_pci_register_device(): detach the device from its bus slot and
 * tear down its config space buffers and bus-master address space.
 */
static void do_pci_unregister_device(PCIDevice *pci_dev)
{
    pci_get_bus(pci_dev)->devices[pci_dev->devfn] = NULL;
    pci_config_free(pci_dev);

    if (xen_mode == XEN_EMULATE) {
        xen_evtchn_remove_pci_device(pci_dev);
    }
    /* The enable region is only mapped once machine init has run. */
    if (memory_region_is_mapped(&pci_dev->bus_master_enable_region)) {
        memory_region_del_subregion(&pci_dev->bus_master_container_region,
                                    &pci_dev->bus_master_enable_region);
    }
    address_space_destroy(&pci_dev->bus_master_as);
}
/* Extract PCIReqIDCache into BDF format */
static uint16_t pci_req_id_cache_extract(PCIReqIDCache *cache)
{
    uint8_t bus_n;
    uint16_t result;

    switch (cache->type) {
    case PCI_REQ_ID_BDF:
        /* Direct case: requester ID is the cached device's own BDF. */
        result = pci_get_bdf(cache->dev);
        break;
    case PCI_REQ_ID_SECONDARY_BUS:
        /*
         * Behind a PCIe-to-PCI bridge: requester ID is the bridge's
         * secondary bus number (the cached device's bus) with devfn 0.
         */
        bus_n = pci_dev_bus_num(cache->dev);
        result = PCI_BUILD_BDF(bus_n, 0);
        break;
    default:
        /* Corrupted cache type is a programming error, not guest input. */
        error_report("Invalid PCI requester ID cache type: %d",
                     cache->type);
        exit(1);
        break;
    }

    return result;
}
/* Parse bridges up to the root complex and return requester ID
 * cache for specific device.  For full PCIe topology, the cache
 * result would be exactly the same as getting BDF of the device.
 * However, several tricks are required when system mixed up with
 * legacy PCI devices and PCIe-to-PCI bridges.
 *
 * Here we cache the proxy device (and type) not requester ID since
 * bus number might change from time to time.
 */
static PCIReqIDCache pci_req_id_cache_get(PCIDevice *dev)
{
    PCIDevice *parent;
    PCIReqIDCache cache = {
        .dev = dev,
        .type = PCI_REQ_ID_BDF,
    };

    /* Walk upstream until we reach a root bus. */
    while (!pci_bus_is_root(pci_get_bus(dev))) {
        /* We are under PCI/PCIe bridges */
        parent = pci_get_bus(dev)->parent_dev;
        if (pci_is_express(parent)) {
            if (pcie_cap_get_type(parent) == PCI_EXP_TYPE_PCI_BRIDGE) {
                /* When we pass through PCIe-to-PCI/PCIX bridges, we
                 * override the requester ID using secondary bus
                 * number of parent bridge with zeroed devfn
                 * (pcie-to-pci bridge spec chap 2.3). */
                cache.type = PCI_REQ_ID_SECONDARY_BUS;
                cache.dev = dev;
            }
        } else {
            /* Legacy PCI, override requester ID with the bridge's
             * BDF upstream.  When the root complex connects to
             * legacy PCI devices (including buses), it can only
             * obtain requester ID info from directly attached
             * devices.  If devices are attached under bridges, only
             * the requester ID of the bridge that is directly
             * attached to the root complex can be recognized. */
            cache.type = PCI_REQ_ID_BDF;
            cache.dev = parent;
        }
        dev = parent;
    }

    return cache;
}
  948. uint16_t pci_requester_id(PCIDevice *dev)
  949. {
  950. return pci_req_id_cache_extract(&dev->requester_id_cache);
  951. }
  952. static bool pci_bus_devfn_available(PCIBus *bus, int devfn)
  953. {
  954. return !(bus->devices[devfn]);
  955. }
/* True when the slot containing @devfn is marked reserved on @bus. */
static bool pci_bus_devfn_reserved(PCIBus *bus, int devfn)
{
    return bus->slot_reserved_mask & (1UL << PCI_SLOT(devfn));
}
/* Return the current reserved-slot bitmap of @bus (bit N = slot N). */
uint32_t pci_bus_get_slot_reserved_mask(PCIBus *bus)
{
    return bus->slot_reserved_mask;
}
/* Mark the slots in @mask as reserved (OR-ed into the existing bitmap). */
void pci_bus_set_slot_reserved_mask(PCIBus *bus, uint32_t mask)
{
    bus->slot_reserved_mask |= mask;
}
/* Clear the reservation for the slots in @mask. */
void pci_bus_clear_slot_reserved_mask(PCIBus *bus, uint32_t mask)
{
    bus->slot_reserved_mask &= ~mask;
}
/* -1 for devfn means auto assign */
/*
 * Core registration of a PCI device on its bus: pick/validate a devfn,
 * set up the bus-master address space, allocate config space and
 * initialize the standard header, masks and multifunction layout.
 * Returns @pci_dev on success, NULL (with @errp set) on failure.
 */
static PCIDevice *do_pci_register_device(PCIDevice *pci_dev,
                                         const char *name, int devfn,
                                         Error **errp)
{
    PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);
    PCIConfigReadFunc *config_read = pc->config_read;
    PCIConfigWriteFunc *config_write = pc->config_write;
    Error *local_err = NULL;
    DeviceState *dev = DEVICE(pci_dev);
    PCIBus *bus = pci_get_bus(pci_dev);
    bool is_bridge = IS_PCI_BRIDGE(pci_dev);

    /* Only pci bridges can be attached to extra PCI root buses */
    if (pci_bus_is_root(bus) && bus->parent_dev && !is_bridge) {
        error_setg(errp,
                   "PCI: Only PCI/PCIe bridges can be plugged into %s",
                   bus->parent_dev->name);
        return NULL;
    }

    if (devfn < 0) {
        /* Auto-assign: scan function 0 of each slot for a free one. */
        for(devfn = bus->devfn_min ; devfn < ARRAY_SIZE(bus->devices);
            devfn += PCI_FUNC_MAX) {
            if (pci_bus_devfn_available(bus, devfn) &&
                !pci_bus_devfn_reserved(bus, devfn)) {
                goto found;
            }
        }
        error_setg(errp, "PCI: no slot/function available for %s, all in use "
                   "or reserved", name);
        return NULL;
    found: ;
    } else if (pci_bus_devfn_reserved(bus, devfn)) {
        error_setg(errp, "PCI: slot %d function %d not available for %s,"
                   " reserved",
                   PCI_SLOT(devfn), PCI_FUNC(devfn), name);
        return NULL;
    } else if (!pci_bus_devfn_available(bus, devfn)) {
        error_setg(errp, "PCI: slot %d function %d not available for %s,"
                   " in use by %s,id=%s",
                   PCI_SLOT(devfn), PCI_FUNC(devfn), name,
                   bus->devices[devfn]->name, bus->devices[devfn]->qdev.id);
        return NULL;
    } /*
       * Populating function 0 triggers a scan from the guest that
       * exposes other non-zero functions. Hence we need to ensure that
       * function 0 wasn't added yet.
       */
    else if (dev->hotplugged &&
             !pci_is_vf(pci_dev) &&
             pci_get_function_0(pci_dev)) {
        error_setg(errp, "PCI: slot %d function 0 already occupied by %s,"
                   " new func %s cannot be exposed to guest.",
                   PCI_SLOT(pci_get_function_0(pci_dev)->devfn),
                   pci_get_function_0(pci_dev)->name,
                   name);
        return NULL;
    }

    pci_dev->devfn = devfn;
    pci_dev->requester_id_cache = pci_req_id_cache_get(pci_dev);
    pstrcpy(pci_dev->name, sizeof(pci_dev->name), name);

    /* Per-device DMA address space, gated by the bus-master enable bit. */
    memory_region_init(&pci_dev->bus_master_container_region, OBJECT(pci_dev),
                       "bus master container", UINT64_MAX);
    address_space_init(&pci_dev->bus_master_as,
                       &pci_dev->bus_master_container_region, pci_dev->name);
    if (phase_check(PHASE_MACHINE_READY)) {
        pci_init_bus_master(pci_dev);
    }
    pci_dev->irq_state = 0;
    pci_config_alloc(pci_dev);

    pci_config_set_vendor_id(pci_dev->config, pc->vendor_id);
    pci_config_set_device_id(pci_dev->config, pc->device_id);
    pci_config_set_revision(pci_dev->config, pc->revision);
    pci_config_set_class(pci_dev->config, pc->class_id);

    if (!is_bridge) {
        if (pc->subsystem_vendor_id || pc->subsystem_id) {
            pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID,
                         pc->subsystem_vendor_id);
            pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID,
                         pc->subsystem_id);
        } else {
            pci_set_default_subsystem_id(pci_dev);
        }
    } else {
        /* subsystem_vendor_id/subsystem_id are only for header type 0 */
        assert(!pc->subsystem_vendor_id);
        assert(!pc->subsystem_id);
    }
    pci_init_cmask(pci_dev);
    pci_init_wmask(pci_dev);
    pci_init_w1cmask(pci_dev);
    if (is_bridge) {
        pci_init_mask_bridge(pci_dev);
    }
    pci_init_multifunction(bus, pci_dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        do_pci_unregister_device(pci_dev);
        return NULL;
    }

    if (!config_read)
        config_read = pci_default_read_config;
    if (!config_write)
        config_write = pci_default_write_config;
    pci_dev->config_read = config_read;
    pci_dev->config_write = config_write;
    bus->devices[devfn] = pci_dev;
    pci_dev->version_id = 2; /* Current pci device vmstate version */
    return pci_dev;
}
/* Unmap every currently-mapped BAR region and the VGA ranges of @pci_dev. */
static void pci_unregister_io_regions(PCIDevice *pci_dev)
{
    PCIIORegion *r;
    int i;

    for(i = 0; i < PCI_NUM_REGIONS; i++) {
        r = &pci_dev->io_regions[i];
        /* Skip unregistered or currently unmapped BARs. */
        if (!r->size || r->addr == PCI_BAR_UNMAPPED)
            continue;
        memory_region_del_subregion(r->address_space, r->memory);
    }

    pci_unregister_vga(pci_dev);
}
/*
 * qdev unrealize handler: unmap regions, drop the option ROM, run the
 * device model's exit hook and release all common PCI state.
 */
static void pci_qdev_unrealize(DeviceState *dev)
{
    PCIDevice *pci_dev = PCI_DEVICE(dev);
    PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);

    pci_unregister_io_regions(pci_dev);
    pci_del_option_rom(pci_dev);

    if (pc->exit) {
        pc->exit(pci_dev);
    }

    /* Make sure no INTx line is left asserted before detaching. */
    pci_device_deassert_intx(pci_dev);
    do_pci_unregister_device(pci_dev);

    pci_dev->msi_trigger = NULL;

    /*
     * clean up acpi-index so it could reused by another device
     */
    if (pci_dev->acpi_index) {
        GSequence *used_indexes = pci_acpi_index_list();

        g_sequence_remove(g_sequence_lookup(used_indexes,
                          GINT_TO_POINTER(pci_dev->acpi_index),
                          g_cmp_uint32, NULL));
    }
}
/*
 * Register @memory as BAR @region_num of @pci_dev with the given @type
 * flags (I/O vs memory, 64-bit, prefetchable). The region size must be a
 * power of two; the BAR starts out unmapped until the guest programs it.
 */
void pci_register_bar(PCIDevice *pci_dev, int region_num,
                      uint8_t type, MemoryRegion *memory)
{
    PCIIORegion *r;
    uint32_t addr; /* offset in pci config space */
    uint64_t wmask;
    pcibus_t size = memory_region_size(memory);
    uint8_t hdr_type;

    assert(!pci_is_vf(pci_dev)); /* VFs must use pcie_sriov_vf_register_bar */
    assert(region_num >= 0);
    assert(region_num < PCI_NUM_REGIONS);
    assert(is_power_of_2(size));

    /* A PCI bridge device (with Type 1 header) may only have at most 2 BARs */
    hdr_type =
        pci_dev->config[PCI_HEADER_TYPE] & ~PCI_HEADER_TYPE_MULTI_FUNCTION;
    assert(hdr_type != PCI_HEADER_TYPE_BRIDGE || region_num < 2);

    r = &pci_dev->io_regions[region_num];
    r->addr = PCI_BAR_UNMAPPED;
    r->size = size;
    r->type = type;
    r->memory = memory;
    r->address_space = type & PCI_BASE_ADDRESS_SPACE_IO
                        ? pci_get_bus(pci_dev)->address_space_io
                        : pci_get_bus(pci_dev)->address_space_mem;

    /* Guest may only write the address bits above the size alignment. */
    wmask = ~(size - 1);
    if (region_num == PCI_ROM_SLOT) {
        /* ROM enable bit is writable */
        wmask |= PCI_ROM_ADDRESS_ENABLE;
    }

    addr = pci_bar(pci_dev, region_num);
    pci_set_long(pci_dev->config + addr, type);

    /* 64-bit memory BARs consume two config dwords. */
    if (!(r->type & PCI_BASE_ADDRESS_SPACE_IO) &&
        r->type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
        pci_set_quad(pci_dev->wmask + addr, wmask);
        pci_set_quad(pci_dev->cmask + addr, ~0ULL);
    } else {
        pci_set_long(pci_dev->wmask + addr, wmask & 0xffffffff);
        pci_set_long(pci_dev->cmask + addr, 0xffffffff);
    }
}
/*
 * Enable/disable the legacy VGA ranges of @pci_dev according to the
 * current COMMAND register: memory range follows the MEM enable bit,
 * both I/O ranges follow the I/O enable bit.
 */
static void pci_update_vga(PCIDevice *pci_dev)
{
    uint16_t cmd;

    if (!pci_dev->has_vga) {
        return;
    }

    cmd = pci_get_word(pci_dev->config + PCI_COMMAND);

    memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_MEM],
                              cmd & PCI_COMMAND_MEMORY);
    memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO],
                              cmd & PCI_COMMAND_IO);
    memory_region_set_enabled(pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI],
                              cmd & PCI_COMMAND_IO);
}
/*
 * Map the legacy VGA ranges (framebuffer memory plus the low/high I/O
 * port ranges) of @pci_dev into the bus address spaces. The regions must
 * have the fixed legacy sizes; they are added with priority 1 so they
 * overlay whatever is there by default.
 */
void pci_register_vga(PCIDevice *pci_dev, MemoryRegion *mem,
                      MemoryRegion *io_lo, MemoryRegion *io_hi)
{
    PCIBus *bus = pci_get_bus(pci_dev);

    assert(!pci_dev->has_vga);

    assert(memory_region_size(mem) == QEMU_PCI_VGA_MEM_SIZE);
    pci_dev->vga_regions[QEMU_PCI_VGA_MEM] = mem;
    memory_region_add_subregion_overlap(bus->address_space_mem,
                                        QEMU_PCI_VGA_MEM_BASE, mem, 1);

    assert(memory_region_size(io_lo) == QEMU_PCI_VGA_IO_LO_SIZE);
    pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO] = io_lo;
    memory_region_add_subregion_overlap(bus->address_space_io,
                                        QEMU_PCI_VGA_IO_LO_BASE, io_lo, 1);

    assert(memory_region_size(io_hi) == QEMU_PCI_VGA_IO_HI_SIZE);
    pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI] = io_hi;
    memory_region_add_subregion_overlap(bus->address_space_io,
                                        QEMU_PCI_VGA_IO_HI_BASE, io_hi, 1);
    pci_dev->has_vga = true;

    /* Apply the current COMMAND register enables to the new regions. */
    pci_update_vga(pci_dev);
}
/* Remove the legacy VGA ranges registered by pci_register_vga(), if any. */
void pci_unregister_vga(PCIDevice *pci_dev)
{
    PCIBus *bus = pci_get_bus(pci_dev);

    if (!pci_dev->has_vga) {
        return;
    }

    memory_region_del_subregion(bus->address_space_mem,
                                pci_dev->vga_regions[QEMU_PCI_VGA_MEM]);
    memory_region_del_subregion(bus->address_space_io,
                                pci_dev->vga_regions[QEMU_PCI_VGA_IO_LO]);
    memory_region_del_subregion(bus->address_space_io,
                                pci_dev->vga_regions[QEMU_PCI_VGA_IO_HI]);
    pci_dev->has_vga = false;
}
  1203. pcibus_t pci_get_bar_addr(PCIDevice *pci_dev, int region_num)
  1204. {
  1205. return pci_dev->io_regions[region_num].addr;
  1206. }
/*
 * Read the raw guest-programmed address for BAR @reg of @d out of config
 * space. For SR-IOV VFs the address comes from the PF's SR-IOV capability
 * BAR, offset by the VF's index times the per-VF @size.
 */
static pcibus_t pci_config_get_bar_addr(PCIDevice *d, int reg,
                                        uint8_t type, pcibus_t size)
{
    pcibus_t new_addr;
    if (!pci_is_vf(d)) {
        int bar = pci_bar(d, reg);
        if (type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
            new_addr = pci_get_quad(d->config + bar);
        } else {
            new_addr = pci_get_long(d->config + bar);
        }
    } else {
        PCIDevice *pf = d->exp.sriov_vf.pf;
        uint16_t sriov_cap = pf->exp.sriov_cap;
        int bar = sriov_cap + PCI_SRIOV_BAR + reg * 4;
        uint16_t vf_offset =
            pci_get_word(pf->config + sriov_cap + PCI_SRIOV_VF_OFFSET);
        uint16_t vf_stride =
            pci_get_word(pf->config + sriov_cap + PCI_SRIOV_VF_STRIDE);
        /* VF index derived from its devfn relative to the first VF. */
        uint32_t vf_num = (d->devfn - (pf->devfn + vf_offset)) / vf_stride;

        if (type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
            new_addr = pci_get_quad(pf->config + bar);
        } else {
            new_addr = pci_get_long(pf->config + bar);
        }
        new_addr += vf_num * size;
    }
    /* The ROM slot has a specific enable bit, keep it intact */
    if (reg != PCI_ROM_SLOT) {
        new_addr &= ~(size - 1);
    }
    return new_addr;
}
/*
 * Compute the effective mapping address for BAR @reg of @d, or
 * PCI_BAR_UNMAPPED when the BAR must not be mapped: decoding disabled in
 * COMMAND, ROM enable bit clear, wrapping/overflowing ranges, or an
 * all-zero address on machines that disallow it.
 */
pcibus_t pci_bar_address(PCIDevice *d,
                         int reg, uint8_t type, pcibus_t size)
{
    pcibus_t new_addr, last_addr;
    uint16_t cmd = pci_get_word(d->config + PCI_COMMAND);
    MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
    bool allow_0_address = mc->pci_allow_0_address;

    if (type & PCI_BASE_ADDRESS_SPACE_IO) {
        if (!(cmd & PCI_COMMAND_IO)) {
            return PCI_BAR_UNMAPPED;
        }
        new_addr = pci_config_get_bar_addr(d, reg, type, size);
        last_addr = new_addr + size - 1;
        /* Check if 32 bit BAR wraps around explicitly.
         * TODO: make priorities correct and remove this work around.
         */
        if (last_addr <= new_addr || last_addr >= UINT32_MAX ||
            (!allow_0_address && new_addr == 0)) {
            return PCI_BAR_UNMAPPED;
        }
        return new_addr;
    }

    if (!(cmd & PCI_COMMAND_MEMORY)) {
        return PCI_BAR_UNMAPPED;
    }
    new_addr = pci_config_get_bar_addr(d, reg, type, size);
    /* the ROM slot has a specific enable bit */
    if (reg == PCI_ROM_SLOT && !(new_addr & PCI_ROM_ADDRESS_ENABLE)) {
        return PCI_BAR_UNMAPPED;
    }
    new_addr &= ~(size - 1);
    last_addr = new_addr + size - 1;
    /* NOTE: we do not support wrapping */
    /* XXX: as we cannot support really dynamic
       mappings, we handle specific values as invalid
       mappings. */
    if (last_addr <= new_addr || last_addr == PCI_BAR_UNMAPPED ||
        (!allow_0_address && new_addr == 0)) {
        return PCI_BAR_UNMAPPED;
    }

    /* Now pcibus_t is 64bit.
     * Check if 32 bit BAR wraps around explicitly.
     * Without this, PC ide doesn't work well.
     * TODO: remove this work around.
     */
    if (!(type & PCI_BASE_ADDRESS_MEM_TYPE_64) && last_addr >= UINT32_MAX) {
        return PCI_BAR_UNMAPPED;
    }

    /*
     * OS is allowed to set BAR beyond its addressable
     * bits. For example, 32 bit OS can set 64bit bar
     * to >4G. Check it. TODO: we might need to support
     * it in the future for e.g. PAE.
     */
    if (last_addr >= HWADDR_MAX) {
        return PCI_BAR_UNMAPPED;
    }

    return new_addr;
}
/*
 * Re-evaluate every BAR of @d against current config space and remap the
 * backing memory regions accordingly. Called whenever COMMAND or a BAR
 * register changes (see pci_default_write_config()).
 */
static void pci_update_mappings(PCIDevice *d)
{
    PCIIORegion *r;
    int i;
    pcibus_t new_addr;

    for(i = 0; i < PCI_NUM_REGIONS; i++) {
        r = &d->io_regions[i];

        /* this region isn't registered */
        if (!r->size)
            continue;

        new_addr = pci_bar_address(d, i, r->type, r->size);
        /* A powered-off device decodes nothing. */
        if (!d->has_power) {
            new_addr = PCI_BAR_UNMAPPED;
        }

        /* This bar isn't changed */
        if (new_addr == r->addr)
            continue;

        /* now do the real mapping */
        if (r->addr != PCI_BAR_UNMAPPED) {
            trace_pci_update_mappings_del(d->name, pci_dev_bus_num(d),
                                          PCI_SLOT(d->devfn),
                                          PCI_FUNC(d->devfn),
                                          i, r->addr, r->size);
            memory_region_del_subregion(r->address_space, r->memory);
        }
        r->addr = new_addr;
        if (r->addr != PCI_BAR_UNMAPPED) {
            trace_pci_update_mappings_add(d->name, pci_dev_bus_num(d),
                                          PCI_SLOT(d->devfn),
                                          PCI_FUNC(d->devfn),
                                          i, r->addr, r->size);
            memory_region_add_subregion_overlap(r->address_space,
                                                r->addr, r->memory, 1);
        }
    }

    pci_update_vga(d);
}
/* Non-zero when INTx is disabled via the COMMAND register. */
static inline int pci_irq_disabled(PCIDevice *d)
{
    return pci_get_word(d->config + PCI_COMMAND) & PCI_COMMAND_INTX_DISABLE;
}
/* Called after interrupt disabled field update in config space,
 * assert/deassert interrupts if necessary.
 * Gets original interrupt disable bit value (before update). */
static void pci_update_irq_disabled(PCIDevice *d, int was_irq_disabled)
{
    int i, disabled = pci_irq_disabled(d);

    /* Nothing to do unless the INTX_DISABLE bit actually toggled. */
    if (disabled == was_irq_disabled)
        return;

    for (i = 0; i < PCI_NUM_PINS; ++i) {
        int state = pci_irq_state(d, i);
        /* Newly disabled: drop any asserted pin; re-enabled: re-assert. */
        pci_change_irq_level(d, i, disabled ? -state : state);
    }
}
/*
 * Default config space read handler: return @len bytes at @address as a
 * little-endian value. For PCIe downstream ports the link status word is
 * synced with the attached device before it is read.
 */
uint32_t pci_default_read_config(PCIDevice *d,
                                 uint32_t address, int len)
{
    uint32_t val = 0;

    assert(address + len <= pci_config_size(d));

    if (pci_is_express_downstream_port(d) &&
        ranges_overlap(address, len, d->exp.exp_cap + PCI_EXP_LNKSTA, 2)) {
        pcie_sync_bridge_lnk(d);
    }
    memcpy(&val, d->config + address, len);
    return le32_to_cpu(val);
}
/*
 * Default config space write handler: apply @val_in byte by byte through
 * wmask (read/write bits) and w1cmask (write-1-to-clear bits), then
 * propagate side effects of BAR/COMMAND changes and forward the write to
 * the MSI/MSI-X/SR-IOV capability handlers.
 */
void pci_default_write_config(PCIDevice *d, uint32_t addr, uint32_t val_in, int l)
{
    int i, was_irq_disabled = pci_irq_disabled(d);
    uint32_t val = val_in;

    assert(addr + l <= pci_config_size(d));

    for (i = 0; i < l; val >>= 8, ++i) {
        uint8_t wmask = d->wmask[addr + i];
        uint8_t w1cmask = d->w1cmask[addr + i];
        /* A bit cannot be both plain-writable and write-1-to-clear. */
        assert(!(wmask & w1cmask));
        d->config[addr + i] = (d->config[addr + i] & ~wmask) | (val & wmask);
        d->config[addr + i] &= ~(val & w1cmask); /* W1C: Write 1 to Clear */
    }

    /* Remap BARs if any BAR register or the COMMAND byte was touched. */
    if (ranges_overlap(addr, l, PCI_BASE_ADDRESS_0, 24) ||
        ranges_overlap(addr, l, PCI_ROM_ADDRESS, 4) ||
        ranges_overlap(addr, l, PCI_ROM_ADDRESS1, 4) ||
        range_covers_byte(addr, l, PCI_COMMAND))
        pci_update_mappings(d);

    if (ranges_overlap(addr, l, PCI_COMMAND, 2)) {
        pci_update_irq_disabled(d, was_irq_disabled);
        memory_region_set_enabled(&d->bus_master_enable_region,
                                  (pci_get_word(d->config + PCI_COMMAND)
                                   & PCI_COMMAND_MASTER) && d->has_power);
    }

    msi_write_config(d, addr, val_in, l);
    msix_write_config(d, addr, val_in, l);
    pcie_sriov_config_write(d, addr, val_in, l);
}
/***********************************************************/
/* generic PCI irq support */

/* 0 <= irq_num <= 3. level must be 0 or 1 */
static void pci_irq_handler(void *opaque, int irq_num, int level)
{
    PCIDevice *pci_dev = opaque;
    int change;

    assert(0 <= irq_num && irq_num < PCI_NUM_PINS);
    assert(level == 0 || level == 1);

    /* change is +1 on assert, -1 on deassert, 0 when nothing moved. */
    change = level - pci_irq_state(pci_dev, irq_num);
    if (!change)
        return;

    pci_set_irq_state(pci_dev, irq_num, level);
    pci_update_irq_status(pci_dev);
    /* Record the state even while masked; only signaling is suppressed. */
    if (pci_irq_disabled(pci_dev))
        return;
    pci_change_irq_level(pci_dev, irq_num, change);
}
/*
 * Allocate a qemu_irq bound to @pci_dev's INTx pin (as declared in its
 * interrupt pin config register).
 */
qemu_irq pci_allocate_irq(PCIDevice *pci_dev)
{
    int intx = pci_intx(pci_dev);
    assert(0 <= intx && intx < PCI_NUM_PINS);

    return qemu_allocate_irq(pci_irq_handler, pci_dev, intx);
}
  1416. void pci_set_irq(PCIDevice *pci_dev, int level)
  1417. {
  1418. int intx = pci_intx(pci_dev);
  1419. pci_irq_handler(pci_dev, intx, level);
  1420. }
/* Special hooks used by device assignment */
/*
 * Install the INTx-to-IRQ routing callback on a root bus; used by
 * pci_device_route_intx_to_irq() once routing reaches the root.
 */
void pci_bus_set_route_irq_fn(PCIBus *bus, pci_route_irq_fn route_intx_to_irq)
{
    assert(pci_bus_is_root(bus));
    bus->route_intx_to_irq = route_intx_to_irq;
}
/*
 * Resolve which host-visible IRQ @pin of @dev is routed to: walk up
 * through all bridges (each remaps the pin via its map_irq), then ask the
 * root bus's routing callback. Returns PCI_INTX_DISABLED if the platform
 * has not installed one.
 */
PCIINTxRoute pci_device_route_intx_to_irq(PCIDevice *dev, int pin)
{
    PCIBus *bus;

    do {
        int dev_irq = pin;
        bus = pci_get_bus(dev);
        pin = bus->map_irq(dev, pin);
        trace_pci_route_irq(dev_irq, DEVICE(dev)->canonical_path, pin,
                            pci_bus_is_root(bus) ? "root-complex"
                            : DEVICE(bus->parent_dev)->canonical_path);
        dev = bus->parent_dev;
    } while (dev);

    if (!bus->route_intx_to_irq) {
        error_report("PCI: Bug - unimplemented PCI INTx routing (%s)",
                     object_get_typename(OBJECT(bus->qbus.parent)));
        return (PCIINTxRoute) { PCI_INTX_DISABLED, -1 };
    }

    return bus->route_intx_to_irq(bus->irq_opaque, pin);
}
  1446. bool pci_intx_route_changed(PCIINTxRoute *old, PCIINTxRoute *new)
  1447. {
  1448. return old->mode != new->mode || old->irq != new->irq;
  1449. }
  1450. void pci_bus_fire_intx_routing_notifier(PCIBus *bus)
  1451. {
  1452. PCIDevice *dev;
  1453. PCIBus *sec;
  1454. int i;
  1455. for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
  1456. dev = bus->devices[i];
  1457. if (dev && dev->intx_routing_notifier) {
  1458. dev->intx_routing_notifier(dev);
  1459. }
  1460. }
  1461. QLIST_FOREACH(sec, &bus->child, sibling) {
  1462. pci_bus_fire_intx_routing_notifier(sec);
  1463. }
  1464. }
/*
 * Register a callback to be invoked when the device's INTx routing may
 * have changed (fired by pci_bus_fire_intx_routing_notifier()).
 */
void pci_device_set_intx_routing_notifier(PCIDevice *dev,
                                          PCIINTxRoutingNotifier notifier)
{
    dev->intx_routing_notifier = notifier;
}
  1470. /*
  1471. * PCI-to-PCI bridge specification
  1472. * 9.1: Interrupt routing. Table 9-1
  1473. *
  1474. * the PCI Express Base Specification, Revision 2.1
  1475. * 2.2.8.1: INTx interrupt signaling - Rules
  1476. * the Implementation Note
  1477. * Table 2-20
  1478. */
  1479. /*
  1480. * 0 <= pin <= 3 0 = INTA, 1 = INTB, 2 = INTC, 3 = INTD
  1481. * 0-origin unlike PCI interrupt pin register.
  1482. */
/* Standard bridge swizzle: rotate the pin by the device's slot number. */
int pci_swizzle_map_irq_fn(PCIDevice *pci_dev, int pin)
{
    return pci_swizzle(PCI_SLOT(pci_dev->devfn), pin);
}
  1487. /***********************************************************/
  1488. /* monitor info on PCI */
/*
 * Class-code lookup table: { class code, human-readable description,
 * firmware (OpenFirmware) name, fw_ign_bits }.  fw_ign_bits masks
 * class-code bits ignored when matching for firmware naming (see
 * pci_dev_fw_name()).  Terminated by a NULL-desc sentinel entry.
 */
static const pci_class_desc pci_class_descriptions[] =
{
    { 0x0001, "VGA controller", "display"},
    { 0x0100, "SCSI controller", "scsi"},
    { 0x0101, "IDE controller", "ide"},
    { 0x0102, "Floppy controller", "fdc"},
    { 0x0103, "IPI controller", "ipi"},
    { 0x0104, "RAID controller", "raid"},
    { 0x0106, "SATA controller"},
    { 0x0107, "SAS controller"},
    { 0x0180, "Storage controller"},
    { 0x0200, "Ethernet controller", "ethernet"},
    { 0x0201, "Token Ring controller", "token-ring"},
    { 0x0202, "FDDI controller", "fddi"},
    { 0x0203, "ATM controller", "atm"},
    { 0x0280, "Network controller"},
    { 0x0300, "VGA controller", "display", 0x00ff},
    { 0x0301, "XGA controller"},
    { 0x0302, "3D controller"},
    { 0x0380, "Display controller"},
    { 0x0400, "Video controller", "video"},
    { 0x0401, "Audio controller", "sound"},
    { 0x0402, "Phone"},
    { 0x0403, "Audio controller", "sound"},
    { 0x0480, "Multimedia controller"},
    { 0x0500, "RAM controller", "memory"},
    { 0x0501, "Flash controller", "flash"},
    { 0x0580, "Memory controller"},
    { 0x0600, "Host bridge", "host"},
    { 0x0601, "ISA bridge", "isa"},
    { 0x0602, "EISA bridge", "eisa"},
    { 0x0603, "MC bridge", "mca"},
    { 0x0604, "PCI bridge", "pci-bridge"},
    { 0x0605, "PCMCIA bridge", "pcmcia"},
    { 0x0606, "NUBUS bridge", "nubus"},
    { 0x0607, "CARDBUS bridge", "cardbus"},
    { 0x0608, "RACEWAY bridge"},
    { 0x0680, "Bridge"},
    { 0x0700, "Serial port", "serial"},
    { 0x0701, "Parallel port", "parallel"},
    { 0x0800, "Interrupt controller", "interrupt-controller"},
    { 0x0801, "DMA controller", "dma-controller"},
    { 0x0802, "Timer", "timer"},
    { 0x0803, "RTC", "rtc"},
    { 0x0900, "Keyboard", "keyboard"},
    { 0x0901, "Pen", "pen"},
    { 0x0902, "Mouse", "mouse"},
    { 0x0A00, "Dock station", "dock", 0x00ff},
    { 0x0B00, "i386 cpu", "cpu", 0x00ff},
    { 0x0c00, "Firewire controller", "firewire"},
    { 0x0c01, "Access bus controller", "access-bus"},
    { 0x0c02, "SSA controller", "ssa"},
    { 0x0c03, "USB controller", "usb"},
    { 0x0c04, "Fibre channel controller", "fibre-channel"},
    { 0x0c05, "SMBus"},
    { 0, NULL}
};
  1546. void pci_for_each_device_under_bus_reverse(PCIBus *bus,
  1547. pci_bus_dev_fn fn,
  1548. void *opaque)
  1549. {
  1550. PCIDevice *d;
  1551. int devfn;
  1552. for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) {
  1553. d = bus->devices[ARRAY_SIZE(bus->devices) - 1 - devfn];
  1554. if (d) {
  1555. fn(bus, d, opaque);
  1556. }
  1557. }
  1558. }
  1559. void pci_for_each_device_reverse(PCIBus *bus, int bus_num,
  1560. pci_bus_dev_fn fn, void *opaque)
  1561. {
  1562. bus = pci_find_bus_nr(bus, bus_num);
  1563. if (bus) {
  1564. pci_for_each_device_under_bus_reverse(bus, fn, opaque);
  1565. }
  1566. }
  1567. void pci_for_each_device_under_bus(PCIBus *bus,
  1568. pci_bus_dev_fn fn, void *opaque)
  1569. {
  1570. PCIDevice *d;
  1571. int devfn;
  1572. for(devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) {
  1573. d = bus->devices[devfn];
  1574. if (d) {
  1575. fn(bus, d, opaque);
  1576. }
  1577. }
  1578. }
  1579. void pci_for_each_device(PCIBus *bus, int bus_num,
  1580. pci_bus_dev_fn fn, void *opaque)
  1581. {
  1582. bus = pci_find_bus_nr(bus, bus_num);
  1583. if (bus) {
  1584. pci_for_each_device_under_bus(bus, fn, opaque);
  1585. }
  1586. }
  1587. const pci_class_desc *get_class_desc(int class)
  1588. {
  1589. const pci_class_desc *desc;
  1590. desc = pci_class_descriptions;
  1591. while (desc->desc && class != desc->class) {
  1592. desc++;
  1593. }
  1594. return desc;
  1595. }
/*
 * Create NIC devices on @bus for the configured -nic/-net options;
 * the model alias "virtio" is expanded to "virtio-net-pci".
 */
void pci_init_nic_devices(PCIBus *bus, const char *default_model)
{
    qemu_create_nic_bus_devices(&bus->qbus, TYPE_PCI_DEVICE, default_model,
                                "virtio", "virtio-net-pci");
}
/*
 * Create a NIC of @model at the explicit PCI address @devaddr
 * (domain:bus:slot form; function is always 0).  Returns false when no
 * matching NIC configuration exists; exits on an unparsable address,
 * a non-zero domain, or an unknown bus number.
 */
bool pci_init_nic_in_slot(PCIBus *rootbus, const char *model,
                          const char *alias, const char *devaddr)
{
    NICInfo *nd = qemu_find_nic_info(model, true, alias);
    int dom, busnr, devfn;
    PCIDevice *pci_dev;
    unsigned slot;
    PCIBus *bus;

    if (!nd) {
        return false;
    }

    if (!devaddr || pci_parse_devaddr(devaddr, &dom, &busnr, &slot, NULL) < 0) {
        error_report("Invalid PCI device address %s for device %s",
                     devaddr, model);
        exit(1);
    }

    if (dom != 0) {
        error_report("No support for non-zero PCI domains");
        exit(1);
    }

    devfn = PCI_DEVFN(slot, 0);

    bus = pci_find_bus_nr(rootbus, busnr);
    if (!bus) {
        error_report("Invalid PCI device address %s for device %s",
                     devaddr, model);
        exit(1);
    }

    pci_dev = pci_new(devfn, model);
    qdev_set_nic_properties(&pci_dev->qdev, nd);
    pci_realize_and_unref(pci_dev, bus, &error_fatal);
    return true;
}
/*
 * Instantiate the PCI VGA device selected by the global
 * vga_interface_type on @bus.  Returns NULL for VGA_NONE and for
 * non-PCI display types.
 */
PCIDevice *pci_vga_init(PCIBus *bus)
{
    vga_interface_created = true;
    switch (vga_interface_type) {
    case VGA_CIRRUS:
        return pci_create_simple(bus, -1, "cirrus-vga");
    case VGA_QXL:
        return pci_create_simple(bus, -1, "qxl-vga");
    case VGA_STD:
        return pci_create_simple(bus, -1, "VGA");
    case VGA_VMWARE:
        return pci_create_simple(bus, -1, "vmware-svga");
    case VGA_VIRTIO:
        return pci_create_simple(bus, -1, "virtio-vga");
    case VGA_NONE:
    default: /* Other non-PCI types. Checking for unsupported types is already
                done in vl.c. */
        return NULL;
    }
}
  1653. /* Whether a given bus number is in range of the secondary
  1654. * bus of the given bridge device. */
  1655. static bool pci_secondary_bus_in_range(PCIDevice *dev, int bus_num)
  1656. {
  1657. return !(pci_get_word(dev->config + PCI_BRIDGE_CONTROL) &
  1658. PCI_BRIDGE_CTL_BUS_RESET) /* Don't walk the bus if it's reset. */ &&
  1659. dev->config[PCI_SECONDARY_BUS] <= bus_num &&
  1660. bus_num <= dev->config[PCI_SUBORDINATE_BUS];
  1661. }
  1662. /* Whether a given bus number is in a range of a root bus */
  1663. static bool pci_root_bus_in_range(PCIBus *bus, int bus_num)
  1664. {
  1665. int i;
  1666. for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
  1667. PCIDevice *dev = bus->devices[i];
  1668. if (dev && IS_PCI_BRIDGE(dev)) {
  1669. if (pci_secondary_bus_in_range(dev, bus_num)) {
  1670. return true;
  1671. }
  1672. }
  1673. }
  1674. return false;
  1675. }
/*
 * Resolve @bus_num to a PCIBus starting from @bus: check the bus
 * itself, then descend through child buses whose bridge windows (or,
 * for PXB root buses, whose device ranges) contain @bus_num.
 * Returns NULL when no such bus exists.
 */
PCIBus *pci_find_bus_nr(PCIBus *bus, int bus_num)
{
    PCIBus *sec;

    if (!bus) {
        return NULL;
    }

    if (pci_bus_num(bus) == bus_num) {
        return bus;
    }

    /* Consider all bus numbers in range for the host pci bridge. */
    if (!pci_bus_is_root(bus) &&
        !pci_secondary_bus_in_range(bus->parent_dev, bus_num)) {
        return NULL;
    }

    /* try child bus */
    for (; bus; bus = sec) {
        QLIST_FOREACH(sec, &bus->child, sibling) {
            if (pci_bus_num(sec) == bus_num) {
                return sec;
            }
            /* PXB buses assumed to be children of bus 0 */
            if (pci_bus_is_root(sec)) {
                if (pci_root_bus_in_range(sec, bus_num)) {
                    break; /* descend into this root bus */
                }
            } else {
                if (pci_secondary_bus_in_range(sec->parent_dev, bus_num)) {
                    break; /* descend into this bridge's subtree */
                }
            }
        }
        /* When no child matched, sec is NULL and the outer loop ends. */
    }

    return NULL;
}
  1710. void pci_for_each_bus_depth_first(PCIBus *bus, pci_bus_ret_fn begin,
  1711. pci_bus_fn end, void *parent_state)
  1712. {
  1713. PCIBus *sec;
  1714. void *state;
  1715. if (!bus) {
  1716. return;
  1717. }
  1718. if (begin) {
  1719. state = begin(bus, parent_state);
  1720. } else {
  1721. state = parent_state;
  1722. }
  1723. QLIST_FOREACH(sec, &bus->child, sibling) {
  1724. pci_for_each_bus_depth_first(sec, begin, end, state);
  1725. }
  1726. if (end) {
  1727. end(bus, state);
  1728. }
  1729. }
  1730. PCIDevice *pci_find_device(PCIBus *bus, int bus_num, uint8_t devfn)
  1731. {
  1732. bus = pci_find_bus_nr(bus, bus_num);
  1733. if (!bus)
  1734. return NULL;
  1735. return bus->devices[devfn];
  1736. }
#define ONBOARD_INDEX_MAX (16 * 1024 - 1)

/*
 * Realize handler shared by all PCI devices: validate properties
 * (acpi-index, romsize), register the device on its bus, run the
 * device-specific realize hook, enforce PCIe slot and failover
 * constraints, then load the option ROM and power the device on.
 * On any failure the device is unwound before returning with @errp set.
 */
static void pci_qdev_realize(DeviceState *qdev, Error **errp)
{
    PCIDevice *pci_dev = (PCIDevice *)qdev;
    PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);
    ObjectClass *klass = OBJECT_CLASS(pc);
    Error *local_err = NULL;
    bool is_default_rom;
    uint16_t class_id;

    /*
     * capped by systemd (see: udev-builtin-net_id.c)
     * as it's the only known user honor it to avoid users
     * misconfigure QEMU and then wonder why acpi-index doesn't work
     */
    if (pci_dev->acpi_index > ONBOARD_INDEX_MAX) {
        error_setg(errp, "acpi-index should be less or equal to %u",
                   ONBOARD_INDEX_MAX);
        return;
    }

    /*
     * make sure that acpi-index is unique across all present PCI devices
     */
    if (pci_dev->acpi_index) {
        GSequence *used_indexes = pci_acpi_index_list();

        if (g_sequence_lookup(used_indexes,
                              GINT_TO_POINTER(pci_dev->acpi_index),
                              g_cmp_uint32, NULL)) {
            error_setg(errp, "a PCI device with acpi-index = %" PRIu32
                       " already exist", pci_dev->acpi_index);
            return;
        }
        g_sequence_insert_sorted(used_indexes,
                                 GINT_TO_POINTER(pci_dev->acpi_index),
                                 g_cmp_uint32, NULL);
    }

    if (pci_dev->romsize != -1 && !is_power_of_2(pci_dev->romsize)) {
        error_setg(errp, "ROM size %u is not a power of two", pci_dev->romsize);
        return;
    }

    /* initialize cap_present for pci_is_express() and pci_config_size(),
     * Note that hybrid PCIs are not set automatically and need to manage
     * QEMU_PCI_CAP_EXPRESS manually */
    if (object_class_dynamic_cast(klass, INTERFACE_PCIE_DEVICE) &&
        !object_class_dynamic_cast(klass, INTERFACE_CONVENTIONAL_PCI_DEVICE)) {
        pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
    }

    if (object_class_dynamic_cast(klass, INTERFACE_CXL_DEVICE)) {
        pci_dev->cap_present |= QEMU_PCIE_CAP_CXL;
    }

    pci_dev = do_pci_register_device(pci_dev,
                                     object_get_typename(OBJECT(qdev)),
                                     pci_dev->devfn, errp);
    if (pci_dev == NULL)
        return;

    if (pc->realize) {
        pc->realize(pci_dev, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            do_pci_unregister_device(pci_dev);
            return;
        }
    }

    /*
     * A PCIe Downstream Port that do not have ARI Forwarding enabled must
     * associate only Device 0 with the device attached to the bus
     * representing the Link from the Port (PCIe base spec rev 4.0 ver 0.3,
     * sec 7.3.1).
     * With ARI, PCI_SLOT() can return non-zero value as the traditional
     * 5-bit Device Number and 3-bit Function Number fields in its associated
     * Routing IDs, Requester IDs and Completer IDs are interpreted as a
     * single 8-bit Function Number. Hence, ignore ARI capable devices.
     */
    if (pci_is_express(pci_dev) &&
        !pcie_find_capability(pci_dev, PCI_EXT_CAP_ID_ARI) &&
        pcie_has_upstream_port(pci_dev) &&
        PCI_SLOT(pci_dev->devfn)) {
        warn_report("PCI: slot %d is not valid for %s,"
                    " parent device only allows plugging into slot 0.",
                    PCI_SLOT(pci_dev->devfn), pci_dev->name);
    }

    if (pci_dev->failover_pair_id) {
        /* Failover primary must be a PCIe Ethernet function alone in
         * its slot so that it can be unplugged during migration. */
        if (!pci_bus_is_express(pci_get_bus(pci_dev))) {
            error_setg(errp, "failover primary device must be on "
                             "PCIExpress bus");
            pci_qdev_unrealize(DEVICE(pci_dev));
            return;
        }
        class_id = pci_get_word(pci_dev->config + PCI_CLASS_DEVICE);
        if (class_id != PCI_CLASS_NETWORK_ETHERNET) {
            error_setg(errp, "failover primary device is not an "
                             "Ethernet device");
            pci_qdev_unrealize(DEVICE(pci_dev));
            return;
        }
        if ((pci_dev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION)
            || (PCI_FUNC(pci_dev->devfn) != 0)) {
            error_setg(errp, "failover: primary device must be in its own "
                              "PCI slot");
            pci_qdev_unrealize(DEVICE(pci_dev));
            return;
        }
        qdev->allow_unplug_during_migration = true;
    }

    /* rom loading */
    is_default_rom = false;
    if (pci_dev->romfile == NULL && pc->romfile != NULL) {
        /* fall back to the class-provided default ROM image */
        pci_dev->romfile = g_strdup(pc->romfile);
        is_default_rom = true;
    }

    pci_add_option_rom(pci_dev, is_default_rom, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        pci_qdev_unrealize(DEVICE(pci_dev));
        return;
    }

    pci_set_power(pci_dev, true);

    pci_dev->msi_trigger = pci_msi_trigger;
}
  1855. static PCIDevice *pci_new_internal(int devfn, bool multifunction,
  1856. const char *name)
  1857. {
  1858. DeviceState *dev;
  1859. dev = qdev_new(name);
  1860. qdev_prop_set_int32(dev, "addr", devfn);
  1861. qdev_prop_set_bit(dev, "multifunction", multifunction);
  1862. return PCI_DEVICE(dev);
  1863. }
/* Create a multifunction-capable PCI device; not yet realized. */
PCIDevice *pci_new_multifunction(int devfn, const char *name)
{
    return pci_new_internal(devfn, true, name);
}
/* Create a single-function PCI device; not yet realized. */
PCIDevice *pci_new(int devfn, const char *name)
{
    return pci_new_internal(devfn, false, name);
}
/*
 * Realize @dev on @bus and drop the creation reference; returns the
 * result of qdev_realize_and_unref().
 */
bool pci_realize_and_unref(PCIDevice *dev, PCIBus *bus, Error **errp)
{
    return qdev_realize_and_unref(&dev->qdev, &bus->qbus, errp);
}
  1876. PCIDevice *pci_create_simple_multifunction(PCIBus *bus, int devfn,
  1877. const char *name)
  1878. {
  1879. PCIDevice *dev = pci_new_multifunction(devfn, name);
  1880. pci_realize_and_unref(dev, bus, &error_fatal);
  1881. return dev;
  1882. }
  1883. PCIDevice *pci_create_simple(PCIBus *bus, int devfn, const char *name)
  1884. {
  1885. PCIDevice *dev = pci_new(devfn, name);
  1886. pci_realize_and_unref(dev, bus, &error_fatal);
  1887. return dev;
  1888. }
/*
 * Find @size contiguous unused bytes of config space after the
 * standard header.  Returns the offset of the free run, or 0 when
 * config space is exhausted.
 */
static uint8_t pci_find_space(PCIDevice *pdev, uint8_t size)
{
    int offset = PCI_CONFIG_HEADER_SIZE;
    int i;
    for (i = PCI_CONFIG_HEADER_SIZE; i < PCI_CONFIG_SPACE_SIZE; ++i) {
        if (pdev->used[i])
            offset = i + 1; /* byte in use: candidate run restarts after it */
        else if (i - offset + 1 == size)
            return offset; /* free run reached the requested length */
    }
    return 0;
}
/*
 * Walk the standard capability list looking for @cap_id.  Returns the
 * capability's config-space offset (0 if absent) and, via optional
 * @prev_p, the offset of the list pointer that links to it — needed
 * for unlinking in pci_del_capability().
 */
static uint8_t pci_find_capability_list(PCIDevice *pdev, uint8_t cap_id,
                                        uint8_t *prev_p)
{
    uint8_t next, prev;

    if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST))
        return 0;

    for (prev = PCI_CAPABILITY_LIST; (next = pdev->config[prev]);
         prev = next + PCI_CAP_LIST_NEXT)
        if (pdev->config[next + PCI_CAP_LIST_ID] == cap_id)
            break;

    if (prev_p)
        *prev_p = prev;
    return next;
}
/*
 * Return the offset of the highest-addressed capability that starts at
 * or below @offset, or 0 when the byte at @offset is unused.  Used to
 * detect which existing capability a new one would overlap.
 */
static uint8_t pci_find_capability_at_offset(PCIDevice *pdev, uint8_t offset)
{
    uint8_t next, prev, found = 0;

    if (!(pdev->used[offset])) {
        return 0;
    }

    /* a used byte implies the capability list must be active */
    assert(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST);

    for (prev = PCI_CAPABILITY_LIST; (next = pdev->config[prev]);
         prev = next + PCI_CAP_LIST_NEXT) {
        if (next <= offset && next > found) {
            found = next;
        }
    }

    return found;
}
/* Patch the PCI vendor and device ids in a PCI rom image if necessary.
   This is needed for an option rom which is used for more than one device. */
static void pci_patch_ids(PCIDevice *pdev, uint8_t *ptr, uint32_t size)
{
    uint16_t vendor_id;
    uint16_t device_id;
    uint16_t rom_vendor_id;
    uint16_t rom_device_id;
    uint16_t rom_magic;
    uint16_t pcir_offset;
    uint8_t checksum;

    /* Words in rom data are little endian (like in PCI configuration),
       so they can be read / written with pci_get_word / pci_set_word. */

    /* Only a valid rom will be patched. */
    rom_magic = pci_get_word(ptr);
    if (rom_magic != 0xaa55) {
        PCI_DPRINTF("Bad ROM magic %04x\n", rom_magic);
        return;
    }
    /* Offset 0x18 holds the pointer to the PCI Data Structure ("PCIR"). */
    pcir_offset = pci_get_word(ptr + 0x18);
    if (pcir_offset + 8 >= size || memcmp(ptr + pcir_offset, "PCIR", 4)) {
        PCI_DPRINTF("Bad PCIR offset 0x%x or signature\n", pcir_offset);
        return;
    }

    vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID);
    device_id = pci_get_word(pdev->config + PCI_DEVICE_ID);
    rom_vendor_id = pci_get_word(ptr + pcir_offset + 4);
    rom_device_id = pci_get_word(ptr + pcir_offset + 6);

    PCI_DPRINTF("%s: ROM id %04x%04x / PCI id %04x%04x\n", pdev->romfile,
                vendor_id, device_id, rom_vendor_id, rom_device_id);

    checksum = ptr[6];

    if (vendor_id != rom_vendor_id) {
        /* Patch vendor id and checksum (at offset 6 for etherboot roms). */
        /* Adjust the checksum so the whole image still sums to the same
         * value: add back the old id bytes, subtract the new ones. */
        checksum += (uint8_t)rom_vendor_id + (uint8_t)(rom_vendor_id >> 8);
        checksum -= (uint8_t)vendor_id + (uint8_t)(vendor_id >> 8);
        PCI_DPRINTF("ROM checksum %02x / %02x\n", ptr[6], checksum);
        ptr[6] = checksum;
        pci_set_word(ptr + pcir_offset + 4, vendor_id);
    }

    if (device_id != rom_device_id) {
        /* Patch device id and checksum (at offset 6 for etherboot roms). */
        checksum += (uint8_t)rom_device_id + (uint8_t)(rom_device_id >> 8);
        checksum -= (uint8_t)device_id + (uint8_t)(device_id >> 8);
        PCI_DPRINTF("ROM checksum %02x / %02x\n", ptr[6], checksum);
        ptr[6] = checksum;
        pci_set_word(ptr + pcir_offset + 6, device_id);
    }
}
/*
 * Add an option rom for the device: either via fw_cfg (legacy
 * rom_bar=0 path) or as a ROM BAR backed by a memory region.  Sets
 * @errp on failure; the caller unwinds the device.
 */
static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom,
                               Error **errp)
{
    int64_t size = 0;
    g_autofree char *path = NULL;
    char name[32];
    const VMStateDescription *vmsd;

    /*
     * In case of incoming migration ROM will come with migration stream, no
     * reason to load the file. Neither we want to fail if local ROM file
     * mismatches with specified romsize.
     */
    bool load_file = !runstate_check(RUN_STATE_INMIGRATE);

    if (!pdev->romfile || !strlen(pdev->romfile)) {
        return;
    }

    if (!pdev->rom_bar) {
        /*
         * Load rom via fw_cfg instead of creating a rom bar,
         * for 0.11 compatibility.
         */
        int class = pci_get_word(pdev->config + PCI_CLASS_DEVICE);

        /*
         * Hot-plugged devices can't use the option ROM
         * if the rom bar is disabled.
         */
        if (DEVICE(pdev)->hotplugged) {
            error_setg(errp, "Hot-plugged device without ROM bar"
                       " can't have an option ROM");
            return;
        }

        if (class == 0x0300) {
            rom_add_vga(pdev->romfile);
        } else {
            rom_add_option(pdev->romfile, -1);
        }
        return;
    }

    /* The file must also be sized when romsize is unset, to derive it. */
    if (load_file || pdev->romsize == -1) {
        path = qemu_find_file(QEMU_FILE_TYPE_BIOS, pdev->romfile);
        if (path == NULL) {
            path = g_strdup(pdev->romfile);
        }

        size = get_image_size(path);
        if (size < 0) {
            error_setg(errp, "failed to find romfile \"%s\"", pdev->romfile);
            return;
        } else if (size == 0) {
            error_setg(errp, "romfile \"%s\" is empty", pdev->romfile);
            return;
        } else if (size > 2 * GiB) {
            error_setg(errp,
                       "romfile \"%s\" too large (size cannot exceed 2 GiB)",
                       pdev->romfile);
            return;
        }
        if (pdev->romsize != -1) {
            if (size > pdev->romsize) {
                error_setg(errp, "romfile \"%s\" (%u bytes) "
                           "is too large for ROM size %u",
                           pdev->romfile, (uint32_t)size, pdev->romsize);
                return;
            }
        } else {
            /* BAR sizes must be powers of two */
            pdev->romsize = pow2ceil(size);
        }
    }

    vmsd = qdev_get_vmsd(DEVICE(pdev));
    snprintf(name, sizeof(name), "%s.rom",
             vmsd ? vmsd->name : object_get_typename(OBJECT(pdev)));

    pdev->has_rom = true;
    memory_region_init_rom(&pdev->rom, OBJECT(pdev), name, pdev->romsize,
                           &error_fatal);

    if (load_file) {
        void *ptr = memory_region_get_ram_ptr(&pdev->rom);

        if (load_image_size(path, ptr, size) < 0) {
            error_setg(errp, "failed to load romfile \"%s\"", pdev->romfile);
            return;
        }

        if (is_default_rom) {
            /* Only the default rom images will be patched (if needed). */
            pci_patch_ids(pdev, ptr, size);
        }
    }

    pci_register_bar(pdev, PCI_ROM_SLOT, 0, &pdev->rom);
}
  2065. static void pci_del_option_rom(PCIDevice *pdev)
  2066. {
  2067. if (!pdev->has_rom)
  2068. return;
  2069. vmstate_unregister_ram(&pdev->rom, &pdev->qdev);
  2070. pdev->has_rom = false;
  2071. }
/*
 * On success, pci_add_capability() returns a positive value that is
 * the offset of the pci capability.
 * On failure, it sets an error and returns a negative error
 * code.
 *
 * With @offset == 0 a free location is picked automatically; a
 * non-zero @offset is checked for overlap with existing capabilities.
 */
int pci_add_capability(PCIDevice *pdev, uint8_t cap_id,
                       uint8_t offset, uint8_t size,
                       Error **errp)
{
    uint8_t *config;
    int i, overlapping_cap;

    if (!offset) {
        offset = pci_find_space(pdev, size);
        /* out of PCI config space is programming error */
        assert(offset);
    } else {
        /* Verify that capabilities don't overlap.  Note: device assignment
         * depends on this check to verify that the device is not broken.
         * Should never trigger for emulated devices, but it's helpful
         * for debugging these. */
        for (i = offset; i < offset + size; i++) {
            overlapping_cap = pci_find_capability_at_offset(pdev, i);
            if (overlapping_cap) {
                error_setg(errp, "%s:%02x:%02x.%x "
                           "Attempt to add PCI capability %x at offset "
                           "%x overlaps existing capability %x at offset %x",
                           pci_root_bus_path(pdev), pci_dev_bus_num(pdev),
                           PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
                           cap_id, offset, overlapping_cap, i);
                return -EINVAL;
            }
        }
    }

    /* Link the new capability at the head of the list. */
    config = pdev->config + offset;
    config[PCI_CAP_LIST_ID] = cap_id;
    config[PCI_CAP_LIST_NEXT] = pdev->config[PCI_CAPABILITY_LIST];
    pdev->config[PCI_CAPABILITY_LIST] = offset;
    pdev->config[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
    memset(pdev->used + offset, 0xFF, QEMU_ALIGN_UP(size, 4));
    /* Make capability read-only by default */
    memset(pdev->wmask + offset, 0, size);
    /* Check capability by default */
    memset(pdev->cmask + offset, 0xFF, size);
    return offset;
}
/* Unlink capability from the pci config space. */
void pci_del_capability(PCIDevice *pdev, uint8_t cap_id, uint8_t size)
{
    uint8_t prev, offset = pci_find_capability_list(pdev, cap_id, &prev);

    if (!offset)
        return;
    /* Bypass the deleted entry in the list: prev now points past it. */
    pdev->config[prev] = pdev->config[offset + PCI_CAP_LIST_NEXT];
    /* Make capability writable again */
    memset(pdev->wmask + offset, 0xff, size);
    memset(pdev->w1cmask + offset, 0, size);
    /* Clear cmask as device-specific registers can't be checked */
    memset(pdev->cmask + offset, 0, size);
    memset(pdev->used + offset, 0, QEMU_ALIGN_UP(size, 4));

    /* Drop the status bit when the list became empty. */
    if (!pdev->config[PCI_CAPABILITY_LIST])
        pdev->config[PCI_STATUS] &= ~PCI_STATUS_CAP_LIST;
}
/* Return the config-space offset of capability @cap_id, or 0 if absent. */
uint8_t pci_find_capability(PCIDevice *pdev, uint8_t cap_id)
{
    return pci_find_capability_list(pdev, cap_id, NULL);
}
/*
 * Derive a firmware (OpenFirmware) name for the device: the class
 * table's fw_name when the class code matches (each entry's
 * fw_ign_bits are ignored in the comparison), otherwise a generic
 * "pciVVVV,DDDD" string built from the vendor/device ids.
 */
static char *pci_dev_fw_name(DeviceState *dev, char *buf, int len)
{
    PCIDevice *d = (PCIDevice *)dev;
    const char *name = NULL;
    const pci_class_desc *desc = pci_class_descriptions;
    int class = pci_get_word(d->config + PCI_CLASS_DEVICE);

    while (desc->desc &&
          (class & ~desc->fw_ign_bits) !=
          (desc->class & ~desc->fw_ign_bits)) {
        desc++;
    }

    if (desc->desc) {
        name = desc->fw_name;
    }

    if (name) {
        pstrcpy(buf, len, name);
    } else {
        snprintf(buf, len, "pci%04x,%04x",
                 pci_get_word(d->config + PCI_VENDOR_ID),
                 pci_get_word(d->config + PCI_DEVICE_ID));
    }

    return buf;
}
/*
 * Build the firmware device-path component, e.g. "ethernet@5" or
 * "name@5,1".  The ",function" suffix is printed only for non-zero
 * functions: has_func doubles as the precision of "%.*x", so a zero
 * function prints no digits at all.
 */
static char *pcibus_get_fw_dev_path(DeviceState *dev)
{
    PCIDevice *d = (PCIDevice *)dev;
    char name[33];
    int has_func = !!PCI_FUNC(d->devfn);

    return g_strdup_printf("%s@%x%s%.*x",
                           pci_dev_fw_name(dev, name, sizeof(name)),
                           PCI_SLOT(d->devfn),
                           has_func ? "," : "",
                           has_func,
                           PCI_FUNC(d->devfn));
}
/*
 * Build a stable textual path for the device, used e.g. to identify
 * it across migration.  Returns a newly allocated string the caller
 * must free.
 */
static char *pcibus_get_dev_path(DeviceState *dev)
{
    PCIDevice *d = container_of(dev, PCIDevice, qdev);
    PCIDevice *t;
    int slot_depth;
    /* Path format: Domain:00:Slot.Function:Slot.Function....:Slot.Function.
     * 00 is added here to make this format compatible with
     * domain:Bus:Slot.Func for systems without nested PCI bridges.
     * Slot.Function list specifies the slot and function numbers for all
     * devices on the path from root to the specific device. */
    const char *root_bus_path;
    int root_bus_len;
    char slot[] = ":SS.F";
    int slot_len = sizeof slot - 1 /* For '\0' */;
    int path_len;
    char *path, *p;
    int s;

    root_bus_path = pci_root_bus_path(d);
    root_bus_len = strlen(root_bus_path);

    /* Calculate # of slots on path between device and root. */;
    slot_depth = 0;
    for (t = d; t; t = pci_get_bus(t)->parent_dev) {
        ++slot_depth;
    }

    path_len = root_bus_len + slot_len * slot_depth;

    /* Allocate memory, fill in the terminating null byte. */
    path = g_malloc(path_len + 1 /* For '\0' */);
    path[path_len] = '\0';

    /* First field is the bus path. */
    memcpy(path, root_bus_path, root_bus_len);

    /* Fill in slot numbers. We walk up from device to root, so need to print
     * them in the reverse order, last to first. */
    p = path + path_len;
    for (t = d; t; t = pci_get_bus(t)->parent_dev) {
        p -= slot_len;
        s = snprintf(slot, sizeof slot, ":%02x.%x",
                     PCI_SLOT(t->devfn), PCI_FUNC(t->devfn));
        assert(s == slot_len);
        memcpy(p, slot, slot_len);
    }

    return path;
}
  2214. static int pci_qdev_find_recursive(PCIBus *bus,
  2215. const char *id, PCIDevice **pdev)
  2216. {
  2217. DeviceState *qdev = qdev_find_recursive(&bus->qbus, id);
  2218. if (!qdev) {
  2219. return -ENODEV;
  2220. }
  2221. /* roughly check if given qdev is pci device */
  2222. if (object_dynamic_cast(OBJECT(qdev), TYPE_PCI_DEVICE)) {
  2223. *pdev = PCI_DEVICE(qdev);
  2224. return 0;
  2225. }
  2226. return -EINVAL;
  2227. }
/*
 * Find a PCI device by qdev id across all registered host bridges.
 * Returns 0 on success (with *pdev set), -ENODEV when the id is
 * unknown everywhere, or a non-ENODEV error (e.g. -EINVAL for a
 * non-PCI device) if one was encountered during the search.
 */
int pci_qdev_find_device(const char *id, PCIDevice **pdev)
{
    PCIHostState *host_bridge;
    int rc = -ENODEV;

    QLIST_FOREACH(host_bridge, &pci_host_bridges, next) {
        int tmp = pci_qdev_find_recursive(host_bridge->bus, id, pdev);
        if (!tmp) {
            rc = 0;
            break;
        }
        /* remember a more specific error than "not found" */
        if (tmp != -ENODEV) {
            rc = tmp;
        }
    }

    return rc;
}
/* Return the memory address space of the device's bus. */
MemoryRegion *pci_address_space(PCIDevice *dev)
{
    return pci_get_bus(dev)->address_space_mem;
}
/* Return the I/O address space of the device's bus. */
MemoryRegion *pci_address_space_io(PCIDevice *dev)
{
    return pci_get_bus(dev)->address_space_io;
}
/* Class init for the abstract PCI device base class: wire the common
 * realize/unrealize handlers, bus type and properties. */
static void pci_device_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *k = DEVICE_CLASS(klass);

    k->realize = pci_qdev_realize;
    k->unrealize = pci_qdev_unrealize;
    k->bus_type = TYPE_PCI_BUS;
    device_class_set_props(k, pci_props);
}
  2260. static void pci_device_class_base_init(ObjectClass *klass, void *data)
  2261. {
  2262. if (!object_class_is_abstract(klass)) {
  2263. ObjectClass *conventional =
  2264. object_class_dynamic_cast(klass, INTERFACE_CONVENTIONAL_PCI_DEVICE);
  2265. ObjectClass *pcie =
  2266. object_class_dynamic_cast(klass, INTERFACE_PCIE_DEVICE);
  2267. ObjectClass *cxl =
  2268. object_class_dynamic_cast(klass, INTERFACE_CXL_DEVICE);
  2269. assert(conventional || pcie || cxl);
  2270. }
  2271. }
/*
 * Return the DMA address space seen by @dev.  Walks toward the root,
 * applying requester-ID aliasing rules at each conventional PCI bus,
 * until a bus with iommu_ops is found; falls back to the global memory
 * address space when no IOMMU applies or the bus bypasses it.
 */
AddressSpace *pci_device_iommu_address_space(PCIDevice *dev)
{
    PCIBus *bus = pci_get_bus(dev);
    PCIBus *iommu_bus = bus;
    uint8_t devfn = dev->devfn;

    while (iommu_bus && !iommu_bus->iommu_ops && iommu_bus->parent_dev) {
        PCIBus *parent_bus = pci_get_bus(iommu_bus->parent_dev);

        /*
         * The requester ID of the provided device may be aliased, as seen from
         * the IOMMU, due to topology limitations.  The IOMMU relies on a
         * requester ID to provide a unique AddressSpace for devices, but
         * conventional PCI buses pre-date such concepts.  Instead, the PCIe-
         * to-PCI bridge creates and accepts transactions on behalf of down-
         * stream devices.  When doing so, all downstream devices are masked
         * (aliased) behind a single requester ID.  The requester ID used
         * depends on the format of the bridge devices.  Proper PCIe-to-PCI
         * bridges, with a PCIe capability indicating such, follow the
         * guidelines of chapter 2.3 of the PCIe-to-PCI/X bridge specification,
         * where the bridge uses the secondary bus as the bridge portion of the
         * requester ID and devfn of 00.0.  For other bridges, typically those
         * found on the root complex such as the dmi-to-pci-bridge, we follow
         * the convention of typical bare-metal hardware, which uses the
         * requester ID of the bridge itself.  There are device specific
         * exceptions to these rules, but these are the defaults that the
         * Linux kernel uses when determining DMA aliases itself and believed
         * to be true for the bare metal equivalents of the devices emulated
         * in QEMU.
         */
        if (!pci_bus_is_express(iommu_bus)) {
            PCIDevice *parent = iommu_bus->parent_dev;

            if (pci_is_express(parent) &&
                pcie_cap_get_type(parent) == PCI_EXP_TYPE_PCI_BRIDGE) {
                devfn = PCI_DEVFN(0, 0);
                bus = iommu_bus;
            } else {
                devfn = parent->devfn;
                bus = parent_bus;
            }
        }

        iommu_bus = parent_bus;
    }

    if (!pci_bus_bypass_iommu(bus) && iommu_bus->iommu_ops) {
        return iommu_bus->iommu_ops->get_address_space(bus,
                                                       iommu_bus->iommu_opaque, devfn);
    }
    return &address_space_memory;
}
  2319. void pci_setup_iommu(PCIBus *bus, const PCIIOMMUOps *ops, void *opaque)
  2320. {
  2321. /*
  2322. * If called, pci_setup_iommu() should provide a minimum set of
  2323. * useful callbacks for the bus.
  2324. */
  2325. assert(ops);
  2326. assert(ops->get_address_space);
  2327. bus->iommu_ops = ops;
  2328. bus->iommu_opaque = opaque;
  2329. }
  2330. static void pci_dev_get_w64(PCIBus *b, PCIDevice *dev, void *opaque)
  2331. {
  2332. Range *range = opaque;
  2333. uint16_t cmd = pci_get_word(dev->config + PCI_COMMAND);
  2334. int i;
  2335. if (!(cmd & PCI_COMMAND_MEMORY)) {
  2336. return;
  2337. }
  2338. if (IS_PCI_BRIDGE(dev)) {
  2339. pcibus_t base = pci_bridge_get_base(dev, PCI_BASE_ADDRESS_MEM_PREFETCH);
  2340. pcibus_t limit = pci_bridge_get_limit(dev, PCI_BASE_ADDRESS_MEM_PREFETCH);
  2341. base = MAX(base, 0x1ULL << 32);
  2342. if (limit >= base) {
  2343. Range pref_range;
  2344. range_set_bounds(&pref_range, base, limit);
  2345. range_extend(range, &pref_range);
  2346. }
  2347. }
  2348. for (i = 0; i < PCI_NUM_REGIONS; ++i) {
  2349. PCIIORegion *r = &dev->io_regions[i];
  2350. pcibus_t lob, upb;
  2351. Range region_range;
  2352. if (!r->size ||
  2353. (r->type & PCI_BASE_ADDRESS_SPACE_IO) ||
  2354. !(r->type & PCI_BASE_ADDRESS_MEM_TYPE_64)) {
  2355. continue;
  2356. }
  2357. lob = pci_bar_address(dev, i, r->type, r->size);
  2358. upb = lob + r->size - 1;
  2359. if (lob == PCI_BAR_UNMAPPED) {
  2360. continue;
  2361. }
  2362. lob = MAX(lob, 0x1ULL << 32);
  2363. if (upb >= lob) {
  2364. range_set_bounds(&region_range, lob, upb);
  2365. range_extend(range, &region_range);
  2366. }
  2367. }
  2368. }
/*
 * Compute the union of all 64-bit memory windows (above 4GiB) claimed
 * by devices under @bus, storing the result in *range.  The range is
 * empty if no device maps anything above 4GiB.
 */
void pci_bus_get_w64_range(PCIBus *bus, Range *range)
{
    /* Start empty and let each device widen the range. */
    range_make_empty(range);
    pci_for_each_device_under_bus(bus, pci_dev_get_w64, range);
}
  2374. static bool pcie_has_upstream_port(PCIDevice *dev)
  2375. {
  2376. PCIDevice *parent_dev = pci_bridge_get_device(pci_get_bus(dev));
  2377. /* Device associated with an upstream port.
  2378. * As there are several types of these, it's easier to check the
  2379. * parent device: upstream ports are always connected to
  2380. * root or downstream ports.
  2381. */
  2382. return parent_dev &&
  2383. pci_is_express(parent_dev) &&
  2384. parent_dev->exp.exp_cap &&
  2385. (pcie_cap_get_type(parent_dev) == PCI_EXP_TYPE_ROOT_PORT ||
  2386. pcie_cap_get_type(parent_dev) == PCI_EXP_TYPE_DOWNSTREAM);
  2387. }
  2388. PCIDevice *pci_get_function_0(PCIDevice *pci_dev)
  2389. {
  2390. PCIBus *bus = pci_get_bus(pci_dev);
  2391. if(pcie_has_upstream_port(pci_dev)) {
  2392. /* With an upstream PCIe port, we only support 1 device at slot 0 */
  2393. return bus->devices[0];
  2394. } else {
  2395. /* Other bus types might support multiple devices at slots 0-31 */
  2396. return bus->devices[PCI_DEVFN(PCI_SLOT(pci_dev->devfn), 0)];
  2397. }
  2398. }
  2399. MSIMessage pci_get_msi_message(PCIDevice *dev, int vector)
  2400. {
  2401. MSIMessage msg;
  2402. if (msix_enabled(dev)) {
  2403. msg = msix_get_message(dev, vector);
  2404. } else if (msi_enabled(dev)) {
  2405. msg = msi_get_message(dev, vector);
  2406. } else {
  2407. /* Should never happen */
  2408. error_report("%s: unknown interrupt type", __func__);
  2409. abort();
  2410. }
  2411. return msg;
  2412. }
  2413. void pci_set_power(PCIDevice *d, bool state)
  2414. {
  2415. if (d->has_power == state) {
  2416. return;
  2417. }
  2418. d->has_power = state;
  2419. pci_update_mappings(d);
  2420. memory_region_set_enabled(&d->bus_master_enable_region,
  2421. (pci_get_word(d->config + PCI_COMMAND)
  2422. & PCI_COMMAND_MASTER) && d->has_power);
  2423. if (!d->has_power) {
  2424. pci_device_reset(d);
  2425. }
  2426. }
/*
 * Abstract base type for all PCI devices.  Concrete subclasses must
 * additionally implement one of the conventional-PCI/PCIe/CXL device
 * interfaces (enforced by pci_device_class_base_init()).
 */
static const TypeInfo pci_device_type_info = {
    .name = TYPE_PCI_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(PCIDevice),
    .abstract = true,
    .class_size = sizeof(PCIDeviceClass),
    .class_init = pci_device_class_init,
    .class_base_init = pci_device_class_base_init,
};
/* Register the PCI bus types, device interfaces and base device type. */
static void pci_register_types(void)
{
    type_register_static(&pci_bus_info);
    type_register_static(&pcie_bus_info);
    type_register_static(&cxl_bus_info);
    type_register_static(&conventional_pci_interface_info);
    type_register_static(&cxl_interface_info);
    type_register_static(&pcie_interface_info);
    type_register_static(&pci_device_type_info);
}

type_init(pci_register_types)