2
0

xen_pt_config_init.c 66 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205420552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119
  1. /*
  2. * Copyright (c) 2007, Neocleus Corporation.
  3. * Copyright (c) 2007, Intel Corporation.
  4. *
  5. * This work is licensed under the terms of the GNU GPL, version 2. See
  6. * the COPYING file in the top-level directory.
  7. *
  8. * Alex Novik <alex@neocleus.com>
  9. * Allen Kay <allen.m.kay@intel.com>
  10. * Guy Zana <guy@neocleus.com>
  11. *
  12. * This file implements direct PCI assignment to a HVM guest
  13. */
  14. #include "qemu/osdep.h"
  15. #include "qapi/error.h"
  16. #include "qemu/timer.h"
  17. #include "hw/xen/xen_pt.h"
  18. #include "hw/xen/xen_igd.h"
  19. #include "hw/xen/xen-legacy-backend.h"
  20. #define XEN_PT_MERGE_VALUE(value, data, val_mask) \
  21. (((value) & (val_mask)) | ((data) & ~(val_mask)))
  22. #define XEN_PT_INVALID_REG 0xFFFFFFFF /* invalid register value */
  23. /* prototype */
  24. static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg,
  25. uint32_t real_offset, uint32_t *data);
  26. /* helper */
  27. /* A return value of 1 means the capability should NOT be exposed to guest. */
  28. static int xen_pt_hide_dev_cap(const XenHostPCIDevice *d, uint8_t grp_id)
  29. {
  30. switch (grp_id) {
  31. case PCI_CAP_ID_EXP:
  32. /* The PCI Express Capability Structure of the VF of Intel 82599 10GbE
  33. * Controller looks trivial, e.g., the PCI Express Capabilities
  34. * Register is 0. We should not try to expose it to guest.
  35. *
  36. * The datasheet is available at
  37. * http://download.intel.com/design/network/datashts/82599_datasheet.pdf
  38. *
  39. * See 'Table 9.7. VF PCIe Configuration Space' of the datasheet, the
  40. * PCI Express Capability Structure of the VF of Intel 82599 10GbE
  41. * Controller looks trivial, e.g., the PCI Express Capabilities
  42. * Register is 0, so the Capability Version is 0 and
  43. * xen_pt_pcie_size_init() would fail.
  44. */
  45. if (d->vendor_id == PCI_VENDOR_ID_INTEL &&
  46. d->device_id == PCI_DEVICE_ID_INTEL_82599_SFP_VF) {
  47. return 1;
  48. }
  49. break;
  50. }
  51. return 0;
  52. }
  53. /* find emulate register group entry */
  54. XenPTRegGroup *xen_pt_find_reg_grp(XenPCIPassthroughState *s, uint32_t address)
  55. {
  56. XenPTRegGroup *entry = NULL;
  57. /* find register group entry */
  58. QLIST_FOREACH(entry, &s->reg_grps, entries) {
  59. /* check address */
  60. if ((entry->base_offset <= address)
  61. && ((entry->base_offset + entry->size) > address)) {
  62. return entry;
  63. }
  64. }
  65. /* group entry not found */
  66. return NULL;
  67. }
  68. /* find emulate register entry */
  69. XenPTReg *xen_pt_find_reg(XenPTRegGroup *reg_grp, uint32_t address)
  70. {
  71. XenPTReg *reg_entry = NULL;
  72. XenPTRegInfo *reg = NULL;
  73. uint32_t real_offset = 0;
  74. /* find register entry */
  75. QLIST_FOREACH(reg_entry, &reg_grp->reg_tbl_list, entries) {
  76. reg = reg_entry->reg;
  77. real_offset = reg_grp->base_offset + reg->offset;
  78. /* check address */
  79. if ((real_offset <= address)
  80. && ((real_offset + reg->size) > address)) {
  81. return reg_entry;
  82. }
  83. }
  84. return NULL;
  85. }
  86. static uint32_t get_throughable_mask(const XenPCIPassthroughState *s,
  87. XenPTRegInfo *reg, uint32_t valid_mask)
  88. {
  89. uint32_t throughable_mask = ~(reg->emu_mask | reg->ro_mask);
  90. if (!s->permissive) {
  91. throughable_mask &= ~reg->res_mask;
  92. }
  93. return throughable_mask & valid_mask;
  94. }
  95. /****************
  96. * general register functions
  97. */
  98. /* register initialization function */
  99. static int xen_pt_common_reg_init(XenPCIPassthroughState *s,
  100. XenPTRegInfo *reg, uint32_t real_offset,
  101. uint32_t *data)
  102. {
  103. *data = reg->init_val;
  104. return 0;
  105. }
  106. /* Read register functions */
  107. static int xen_pt_byte_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
  108. uint8_t *value, uint8_t valid_mask)
  109. {
  110. XenPTRegInfo *reg = cfg_entry->reg;
  111. uint8_t valid_emu_mask = 0;
  112. uint8_t *data = cfg_entry->ptr.byte;
  113. /* emulate byte register */
  114. valid_emu_mask = reg->emu_mask & valid_mask;
  115. *value = XEN_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask);
  116. return 0;
  117. }
  118. static int xen_pt_word_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
  119. uint16_t *value, uint16_t valid_mask)
  120. {
  121. XenPTRegInfo *reg = cfg_entry->reg;
  122. uint16_t valid_emu_mask = 0;
  123. uint16_t *data = cfg_entry->ptr.half_word;
  124. /* emulate word register */
  125. valid_emu_mask = reg->emu_mask & valid_mask;
  126. *value = XEN_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask);
  127. return 0;
  128. }
  129. static int xen_pt_long_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
  130. uint32_t *value, uint32_t valid_mask)
  131. {
  132. XenPTRegInfo *reg = cfg_entry->reg;
  133. uint32_t valid_emu_mask = 0;
  134. uint32_t *data = cfg_entry->ptr.word;
  135. /* emulate long register */
  136. valid_emu_mask = reg->emu_mask & valid_mask;
  137. *value = XEN_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask);
  138. return 0;
  139. }
  140. /* Write register functions */
  141. static int xen_pt_byte_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
  142. uint8_t *val, uint8_t dev_value,
  143. uint8_t valid_mask)
  144. {
  145. XenPTRegInfo *reg = cfg_entry->reg;
  146. uint8_t writable_mask = 0;
  147. uint8_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
  148. uint8_t *data = cfg_entry->ptr.byte;
  149. /* modify emulate register */
  150. writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
  151. *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
  152. /* create value for writing to I/O device register */
  153. *val = XEN_PT_MERGE_VALUE(*val, dev_value & ~reg->rw1c_mask,
  154. throughable_mask);
  155. return 0;
  156. }
  157. static int xen_pt_word_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
  158. uint16_t *val, uint16_t dev_value,
  159. uint16_t valid_mask)
  160. {
  161. XenPTRegInfo *reg = cfg_entry->reg;
  162. uint16_t writable_mask = 0;
  163. uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
  164. uint16_t *data = cfg_entry->ptr.half_word;
  165. /* modify emulate register */
  166. writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
  167. *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
  168. /* create value for writing to I/O device register */
  169. *val = XEN_PT_MERGE_VALUE(*val, dev_value & ~reg->rw1c_mask,
  170. throughable_mask);
  171. return 0;
  172. }
  173. static int xen_pt_long_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
  174. uint32_t *val, uint32_t dev_value,
  175. uint32_t valid_mask)
  176. {
  177. XenPTRegInfo *reg = cfg_entry->reg;
  178. uint32_t writable_mask = 0;
  179. uint32_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
  180. uint32_t *data = cfg_entry->ptr.word;
  181. /* modify emulate register */
  182. writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
  183. *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
  184. /* create value for writing to I/O device register */
  185. *val = XEN_PT_MERGE_VALUE(*val, dev_value & ~reg->rw1c_mask,
  186. throughable_mask);
  187. return 0;
  188. }
  189. /* XenPTRegInfo declaration
  190. * - only for emulated register (either a part or whole bit).
  191. * - for passthrough register that need special behavior (like interacting with
  192. * other component), set emu_mask to all 0 and specify r/w func properly.
  193. * - do NOT use ALL F for init_val, otherwise the tbl will not be registered.
  194. */
  195. /********************
  196. * Header Type0
  197. */
  198. static int xen_pt_vendor_reg_init(XenPCIPassthroughState *s,
  199. XenPTRegInfo *reg, uint32_t real_offset,
  200. uint32_t *data)
  201. {
  202. *data = s->real_device.vendor_id;
  203. return 0;
  204. }
  205. static int xen_pt_device_reg_init(XenPCIPassthroughState *s,
  206. XenPTRegInfo *reg, uint32_t real_offset,
  207. uint32_t *data)
  208. {
  209. *data = s->real_device.device_id;
  210. return 0;
  211. }
  212. static int xen_pt_status_reg_init(XenPCIPassthroughState *s,
  213. XenPTRegInfo *reg, uint32_t real_offset,
  214. uint32_t *data)
  215. {
  216. XenPTRegGroup *reg_grp_entry = NULL;
  217. XenPTReg *reg_entry = NULL;
  218. uint32_t reg_field = 0;
  219. /* find Header register group */
  220. reg_grp_entry = xen_pt_find_reg_grp(s, PCI_CAPABILITY_LIST);
  221. if (reg_grp_entry) {
  222. /* find Capabilities Pointer register */
  223. reg_entry = xen_pt_find_reg(reg_grp_entry, PCI_CAPABILITY_LIST);
  224. if (reg_entry) {
  225. /* check Capabilities Pointer register */
  226. if (*reg_entry->ptr.half_word) {
  227. reg_field |= PCI_STATUS_CAP_LIST;
  228. } else {
  229. reg_field &= ~PCI_STATUS_CAP_LIST;
  230. }
  231. } else {
  232. xen_shutdown_fatal_error("Internal error: Couldn't find XenPTReg*"
  233. " for Capabilities Pointer register."
  234. " (%s)\n", __func__);
  235. return -1;
  236. }
  237. } else {
  238. xen_shutdown_fatal_error("Internal error: Couldn't find XenPTRegGroup"
  239. " for Header. (%s)\n", __func__);
  240. return -1;
  241. }
  242. *data = reg_field;
  243. return 0;
  244. }
  245. static int xen_pt_header_type_reg_init(XenPCIPassthroughState *s,
  246. XenPTRegInfo *reg, uint32_t real_offset,
  247. uint32_t *data)
  248. {
  249. /* read PCI_HEADER_TYPE */
  250. *data = reg->init_val;
  251. if ((PCI_DEVICE(s)->cap_present & QEMU_PCI_CAP_MULTIFUNCTION)) {
  252. *data |= PCI_HEADER_TYPE_MULTI_FUNCTION;
  253. }
  254. return 0;
  255. }
  256. /* initialize Interrupt Pin register */
  257. static int xen_pt_irqpin_reg_init(XenPCIPassthroughState *s,
  258. XenPTRegInfo *reg, uint32_t real_offset,
  259. uint32_t *data)
  260. {
  261. if (s->real_device.irq) {
  262. *data = xen_pt_pci_read_intx(s);
  263. }
  264. return 0;
  265. }
  266. /* Command register */
  267. static int xen_pt_cmd_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
  268. uint16_t *val, uint16_t dev_value,
  269. uint16_t valid_mask)
  270. {
  271. XenPTRegInfo *reg = cfg_entry->reg;
  272. uint16_t writable_mask = 0;
  273. uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
  274. uint16_t *data = cfg_entry->ptr.half_word;
  275. /* modify emulate register */
  276. writable_mask = ~reg->ro_mask & valid_mask;
  277. *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
  278. /* create value for writing to I/O device register */
  279. if (*val & PCI_COMMAND_INTX_DISABLE) {
  280. throughable_mask |= PCI_COMMAND_INTX_DISABLE;
  281. } else {
  282. if (s->machine_irq) {
  283. throughable_mask |= PCI_COMMAND_INTX_DISABLE;
  284. }
  285. }
  286. *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
  287. return 0;
  288. }
  289. /* BAR */
  290. #define XEN_PT_BAR_MEM_RO_MASK 0x0000000F /* BAR ReadOnly mask(Memory) */
  291. #define XEN_PT_BAR_MEM_EMU_MASK 0xFFFFFFF0 /* BAR emul mask(Memory) */
  292. #define XEN_PT_BAR_IO_RO_MASK 0x00000003 /* BAR ReadOnly mask(I/O) */
  293. #define XEN_PT_BAR_IO_EMU_MASK 0xFFFFFFFC /* BAR emul mask(I/O) */
  294. static bool is_64bit_bar(PCIIORegion *r)
  295. {
  296. return !!(r->type & PCI_BASE_ADDRESS_MEM_TYPE_64);
  297. }
  298. static uint64_t xen_pt_get_bar_size(PCIIORegion *r)
  299. {
  300. if (is_64bit_bar(r)) {
  301. uint64_t size64;
  302. size64 = (r + 1)->size;
  303. size64 <<= 32;
  304. size64 += r->size;
  305. return size64;
  306. }
  307. return r->size;
  308. }
/*
 * Classify the BAR at @index for emulation purposes: upper half of a
 * 64-bit BAR, unused, I/O, or memory.  The checks are ordered and the
 * order matters (upper-half detection must come first).
 */
static XenPTBarFlag xen_pt_bar_reg_parse(XenPCIPassthroughState *s,
                                         int index)
{
    PCIDevice *d = PCI_DEVICE(s);
    XenPTRegion *region = NULL;
    PCIIORegion *r;

    /* check 64bit BAR: if the previous slot is a 64-bit memory BAR (and
     * is not itself already marked as an upper half), this slot holds
     * its upper 32 bits */
    if ((0 < index) && (index < PCI_ROM_SLOT)) {
        int type = s->real_device.io_regions[index - 1].type;

        if ((type & XEN_HOST_PCI_REGION_TYPE_MEM)
            && (type & XEN_HOST_PCI_REGION_TYPE_MEM_64)) {
            region = &s->bases[index - 1];
            if (region->bar_flag != XEN_PT_BAR_FLAG_UPPER) {
                return XEN_PT_BAR_FLAG_UPPER;
            }
        }
    }

    /* check unused BAR: a zero-sized region is not implemented */
    r = &d->io_regions[index];
    if (!xen_pt_get_bar_size(r)) {
        return XEN_PT_BAR_FLAG_UNUSED;
    }

    /* for ExpROM BAR: always treated as memory */
    if (index == PCI_ROM_SLOT) {
        return XEN_PT_BAR_FLAG_MEM;
    }

    /* check BAR I/O indicator from the host region type */
    if (s->real_device.io_regions[index].type & XEN_HOST_PCI_REGION_TYPE_IO) {
        return XEN_PT_BAR_FLAG_IO;
    } else {
        return XEN_PT_BAR_FLAG_MEM;
    }
}
  342. static inline uint32_t base_address_with_flags(XenHostPCIIORegion *hr)
  343. {
  344. if (hr->type & XEN_HOST_PCI_REGION_TYPE_IO) {
  345. return hr->base_addr | (hr->bus_flags & ~PCI_BASE_ADDRESS_IO_MASK);
  346. } else {
  347. return hr->base_addr | (hr->bus_flags & ~PCI_BASE_ADDRESS_MEM_MASK);
  348. }
  349. }
  350. static int xen_pt_bar_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg,
  351. uint32_t real_offset, uint32_t *data)
  352. {
  353. uint32_t reg_field = 0;
  354. int index;
  355. index = xen_pt_bar_offset_to_index(reg->offset);
  356. if (index < 0 || index >= PCI_NUM_REGIONS) {
  357. XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index);
  358. return -1;
  359. }
  360. /* set BAR flag */
  361. s->bases[index].bar_flag = xen_pt_bar_reg_parse(s, index);
  362. if (s->bases[index].bar_flag == XEN_PT_BAR_FLAG_UNUSED) {
  363. reg_field = XEN_PT_INVALID_REG;
  364. }
  365. *data = reg_field;
  366. return 0;
  367. }
  368. static int xen_pt_bar_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
  369. uint32_t *value, uint32_t valid_mask)
  370. {
  371. XenPTRegInfo *reg = cfg_entry->reg;
  372. uint32_t valid_emu_mask = 0;
  373. uint32_t bar_emu_mask = 0;
  374. int index;
  375. /* get BAR index */
  376. index = xen_pt_bar_offset_to_index(reg->offset);
  377. if (index < 0 || index >= PCI_NUM_REGIONS - 1) {
  378. XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index);
  379. return -1;
  380. }
  381. /* use fixed-up value from kernel sysfs */
  382. *value = base_address_with_flags(&s->real_device.io_regions[index]);
  383. /* set emulate mask depend on BAR flag */
  384. switch (s->bases[index].bar_flag) {
  385. case XEN_PT_BAR_FLAG_MEM:
  386. bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK;
  387. break;
  388. case XEN_PT_BAR_FLAG_IO:
  389. bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK;
  390. break;
  391. case XEN_PT_BAR_FLAG_UPPER:
  392. bar_emu_mask = XEN_PT_BAR_ALLF;
  393. break;
  394. default:
  395. break;
  396. }
  397. /* emulate BAR */
  398. valid_emu_mask = bar_emu_mask & valid_mask;
  399. *value = XEN_PT_MERGE_VALUE(*value, *cfg_entry->ptr.word, ~valid_emu_mask);
  400. return 0;
  401. }
/*
 * BAR register write: update the emulated BAR value with the bits the
 * guest may change (address bits above the size-derived read-only mask)
 * and never forward BAR writes to the physical device.
 */
static int xen_pt_bar_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                uint32_t *val, uint32_t dev_value,
                                uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTRegion *base = NULL;
    PCIDevice *d = PCI_DEVICE(s);
    const PCIIORegion *r;
    uint32_t writable_mask = 0;
    uint32_t bar_emu_mask = 0;
    uint32_t bar_ro_mask = 0;
    uint32_t r_size = 0;
    int index = 0;
    uint32_t *data = cfg_entry->ptr.word;

    index = xen_pt_bar_offset_to_index(reg->offset);
    if (index < 0 || index >= PCI_NUM_REGIONS) {
        XEN_PT_ERR(d, "Internal error: Invalid BAR index [%d].\n", index);
        return -1;
    }

    r = &d->io_regions[index];
    base = &s->bases[index];
    r_size = xen_pt_get_emul_size(base->bar_flag, r->size);

    /* set emulate mask and read-only mask values depend on the BAR flag;
     * bits below the region size are read-only (BAR sizing protocol) */
    switch (s->bases[index].bar_flag) {
    case XEN_PT_BAR_FLAG_MEM:
        bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK;
        if (!r_size) {
            /* low 32 bits mask for 64 bit bars */
            bar_ro_mask = XEN_PT_BAR_ALLF;
        } else {
            bar_ro_mask = XEN_PT_BAR_MEM_RO_MASK | (r_size - 1);
        }
        break;
    case XEN_PT_BAR_FLAG_IO:
        bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK;
        bar_ro_mask = XEN_PT_BAR_IO_RO_MASK | (r_size - 1);
        break;
    case XEN_PT_BAR_FLAG_UPPER:
        /* upper half of a 64-bit BAR: size comes from the previous slot */
        assert(index > 0);
        r_size = d->io_regions[index - 1].size >> 32;
        bar_emu_mask = XEN_PT_BAR_ALLF;
        bar_ro_mask = r_size ? r_size - 1 : 0;
        break;
    default:
        break;
    }

    /* modify emulate register */
    writable_mask = bar_emu_mask & ~bar_ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* check whether we need to update the virtual region address or not
     * (NOTE(review): all branches are currently empty — looks like the
     * region update happens elsewhere; kept as-is) */
    switch (s->bases[index].bar_flag) {
    case XEN_PT_BAR_FLAG_UPPER:
    case XEN_PT_BAR_FLAG_MEM:
        /* nothing to do */
        break;
    case XEN_PT_BAR_FLAG_IO:
        /* nothing to do */
        break;
    default:
        break;
    }

    /* create value for writing to I/O device register: throughable mask
     * is 0, so per XEN_PT_MERGE_VALUE the device keeps its current value
     * (BAR writes never reach the hardware) */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);

    return 0;
}
/* write Exp ROM BAR
 *
 * Like a normal BAR write, the address bits covered by the ROM size are
 * read-only, but PCI_ROM_ADDRESS_ENABLE is explicitly excluded from the
 * read-only mask so the guest can toggle the ROM enable bit.
 */
static int xen_pt_exp_rom_bar_reg_write(XenPCIPassthroughState *s,
                                        XenPTReg *cfg_entry, uint32_t *val,
                                        uint32_t dev_value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTRegion *base = NULL;
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t writable_mask = 0;
    uint32_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    pcibus_t r_size = 0;
    uint32_t bar_ro_mask = 0;
    uint32_t *data = cfg_entry->ptr.word;

    r_size = d->io_regions[PCI_ROM_SLOT].size;
    base = &s->bases[PCI_ROM_SLOT];
    /* align memory type resource size */
    r_size = xen_pt_get_emul_size(base->bar_flag, r_size);

    /* set emulate mask and read-only mask, keeping the enable bit writable */
    bar_ro_mask = (reg->ro_mask | (r_size - 1)) & ~PCI_ROM_ADDRESS_ENABLE;

    /* modify emulate register */
    writable_mask = ~bar_ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    return 0;
}
/* Intel IGD OpRegion register read: delegates entirely to
 * igd_read_opregion(); valid_mask is ignored. */
static int xen_pt_intel_opregion_read(XenPCIPassthroughState *s,
                                      XenPTReg *cfg_entry,
                                      uint32_t *value, uint32_t valid_mask)
{
    *value = igd_read_opregion(s);
    return 0;
}
/* Intel IGD OpRegion register write: delegates entirely to
 * igd_write_opregion(); dev_value and valid_mask are ignored. */
static int xen_pt_intel_opregion_write(XenPCIPassthroughState *s,
                                       XenPTReg *cfg_entry, uint32_t *value,
                                       uint32_t dev_value, uint32_t valid_mask)
{
    igd_write_opregion(s, *value);
    return 0;
}
/* Header Type0 reg static information table.
 * Entry order matters: the Status register reads the emulated Cap Ptr
 * value at init time, so it must come after the Cap Ptr entry.
 * A .size of 0 terminates the table.
 */
static XenPTRegInfo xen_pt_emu_reg_header0[] = {
    /* Vendor ID reg: fully emulated and read-only, seeded from the host
     * device by xen_pt_vendor_reg_init */
    {
        .offset     = PCI_VENDOR_ID,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xFFFF,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_vendor_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Device ID reg: fully emulated and read-only, seeded from the host
     * device by xen_pt_device_reg_init */
    {
        .offset     = PCI_DEVICE_ID,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xFFFF,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_device_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Command reg: writes go through xen_pt_cmd_reg_write, which has
     * special INTx-disable handling */
    {
        .offset     = PCI_COMMAND,
        .size       = 2,
        .init_val   = 0x0000,
        .res_mask   = 0xF880,
        .emu_mask   = 0x0743,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_cmd_reg_write,
    },
    /* Capabilities Pointer reg: emulated read-only; xen_pt_ptr_reg_init
     * builds the guest-visible capability chain */
    {
        .offset     = PCI_CAPABILITY_LIST,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Status reg */
    /* use emulated Cap Ptr value to initialize,
     * so need to be declared after Cap Ptr reg
     */
    {
        .offset     = PCI_STATUS,
        .size       = 2,
        .init_val   = 0x0000,
        .res_mask   = 0x0007,
        .ro_mask    = 0x06F8,
        .rw1c_mask  = 0xF900,
        .emu_mask   = 0x0010,
        .init       = xen_pt_status_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Cache Line Size reg: fully emulated, freely writable by the guest */
    {
        .offset     = PCI_CACHE_LINE_SIZE,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0x00,
        .emu_mask   = 0xFF,
        .init       = xen_pt_common_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Latency Timer reg: fully emulated, freely writable by the guest */
    {
        .offset     = PCI_LATENCY_TIMER,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0x00,
        .emu_mask   = 0xFF,
        .init       = xen_pt_common_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Header Type reg: read-only; only the multi-function bit is emulated */
    {
        .offset     = PCI_HEADER_TYPE,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = PCI_HEADER_TYPE_MULTI_FUNCTION,
        .init       = xen_pt_header_type_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Interrupt Line reg: fully emulated, freely writable by the guest */
    {
        .offset     = PCI_INTERRUPT_LINE,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0x00,
        .emu_mask   = 0xFF,
        .init       = xen_pt_common_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Interrupt Pin reg: emulated read-only, seeded from the host device */
    {
        .offset     = PCI_INTERRUPT_PIN,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_irqpin_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* BAR 0 reg */
    /* mask of BAR need to be decided later, depends on IO/MEM type */
    {
        .offset     = PCI_BASE_ADDRESS_0,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 1 reg */
    {
        .offset     = PCI_BASE_ADDRESS_1,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 2 reg */
    {
        .offset     = PCI_BASE_ADDRESS_2,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 3 reg */
    {
        .offset     = PCI_BASE_ADDRESS_3,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 4 reg */
    {
        .offset     = PCI_BASE_ADDRESS_4,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 5 reg */
    {
        .offset     = PCI_BASE_ADDRESS_5,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* Expansion ROM BAR reg: address bits emulated; the enable bit is
     * handled by xen_pt_exp_rom_bar_reg_write */
    {
        .offset     = PCI_ROM_ADDRESS,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = ~PCI_ROM_ADDRESS_MASK & ~PCI_ROM_ADDRESS_ENABLE,
        .emu_mask   = (uint32_t)PCI_ROM_ADDRESS_MASK,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_exp_rom_bar_reg_write,
    },
    /* table terminator */
    {
        .size = 0,
    },
};
  694. /*********************************
  695. * Vital Product Data Capability
  696. */
/* Vital Product Data Capability Structure reg static information table.
 * A .size of 0 terminates the table. */
static XenPTRegInfo xen_pt_emu_reg_vpd[] = {
    /* Next Capability pointer: emulated read-only, chain built by
     * xen_pt_ptr_reg_init */
    {
        .offset     = PCI_CAP_LIST_NEXT,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* VPD Address register: only the low 2 bits are emulated/read-only;
     * the rest passes through to the device */
    {
        .offset     = PCI_VPD_ADDR,
        .size       = 2,
        .ro_mask    = 0x0003,
        .emu_mask   = 0x0003,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* table terminator */
    {
        .size = 0,
    },
};
  722. /**************************************
  723. * Vendor Specific Capability
  724. */
/* Vendor Specific Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_vendor[] = {
    /* Next Pointer reg: fully emulated so the guest-visible capability chain
     * can skip hidden/hardwired capabilities. */
    {
        .offset = PCI_CAP_LIST_NEXT,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0xFF,
        .emu_mask = 0xFF,
        .init = xen_pt_ptr_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* table terminator: .size == 0 */
    {
        .size = 0,
    },
};
  741. /*****************************
  742. * PCI Express Capability
  743. */
  744. static inline uint8_t get_capability_version(XenPCIPassthroughState *s,
  745. uint32_t offset)
  746. {
  747. uint8_t flag;
  748. if (xen_host_pci_get_byte(&s->real_device, offset + PCI_EXP_FLAGS, &flag)) {
  749. return 0;
  750. }
  751. return flag & PCI_EXP_FLAGS_VERS;
  752. }
  753. static inline uint8_t get_device_type(XenPCIPassthroughState *s,
  754. uint32_t offset)
  755. {
  756. uint8_t flag;
  757. if (xen_host_pci_get_byte(&s->real_device, offset + PCI_EXP_FLAGS, &flag)) {
  758. return 0;
  759. }
  760. return (flag & PCI_EXP_FLAGS_TYPE) >> 4;
  761. }
  762. /* initialize Link Control register */
  763. static int xen_pt_linkctrl_reg_init(XenPCIPassthroughState *s,
  764. XenPTRegInfo *reg, uint32_t real_offset,
  765. uint32_t *data)
  766. {
  767. uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);
  768. uint8_t dev_type = get_device_type(s, real_offset - reg->offset);
  769. /* no need to initialize in case of Root Complex Integrated Endpoint
  770. * with cap_ver 1.x
  771. */
  772. if ((dev_type == PCI_EXP_TYPE_RC_END) && (cap_ver == 1)) {
  773. *data = XEN_PT_INVALID_REG;
  774. }
  775. *data = reg->init_val;
  776. return 0;
  777. }
  778. /* initialize Device Control 2 register */
  779. static int xen_pt_devctrl2_reg_init(XenPCIPassthroughState *s,
  780. XenPTRegInfo *reg, uint32_t real_offset,
  781. uint32_t *data)
  782. {
  783. uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);
  784. /* no need to initialize in case of cap_ver 1.x */
  785. if (cap_ver == 1) {
  786. *data = XEN_PT_INVALID_REG;
  787. }
  788. *data = reg->init_val;
  789. return 0;
  790. }
  791. /* initialize Link Control 2 register */
  792. static int xen_pt_linkctrl2_reg_init(XenPCIPassthroughState *s,
  793. XenPTRegInfo *reg, uint32_t real_offset,
  794. uint32_t *data)
  795. {
  796. uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);
  797. uint32_t reg_field = 0;
  798. /* no need to initialize in case of cap_ver 1.x */
  799. if (cap_ver == 1) {
  800. reg_field = XEN_PT_INVALID_REG;
  801. } else {
  802. /* set Supported Link Speed */
  803. uint8_t lnkcap;
  804. int rc;
  805. rc = xen_host_pci_get_byte(&s->real_device,
  806. real_offset - reg->offset + PCI_EXP_LNKCAP,
  807. &lnkcap);
  808. if (rc) {
  809. return rc;
  810. }
  811. reg_field |= PCI_EXP_LNKCAP_SLS & lnkcap;
  812. }
  813. *data = reg_field;
  814. return 0;
  815. }
/* PCI Express Capability Structure reg static information table.
 * Per entry: ro_mask = guest-read-only bits, emu_mask = bits emulated in
 * QEMU (others pass through), res_mask = reserved bits, rw1c_mask =
 * write-1-to-clear status bits. */
static XenPTRegInfo xen_pt_emu_reg_pcie[] = {
    /* Next Pointer reg */
    {
        .offset = PCI_CAP_LIST_NEXT,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0xFF,
        .emu_mask = 0xFF,
        .init = xen_pt_ptr_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* Device Capabilities reg: read-only to the guest; bit 28 emulated
     * (presumably to hide a host capability from the guest — verify). */
    {
        .offset = PCI_EXP_DEVCAP,
        .size = 4,
        .init_val = 0x00000000,
        .ro_mask = 0xFFFFFFFF,
        .emu_mask = 0x10000000,
        .init = xen_pt_common_reg_init,
        .u.dw.read = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_long_reg_write,
    },
    /* Device Control reg */
    {
        .offset = PCI_EXP_DEVCTL,
        .size = 2,
        .init_val = 0x2810,
        .ro_mask = 0x8400,
        .emu_mask = 0xFFFF,
        .init = xen_pt_common_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* Device Status reg: low 4 bits are RW1C error/status bits */
    {
        .offset = PCI_EXP_DEVSTA,
        .size = 2,
        .res_mask = 0xFFC0,
        .ro_mask = 0x0030,
        .rw1c_mask = 0x000F,
        .init = xen_pt_common_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* Link Control reg: skipped for cap_ver 1.x RC integrated endpoints
     * (see xen_pt_linkctrl_reg_init) */
    {
        .offset = PCI_EXP_LNKCTL,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0xFC34,
        .emu_mask = 0xFFFF,
        .init = xen_pt_linkctrl_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* Link Status reg: bits 14-15 are RW1C */
    {
        .offset = PCI_EXP_LNKSTA,
        .size = 2,
        .ro_mask = 0x3FFF,
        .rw1c_mask = 0xC000,
        .init = xen_pt_common_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* Device Control 2 reg: literal offset 0x28 (= PCI_EXP_DEVCTL2);
     * only exists for cap_ver >= 2 (see xen_pt_devctrl2_reg_init) */
    {
        .offset = 0x28,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0xFFA0,
        .emu_mask = 0xFFBF,
        .init = xen_pt_devctrl2_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* Link Control 2 reg: literal offset 0x30 (= PCI_EXP_LNKCTL2);
     * only exists for cap_ver >= 2 (see xen_pt_linkctrl2_reg_init) */
    {
        .offset = 0x30,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0xE040,
        .emu_mask = 0xFFFF,
        .init = xen_pt_linkctrl2_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* table terminator: .size == 0 */
    {
        .size = 0,
    },
};
  909. /*********************************
  910. * Power Management Capability
  911. */
/* Power Management Capability reg static information table */
static XenPTRegInfo xen_pt_emu_reg_pm[] = {
    /* Next Pointer reg: fully emulated so the guest capability chain can be
     * rewritten */
    {
        .offset = PCI_CAP_LIST_NEXT,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0xFF,
        .emu_mask = 0xFF,
        .init = xen_pt_ptr_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* Power Management Capabilities reg: entirely read-only to the guest */
    {
        .offset = PCI_CAP_FLAGS,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0xFFFF,
        .emu_mask = 0xF9C8,
        .init = xen_pt_common_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* PCI Power Management Control/Status reg: bit 15 (PME status) is RW1C,
     * bits 4-7 reserved; init_val 0x0008 — NOTE(review): presumably sets the
     * "No Soft Reset" bit, verify against the PM spec. */
    {
        .offset = PCI_PM_CTRL,
        .size = 2,
        .init_val = 0x0008,
        .res_mask = 0x00F0,
        .ro_mask = 0x610C,
        .rw1c_mask = 0x8000,
        .emu_mask = 0x810B,
        .init = xen_pt_common_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* table terminator: .size == 0 */
    {
        .size = 0,
    },
};
  953. /********************************
  954. * MSI Capability
  955. */
/* Helper: true iff @offset names the MSI register variant (32- or 64-bit)
 * that matches the PCI_MSI_FLAGS_64BIT bit in @flags.  "what" is a token
 * (DATA, MASK, PENDING) pasted into PCI_MSI_<what>_32 / PCI_MSI_<what>_64. */
#define xen_pt_msi_check_type(offset, flags, what) \
        ((offset) == ((flags) & PCI_MSI_FLAGS_64BIT ? \
                      PCI_MSI_##what##_64 : PCI_MSI_##what##_32))
/* Message Control register */
/* Initialize the emulated MSI Message Control register.
 * Side effects on the host device: if MSI is found already enabled, it is
 * disabled before passthrough starts.  Also records the capability's host
 * flags and control-register offset in s->msi for later use. */
static int xen_pt_msgctrl_reg_init(XenPCIPassthroughState *s,
                                   XenPTRegInfo *reg, uint32_t real_offset,
                                   uint32_t *data)
{
    XenPTMSI *msi = s->msi;
    uint16_t reg_field;
    int rc;

    /* use I/O device register's value as initial value */
    rc = xen_host_pci_get_word(&s->real_device, real_offset, &reg_field);
    if (rc) {
        return rc;
    }
    if (reg_field & PCI_MSI_FLAGS_ENABLE) {
        XEN_PT_LOG(&s->dev, "MSI already enabled, disabling it first\n");
        xen_host_pci_set_word(&s->real_device, real_offset,
                              reg_field & ~PCI_MSI_FLAGS_ENABLE);
    }
    /* accumulate the host capability flags (64BIT/MASKBIT/queue size) */
    msi->flags |= reg_field;
    msi->ctrl_offset = real_offset;
    msi->initialized = false;
    msi->mapped = false;

    *data = reg->init_val;
    return 0;
}
/* Handle a guest write to the MSI Message Control register.
 * Merges the guest value into the emulated register, then enables or
 * disables MSI on the host to track PCI_MSI_FLAGS_ENABLE.  MSI setup
 * errors are deliberately swallowed (ENABLE bit cleared, 0 returned) so
 * the guest keeps running with non-functional MSI. */
static int xen_pt_msgctrl_reg_write(XenPCIPassthroughState *s,
                                    XenPTReg *cfg_entry, uint16_t *val,
                                    uint16_t dev_value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTMSI *msi = s->msi;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    uint16_t *data = cfg_entry->ptr.half_word;

    /* Currently no support for multi-vector */
    if (*val & PCI_MSI_FLAGS_QSIZE) {
        XEN_PT_WARN(&s->dev, "Tries to set more than 1 vector ctrl %x\n", *val);
    }

    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
    /* cache the flags except ENABLE (ORed in, never cleared here) */
    msi->flags |= *data & ~PCI_MSI_FLAGS_ENABLE;

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    /* update MSI */
    if (*val & PCI_MSI_FLAGS_ENABLE) {
        /* setup MSI pirq for the first time */
        if (!msi->initialized) {
            /* Init physical one */
            XEN_PT_LOG(&s->dev, "setup MSI (register: %x).\n", *val);
            if (xen_pt_msi_setup(s)) {
                /* We do not broadcast the error to the framework code, so
                 * that MSI errors are contained in MSI emulation code and
                 * QEMU can go on running.
                 * Guest MSI would be actually not working.
                 */
                *val &= ~PCI_MSI_FLAGS_ENABLE;
                XEN_PT_WARN(&s->dev, "Can not map MSI (register: %x)!\n", *val);
                return 0;
            }
            if (xen_pt_msi_update(s)) {
                *val &= ~PCI_MSI_FLAGS_ENABLE;
                XEN_PT_WARN(&s->dev, "Can not bind MSI (register: %x)!\n", *val);
                return 0;
            }
            msi->initialized = true;
            msi->mapped = true;
        }
        msi->flags |= PCI_MSI_FLAGS_ENABLE;
    } else if (msi->mapped) {
        /* guest cleared ENABLE: tear down the host-side MSI binding */
        xen_pt_msi_disable(s);
    }

    return 0;
}
  1034. /* initialize Message Upper Address register */
  1035. static int xen_pt_msgaddr64_reg_init(XenPCIPassthroughState *s,
  1036. XenPTRegInfo *reg, uint32_t real_offset,
  1037. uint32_t *data)
  1038. {
  1039. /* no need to initialize in case of 32 bit type */
  1040. if (!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) {
  1041. *data = XEN_PT_INVALID_REG;
  1042. } else {
  1043. *data = reg->init_val;
  1044. }
  1045. return 0;
  1046. }
  1047. /* this function will be called twice (for 32 bit and 64 bit type) */
  1048. /* initialize Message Data register */
  1049. static int xen_pt_msgdata_reg_init(XenPCIPassthroughState *s,
  1050. XenPTRegInfo *reg, uint32_t real_offset,
  1051. uint32_t *data)
  1052. {
  1053. uint32_t flags = s->msi->flags;
  1054. uint32_t offset = reg->offset;
  1055. /* check the offset whether matches the type or not */
  1056. if (xen_pt_msi_check_type(offset, flags, DATA)) {
  1057. *data = reg->init_val;
  1058. } else {
  1059. *data = XEN_PT_INVALID_REG;
  1060. }
  1061. return 0;
  1062. }
  1063. /* this function will be called twice (for 32 bit and 64 bit type) */
  1064. /* initialize Mask register */
  1065. static int xen_pt_mask_reg_init(XenPCIPassthroughState *s,
  1066. XenPTRegInfo *reg, uint32_t real_offset,
  1067. uint32_t *data)
  1068. {
  1069. uint32_t flags = s->msi->flags;
  1070. /* check the offset whether matches the type or not */
  1071. if (!(flags & PCI_MSI_FLAGS_MASKBIT)) {
  1072. *data = XEN_PT_INVALID_REG;
  1073. } else if (xen_pt_msi_check_type(reg->offset, flags, MASK)) {
  1074. *data = reg->init_val;
  1075. } else {
  1076. *data = XEN_PT_INVALID_REG;
  1077. }
  1078. return 0;
  1079. }
  1080. /* this function will be called twice (for 32 bit and 64 bit type) */
  1081. /* initialize Pending register */
  1082. static int xen_pt_pending_reg_init(XenPCIPassthroughState *s,
  1083. XenPTRegInfo *reg, uint32_t real_offset,
  1084. uint32_t *data)
  1085. {
  1086. uint32_t flags = s->msi->flags;
  1087. /* check the offset whether matches the type or not */
  1088. if (!(flags & PCI_MSI_FLAGS_MASKBIT)) {
  1089. *data = XEN_PT_INVALID_REG;
  1090. } else if (xen_pt_msi_check_type(reg->offset, flags, PENDING)) {
  1091. *data = reg->init_val;
  1092. } else {
  1093. *data = XEN_PT_INVALID_REG;
  1094. }
  1095. return 0;
  1096. }
/* write Message Address register */
/* Handle a guest write to the MSI lower address dword: update the
 * emulated register and the cached s->msi->addr_lo, and rebind the MSI
 * pirq if the address changed while mapped.  Nothing is written through
 * to the device (merge mask 0 keeps dev_value). */
static int xen_pt_msgaddr32_reg_write(XenPCIPassthroughState *s,
                                      XenPTReg *cfg_entry, uint32_t *val,
                                      uint32_t dev_value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t writable_mask = 0;
    uint32_t old_addr = *cfg_entry->ptr.word;
    uint32_t *data = cfg_entry->ptr.word;

    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
    s->msi->addr_lo = *data;

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);

    /* update MSI */
    if (*data != old_addr) {
        if (s->msi->mapped) {
            xen_pt_msi_update(s);
        }
    }

    return 0;
}
/* write Message Upper Address register */
/* Handle a guest write to the MSI upper address dword.  Rejects the write
 * (returns -1) if the capability is not 64-bit capable; otherwise updates
 * the emulated register and cached s->msi->addr_hi, rebinding the MSI
 * pirq if the address changed while mapped. */
static int xen_pt_msgaddr64_reg_write(XenPCIPassthroughState *s,
                                      XenPTReg *cfg_entry, uint32_t *val,
                                      uint32_t dev_value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t writable_mask = 0;
    uint32_t old_addr = *cfg_entry->ptr.word;
    uint32_t *data = cfg_entry->ptr.word;

    /* check whether the type is 64 bit or not */
    if (!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) {
        XEN_PT_ERR(&s->dev,
                   "Can't write to the upper address without 64 bit support\n");
        return -1;
    }

    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
    /* update the msi_info too */
    s->msi->addr_hi = *data;

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);

    /* update MSI */
    if (*data != old_addr) {
        if (s->msi->mapped) {
            xen_pt_msi_update(s);
        }
    }

    return 0;
}
/* this function will be called twice (for 32 bit and 64 bit type) */
/* write Message Data register */
/* Handle a guest write to the MSI Message Data register.  Rejects the
 * write (returns -1) if this entry's offset does not match the
 * capability's 32/64-bit layout; otherwise updates the emulated register
 * and cached s->msi->data, rebinding the MSI pirq if the data changed
 * while mapped.  Nothing is written through to the device. */
static int xen_pt_msgdata_reg_write(XenPCIPassthroughState *s,
                                    XenPTReg *cfg_entry, uint16_t *val,
                                    uint16_t dev_value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTMSI *msi = s->msi;
    uint16_t writable_mask = 0;
    uint16_t old_data = *cfg_entry->ptr.half_word;
    uint32_t offset = reg->offset;
    uint16_t *data = cfg_entry->ptr.half_word;

    /* check the offset whether matches the type or not */
    if (!xen_pt_msi_check_type(offset, msi->flags, DATA)) {
        /* exit I/O emulator */
        XEN_PT_ERR(&s->dev, "the offset does not match the 32/64 bit type!\n");
        return -1;
    }

    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
    /* update the msi_info too */
    msi->data = *data;

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);

    /* update MSI */
    if (*data != old_data) {
        if (msi->mapped) {
            xen_pt_msi_update(s);
        }
    }

    return 0;
}
  1183. static int xen_pt_mask_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
  1184. uint32_t *val, uint32_t dev_value,
  1185. uint32_t valid_mask)
  1186. {
  1187. int rc;
  1188. rc = xen_pt_long_reg_write(s, cfg_entry, val, dev_value, valid_mask);
  1189. if (rc) {
  1190. return rc;
  1191. }
  1192. s->msi->mask = *val;
  1193. return 0;
  1194. }
/* MSI Capability Structure reg static information table.
 * Entries for the 32-bit and 64-bit register layouts both appear; the
 * init hooks mark the non-matching variant XEN_PT_INVALID_REG at runtime. */
static XenPTRegInfo xen_pt_emu_reg_msi[] = {
    /* Next Pointer reg */
    {
        .offset = PCI_CAP_LIST_NEXT,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0xFF,
        .emu_mask = 0xFF,
        .init = xen_pt_ptr_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* Message Control reg */
    {
        .offset = PCI_MSI_FLAGS,
        .size = 2,
        .init_val = 0x0000,
        .res_mask = 0xFE00,
        .ro_mask = 0x018E,
        .emu_mask = 0x017E,
        .init = xen_pt_msgctrl_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_msgctrl_reg_write,
    },
    /* Message Address reg: bits 0-1 read-only per the MSI spec */
    {
        .offset = PCI_MSI_ADDRESS_LO,
        .size = 4,
        .init_val = 0x00000000,
        .ro_mask = 0x00000003,
        .emu_mask = 0xFFFFFFFF,
        .init = xen_pt_common_reg_init,
        .u.dw.read = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_msgaddr32_reg_write,
    },
    /* Message Upper Address reg (if PCI_MSI_FLAGS_64BIT set) */
    {
        .offset = PCI_MSI_ADDRESS_HI,
        .size = 4,
        .init_val = 0x00000000,
        .ro_mask = 0x00000000,
        .emu_mask = 0xFFFFFFFF,
        .init = xen_pt_msgaddr64_reg_init,
        .u.dw.read = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_msgaddr64_reg_write,
    },
    /* Message Data reg (16 bits of data for 32-bit devices) */
    {
        .offset = PCI_MSI_DATA_32,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0x0000,
        .emu_mask = 0xFFFF,
        .init = xen_pt_msgdata_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_msgdata_reg_write,
    },
    /* Message Data reg (16 bits of data for 64-bit devices) */
    {
        .offset = PCI_MSI_DATA_64,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0x0000,
        .emu_mask = 0xFFFF,
        .init = xen_pt_msgdata_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_msgdata_reg_write,
    },
    /* Mask reg (if PCI_MSI_FLAGS_MASKBIT set, for 32-bit devices) */
    {
        .offset = PCI_MSI_MASK_32,
        .size = 4,
        .init_val = 0x00000000,
        .ro_mask = 0xFFFFFFFF,
        .emu_mask = 0xFFFFFFFF,
        .init = xen_pt_mask_reg_init,
        .u.dw.read = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_mask_reg_write,
    },
    /* Mask reg (if PCI_MSI_FLAGS_MASKBIT set, for 64-bit devices) */
    {
        .offset = PCI_MSI_MASK_64,
        .size = 4,
        .init_val = 0x00000000,
        .ro_mask = 0xFFFFFFFF,
        .emu_mask = 0xFFFFFFFF,
        .init = xen_pt_mask_reg_init,
        .u.dw.read = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_mask_reg_write,
    },
    /* Pending reg (if PCI_MSI_FLAGS_MASKBIT set, for 32-bit devices);
     * read-only and not emulated (emu_mask 0): reads pass through */
    {
        .offset = PCI_MSI_MASK_32 + 4,
        .size = 4,
        .init_val = 0x00000000,
        .ro_mask = 0xFFFFFFFF,
        .emu_mask = 0x00000000,
        .init = xen_pt_pending_reg_init,
        .u.dw.read = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_long_reg_write,
    },
    /* Pending reg (if PCI_MSI_FLAGS_MASKBIT set, for 64-bit devices) */
    {
        .offset = PCI_MSI_MASK_64 + 4,
        .size = 4,
        .init_val = 0x00000000,
        .ro_mask = 0xFFFFFFFF,
        .emu_mask = 0x00000000,
        .init = xen_pt_pending_reg_init,
        .u.dw.read = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_long_reg_write,
    },
    /* table terminator: .size == 0 */
    {
        .size = 0,
    },
};
  1312. /**************************************
  1313. * MSI-X Capability
  1314. */
/* Message Control register for MSI-X */
/* Initialize the emulated MSI-X Message Control register.
 * Side effect on the host device: if MSI-X is found already enabled it is
 * disabled first.  Records the control-register offset in s->msix. */
static int xen_pt_msixctrl_reg_init(XenPCIPassthroughState *s,
                                    XenPTRegInfo *reg, uint32_t real_offset,
                                    uint32_t *data)
{
    uint16_t reg_field;
    int rc;

    /* use I/O device register's value as initial value */
    rc = xen_host_pci_get_word(&s->real_device, real_offset, &reg_field);
    if (rc) {
        return rc;
    }
    if (reg_field & PCI_MSIX_FLAGS_ENABLE) {
        XEN_PT_LOG(&s->dev, "MSIX already enabled, disabling it first\n");
        xen_host_pci_set_word(&s->real_device, real_offset,
                              reg_field & ~PCI_MSIX_FLAGS_ENABLE);
    }

    s->msix->ctrl_offset = real_offset;

    *data = reg->init_val;
    return 0;
}
/* Handle a guest write to the MSI-X Message Control register: merge the
 * guest value, then bind or tear down host MSI-X to track the ENABLE and
 * MASKALL bits, and cache enabled/maskall state in s->msix. */
static int xen_pt_msixctrl_reg_write(XenPCIPassthroughState *s,
                                     XenPTReg *cfg_entry, uint16_t *val,
                                     uint16_t dev_value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    int debug_msix_enabled_old;
    uint16_t *data = cfg_entry->ptr.half_word;

    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    /* update MSI-X: enabled and not masked-all -> (re)bind vectors */
    if ((*val & PCI_MSIX_FLAGS_ENABLE)
        && !(*val & PCI_MSIX_FLAGS_MASKALL)) {
        xen_pt_msix_update(s);
    } else if (!(*val & PCI_MSIX_FLAGS_ENABLE) && s->msix->enabled) {
        xen_pt_msix_disable(s);
    }

    s->msix->maskall = *val & PCI_MSIX_FLAGS_MASKALL;

    /* log only on an enable/disable transition */
    debug_msix_enabled_old = s->msix->enabled;
    s->msix->enabled = !!(*val & PCI_MSIX_FLAGS_ENABLE);
    if (s->msix->enabled != debug_msix_enabled_old) {
        XEN_PT_LOG(&s->dev, "%s MSI-X\n",
                   s->msix->enabled ? "enable" : "disable");
    }

    return 0;
}
/* MSI-X Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_msix[] = {
    /* Next Pointer reg */
    {
        .offset = PCI_CAP_LIST_NEXT,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0xFF,
        .emu_mask = 0xFF,
        .init = xen_pt_ptr_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* Message Control reg: PCI_MSI_FLAGS is reused here since the MSI-X
     * control word sits at the same capability offset (0x02). */
    {
        .offset = PCI_MSI_FLAGS,
        .size = 2,
        .init_val = 0x0000,
        .res_mask = 0x3800,
        .ro_mask = 0x07FF,
        .emu_mask = 0x0000,
        .init = xen_pt_msixctrl_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_msixctrl_reg_write,
    },
    /* table terminator: .size == 0 */
    {
        .size = 0,
    },
};
/* Intel IGD OpRegion register group: a single fully-emulated dword handled
 * by dedicated read/write hooks (no .init hook). */
static XenPTRegInfo xen_pt_emu_reg_igd_opregion[] = {
    /* Intel IGFX OpRegion reg */
    {
        .offset = 0x0,
        .size = 4,
        .init_val = 0,
        .emu_mask = 0xFFFFFFFF,
        .u.dw.read = xen_pt_intel_opregion_read,
        .u.dw.write = xen_pt_intel_opregion_write,
    },
    /* table terminator: .size == 0 */
    {
        .size = 0,
    },
};
  1409. /****************************
  1410. * Capabilities
  1411. */
/* capability structure register group size functions */
/* Default size hook: the group has a fixed size taken straight from the
 * static table entry. */
static int xen_pt_reg_grp_size_init(XenPCIPassthroughState *s,
                                    const XenPTRegGroupInfo *grp_reg,
                                    uint32_t base_offset, uint8_t *size)
{
    *size = grp_reg->grp_size;
    return 0;
}
/* get Vendor Specific Capability Structure register group size */
/* The vendor-specific capability stores its own length at offset 0x02
 * (byte after the next-pointer); read it from the real device. */
static int xen_pt_vendor_size_init(XenPCIPassthroughState *s,
                                   const XenPTRegGroupInfo *grp_reg,
                                   uint32_t base_offset, uint8_t *size)
{
    return xen_host_pci_get_byte(&s->real_device, base_offset + 0x02, size);
}
/* get PCI Express Capability Structure register group size */
/* Determine the PCIe capability length from the capability version and
 * the device/port type read from the real device.  Only endpoint types
 * are supported; anything else fails with -1. */
static int xen_pt_pcie_size_init(XenPCIPassthroughState *s,
                                 const XenPTRegGroupInfo *grp_reg,
                                 uint32_t base_offset, uint8_t *size)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint8_t version = get_capability_version(s, base_offset);
    uint8_t type = get_device_type(s, base_offset);
    uint8_t pcie_size = 0;

    /* calculate size depend on capability version and device/port type */
    /* in case of PCI Express Base Specification Rev 1.x */
    if (version == 1) {
        /* The PCI Express Capabilities, Device Capabilities, and Device
         * Status/Control registers are required for all PCI Express devices.
         * The Link Capabilities and Link Status/Control are required for all
         * Endpoints that are not Root Complex Integrated Endpoints. Endpoints
         * are not required to implement registers other than those listed
         * above and terminate the capability structure.
         */
        switch (type) {
        case PCI_EXP_TYPE_ENDPOINT:
        case PCI_EXP_TYPE_LEG_END:
            /* through Link Status/Control */
            pcie_size = 0x14;
            break;
        case PCI_EXP_TYPE_RC_END:
            /* has no link */
            pcie_size = 0x0C;
            break;
        /* only EndPoint passthrough is supported */
        case PCI_EXP_TYPE_ROOT_PORT:
        case PCI_EXP_TYPE_UPSTREAM:
        case PCI_EXP_TYPE_DOWNSTREAM:
        case PCI_EXP_TYPE_PCI_BRIDGE:
        case PCI_EXP_TYPE_PCIE_BRIDGE:
        case PCI_EXP_TYPE_RC_EC:
        default:
            XEN_PT_ERR(d, "Unsupported device/port type 0x%x.\n", type);
            return -1;
        }
    }
    /* in case of PCI Express Base Specification Rev 2.0 */
    else if (version == 2) {
        switch (type) {
        case PCI_EXP_TYPE_ENDPOINT:
        case PCI_EXP_TYPE_LEG_END:
        case PCI_EXP_TYPE_RC_END:
            /* For Functions that do not implement the registers,
             * these spaces must be hardwired to 0b.
             */
            /* v2 capability is a fixed 0x3C bytes */
            pcie_size = 0x3C;
            break;
        /* only EndPoint passthrough is supported */
        case PCI_EXP_TYPE_ROOT_PORT:
        case PCI_EXP_TYPE_UPSTREAM:
        case PCI_EXP_TYPE_DOWNSTREAM:
        case PCI_EXP_TYPE_PCI_BRIDGE:
        case PCI_EXP_TYPE_PCIE_BRIDGE:
        case PCI_EXP_TYPE_RC_EC:
        default:
            XEN_PT_ERR(d, "Unsupported device/port type 0x%x.\n", type);
            return -1;
        }
    } else {
        XEN_PT_ERR(d, "Unsupported capability version 0x%x.\n", version);
        return -1;
    }

    *size = pcie_size;
    return 0;
}
/* get MSI Capability Structure register group size */
/* Compute the MSI capability length from the Message Control flags and
 * allocate the passthrough MSI state (s->msi).
 * Base 0xa bytes = cap header + control + 32-bit address + data. */
static int xen_pt_msi_size_init(XenPCIPassthroughState *s,
                                const XenPTRegGroupInfo *grp_reg,
                                uint32_t base_offset, uint8_t *size)
{
    uint16_t msg_ctrl = 0;
    uint8_t msi_size = 0xa;
    int rc;

    rc = xen_host_pci_get_word(&s->real_device, base_offset + PCI_MSI_FLAGS,
                               &msg_ctrl);
    if (rc) {
        return rc;
    }
    /* a 64-bit capable function adds the upper-address dword */
    if (msg_ctrl & PCI_MSI_FLAGS_64BIT) {
        msi_size += 4;
    }
    /* per-vector masking adds the mask and pending dwords (and pads the
     * data register to a dword boundary) */
    if (msg_ctrl & PCI_MSI_FLAGS_MASKBIT) {
        msi_size += 10;
    }

    s->msi = g_new0(XenPTMSI, 1);
    s->msi->pirq = XEN_PT_UNASSIGNED_PIRQ;

    *size = msi_size;
    return 0;
}
  1521. /* get MSI-X Capability Structure register group size */
  1522. static int xen_pt_msix_size_init(XenPCIPassthroughState *s,
  1523. const XenPTRegGroupInfo *grp_reg,
  1524. uint32_t base_offset, uint8_t *size)
  1525. {
  1526. int rc = 0;
  1527. rc = xen_pt_msix_init(s, base_offset);
  1528. if (rc < 0) {
  1529. XEN_PT_ERR(&s->dev, "Internal error: Invalid xen_pt_msix_init.\n");
  1530. return rc;
  1531. }
  1532. *size = grp_reg->grp_size;
  1533. return 0;
  1534. }
/* Master table of config-space register groups.
 * XEN_PT_GRP_TYPE_EMU groups are emulated through .emu_regs;
 * XEN_PT_GRP_TYPE_HARDWIRED groups read as zero to the guest.
 * grp_size 0xFF means the size is determined at runtime by .size_init. */
static const XenPTRegGroupInfo xen_pt_emu_reg_grps[] = {
    /* Header Type0 reg group */
    {
        .grp_id = 0xFF,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0x40,
        .size_init = xen_pt_reg_grp_size_init,
        .emu_regs = xen_pt_emu_reg_header0,
    },
    /* PCI PowerManagement Capability reg group */
    {
        .grp_id = PCI_CAP_ID_PM,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = PCI_PM_SIZEOF,
        .size_init = xen_pt_reg_grp_size_init,
        .emu_regs = xen_pt_emu_reg_pm,
    },
    /* AGP Capability Structure reg group */
    {
        .grp_id = PCI_CAP_ID_AGP,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size = 0x30,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* Vital Product Data Capability Structure reg group */
    {
        .grp_id = PCI_CAP_ID_VPD,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0x08,
        .size_init = xen_pt_reg_grp_size_init,
        .emu_regs = xen_pt_emu_reg_vpd,
    },
    /* Slot Identification reg group */
    {
        .grp_id = PCI_CAP_ID_SLOTID,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size = 0x04,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* MSI Capability Structure reg group (size depends on 64BIT/MASKBIT) */
    {
        .grp_id = PCI_CAP_ID_MSI,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0xFF,
        .size_init = xen_pt_msi_size_init,
        .emu_regs = xen_pt_emu_reg_msi,
    },
    /* PCI-X Capabilities List Item reg group */
    {
        .grp_id = PCI_CAP_ID_PCIX,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size = 0x18,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* Vendor Specific Capability Structure reg group (size read from dev) */
    {
        .grp_id = PCI_CAP_ID_VNDR,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0xFF,
        .size_init = xen_pt_vendor_size_init,
        .emu_regs = xen_pt_emu_reg_vendor,
    },
    /* SHPC Capability List Item reg group */
    {
        .grp_id = PCI_CAP_ID_SHPC,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size = 0x08,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* Subsystem ID and Subsystem Vendor ID Capability List Item reg group */
    {
        .grp_id = PCI_CAP_ID_SSVID,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size = 0x08,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* AGP 8x Capability Structure reg group */
    {
        .grp_id = PCI_CAP_ID_AGP3,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size = 0x30,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* PCI Express Capability Structure reg group (size from version/type) */
    {
        .grp_id = PCI_CAP_ID_EXP,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0xFF,
        .size_init = xen_pt_pcie_size_init,
        .emu_regs = xen_pt_emu_reg_pcie,
    },
    /* MSI-X Capability Structure reg group */
    {
        .grp_id = PCI_CAP_ID_MSIX,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0x0C,
        .size_init = xen_pt_msix_size_init,
        .emu_regs = xen_pt_emu_reg_msix,
    },
    /* Intel IGD Opregion group */
    {
        .grp_id = XEN_PCI_INTEL_OPREGION,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0x4,
        .size_init = xen_pt_reg_grp_size_init,
        .emu_regs = xen_pt_emu_reg_igd_opregion,
    },
    /* table terminator: .grp_size == 0 */
    {
        .grp_size = 0,
    },
};
/* initialize Capabilities Pointer or Next Pointer register */
/* Walk the real device's capability chain starting at the pointer value
 * read from @real_offset, and set *data to the offset of the first
 * capability that should be visible to the guest: one that is not hidden
 * by xen_pt_hide_dev_cap and whose group is emulated (not hardwired).
 * Falls out with reg_field == 0 (end of chain) if nothing qualifies. */
static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s,
                               XenPTRegInfo *reg, uint32_t real_offset,
                               uint32_t *data)
{
    int i, rc;
    uint8_t reg_field;
    uint8_t cap_id = 0;

    rc = xen_host_pci_get_byte(&s->real_device, real_offset, &reg_field);
    if (rc) {
        return rc;
    }
    /* find capability offset */
    while (reg_field) {
        /* check the capability at reg_field against every known group */
        for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) {
            if (xen_pt_hide_dev_cap(&s->real_device,
                                    xen_pt_emu_reg_grps[i].grp_id)) {
                continue;
            }

            rc = xen_host_pci_get_byte(&s->real_device,
                                       reg_field + PCI_CAP_LIST_ID, &cap_id);
            if (rc) {
                XEN_PT_ERR(&s->dev, "Failed to read capability @0x%x (rc:%d)\n",
                           reg_field + PCI_CAP_LIST_ID, rc);
                return rc;
            }

            if (xen_pt_emu_reg_grps[i].grp_id == cap_id) {
                if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) {
                    /* emulated capability: expose this offset */
                    goto out;
                }
                /* ignore the 0 hardwired capability, find next one */
                break;
            }
        }

        /* next capability */
        rc = xen_host_pci_get_byte(&s->real_device,
                                   reg_field + PCI_CAP_LIST_NEXT, &reg_field);
        if (rc) {
            return rc;
        }
    }

out:
    *data = reg_field;
    return 0;
}
  1691. /*************
  1692. * Main
  1693. */
  1694. static uint8_t find_cap_offset(XenPCIPassthroughState *s, uint8_t cap)
  1695. {
  1696. uint8_t id;
  1697. unsigned max_cap = XEN_PCI_CAP_MAX;
  1698. uint8_t pos = PCI_CAPABILITY_LIST;
  1699. uint8_t status = 0;
  1700. if (xen_host_pci_get_byte(&s->real_device, PCI_STATUS, &status)) {
  1701. return 0;
  1702. }
  1703. if ((status & PCI_STATUS_CAP_LIST) == 0) {
  1704. return 0;
  1705. }
  1706. while (max_cap--) {
  1707. if (xen_host_pci_get_byte(&s->real_device, pos, &pos)) {
  1708. break;
  1709. }
  1710. if (pos < PCI_CONFIG_HEADER_SIZE) {
  1711. break;
  1712. }
  1713. pos &= ~3;
  1714. if (xen_host_pci_get_byte(&s->real_device,
  1715. pos + PCI_CAP_LIST_ID, &id)) {
  1716. break;
  1717. }
  1718. if (id == 0xff) {
  1719. break;
  1720. }
  1721. if (id == cap) {
  1722. return pos;
  1723. }
  1724. pos += PCI_CAP_LIST_NEXT;
  1725. }
  1726. return 0;
  1727. }
  1728. static void xen_pt_config_reg_init(XenPCIPassthroughState *s,
  1729. XenPTRegGroup *reg_grp, XenPTRegInfo *reg,
  1730. Error **errp)
  1731. {
  1732. XenPTReg *reg_entry;
  1733. uint32_t data = 0;
  1734. int rc = 0;
  1735. reg_entry = g_new0(XenPTReg, 1);
  1736. reg_entry->reg = reg;
  1737. if (reg->init) {
  1738. uint32_t host_mask, size_mask;
  1739. unsigned int offset;
  1740. uint32_t val = 0;
  1741. /* initialize emulate register */
  1742. rc = reg->init(s, reg_entry->reg,
  1743. reg_grp->base_offset + reg->offset, &data);
  1744. if (rc < 0) {
  1745. g_free(reg_entry);
  1746. error_setg(errp, "Init emulate register fail");
  1747. return;
  1748. }
  1749. if (data == XEN_PT_INVALID_REG) {
  1750. /* free unused BAR register entry */
  1751. g_free(reg_entry);
  1752. return;
  1753. }
  1754. /* Sync up the data to dev.config */
  1755. offset = reg_grp->base_offset + reg->offset;
  1756. size_mask = 0xFFFFFFFF >> ((4 - reg->size) << 3);
  1757. switch (reg->size) {
  1758. case 1: rc = xen_host_pci_get_byte(&s->real_device, offset, (uint8_t *)&val);
  1759. break;
  1760. case 2: rc = xen_host_pci_get_word(&s->real_device, offset, (uint16_t *)&val);
  1761. break;
  1762. case 4: rc = xen_host_pci_get_long(&s->real_device, offset, &val);
  1763. break;
  1764. default: abort();
  1765. }
  1766. if (rc) {
  1767. /* Serious issues when we cannot read the host values! */
  1768. g_free(reg_entry);
  1769. error_setg(errp, "Cannot read host values");
  1770. return;
  1771. }
  1772. /* Set bits in emu_mask are the ones we emulate. The dev.config shall
  1773. * contain the emulated view of the guest - therefore we flip the mask
  1774. * to mask out the host values (which dev.config initially has) . */
  1775. host_mask = size_mask & ~reg->emu_mask;
  1776. if ((data & host_mask) != (val & host_mask)) {
  1777. uint32_t new_val;
  1778. /*
  1779. * Merge the emulated bits (data) with the host bits (val)
  1780. * and mask out the bits past size to enable restoration
  1781. * of the proper value for logging below.
  1782. */
  1783. new_val = XEN_PT_MERGE_VALUE(val, data, host_mask) & size_mask;
  1784. /* Leave intact host and emulated values past the size - even though
  1785. * we do not care as we write per reg->size granularity, but for the
  1786. * logging below lets have the proper value. */
  1787. new_val |= ((val | data)) & ~size_mask;
  1788. XEN_PT_LOG(&s->dev,"Offset 0x%04x mismatch! Emulated=0x%04x, host=0x%04x, syncing to 0x%04x.\n",
  1789. offset, data, val, new_val);
  1790. val = new_val;
  1791. } else
  1792. val = data;
  1793. if (val & ~size_mask) {
  1794. error_setg(errp, "Offset 0x%04x:0x%04x expands past"
  1795. " register size (%d)", offset, val, reg->size);
  1796. g_free(reg_entry);
  1797. return;
  1798. }
  1799. /* This could be just pci_set_long as we don't modify the bits
  1800. * past reg->size, but in case this routine is run in parallel or the
  1801. * init value is larger, we do not want to over-write registers. */
  1802. switch (reg->size) {
  1803. case 1: pci_set_byte(s->dev.config + offset, (uint8_t)val);
  1804. break;
  1805. case 2: pci_set_word(s->dev.config + offset, (uint16_t)val);
  1806. break;
  1807. case 4: pci_set_long(s->dev.config + offset, val);
  1808. break;
  1809. default: abort();
  1810. }
  1811. /* set register value pointer to the data. */
  1812. reg_entry->ptr.byte = s->dev.config + offset;
  1813. }
  1814. /* list add register entry */
  1815. QLIST_INSERT_HEAD(&reg_grp->reg_tbl_list, reg_entry, entries);
  1816. }
/* Build the emulated config-space state for a passed-through device.
 *
 * Iterates over xen_pt_emu_reg_grps[] (terminated by grp_size == 0);
 * for each capability-based group, locates it on the host device (skipping
 * hidden or absent capabilities), then creates a XenPTRegGroup and - for
 * emulated groups - a XenPTReg per register via xen_pt_config_reg_init().
 * On any failure, errp is set and all partially-built state is torn down
 * with xen_pt_config_delete().
 */
void xen_pt_config_init(XenPCIPassthroughState *s, Error **errp)
{
    ERRP_GUARD();
    int i, rc;

    QLIST_INIT(&s->reg_grps);

    for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) {
        uint32_t reg_grp_offset = 0;
        XenPTRegGroup *reg_grp_entry = NULL;

        /* Capability-based groups (grp_id is a PCI cap ID, not the 0xFF
         * header group or the synthetic Intel opregion group) must actually
         * exist on the host device and not be hidden. */
        if (xen_pt_emu_reg_grps[i].grp_id != 0xFF
            && xen_pt_emu_reg_grps[i].grp_id != XEN_PCI_INTEL_OPREGION) {
            if (xen_pt_hide_dev_cap(&s->real_device,
                                    xen_pt_emu_reg_grps[i].grp_id)) {
                continue;
            }

            reg_grp_offset = find_cap_offset(s, xen_pt_emu_reg_grps[i].grp_id);

            if (!reg_grp_offset) {
                continue;
            }
        }

        /* The opregion group is only instantiated for Intel IGD VGA
         * passthrough; its base offset is the synthetic group ID itself. */
        if (xen_pt_emu_reg_grps[i].grp_id == XEN_PCI_INTEL_OPREGION) {
            if (!is_igd_vga_passthrough(&s->real_device) ||
                s->real_device.vendor_id != PCI_VENDOR_ID_INTEL) {
                continue;
            }
            /*
             * By default we will trap up to 0x40 in the cfg space.
             * If an intel device is pass through we need to trap 0xfc,
             * therefore the size should be 0xff.
             */
            reg_grp_offset = XEN_PCI_INTEL_OPREGION;
        }

        reg_grp_entry = g_new0(XenPTRegGroup, 1);
        QLIST_INIT(&reg_grp_entry->reg_tbl_list);
        /* Insert before size_init so that xen_pt_config_delete() can free
         * this group even if initialization fails below. */
        QLIST_INSERT_HEAD(&s->reg_grps, reg_grp_entry, entries);

        reg_grp_entry->base_offset = reg_grp_offset;
        reg_grp_entry->reg_grp = xen_pt_emu_reg_grps + i;
        if (xen_pt_emu_reg_grps[i].size_init) {
            /* get register group size */
            rc = xen_pt_emu_reg_grps[i].size_init(s, reg_grp_entry->reg_grp,
                                                  reg_grp_offset,
                                                  &reg_grp_entry->size);
            if (rc < 0) {
                error_setg(errp, "Failed to initialize %d/%zu, type = 0x%x,"
                           " rc: %d", i, ARRAY_SIZE(xen_pt_emu_reg_grps),
                           xen_pt_emu_reg_grps[i].grp_type, rc);
                xen_pt_config_delete(s);
                return;
            }
        }

        if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) {
            if (xen_pt_emu_reg_grps[i].emu_regs) {
                int j = 0;
                XenPTRegInfo *regs = xen_pt_emu_reg_grps[i].emu_regs;

                /* initialize capability register */
                for (j = 0; regs->size != 0; j++, regs++) {
                    xen_pt_config_reg_init(s, reg_grp_entry, regs, errp);
                    if (*errp) {
                        error_append_hint(errp, "Failed to init register %d"
                                          " offsets 0x%x in grp_type = 0x%x (%d/%zu)",
                                          j,
                                          regs->offset,
                                          xen_pt_emu_reg_grps[i].grp_type,
                                          i, ARRAY_SIZE(xen_pt_emu_reg_grps));
                        xen_pt_config_delete(s);
                        return;
                    }
                }
            }
        }
    }
}
  1888. /* delete all emulate register */
  1889. void xen_pt_config_delete(XenPCIPassthroughState *s)
  1890. {
  1891. struct XenPTRegGroup *reg_group, *next_grp;
  1892. struct XenPTReg *reg, *next_reg;
  1893. /* free MSI/MSI-X info table */
  1894. if (s->msix) {
  1895. xen_pt_msix_unmap(s);
  1896. }
  1897. g_free(s->msi);
  1898. /* free all register group entry */
  1899. QLIST_FOREACH_SAFE(reg_group, &s->reg_grps, entries, next_grp) {
  1900. /* free all register entry */
  1901. QLIST_FOREACH_SAFE(reg, &reg_group->reg_tbl_list, entries, next_reg) {
  1902. QLIST_REMOVE(reg, entries);
  1903. g_free(reg);
  1904. }
  1905. QLIST_REMOVE(reg_group, entries);
  1906. g_free(reg_group);
  1907. }
  1908. }