  1. /*
  2. * Copyright (c) 2007, Neocleus Corporation.
  3. * Copyright (c) 2007, Intel Corporation.
  4. *
  5. * This work is licensed under the terms of the GNU GPL, version 2. See
  6. * the COPYING file in the top-level directory.
  7. *
  8. * Alex Novik <alex@neocleus.com>
  9. * Allen Kay <allen.m.kay@intel.com>
  10. * Guy Zana <guy@neocleus.com>
  11. *
  12. * This file implements direct PCI assignment to a HVM guest
  13. */
  14. #include "qemu/osdep.h"
  15. #include "qapi/error.h"
  16. #include "qemu/timer.h"
  17. #include "hw/xen/xen-legacy-backend.h"
  18. #include "xen_pt.h"
  19. #define XEN_PT_MERGE_VALUE(value, data, val_mask) \
  20. (((value) & (val_mask)) | ((data) & ~(val_mask)))
  21. #define XEN_PT_INVALID_REG 0xFFFFFFFF /* invalid register value */
  22. /* prototype */
  23. static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg,
  24. uint32_t real_offset, uint32_t *data);
  25. /* helper */
  26. /* A return value of 1 means the capability should NOT be exposed to guest. */
  27. static int xen_pt_hide_dev_cap(const XenHostPCIDevice *d, uint8_t grp_id)
  28. {
  29. switch (grp_id) {
  30. case PCI_CAP_ID_EXP:
  31. /* The PCI Express Capability Structure of the VF of Intel 82599 10GbE
  32. * Controller looks trivial, e.g., the PCI Express Capabilities
  33. * Register is 0. We should not try to expose it to guest.
  34. *
  35. * The datasheet is available at
  36. * http://download.intel.com/design/network/datashts/82599_datasheet.pdf
  37. *
  38. * See 'Table 9.7. VF PCIe Configuration Space' of the datasheet, the
  39. * PCI Express Capability Structure of the VF of Intel 82599 10GbE
  40. * Controller looks trivial, e.g., the PCI Express Capabilities
  41. * Register is 0, so the Capability Version is 0 and
  42. * xen_pt_pcie_size_init() would fail.
  43. */
  44. if (d->vendor_id == PCI_VENDOR_ID_INTEL &&
  45. d->device_id == PCI_DEVICE_ID_INTEL_82599_SFP_VF) {
  46. return 1;
  47. }
  48. break;
  49. }
  50. return 0;
  51. }
  52. /* find emulate register group entry */
  53. XenPTRegGroup *xen_pt_find_reg_grp(XenPCIPassthroughState *s, uint32_t address)
  54. {
  55. XenPTRegGroup *entry = NULL;
  56. /* find register group entry */
  57. QLIST_FOREACH(entry, &s->reg_grps, entries) {
  58. /* check address */
  59. if ((entry->base_offset <= address)
  60. && ((entry->base_offset + entry->size) > address)) {
  61. return entry;
  62. }
  63. }
  64. /* group entry not found */
  65. return NULL;
  66. }
  67. /* find emulate register entry */
  68. XenPTReg *xen_pt_find_reg(XenPTRegGroup *reg_grp, uint32_t address)
  69. {
  70. XenPTReg *reg_entry = NULL;
  71. XenPTRegInfo *reg = NULL;
  72. uint32_t real_offset = 0;
  73. /* find register entry */
  74. QLIST_FOREACH(reg_entry, &reg_grp->reg_tbl_list, entries) {
  75. reg = reg_entry->reg;
  76. real_offset = reg_grp->base_offset + reg->offset;
  77. /* check address */
  78. if ((real_offset <= address)
  79. && ((real_offset + reg->size) > address)) {
  80. return reg_entry;
  81. }
  82. }
  83. return NULL;
  84. }
  85. static uint32_t get_throughable_mask(const XenPCIPassthroughState *s,
  86. XenPTRegInfo *reg, uint32_t valid_mask)
  87. {
  88. uint32_t throughable_mask = ~(reg->emu_mask | reg->ro_mask);
  89. if (!s->permissive) {
  90. throughable_mask &= ~reg->res_mask;
  91. }
  92. return throughable_mask & valid_mask;
  93. }
  94. /****************
  95. * general register functions
  96. */
  97. /* register initialization function */
  98. static int xen_pt_common_reg_init(XenPCIPassthroughState *s,
  99. XenPTRegInfo *reg, uint32_t real_offset,
  100. uint32_t *data)
  101. {
  102. *data = reg->init_val;
  103. return 0;
  104. }
  105. /* Read register functions */
  106. static int xen_pt_byte_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
  107. uint8_t *value, uint8_t valid_mask)
  108. {
  109. XenPTRegInfo *reg = cfg_entry->reg;
  110. uint8_t valid_emu_mask = 0;
  111. uint8_t *data = cfg_entry->ptr.byte;
  112. /* emulate byte register */
  113. valid_emu_mask = reg->emu_mask & valid_mask;
  114. *value = XEN_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask);
  115. return 0;
  116. }
  117. static int xen_pt_word_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
  118. uint16_t *value, uint16_t valid_mask)
  119. {
  120. XenPTRegInfo *reg = cfg_entry->reg;
  121. uint16_t valid_emu_mask = 0;
  122. uint16_t *data = cfg_entry->ptr.half_word;
  123. /* emulate word register */
  124. valid_emu_mask = reg->emu_mask & valid_mask;
  125. *value = XEN_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask);
  126. return 0;
  127. }
  128. static int xen_pt_long_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
  129. uint32_t *value, uint32_t valid_mask)
  130. {
  131. XenPTRegInfo *reg = cfg_entry->reg;
  132. uint32_t valid_emu_mask = 0;
  133. uint32_t *data = cfg_entry->ptr.word;
  134. /* emulate long register */
  135. valid_emu_mask = reg->emu_mask & valid_mask;
  136. *value = XEN_PT_MERGE_VALUE(*value, *data, ~valid_emu_mask);
  137. return 0;
  138. }
  139. /* Write register functions */
  140. static int xen_pt_byte_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
  141. uint8_t *val, uint8_t dev_value,
  142. uint8_t valid_mask)
  143. {
  144. XenPTRegInfo *reg = cfg_entry->reg;
  145. uint8_t writable_mask = 0;
  146. uint8_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
  147. uint8_t *data = cfg_entry->ptr.byte;
  148. /* modify emulate register */
  149. writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
  150. *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
  151. /* create value for writing to I/O device register */
  152. *val = XEN_PT_MERGE_VALUE(*val, dev_value & ~reg->rw1c_mask,
  153. throughable_mask);
  154. return 0;
  155. }
  156. static int xen_pt_word_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
  157. uint16_t *val, uint16_t dev_value,
  158. uint16_t valid_mask)
  159. {
  160. XenPTRegInfo *reg = cfg_entry->reg;
  161. uint16_t writable_mask = 0;
  162. uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
  163. uint16_t *data = cfg_entry->ptr.half_word;
  164. /* modify emulate register */
  165. writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
  166. *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
  167. /* create value for writing to I/O device register */
  168. *val = XEN_PT_MERGE_VALUE(*val, dev_value & ~reg->rw1c_mask,
  169. throughable_mask);
  170. return 0;
  171. }
  172. static int xen_pt_long_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
  173. uint32_t *val, uint32_t dev_value,
  174. uint32_t valid_mask)
  175. {
  176. XenPTRegInfo *reg = cfg_entry->reg;
  177. uint32_t writable_mask = 0;
  178. uint32_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
  179. uint32_t *data = cfg_entry->ptr.word;
  180. /* modify emulate register */
  181. writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
  182. *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
  183. /* create value for writing to I/O device register */
  184. *val = XEN_PT_MERGE_VALUE(*val, dev_value & ~reg->rw1c_mask,
  185. throughable_mask);
  186. return 0;
  187. }
  188. /* XenPTRegInfo declaration
  189. * - only for emulated register (either a part or whole bit).
  190. * - for passthrough register that need special behavior (like interacting with
  191. * other component), set emu_mask to all 0 and specify r/w func properly.
  192. * - do NOT use ALL F for init_val, otherwise the tbl will not be registered.
  193. */
  194. /********************
  195. * Header Type0
  196. */
  197. static int xen_pt_vendor_reg_init(XenPCIPassthroughState *s,
  198. XenPTRegInfo *reg, uint32_t real_offset,
  199. uint32_t *data)
  200. {
  201. *data = s->real_device.vendor_id;
  202. return 0;
  203. }
  204. static int xen_pt_device_reg_init(XenPCIPassthroughState *s,
  205. XenPTRegInfo *reg, uint32_t real_offset,
  206. uint32_t *data)
  207. {
  208. *data = s->real_device.device_id;
  209. return 0;
  210. }
  211. static int xen_pt_status_reg_init(XenPCIPassthroughState *s,
  212. XenPTRegInfo *reg, uint32_t real_offset,
  213. uint32_t *data)
  214. {
  215. XenPTRegGroup *reg_grp_entry = NULL;
  216. XenPTReg *reg_entry = NULL;
  217. uint32_t reg_field = 0;
  218. /* find Header register group */
  219. reg_grp_entry = xen_pt_find_reg_grp(s, PCI_CAPABILITY_LIST);
  220. if (reg_grp_entry) {
  221. /* find Capabilities Pointer register */
  222. reg_entry = xen_pt_find_reg(reg_grp_entry, PCI_CAPABILITY_LIST);
  223. if (reg_entry) {
  224. /* check Capabilities Pointer register */
  225. if (*reg_entry->ptr.half_word) {
  226. reg_field |= PCI_STATUS_CAP_LIST;
  227. } else {
  228. reg_field &= ~PCI_STATUS_CAP_LIST;
  229. }
  230. } else {
  231. xen_shutdown_fatal_error("Internal error: Couldn't find XenPTReg*"
  232. " for Capabilities Pointer register."
  233. " (%s)\n", __func__);
  234. return -1;
  235. }
  236. } else {
  237. xen_shutdown_fatal_error("Internal error: Couldn't find XenPTRegGroup"
  238. " for Header. (%s)\n", __func__);
  239. return -1;
  240. }
  241. *data = reg_field;
  242. return 0;
  243. }
  244. static int xen_pt_header_type_reg_init(XenPCIPassthroughState *s,
  245. XenPTRegInfo *reg, uint32_t real_offset,
  246. uint32_t *data)
  247. {
  248. /* read PCI_HEADER_TYPE */
  249. *data = reg->init_val | 0x80;
  250. return 0;
  251. }
  252. /* initialize Interrupt Pin register */
  253. static int xen_pt_irqpin_reg_init(XenPCIPassthroughState *s,
  254. XenPTRegInfo *reg, uint32_t real_offset,
  255. uint32_t *data)
  256. {
  257. if (s->real_device.irq) {
  258. *data = xen_pt_pci_read_intx(s);
  259. }
  260. return 0;
  261. }
  262. /* Command register */
  263. static int xen_pt_cmd_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
  264. uint16_t *val, uint16_t dev_value,
  265. uint16_t valid_mask)
  266. {
  267. XenPTRegInfo *reg = cfg_entry->reg;
  268. uint16_t writable_mask = 0;
  269. uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
  270. uint16_t *data = cfg_entry->ptr.half_word;
  271. /* modify emulate register */
  272. writable_mask = ~reg->ro_mask & valid_mask;
  273. *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
  274. /* create value for writing to I/O device register */
  275. if (*val & PCI_COMMAND_INTX_DISABLE) {
  276. throughable_mask |= PCI_COMMAND_INTX_DISABLE;
  277. } else {
  278. if (s->machine_irq) {
  279. throughable_mask |= PCI_COMMAND_INTX_DISABLE;
  280. }
  281. }
  282. *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
  283. return 0;
  284. }
  285. /* BAR */
  286. #define XEN_PT_BAR_MEM_RO_MASK 0x0000000F /* BAR ReadOnly mask(Memory) */
  287. #define XEN_PT_BAR_MEM_EMU_MASK 0xFFFFFFF0 /* BAR emul mask(Memory) */
  288. #define XEN_PT_BAR_IO_RO_MASK 0x00000003 /* BAR ReadOnly mask(I/O) */
  289. #define XEN_PT_BAR_IO_EMU_MASK 0xFFFFFFFC /* BAR emul mask(I/O) */
  290. static bool is_64bit_bar(PCIIORegion *r)
  291. {
  292. return !!(r->type & PCI_BASE_ADDRESS_MEM_TYPE_64);
  293. }
  294. static uint64_t xen_pt_get_bar_size(PCIIORegion *r)
  295. {
  296. if (is_64bit_bar(r)) {
  297. uint64_t size64;
  298. size64 = (r + 1)->size;
  299. size64 <<= 32;
  300. size64 += r->size;
  301. return size64;
  302. }
  303. return r->size;
  304. }
  305. static XenPTBarFlag xen_pt_bar_reg_parse(XenPCIPassthroughState *s,
  306. int index)
  307. {
  308. PCIDevice *d = PCI_DEVICE(s);
  309. XenPTRegion *region = NULL;
  310. PCIIORegion *r;
  311. /* check 64bit BAR */
  312. if ((0 < index) && (index < PCI_ROM_SLOT)) {
  313. int type = s->real_device.io_regions[index - 1].type;
  314. if ((type & XEN_HOST_PCI_REGION_TYPE_MEM)
  315. && (type & XEN_HOST_PCI_REGION_TYPE_MEM_64)) {
  316. region = &s->bases[index - 1];
  317. if (region->bar_flag != XEN_PT_BAR_FLAG_UPPER) {
  318. return XEN_PT_BAR_FLAG_UPPER;
  319. }
  320. }
  321. }
  322. /* check unused BAR */
  323. r = &d->io_regions[index];
  324. if (!xen_pt_get_bar_size(r)) {
  325. return XEN_PT_BAR_FLAG_UNUSED;
  326. }
  327. /* for ExpROM BAR */
  328. if (index == PCI_ROM_SLOT) {
  329. return XEN_PT_BAR_FLAG_MEM;
  330. }
  331. /* check BAR I/O indicator */
  332. if (s->real_device.io_regions[index].type & XEN_HOST_PCI_REGION_TYPE_IO) {
  333. return XEN_PT_BAR_FLAG_IO;
  334. } else {
  335. return XEN_PT_BAR_FLAG_MEM;
  336. }
  337. }
  338. static inline uint32_t base_address_with_flags(XenHostPCIIORegion *hr)
  339. {
  340. if (hr->type & XEN_HOST_PCI_REGION_TYPE_IO) {
  341. return hr->base_addr | (hr->bus_flags & ~PCI_BASE_ADDRESS_IO_MASK);
  342. } else {
  343. return hr->base_addr | (hr->bus_flags & ~PCI_BASE_ADDRESS_MEM_MASK);
  344. }
  345. }
  346. static int xen_pt_bar_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg,
  347. uint32_t real_offset, uint32_t *data)
  348. {
  349. uint32_t reg_field = 0;
  350. int index;
  351. index = xen_pt_bar_offset_to_index(reg->offset);
  352. if (index < 0 || index >= PCI_NUM_REGIONS) {
  353. XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index);
  354. return -1;
  355. }
  356. /* set BAR flag */
  357. s->bases[index].bar_flag = xen_pt_bar_reg_parse(s, index);
  358. if (s->bases[index].bar_flag == XEN_PT_BAR_FLAG_UNUSED) {
  359. reg_field = XEN_PT_INVALID_REG;
  360. }
  361. *data = reg_field;
  362. return 0;
  363. }
  364. static int xen_pt_bar_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
  365. uint32_t *value, uint32_t valid_mask)
  366. {
  367. XenPTRegInfo *reg = cfg_entry->reg;
  368. uint32_t valid_emu_mask = 0;
  369. uint32_t bar_emu_mask = 0;
  370. int index;
  371. /* get BAR index */
  372. index = xen_pt_bar_offset_to_index(reg->offset);
  373. if (index < 0 || index >= PCI_NUM_REGIONS - 1) {
  374. XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index);
  375. return -1;
  376. }
  377. /* use fixed-up value from kernel sysfs */
  378. *value = base_address_with_flags(&s->real_device.io_regions[index]);
  379. /* set emulate mask depend on BAR flag */
  380. switch (s->bases[index].bar_flag) {
  381. case XEN_PT_BAR_FLAG_MEM:
  382. bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK;
  383. break;
  384. case XEN_PT_BAR_FLAG_IO:
  385. bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK;
  386. break;
  387. case XEN_PT_BAR_FLAG_UPPER:
  388. bar_emu_mask = XEN_PT_BAR_ALLF;
  389. break;
  390. default:
  391. break;
  392. }
  393. /* emulate BAR */
  394. valid_emu_mask = bar_emu_mask & valid_mask;
  395. *value = XEN_PT_MERGE_VALUE(*value, *cfg_entry->ptr.word, ~valid_emu_mask);
  396. return 0;
  397. }
/*
 * Handle a guest write to a BAR register.
 *
 * Only the emulated shadow copy is updated; the value forwarded to the
 * physical device is forced back to dev_value (throughable mask 0), so
 * guest BAR writes never reprogram the real hardware.
 */
static int xen_pt_bar_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                uint32_t *val, uint32_t dev_value,
                                uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTRegion *base = NULL;
    PCIDevice *d = PCI_DEVICE(s);
    const PCIIORegion *r;
    uint32_t writable_mask = 0;
    uint32_t bar_emu_mask = 0;
    uint32_t bar_ro_mask = 0;
    uint32_t r_size = 0;
    int index = 0;
    uint32_t *data = cfg_entry->ptr.word;

    index = xen_pt_bar_offset_to_index(reg->offset);
    if (index < 0 || index >= PCI_NUM_REGIONS) {
        XEN_PT_ERR(d, "Internal error: Invalid BAR index [%d].\n", index);
        return -1;
    }

    r = &d->io_regions[index];
    base = &s->bases[index];
    r_size = xen_pt_get_emul_size(base->bar_flag, r->size);

    /* set emulate mask and read-only mask values depend on the BAR flag */
    switch (s->bases[index].bar_flag) {
    case XEN_PT_BAR_FLAG_MEM:
        bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK;
        if (!r_size) {
            /* low 32 bits mask for 64 bit bars */
            bar_ro_mask = XEN_PT_BAR_ALLF;
        } else {
            /* address bits below the region size are read-only */
            bar_ro_mask = XEN_PT_BAR_MEM_RO_MASK | (r_size - 1);
        }
        break;
    case XEN_PT_BAR_FLAG_IO:
        bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK;
        bar_ro_mask = XEN_PT_BAR_IO_RO_MASK | (r_size - 1);
        break;
    case XEN_PT_BAR_FLAG_UPPER:
        /* upper half of a 64-bit BAR: the size's high word bounds the
         * writable address bits */
        assert(index > 0);
        r_size = d->io_regions[index - 1].size >> 32;
        bar_emu_mask = XEN_PT_BAR_ALLF;
        bar_ro_mask = r_size ? r_size - 1 : 0;
        break;
    default:
        break;
    }

    /* modify emulate register */
    writable_mask = bar_emu_mask & ~bar_ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* check whether we need to update the virtual region address or not */
    /* NOTE(review): every branch below is intentionally empty — region
     * remapping is handled elsewhere; the switch is kept as a placeholder
     * documenting that all BAR kinds were considered. */
    switch (s->bases[index].bar_flag) {
    case XEN_PT_BAR_FLAG_UPPER:
    case XEN_PT_BAR_FLAG_MEM:
        /* nothing to do */
        break;
    case XEN_PT_BAR_FLAG_IO:
        /* nothing to do */
        break;
    default:
        break;
    }

    /* create value for writing to I/O device register: throughable mask 0
     * keeps the device's current BAR value untouched */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);
    return 0;
}
  463. /* write Exp ROM BAR */
  464. static int xen_pt_exp_rom_bar_reg_write(XenPCIPassthroughState *s,
  465. XenPTReg *cfg_entry, uint32_t *val,
  466. uint32_t dev_value, uint32_t valid_mask)
  467. {
  468. XenPTRegInfo *reg = cfg_entry->reg;
  469. XenPTRegion *base = NULL;
  470. PCIDevice *d = PCI_DEVICE(s);
  471. uint32_t writable_mask = 0;
  472. uint32_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
  473. pcibus_t r_size = 0;
  474. uint32_t bar_ro_mask = 0;
  475. uint32_t *data = cfg_entry->ptr.word;
  476. r_size = d->io_regions[PCI_ROM_SLOT].size;
  477. base = &s->bases[PCI_ROM_SLOT];
  478. /* align memory type resource size */
  479. r_size = xen_pt_get_emul_size(base->bar_flag, r_size);
  480. /* set emulate mask and read-only mask */
  481. bar_ro_mask = (reg->ro_mask | (r_size - 1)) & ~PCI_ROM_ADDRESS_ENABLE;
  482. /* modify emulate register */
  483. writable_mask = ~bar_ro_mask & valid_mask;
  484. *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
  485. /* create value for writing to I/O device register */
  486. *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
  487. return 0;
  488. }
  489. static int xen_pt_intel_opregion_read(XenPCIPassthroughState *s,
  490. XenPTReg *cfg_entry,
  491. uint32_t *value, uint32_t valid_mask)
  492. {
  493. *value = igd_read_opregion(s);
  494. return 0;
  495. }
  496. static int xen_pt_intel_opregion_write(XenPCIPassthroughState *s,
  497. XenPTReg *cfg_entry, uint32_t *value,
  498. uint32_t dev_value, uint32_t valid_mask)
  499. {
  500. igd_write_opregion(s, *value);
  501. return 0;
  502. }
/* Header Type0 reg static information table */
/*
 * Per-register masks (see XenPTRegInfo):
 *   ro_mask   - bits the guest can never modify
 *   emu_mask  - bits served from the emulated shadow copy
 *   res_mask  - reserved bits (blocked unless permissive)
 *   rw1c_mask - write-1-to-clear status bits
 */
static XenPTRegInfo xen_pt_emu_reg_header0[] = {
    /* Vendor ID reg */
    {
        .offset     = PCI_VENDOR_ID,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xFFFF,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_vendor_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Device ID reg */
    {
        .offset     = PCI_DEVICE_ID,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xFFFF,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_device_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Command reg */
    {
        .offset     = PCI_COMMAND,
        .size       = 2,
        .init_val   = 0x0000,
        .res_mask   = 0xF880,
        .emu_mask   = 0x0743,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_cmd_reg_write,
    },
    /* Capabilities Pointer reg */
    {
        .offset     = PCI_CAPABILITY_LIST,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Status reg */
    /* use emulated Cap Ptr value to initialize,
     * so need to be declared after Cap Ptr reg
     */
    {
        .offset     = PCI_STATUS,
        .size       = 2,
        .init_val   = 0x0000,
        .res_mask   = 0x0007,
        .ro_mask    = 0x06F8,
        .rw1c_mask  = 0xF900,
        .emu_mask   = 0x0010,
        .init       = xen_pt_status_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Cache Line Size reg */
    {
        .offset     = PCI_CACHE_LINE_SIZE,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0x00,
        .emu_mask   = 0xFF,
        .init       = xen_pt_common_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Latency Timer reg */
    {
        .offset     = PCI_LATENCY_TIMER,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0x00,
        .emu_mask   = 0xFF,
        .init       = xen_pt_common_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Header Type reg */
    {
        .offset     = PCI_HEADER_TYPE,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0x00,
        .init       = xen_pt_header_type_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Interrupt Line reg */
    {
        .offset     = PCI_INTERRUPT_LINE,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0x00,
        .emu_mask   = 0xFF,
        .init       = xen_pt_common_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Interrupt Pin reg */
    {
        .offset     = PCI_INTERRUPT_PIN,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_irqpin_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* BAR 0 reg */
    /* mask of BAR need to be decided later, depends on IO/MEM type */
    {
        .offset     = PCI_BASE_ADDRESS_0,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 1 reg */
    {
        .offset     = PCI_BASE_ADDRESS_1,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 2 reg */
    {
        .offset     = PCI_BASE_ADDRESS_2,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 3 reg */
    {
        .offset     = PCI_BASE_ADDRESS_3,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 4 reg */
    {
        .offset     = PCI_BASE_ADDRESS_4,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 5 reg */
    {
        .offset     = PCI_BASE_ADDRESS_5,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* Expansion ROM BAR reg */
    {
        .offset     = PCI_ROM_ADDRESS,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = ~PCI_ROM_ADDRESS_MASK & ~PCI_ROM_ADDRESS_ENABLE,
        .emu_mask   = (uint32_t)PCI_ROM_ADDRESS_MASK,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_exp_rom_bar_reg_write,
    },
    /* table terminator: size 0 marks the end of the list */
    {
        .size = 0,
    },
};
  690. /*********************************
  691. * Vital Product Data Capability
  692. */
/* Vital Product Data Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_vpd[] = {
    /* Next Capability pointer: fully emulated so the guest walks the
     * emulated capability chain */
    {
        .offset     = PCI_CAP_LIST_NEXT,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* VPD Address register: low two bits emulated/read-only */
    {
        .offset     = PCI_VPD_ADDR,
        .size       = 2,
        .ro_mask    = 0x0003,
        .emu_mask   = 0x0003,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* table terminator: size 0 marks the end of the list */
    {
        .size = 0,
    },
};
  718. /**************************************
  719. * Vendor Specific Capability
  720. */
  721. /* Vendor Specific Capability Structure reg static information table */
  722. static XenPTRegInfo xen_pt_emu_reg_vendor[] = {
  723. {
  724. .offset = PCI_CAP_LIST_NEXT,
  725. .size = 1,
  726. .init_val = 0x00,
  727. .ro_mask = 0xFF,
  728. .emu_mask = 0xFF,
  729. .init = xen_pt_ptr_reg_init,
  730. .u.b.read = xen_pt_byte_reg_read,
  731. .u.b.write = xen_pt_byte_reg_write,
  732. },
  733. {
  734. .size = 0,
  735. },
  736. };
  737. /*****************************
  738. * PCI Express Capability
  739. */
  740. static inline uint8_t get_capability_version(XenPCIPassthroughState *s,
  741. uint32_t offset)
  742. {
  743. uint8_t flag;
  744. if (xen_host_pci_get_byte(&s->real_device, offset + PCI_EXP_FLAGS, &flag)) {
  745. return 0;
  746. }
  747. return flag & PCI_EXP_FLAGS_VERS;
  748. }
  749. static inline uint8_t get_device_type(XenPCIPassthroughState *s,
  750. uint32_t offset)
  751. {
  752. uint8_t flag;
  753. if (xen_host_pci_get_byte(&s->real_device, offset + PCI_EXP_FLAGS, &flag)) {
  754. return 0;
  755. }
  756. return (flag & PCI_EXP_FLAGS_TYPE) >> 4;
  757. }
  758. /* initialize Link Control register */
  759. static int xen_pt_linkctrl_reg_init(XenPCIPassthroughState *s,
  760. XenPTRegInfo *reg, uint32_t real_offset,
  761. uint32_t *data)
  762. {
  763. uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);
  764. uint8_t dev_type = get_device_type(s, real_offset - reg->offset);
  765. /* no need to initialize in case of Root Complex Integrated Endpoint
  766. * with cap_ver 1.x
  767. */
  768. if ((dev_type == PCI_EXP_TYPE_RC_END) && (cap_ver == 1)) {
  769. *data = XEN_PT_INVALID_REG;
  770. }
  771. *data = reg->init_val;
  772. return 0;
  773. }
  774. /* initialize Device Control 2 register */
  775. static int xen_pt_devctrl2_reg_init(XenPCIPassthroughState *s,
  776. XenPTRegInfo *reg, uint32_t real_offset,
  777. uint32_t *data)
  778. {
  779. uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);
  780. /* no need to initialize in case of cap_ver 1.x */
  781. if (cap_ver == 1) {
  782. *data = XEN_PT_INVALID_REG;
  783. }
  784. *data = reg->init_val;
  785. return 0;
  786. }
  787. /* initialize Link Control 2 register */
  788. static int xen_pt_linkctrl2_reg_init(XenPCIPassthroughState *s,
  789. XenPTRegInfo *reg, uint32_t real_offset,
  790. uint32_t *data)
  791. {
  792. uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);
  793. uint32_t reg_field = 0;
  794. /* no need to initialize in case of cap_ver 1.x */
  795. if (cap_ver == 1) {
  796. reg_field = XEN_PT_INVALID_REG;
  797. } else {
  798. /* set Supported Link Speed */
  799. uint8_t lnkcap;
  800. int rc;
  801. rc = xen_host_pci_get_byte(&s->real_device,
  802. real_offset - reg->offset + PCI_EXP_LNKCAP,
  803. &lnkcap);
  804. if (rc) {
  805. return rc;
  806. }
  807. reg_field |= PCI_EXP_LNKCAP_SLS & lnkcap;
  808. }
  809. *data = reg_field;
  810. return 0;
  811. }
/* PCI Express Capability Structure reg static information table.
 * Masks: res_mask = reserved bits, ro_mask = guest-read-only bits,
 * rw1c_mask = write-1-to-clear status bits, emu_mask = bits backed by
 * the emulated copy rather than the physical device. */
static XenPTRegInfo xen_pt_emu_reg_pcie[] = {
    /* Next Pointer reg */
    {
        .offset = PCI_CAP_LIST_NEXT,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0xFF,
        .emu_mask = 0xFF,
        .init = xen_pt_ptr_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* Device Capabilities reg */
    {
        .offset = PCI_EXP_DEVCAP,
        .size = 4,
        .init_val = 0x00000000,
        .ro_mask = 0xFFFFFFFF,
        .emu_mask = 0x10000000,
        .init = xen_pt_common_reg_init,
        .u.dw.read = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_long_reg_write,
    },
    /* Device Control reg */
    {
        .offset = PCI_EXP_DEVCTL,
        .size = 2,
        .init_val = 0x2810,
        .ro_mask = 0x8400,
        .emu_mask = 0xFFFF,
        .init = xen_pt_common_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* Device Status reg */
    {
        .offset = PCI_EXP_DEVSTA,
        .size = 2,
        .res_mask = 0xFFC0,
        .ro_mask = 0x0030,
        .rw1c_mask = 0x000F,
        .init = xen_pt_common_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* Link Control reg: init hook hides it for v1 RC integrated endpoints */
    {
        .offset = PCI_EXP_LNKCTL,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0xFC34,
        .emu_mask = 0xFFFF,
        .init = xen_pt_linkctrl_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* Link Status reg */
    {
        .offset = PCI_EXP_LNKSTA,
        .size = 2,
        .ro_mask = 0x3FFF,
        .rw1c_mask = 0xC000,
        .init = xen_pt_common_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* Device Control 2 reg: init hook hides it for capability v1 */
    {
        .offset = 0x28, /* PCI_EXP_DEVCTL2 */
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0xFFE0,
        .emu_mask = 0xFFFF,
        .init = xen_pt_devctrl2_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* Link Control 2 reg: init hook hides it for capability v1 */
    {
        .offset = 0x30, /* PCI_EXP_LNKCTL2 */
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0xE040,
        .emu_mask = 0xFFFF,
        .init = xen_pt_linkctrl2_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* terminator: size 0 ends the table */
    {
        .size = 0,
    },
};
  905. /*********************************
  906. * Power Management Capability
  907. */
  908. /* Power Management Capability reg static information table */
  909. static XenPTRegInfo xen_pt_emu_reg_pm[] = {
  910. /* Next Pointer reg */
  911. {
  912. .offset = PCI_CAP_LIST_NEXT,
  913. .size = 1,
  914. .init_val = 0x00,
  915. .ro_mask = 0xFF,
  916. .emu_mask = 0xFF,
  917. .init = xen_pt_ptr_reg_init,
  918. .u.b.read = xen_pt_byte_reg_read,
  919. .u.b.write = xen_pt_byte_reg_write,
  920. },
  921. /* Power Management Capabilities reg */
  922. {
  923. .offset = PCI_CAP_FLAGS,
  924. .size = 2,
  925. .init_val = 0x0000,
  926. .ro_mask = 0xFFFF,
  927. .emu_mask = 0xF9C8,
  928. .init = xen_pt_common_reg_init,
  929. .u.w.read = xen_pt_word_reg_read,
  930. .u.w.write = xen_pt_word_reg_write,
  931. },
  932. /* PCI Power Management Control/Status reg */
  933. {
  934. .offset = PCI_PM_CTRL,
  935. .size = 2,
  936. .init_val = 0x0008,
  937. .res_mask = 0x00F0,
  938. .ro_mask = 0x610C,
  939. .rw1c_mask = 0x8000,
  940. .emu_mask = 0x810B,
  941. .init = xen_pt_common_reg_init,
  942. .u.w.read = xen_pt_word_reg_read,
  943. .u.w.write = xen_pt_word_reg_write,
  944. },
  945. {
  946. .size = 0,
  947. },
  948. };
  949. /********************************
  950. * MSI Capability
  951. */
  952. /* Helper */
  953. #define xen_pt_msi_check_type(offset, flags, what) \
  954. ((offset) == ((flags) & PCI_MSI_FLAGS_64BIT ? \
  955. PCI_MSI_##what##_64 : PCI_MSI_##what##_32))
/* Message Control register
 *
 * Snapshot the host device's Message Control value, force MSI off on the
 * physical device if it was left enabled, and record the capability state
 * in s->msi for later use by the write handlers.
 */
static int xen_pt_msgctrl_reg_init(XenPCIPassthroughState *s,
                                   XenPTRegInfo *reg, uint32_t real_offset,
                                   uint32_t *data)
{
    XenPTMSI *msi = s->msi;
    uint16_t reg_field;
    int rc;

    /* use I/O device register's value as initial value */
    rc = xen_host_pci_get_word(&s->real_device, real_offset, &reg_field);
    if (rc) {
        return rc;
    }
    if (reg_field & PCI_MSI_FLAGS_ENABLE) {
        XEN_PT_LOG(&s->dev, "MSI already enabled, disabling it first\n");
        xen_host_pci_set_word(&s->real_device, real_offset,
                              reg_field & ~PCI_MSI_FLAGS_ENABLE);
    }
    /* cache the host's capability flag bits; MSI starts torn down */
    msi->flags |= reg_field;
    msi->ctrl_offset = real_offset;
    msi->initialized = false;
    msi->mapped = false;

    *data = reg->init_val;
    return 0;
}
/* Message Control register write handler.
 *
 * Merges the guest write into the emulated copy, then enables or disables
 * the physical MSI binding to match the guest-visible enable bit.  MSI
 * setup failures are absorbed (enable bit cleared, warning logged) so the
 * rest of QEMU keeps running; the guest just gets non-working MSI.
 */
static int xen_pt_msgctrl_reg_write(XenPCIPassthroughState *s,
                                    XenPTReg *cfg_entry, uint16_t *val,
                                    uint16_t dev_value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTMSI *msi = s->msi;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    uint16_t *data = cfg_entry->ptr.half_word;

    /* Currently no support for multi-vector */
    if (*val & PCI_MSI_FLAGS_QSIZE) {
        XEN_PT_WARN(&s->dev, "Tries to set more than 1 vector ctrl %x\n", *val);
    }

    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
    /* cache all flag bits except enable, which is tracked separately below */
    msi->flags |= *data & ~PCI_MSI_FLAGS_ENABLE;

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    /* update MSI */
    if (*val & PCI_MSI_FLAGS_ENABLE) {
        /* setup MSI pirq for the first time */
        if (!msi->initialized) {
            /* Init physical one */
            XEN_PT_LOG(&s->dev, "setup MSI (register: %x).\n", *val);
            if (xen_pt_msi_setup(s)) {
                /* We do not broadcast the error to the framework code, so
                 * that MSI errors are contained in MSI emulation code and
                 * QEMU can go on running.
                 * Guest MSI would be actually not working.
                 */
                *val &= ~PCI_MSI_FLAGS_ENABLE;
                XEN_PT_WARN(&s->dev, "Can not map MSI (register: %x)!\n", *val);
                return 0;
            }
            if (xen_pt_msi_update(s)) {
                *val &= ~PCI_MSI_FLAGS_ENABLE;
                XEN_PT_WARN(&s->dev, "Can not bind MSI (register: %x)!\n", *val);
                return 0;
            }
            msi->initialized = true;
            msi->mapped = true;
        }
        msi->flags |= PCI_MSI_FLAGS_ENABLE;
    } else if (msi->mapped) {
        /* guest cleared the enable bit: tear down the physical binding */
        xen_pt_msi_disable(s);
    }

    return 0;
}
  1030. /* initialize Message Upper Address register */
  1031. static int xen_pt_msgaddr64_reg_init(XenPCIPassthroughState *s,
  1032. XenPTRegInfo *reg, uint32_t real_offset,
  1033. uint32_t *data)
  1034. {
  1035. /* no need to initialize in case of 32 bit type */
  1036. if (!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) {
  1037. *data = XEN_PT_INVALID_REG;
  1038. } else {
  1039. *data = reg->init_val;
  1040. }
  1041. return 0;
  1042. }
  1043. /* this function will be called twice (for 32 bit and 64 bit type) */
  1044. /* initialize Message Data register */
  1045. static int xen_pt_msgdata_reg_init(XenPCIPassthroughState *s,
  1046. XenPTRegInfo *reg, uint32_t real_offset,
  1047. uint32_t *data)
  1048. {
  1049. uint32_t flags = s->msi->flags;
  1050. uint32_t offset = reg->offset;
  1051. /* check the offset whether matches the type or not */
  1052. if (xen_pt_msi_check_type(offset, flags, DATA)) {
  1053. *data = reg->init_val;
  1054. } else {
  1055. *data = XEN_PT_INVALID_REG;
  1056. }
  1057. return 0;
  1058. }
  1059. /* this function will be called twice (for 32 bit and 64 bit type) */
  1060. /* initialize Mask register */
  1061. static int xen_pt_mask_reg_init(XenPCIPassthroughState *s,
  1062. XenPTRegInfo *reg, uint32_t real_offset,
  1063. uint32_t *data)
  1064. {
  1065. uint32_t flags = s->msi->flags;
  1066. /* check the offset whether matches the type or not */
  1067. if (!(flags & PCI_MSI_FLAGS_MASKBIT)) {
  1068. *data = XEN_PT_INVALID_REG;
  1069. } else if (xen_pt_msi_check_type(reg->offset, flags, MASK)) {
  1070. *data = reg->init_val;
  1071. } else {
  1072. *data = XEN_PT_INVALID_REG;
  1073. }
  1074. return 0;
  1075. }
  1076. /* this function will be called twice (for 32 bit and 64 bit type) */
  1077. /* initialize Pending register */
  1078. static int xen_pt_pending_reg_init(XenPCIPassthroughState *s,
  1079. XenPTRegInfo *reg, uint32_t real_offset,
  1080. uint32_t *data)
  1081. {
  1082. uint32_t flags = s->msi->flags;
  1083. /* check the offset whether matches the type or not */
  1084. if (!(flags & PCI_MSI_FLAGS_MASKBIT)) {
  1085. *data = XEN_PT_INVALID_REG;
  1086. } else if (xen_pt_msi_check_type(reg->offset, flags, PENDING)) {
  1087. *data = reg->init_val;
  1088. } else {
  1089. *data = XEN_PT_INVALID_REG;
  1090. }
  1091. return 0;
  1092. }
/* write Message Address register
 *
 * Merges the guest write into the emulated copy and mirrors it into
 * s->msi->addr_lo; the physical register is never written (throughable
 * mask 0).  If the address changed while MSI is mapped, rebind.
 */
static int xen_pt_msgaddr32_reg_write(XenPCIPassthroughState *s,
                                      XenPTReg *cfg_entry, uint32_t *val,
                                      uint32_t dev_value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t writable_mask = 0;
    uint32_t old_addr = *cfg_entry->ptr.word;
    uint32_t *data = cfg_entry->ptr.word;

    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
    s->msi->addr_lo = *data;

    /* create value for writing to I/O device register */
    /* mask 0: the guest address is never pushed to the physical device */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);

    /* update MSI */
    if (*data != old_addr) {
        if (s->msi->mapped) {
            xen_pt_msi_update(s);
        }
    }

    return 0;
}
/* write Message Upper Address register
 *
 * Like xen_pt_msgaddr32_reg_write but for the upper 32 address bits;
 * rejects the write outright on devices without 64-bit MSI support.
 */
static int xen_pt_msgaddr64_reg_write(XenPCIPassthroughState *s,
                                      XenPTReg *cfg_entry, uint32_t *val,
                                      uint32_t dev_value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint32_t writable_mask = 0;
    uint32_t old_addr = *cfg_entry->ptr.word;
    uint32_t *data = cfg_entry->ptr.word;

    /* check whether the type is 64 bit or not */
    if (!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) {
        XEN_PT_ERR(&s->dev,
                   "Can't write to the upper address without 64 bit support\n");
        return -1;
    }

    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
    /* update the msi_info too */
    s->msi->addr_hi = *data;

    /* create value for writing to I/O device register */
    /* mask 0: the guest address is never pushed to the physical device */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);

    /* update MSI */
    if (*data != old_addr) {
        if (s->msi->mapped) {
            xen_pt_msi_update(s);
        }
    }

    return 0;
}
/* this function will be called twice (for 32 bit and 64 bit type) */
/* write Message Data register
 *
 * Merges the guest write into the emulated copy and s->msi->data; the
 * physical register is never written (throughable mask 0).  Fails if the
 * entry's offset does not match the device's 32/64-bit layout.
 */
static int xen_pt_msgdata_reg_write(XenPCIPassthroughState *s,
                                    XenPTReg *cfg_entry, uint16_t *val,
                                    uint16_t dev_value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTMSI *msi = s->msi;
    uint16_t writable_mask = 0;
    uint16_t old_data = *cfg_entry->ptr.half_word;
    uint32_t offset = reg->offset;
    uint16_t *data = cfg_entry->ptr.half_word;

    /* check the offset whether matches the type or not */
    if (!xen_pt_msi_check_type(offset, msi->flags, DATA)) {
        /* exit I/O emulator */
        XEN_PT_ERR(&s->dev, "the offset does not match the 32/64 bit type!\n");
        return -1;
    }

    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);
    /* update the msi_info too */
    msi->data = *data;

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, 0);

    /* update MSI */
    if (*data != old_data) {
        if (msi->mapped) {
            xen_pt_msi_update(s);
        }
    }

    return 0;
}
  1179. static int xen_pt_mask_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
  1180. uint32_t *val, uint32_t dev_value,
  1181. uint32_t valid_mask)
  1182. {
  1183. int rc;
  1184. rc = xen_pt_long_reg_write(s, cfg_entry, val, dev_value, valid_mask);
  1185. if (rc) {
  1186. return rc;
  1187. }
  1188. s->msi->mask = *val;
  1189. return 0;
  1190. }
/* MSI Capability Structure reg static information table.
 * Entries for both the 32-bit and 64-bit layouts are listed; each entry's
 * init hook invalidates itself when it does not match the device's actual
 * layout (see xen_pt_msi_check_type). */
static XenPTRegInfo xen_pt_emu_reg_msi[] = {
    /* Next Pointer reg */
    {
        .offset = PCI_CAP_LIST_NEXT,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0xFF,
        .emu_mask = 0xFF,
        .init = xen_pt_ptr_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* Message Control reg */
    {
        .offset = PCI_MSI_FLAGS,
        .size = 2,
        .init_val = 0x0000,
        .res_mask = 0xFE00,
        .ro_mask = 0x018E,
        .emu_mask = 0x017E,
        .init = xen_pt_msgctrl_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_msgctrl_reg_write,
    },
    /* Message Address reg */
    {
        .offset = PCI_MSI_ADDRESS_LO,
        .size = 4,
        .init_val = 0x00000000,
        .ro_mask = 0x00000003,
        .emu_mask = 0xFFFFFFFF,
        .init = xen_pt_common_reg_init,
        .u.dw.read = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_msgaddr32_reg_write,
    },
    /* Message Upper Address reg (if PCI_MSI_FLAGS_64BIT set) */
    {
        .offset = PCI_MSI_ADDRESS_HI,
        .size = 4,
        .init_val = 0x00000000,
        .ro_mask = 0x00000000,
        .emu_mask = 0xFFFFFFFF,
        .init = xen_pt_msgaddr64_reg_init,
        .u.dw.read = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_msgaddr64_reg_write,
    },
    /* Message Data reg (16 bits of data for 32-bit devices) */
    {
        .offset = PCI_MSI_DATA_32,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0x0000,
        .emu_mask = 0xFFFF,
        .init = xen_pt_msgdata_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_msgdata_reg_write,
    },
    /* Message Data reg (16 bits of data for 64-bit devices) */
    {
        .offset = PCI_MSI_DATA_64,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0x0000,
        .emu_mask = 0xFFFF,
        .init = xen_pt_msgdata_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_msgdata_reg_write,
    },
    /* Mask reg (if PCI_MSI_FLAGS_MASKBIT set, for 32-bit devices) */
    {
        .offset = PCI_MSI_MASK_32,
        .size = 4,
        .init_val = 0x00000000,
        .ro_mask = 0xFFFFFFFF,
        .emu_mask = 0xFFFFFFFF,
        .init = xen_pt_mask_reg_init,
        .u.dw.read = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_mask_reg_write,
    },
    /* Mask reg (if PCI_MSI_FLAGS_MASKBIT set, for 64-bit devices) */
    {
        .offset = PCI_MSI_MASK_64,
        .size = 4,
        .init_val = 0x00000000,
        .ro_mask = 0xFFFFFFFF,
        .emu_mask = 0xFFFFFFFF,
        .init = xen_pt_mask_reg_init,
        .u.dw.read = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_mask_reg_write,
    },
    /* Pending reg (if PCI_MSI_FLAGS_MASKBIT set, for 32-bit devices):
     * read-only, not write-emulated (emu_mask 0) */
    {
        .offset = PCI_MSI_MASK_32 + 4,
        .size = 4,
        .init_val = 0x00000000,
        .ro_mask = 0xFFFFFFFF,
        .emu_mask = 0x00000000,
        .init = xen_pt_pending_reg_init,
        .u.dw.read = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_long_reg_write,
    },
    /* Pending reg (if PCI_MSI_FLAGS_MASKBIT set, for 64-bit devices):
     * read-only, not write-emulated (emu_mask 0) */
    {
        .offset = PCI_MSI_MASK_64 + 4,
        .size = 4,
        .init_val = 0x00000000,
        .ro_mask = 0xFFFFFFFF,
        .emu_mask = 0x00000000,
        .init = xen_pt_pending_reg_init,
        .u.dw.read = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_long_reg_write,
    },
    /* terminator: size 0 ends the table */
    {
        .size = 0,
    },
};
  1308. /**************************************
  1309. * MSI-X Capability
  1310. */
  1311. /* Message Control register for MSI-X */
  1312. static int xen_pt_msixctrl_reg_init(XenPCIPassthroughState *s,
  1313. XenPTRegInfo *reg, uint32_t real_offset,
  1314. uint32_t *data)
  1315. {
  1316. uint16_t reg_field;
  1317. int rc;
  1318. /* use I/O device register's value as initial value */
  1319. rc = xen_host_pci_get_word(&s->real_device, real_offset, &reg_field);
  1320. if (rc) {
  1321. return rc;
  1322. }
  1323. if (reg_field & PCI_MSIX_FLAGS_ENABLE) {
  1324. XEN_PT_LOG(&s->dev, "MSIX already enabled, disabling it first\n");
  1325. xen_host_pci_set_word(&s->real_device, real_offset,
  1326. reg_field & ~PCI_MSIX_FLAGS_ENABLE);
  1327. }
  1328. s->msix->ctrl_offset = real_offset;
  1329. *data = reg->init_val;
  1330. return 0;
  1331. }
/* Message Control register write handler for MSI-X.
 *
 * Merges the guest write into the emulated copy and the value passed to
 * the device, then synchronizes the physical MSI-X state with the
 * guest-visible enable/mask-all bits.
 */
static int xen_pt_msixctrl_reg_write(XenPCIPassthroughState *s,
                                     XenPTReg *cfg_entry, uint16_t *val,
                                     uint16_t dev_value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = get_throughable_mask(s, reg, valid_mask);
    int debug_msix_enabled_old;
    uint16_t *data = cfg_entry->ptr.half_word;

    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    *data = XEN_PT_MERGE_VALUE(*val, *data, writable_mask);

    /* create value for writing to I/O device register */
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    /* update MSI-X */
    if ((*val & PCI_MSIX_FLAGS_ENABLE)
        && !(*val & PCI_MSIX_FLAGS_MASKALL)) {
        /* enabled and not globally masked: (re)program the vectors */
        xen_pt_msix_update(s);
    } else if (!(*val & PCI_MSIX_FLAGS_ENABLE) && s->msix->enabled) {
        xen_pt_msix_disable(s);
    }

    s->msix->maskall = *val & PCI_MSIX_FLAGS_MASKALL;

    /* log only actual enable/disable transitions */
    debug_msix_enabled_old = s->msix->enabled;
    s->msix->enabled = !!(*val & PCI_MSIX_FLAGS_ENABLE);
    if (s->msix->enabled != debug_msix_enabled_old) {
        XEN_PT_LOG(&s->dev, "%s MSI-X\n",
                   s->msix->enabled ? "enable" : "disable");
    }

    return 0;
}
/* MSI-X Capability Structure reg static information table.
 * Only the Next Pointer and Message Control registers are handled here;
 * the MSI-X table itself lives in BAR space, not config space. */
static XenPTRegInfo xen_pt_emu_reg_msix[] = {
    /* Next Pointer reg */
    {
        .offset = PCI_CAP_LIST_NEXT,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0xFF,
        .emu_mask = 0xFF,
        .init = xen_pt_ptr_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* Message Control reg */
    {
        .offset = PCI_MSI_FLAGS,
        .size = 2,
        .init_val = 0x0000,
        .res_mask = 0x3800,
        .ro_mask = 0x07FF,
        .emu_mask = 0x0000,
        .init = xen_pt_msixctrl_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_msixctrl_reg_write,
    },
    /* terminator: size 0 ends the table */
    {
        .size = 0,
    },
};
/* Intel IGD OpRegion register table: a single fully-emulated dword with
 * dedicated read/write handlers (no init hook). */
static XenPTRegInfo xen_pt_emu_reg_igd_opregion[] = {
    /* Intel IGFX OpRegion reg */
    {
        .offset = 0x0,
        .size = 4,
        .init_val = 0,
        .emu_mask = 0xFFFFFFFF,
        .u.dw.read = xen_pt_intel_opregion_read,
        .u.dw.write = xen_pt_intel_opregion_write,
    },
    /* terminator: size 0 ends the table */
    {
        .size = 0,
    },
};
  1405. /****************************
  1406. * Capabilities
  1407. */
  1408. /* capability structure register group size functions */
  1409. static int xen_pt_reg_grp_size_init(XenPCIPassthroughState *s,
  1410. const XenPTRegGroupInfo *grp_reg,
  1411. uint32_t base_offset, uint8_t *size)
  1412. {
  1413. *size = grp_reg->grp_size;
  1414. return 0;
  1415. }
/* get Vendor Specific Capability Structure register group size:
 * byte 2 of a vendor-specific capability holds its total length,
 * so read it from the host device. */
static int xen_pt_vendor_size_init(XenPCIPassthroughState *s,
                                   const XenPTRegGroupInfo *grp_reg,
                                   uint32_t base_offset, uint8_t *size)
{
    return xen_host_pci_get_byte(&s->real_device, base_offset + 0x02, size);
}
  1423. /* get PCI Express Capability Structure register group size */
  1424. static int xen_pt_pcie_size_init(XenPCIPassthroughState *s,
  1425. const XenPTRegGroupInfo *grp_reg,
  1426. uint32_t base_offset, uint8_t *size)
  1427. {
  1428. PCIDevice *d = PCI_DEVICE(s);
  1429. uint8_t version = get_capability_version(s, base_offset);
  1430. uint8_t type = get_device_type(s, base_offset);
  1431. uint8_t pcie_size = 0;
  1432. /* calculate size depend on capability version and device/port type */
  1433. /* in case of PCI Express Base Specification Rev 1.x */
  1434. if (version == 1) {
  1435. /* The PCI Express Capabilities, Device Capabilities, and Device
  1436. * Status/Control registers are required for all PCI Express devices.
  1437. * The Link Capabilities and Link Status/Control are required for all
  1438. * Endpoints that are not Root Complex Integrated Endpoints. Endpoints
  1439. * are not required to implement registers other than those listed
  1440. * above and terminate the capability structure.
  1441. */
  1442. switch (type) {
  1443. case PCI_EXP_TYPE_ENDPOINT:
  1444. case PCI_EXP_TYPE_LEG_END:
  1445. pcie_size = 0x14;
  1446. break;
  1447. case PCI_EXP_TYPE_RC_END:
  1448. /* has no link */
  1449. pcie_size = 0x0C;
  1450. break;
  1451. /* only EndPoint passthrough is supported */
  1452. case PCI_EXP_TYPE_ROOT_PORT:
  1453. case PCI_EXP_TYPE_UPSTREAM:
  1454. case PCI_EXP_TYPE_DOWNSTREAM:
  1455. case PCI_EXP_TYPE_PCI_BRIDGE:
  1456. case PCI_EXP_TYPE_PCIE_BRIDGE:
  1457. case PCI_EXP_TYPE_RC_EC:
  1458. default:
  1459. XEN_PT_ERR(d, "Unsupported device/port type %#x.\n", type);
  1460. return -1;
  1461. }
  1462. }
  1463. /* in case of PCI Express Base Specification Rev 2.0 */
  1464. else if (version == 2) {
  1465. switch (type) {
  1466. case PCI_EXP_TYPE_ENDPOINT:
  1467. case PCI_EXP_TYPE_LEG_END:
  1468. case PCI_EXP_TYPE_RC_END:
  1469. /* For Functions that do not implement the registers,
  1470. * these spaces must be hardwired to 0b.
  1471. */
  1472. pcie_size = 0x3C;
  1473. break;
  1474. /* only EndPoint passthrough is supported */
  1475. case PCI_EXP_TYPE_ROOT_PORT:
  1476. case PCI_EXP_TYPE_UPSTREAM:
  1477. case PCI_EXP_TYPE_DOWNSTREAM:
  1478. case PCI_EXP_TYPE_PCI_BRIDGE:
  1479. case PCI_EXP_TYPE_PCIE_BRIDGE:
  1480. case PCI_EXP_TYPE_RC_EC:
  1481. default:
  1482. XEN_PT_ERR(d, "Unsupported device/port type %#x.\n", type);
  1483. return -1;
  1484. }
  1485. } else {
  1486. XEN_PT_ERR(d, "Unsupported capability version %#x.\n", version);
  1487. return -1;
  1488. }
  1489. *size = pcie_size;
  1490. return 0;
  1491. }
  1492. /* get MSI Capability Structure register group size */
  1493. static int xen_pt_msi_size_init(XenPCIPassthroughState *s,
  1494. const XenPTRegGroupInfo *grp_reg,
  1495. uint32_t base_offset, uint8_t *size)
  1496. {
  1497. uint16_t msg_ctrl = 0;
  1498. uint8_t msi_size = 0xa;
  1499. int rc;
  1500. rc = xen_host_pci_get_word(&s->real_device, base_offset + PCI_MSI_FLAGS,
  1501. &msg_ctrl);
  1502. if (rc) {
  1503. return rc;
  1504. }
  1505. /* check if 64-bit address is capable of per-vector masking */
  1506. if (msg_ctrl & PCI_MSI_FLAGS_64BIT) {
  1507. msi_size += 4;
  1508. }
  1509. if (msg_ctrl & PCI_MSI_FLAGS_MASKBIT) {
  1510. msi_size += 10;
  1511. }
  1512. s->msi = g_new0(XenPTMSI, 1);
  1513. s->msi->pirq = XEN_PT_UNASSIGNED_PIRQ;
  1514. *size = msi_size;
  1515. return 0;
  1516. }
  1517. /* get MSI-X Capability Structure register group size */
  1518. static int xen_pt_msix_size_init(XenPCIPassthroughState *s,
  1519. const XenPTRegGroupInfo *grp_reg,
  1520. uint32_t base_offset, uint8_t *size)
  1521. {
  1522. int rc = 0;
  1523. rc = xen_pt_msix_init(s, base_offset);
  1524. if (rc < 0) {
  1525. XEN_PT_ERR(&s->dev, "Internal error: Invalid xen_pt_msix_init.\n");
  1526. return rc;
  1527. }
  1528. *size = grp_reg->grp_size;
  1529. return 0;
  1530. }
/* Master table of register groups: EMU groups carry a per-register table,
 * HARDWIRED groups are hidden from the guest (read as zero).  A grp_size
 * of 0xFF is a placeholder meaning the real size is computed at runtime
 * by the size_init hook. */
static const XenPTRegGroupInfo xen_pt_emu_reg_grps[] = {
    /* Header Type0 reg group */
    {
        .grp_id = 0xFF, /* not a capability ID: matches the type 0 header */
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0x40,
        .size_init = xen_pt_reg_grp_size_init,
        .emu_regs = xen_pt_emu_reg_header0,
    },
    /* PCI PowerManagement Capability reg group */
    {
        .grp_id = PCI_CAP_ID_PM,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = PCI_PM_SIZEOF,
        .size_init = xen_pt_reg_grp_size_init,
        .emu_regs = xen_pt_emu_reg_pm,
    },
    /* AGP Capability Structure reg group */
    {
        .grp_id = PCI_CAP_ID_AGP,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size = 0x30,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* Vital Product Data Capability Structure reg group */
    {
        .grp_id = PCI_CAP_ID_VPD,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0x08,
        .size_init = xen_pt_reg_grp_size_init,
        .emu_regs = xen_pt_emu_reg_vpd,
    },
    /* Slot Identification reg group */
    {
        .grp_id = PCI_CAP_ID_SLOTID,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size = 0x04,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* MSI Capability Structure reg group: size computed from the host's
     * 64-bit/maskbit flags */
    {
        .grp_id = PCI_CAP_ID_MSI,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0xFF,
        .size_init = xen_pt_msi_size_init,
        .emu_regs = xen_pt_emu_reg_msi,
    },
    /* PCI-X Capabilities List Item reg group */
    {
        .grp_id = PCI_CAP_ID_PCIX,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size = 0x18,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* Vendor Specific Capability Structure reg group: size read from the
     * capability's own length byte */
    {
        .grp_id = PCI_CAP_ID_VNDR,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0xFF,
        .size_init = xen_pt_vendor_size_init,
        .emu_regs = xen_pt_emu_reg_vendor,
    },
    /* SHPC Capability List Item reg group */
    {
        .grp_id = PCI_CAP_ID_SHPC,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size = 0x08,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* Subsystem ID and Subsystem Vendor ID Capability List Item reg group */
    {
        .grp_id = PCI_CAP_ID_SSVID,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size = 0x08,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* AGP 8x Capability Structure reg group */
    {
        .grp_id = PCI_CAP_ID_AGP3,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size = 0x30,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* PCI Express Capability Structure reg group: size depends on the
     * capability version and device/port type */
    {
        .grp_id = PCI_CAP_ID_EXP,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0xFF,
        .size_init = xen_pt_pcie_size_init,
        .emu_regs = xen_pt_emu_reg_pcie,
    },
    /* MSI-X Capability Structure reg group */
    {
        .grp_id = PCI_CAP_ID_MSIX,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0x0C,
        .size_init = xen_pt_msix_size_init,
        .emu_regs = xen_pt_emu_reg_msix,
    },
    /* Intel IGD Opregion group */
    {
        .grp_id = XEN_PCI_INTEL_OPREGION,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0x4,
        .size_init = xen_pt_reg_grp_size_init,
        .emu_regs = xen_pt_emu_reg_igd_opregion,
    },
    /* terminator: grp_size 0 ends the table */
    {
        .grp_size = 0,
    },
};
/* initialize Capabilities Pointer or Next Pointer register
 *
 * Walks the host device's capability chain starting from the value at
 * @real_offset and returns (in *data) the first capability that is both
 * not hidden and emulated (XEN_PT_GRP_TYPE_EMU), so hidden/hardwired
 * capabilities are skipped in the guest-visible chain.  Reaching the end
 * of the chain yields 0 (list terminator).
 */
static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s,
                               XenPTRegInfo *reg, uint32_t real_offset,
                               uint32_t *data)
{
    int i, rc;
    uint8_t reg_field;
    uint8_t cap_id = 0;

    /* start from the host device's own pointer value */
    rc = xen_host_pci_get_byte(&s->real_device, real_offset, &reg_field);
    if (rc) {
        return rc;
    }
    /* find capability offset */
    while (reg_field) {
        for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) {
            if (xen_pt_hide_dev_cap(&s->real_device,
                                    xen_pt_emu_reg_grps[i].grp_id)) {
                /* capability deliberately hidden from the guest */
                continue;
            }

            rc = xen_host_pci_get_byte(&s->real_device,
                                       reg_field + PCI_CAP_LIST_ID, &cap_id);
            if (rc) {
                XEN_PT_ERR(&s->dev, "Failed to read capability @0x%x (rc:%d)\n",
                           reg_field + PCI_CAP_LIST_ID, rc);
                return rc;
            }

            if (xen_pt_emu_reg_grps[i].grp_id == cap_id) {
                if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) {
                    /* emulated capability: expose this offset to the guest */
                    goto out;
                }
                /* ignore the 0 hardwired capability, find next one */
                break;
            }
        }

        /* next capability */
        rc = xen_host_pci_get_byte(&s->real_device,
                                   reg_field + PCI_CAP_LIST_NEXT, &reg_field);
        if (rc) {
            return rc;
        }
    }

out:
    *data = reg_field;
    return 0;
}
  1687. /*************
  1688. * Main
  1689. */
  1690. static uint8_t find_cap_offset(XenPCIPassthroughState *s, uint8_t cap)
  1691. {
  1692. uint8_t id;
  1693. unsigned max_cap = XEN_PCI_CAP_MAX;
  1694. uint8_t pos = PCI_CAPABILITY_LIST;
  1695. uint8_t status = 0;
  1696. if (xen_host_pci_get_byte(&s->real_device, PCI_STATUS, &status)) {
  1697. return 0;
  1698. }
  1699. if ((status & PCI_STATUS_CAP_LIST) == 0) {
  1700. return 0;
  1701. }
  1702. while (max_cap--) {
  1703. if (xen_host_pci_get_byte(&s->real_device, pos, &pos)) {
  1704. break;
  1705. }
  1706. if (pos < PCI_CONFIG_HEADER_SIZE) {
  1707. break;
  1708. }
  1709. pos &= ~3;
  1710. if (xen_host_pci_get_byte(&s->real_device,
  1711. pos + PCI_CAP_LIST_ID, &id)) {
  1712. break;
  1713. }
  1714. if (id == 0xff) {
  1715. break;
  1716. }
  1717. if (id == cap) {
  1718. return pos;
  1719. }
  1720. pos += PCI_CAP_LIST_NEXT;
  1721. }
  1722. return 0;
  1723. }
  1724. static void xen_pt_config_reg_init(XenPCIPassthroughState *s,
  1725. XenPTRegGroup *reg_grp, XenPTRegInfo *reg,
  1726. Error **errp)
  1727. {
  1728. XenPTReg *reg_entry;
  1729. uint32_t data = 0;
  1730. int rc = 0;
  1731. reg_entry = g_new0(XenPTReg, 1);
  1732. reg_entry->reg = reg;
  1733. if (reg->init) {
  1734. uint32_t host_mask, size_mask;
  1735. unsigned int offset;
  1736. uint32_t val;
  1737. /* initialize emulate register */
  1738. rc = reg->init(s, reg_entry->reg,
  1739. reg_grp->base_offset + reg->offset, &data);
  1740. if (rc < 0) {
  1741. g_free(reg_entry);
  1742. error_setg(errp, "Init emulate register fail");
  1743. return;
  1744. }
  1745. if (data == XEN_PT_INVALID_REG) {
  1746. /* free unused BAR register entry */
  1747. g_free(reg_entry);
  1748. return;
  1749. }
  1750. /* Sync up the data to dev.config */
  1751. offset = reg_grp->base_offset + reg->offset;
  1752. size_mask = 0xFFFFFFFF >> ((4 - reg->size) << 3);
  1753. switch (reg->size) {
  1754. case 1: rc = xen_host_pci_get_byte(&s->real_device, offset, (uint8_t *)&val);
  1755. break;
  1756. case 2: rc = xen_host_pci_get_word(&s->real_device, offset, (uint16_t *)&val);
  1757. break;
  1758. case 4: rc = xen_host_pci_get_long(&s->real_device, offset, &val);
  1759. break;
  1760. default: abort();
  1761. }
  1762. if (rc) {
  1763. /* Serious issues when we cannot read the host values! */
  1764. g_free(reg_entry);
  1765. error_setg(errp, "Cannot read host values");
  1766. return;
  1767. }
  1768. /* Set bits in emu_mask are the ones we emulate. The dev.config shall
  1769. * contain the emulated view of the guest - therefore we flip the mask
  1770. * to mask out the host values (which dev.config initially has) . */
  1771. host_mask = size_mask & ~reg->emu_mask;
  1772. if ((data & host_mask) != (val & host_mask)) {
  1773. uint32_t new_val;
  1774. /* Mask out host (including past size). */
  1775. new_val = val & host_mask;
  1776. /* Merge emulated ones (excluding the non-emulated ones). */
  1777. new_val |= data & host_mask;
  1778. /* Leave intact host and emulated values past the size - even though
  1779. * we do not care as we write per reg->size granularity, but for the
  1780. * logging below lets have the proper value. */
  1781. new_val |= ((val | data)) & ~size_mask;
  1782. XEN_PT_LOG(&s->dev,"Offset 0x%04x mismatch! Emulated=0x%04x, host=0x%04x, syncing to 0x%04x.\n",
  1783. offset, data, val, new_val);
  1784. val = new_val;
  1785. } else
  1786. val = data;
  1787. if (val & ~size_mask) {
  1788. error_setg(errp, "Offset 0x%04x:0x%04x expands past"
  1789. " register size (%d)", offset, val, reg->size);
  1790. g_free(reg_entry);
  1791. return;
  1792. }
  1793. /* This could be just pci_set_long as we don't modify the bits
  1794. * past reg->size, but in case this routine is run in parallel or the
  1795. * init value is larger, we do not want to over-write registers. */
  1796. switch (reg->size) {
  1797. case 1: pci_set_byte(s->dev.config + offset, (uint8_t)val);
  1798. break;
  1799. case 2: pci_set_word(s->dev.config + offset, (uint16_t)val);
  1800. break;
  1801. case 4: pci_set_long(s->dev.config + offset, val);
  1802. break;
  1803. default: abort();
  1804. }
  1805. /* set register value pointer to the data. */
  1806. reg_entry->ptr.byte = s->dev.config + offset;
  1807. }
  1808. /* list add register entry */
  1809. QLIST_INSERT_HEAD(&reg_grp->reg_tbl_list, reg_entry, entries);
  1810. }
  1811. void xen_pt_config_init(XenPCIPassthroughState *s, Error **errp)
  1812. {
  1813. int i, rc;
  1814. Error *err = NULL;
  1815. QLIST_INIT(&s->reg_grps);
  1816. for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) {
  1817. uint32_t reg_grp_offset = 0;
  1818. XenPTRegGroup *reg_grp_entry = NULL;
  1819. if (xen_pt_emu_reg_grps[i].grp_id != 0xFF
  1820. && xen_pt_emu_reg_grps[i].grp_id != XEN_PCI_INTEL_OPREGION) {
  1821. if (xen_pt_hide_dev_cap(&s->real_device,
  1822. xen_pt_emu_reg_grps[i].grp_id)) {
  1823. continue;
  1824. }
  1825. reg_grp_offset = find_cap_offset(s, xen_pt_emu_reg_grps[i].grp_id);
  1826. if (!reg_grp_offset) {
  1827. continue;
  1828. }
  1829. }
  1830. /*
  1831. * By default we will trap up to 0x40 in the cfg space.
  1832. * If an intel device is pass through we need to trap 0xfc,
  1833. * therefore the size should be 0xff.
  1834. */
  1835. if (xen_pt_emu_reg_grps[i].grp_id == XEN_PCI_INTEL_OPREGION) {
  1836. reg_grp_offset = XEN_PCI_INTEL_OPREGION;
  1837. }
  1838. reg_grp_entry = g_new0(XenPTRegGroup, 1);
  1839. QLIST_INIT(&reg_grp_entry->reg_tbl_list);
  1840. QLIST_INSERT_HEAD(&s->reg_grps, reg_grp_entry, entries);
  1841. reg_grp_entry->base_offset = reg_grp_offset;
  1842. reg_grp_entry->reg_grp = xen_pt_emu_reg_grps + i;
  1843. if (xen_pt_emu_reg_grps[i].size_init) {
  1844. /* get register group size */
  1845. rc = xen_pt_emu_reg_grps[i].size_init(s, reg_grp_entry->reg_grp,
  1846. reg_grp_offset,
  1847. &reg_grp_entry->size);
  1848. if (rc < 0) {
  1849. error_setg(&err, "Failed to initialize %d/%zu, type = 0x%x,"
  1850. " rc: %d", i, ARRAY_SIZE(xen_pt_emu_reg_grps),
  1851. xen_pt_emu_reg_grps[i].grp_type, rc);
  1852. error_propagate(errp, err);
  1853. xen_pt_config_delete(s);
  1854. return;
  1855. }
  1856. }
  1857. if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) {
  1858. if (xen_pt_emu_reg_grps[i].emu_regs) {
  1859. int j = 0;
  1860. XenPTRegInfo *regs = xen_pt_emu_reg_grps[i].emu_regs;
  1861. /* initialize capability register */
  1862. for (j = 0; regs->size != 0; j++, regs++) {
  1863. xen_pt_config_reg_init(s, reg_grp_entry, regs, &err);
  1864. if (err) {
  1865. error_append_hint(&err, "Failed to init register %d"
  1866. " offsets 0x%x in grp_type = 0x%x (%d/%zu)", j,
  1867. regs->offset, xen_pt_emu_reg_grps[i].grp_type,
  1868. i, ARRAY_SIZE(xen_pt_emu_reg_grps));
  1869. error_propagate(errp, err);
  1870. xen_pt_config_delete(s);
  1871. return;
  1872. }
  1873. }
  1874. }
  1875. }
  1876. }
  1877. }
  1878. /* delete all emulate register */
  1879. void xen_pt_config_delete(XenPCIPassthroughState *s)
  1880. {
  1881. struct XenPTRegGroup *reg_group, *next_grp;
  1882. struct XenPTReg *reg, *next_reg;
  1883. /* free MSI/MSI-X info table */
  1884. if (s->msix) {
  1885. xen_pt_msix_unmap(s);
  1886. }
  1887. g_free(s->msi);
  1888. /* free all register group entry */
  1889. QLIST_FOREACH_SAFE(reg_group, &s->reg_grps, entries, next_grp) {
  1890. /* free all register entry */
  1891. QLIST_FOREACH_SAFE(reg, &reg_group->reg_tbl_list, entries, next_reg) {
  1892. QLIST_REMOVE(reg, entries);
  1893. g_free(reg);
  1894. }
  1895. QLIST_REMOVE(reg_group, entries);
  1896. g_free(reg_group);
  1897. }
  1898. }