2
0

xen_pt_config_init.c 57 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882
  1. /*
  2. * Copyright (c) 2007, Neocleus Corporation.
  3. * Copyright (c) 2007, Intel Corporation.
  4. *
  5. * This work is licensed under the terms of the GNU GPL, version 2. See
  6. * the COPYING file in the top-level directory.
  7. *
  8. * Alex Novik <alex@neocleus.com>
  9. * Allen Kay <allen.m.kay@intel.com>
  10. * Guy Zana <guy@neocleus.com>
  11. *
  12. * This file implements direct PCI assignment to a HVM guest
  13. */
  14. #include "qemu/timer.h"
  15. #include "xen_backend.h"
  16. #include "xen_pt.h"
  17. #define XEN_PT_MERGE_VALUE(value, data, val_mask) \
  18. (((value) & (val_mask)) | ((data) & ~(val_mask)))
  19. #define XEN_PT_INVALID_REG 0xFFFFFFFF /* invalid register value */
  20. /* prototype */
  21. static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg,
  22. uint32_t real_offset, uint32_t *data);
  23. /* helper */
  24. /* A return value of 1 means the capability should NOT be exposed to guest. */
  25. static int xen_pt_hide_dev_cap(const XenHostPCIDevice *d, uint8_t grp_id)
  26. {
  27. switch (grp_id) {
  28. case PCI_CAP_ID_EXP:
  29. /* The PCI Express Capability Structure of the VF of Intel 82599 10GbE
  30. * Controller looks trivial, e.g., the PCI Express Capabilities
  31. * Register is 0. We should not try to expose it to guest.
  32. *
  33. * The datasheet is available at
  34. * http://download.intel.com/design/network/datashts/82599_datasheet.pdf
  35. *
  36. * See 'Table 9.7. VF PCIe Configuration Space' of the datasheet, the
  37. * PCI Express Capability Structure of the VF of Intel 82599 10GbE
  38. * Controller looks trivial, e.g., the PCI Express Capabilities
  39. * Register is 0, so the Capability Version is 0 and
  40. * xen_pt_pcie_size_init() would fail.
  41. */
  42. if (d->vendor_id == PCI_VENDOR_ID_INTEL &&
  43. d->device_id == PCI_DEVICE_ID_INTEL_82599_SFP_VF) {
  44. return 1;
  45. }
  46. break;
  47. }
  48. return 0;
  49. }
  50. /* find emulate register group entry */
  51. XenPTRegGroup *xen_pt_find_reg_grp(XenPCIPassthroughState *s, uint32_t address)
  52. {
  53. XenPTRegGroup *entry = NULL;
  54. /* find register group entry */
  55. QLIST_FOREACH(entry, &s->reg_grps, entries) {
  56. /* check address */
  57. if ((entry->base_offset <= address)
  58. && ((entry->base_offset + entry->size) > address)) {
  59. return entry;
  60. }
  61. }
  62. /* group entry not found */
  63. return NULL;
  64. }
  65. /* find emulate register entry */
  66. XenPTReg *xen_pt_find_reg(XenPTRegGroup *reg_grp, uint32_t address)
  67. {
  68. XenPTReg *reg_entry = NULL;
  69. XenPTRegInfo *reg = NULL;
  70. uint32_t real_offset = 0;
  71. /* find register entry */
  72. QLIST_FOREACH(reg_entry, &reg_grp->reg_tbl_list, entries) {
  73. reg = reg_entry->reg;
  74. real_offset = reg_grp->base_offset + reg->offset;
  75. /* check address */
  76. if ((real_offset <= address)
  77. && ((real_offset + reg->size) > address)) {
  78. return reg_entry;
  79. }
  80. }
  81. return NULL;
  82. }
  83. /****************
  84. * general register functions
  85. */
  86. /* register initialization function */
  87. static int xen_pt_common_reg_init(XenPCIPassthroughState *s,
  88. XenPTRegInfo *reg, uint32_t real_offset,
  89. uint32_t *data)
  90. {
  91. *data = reg->init_val;
  92. return 0;
  93. }
  94. /* Read register functions */
  95. static int xen_pt_byte_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
  96. uint8_t *value, uint8_t valid_mask)
  97. {
  98. XenPTRegInfo *reg = cfg_entry->reg;
  99. uint8_t valid_emu_mask = 0;
  100. /* emulate byte register */
  101. valid_emu_mask = reg->emu_mask & valid_mask;
  102. *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask);
  103. return 0;
  104. }
  105. static int xen_pt_word_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
  106. uint16_t *value, uint16_t valid_mask)
  107. {
  108. XenPTRegInfo *reg = cfg_entry->reg;
  109. uint16_t valid_emu_mask = 0;
  110. /* emulate word register */
  111. valid_emu_mask = reg->emu_mask & valid_mask;
  112. *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask);
  113. return 0;
  114. }
  115. static int xen_pt_long_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
  116. uint32_t *value, uint32_t valid_mask)
  117. {
  118. XenPTRegInfo *reg = cfg_entry->reg;
  119. uint32_t valid_emu_mask = 0;
  120. /* emulate long register */
  121. valid_emu_mask = reg->emu_mask & valid_mask;
  122. *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask);
  123. return 0;
  124. }
  125. /* Write register functions */
  126. static int xen_pt_byte_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
  127. uint8_t *val, uint8_t dev_value,
  128. uint8_t valid_mask)
  129. {
  130. XenPTRegInfo *reg = cfg_entry->reg;
  131. uint8_t writable_mask = 0;
  132. uint8_t throughable_mask = 0;
  133. /* modify emulate register */
  134. writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
  135. cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
  136. /* create value for writing to I/O device register */
  137. throughable_mask = ~reg->emu_mask & valid_mask;
  138. *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
  139. return 0;
  140. }
  141. static int xen_pt_word_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
  142. uint16_t *val, uint16_t dev_value,
  143. uint16_t valid_mask)
  144. {
  145. XenPTRegInfo *reg = cfg_entry->reg;
  146. uint16_t writable_mask = 0;
  147. uint16_t throughable_mask = 0;
  148. /* modify emulate register */
  149. writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
  150. cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
  151. /* create value for writing to I/O device register */
  152. throughable_mask = ~reg->emu_mask & valid_mask;
  153. *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
  154. return 0;
  155. }
  156. static int xen_pt_long_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
  157. uint32_t *val, uint32_t dev_value,
  158. uint32_t valid_mask)
  159. {
  160. XenPTRegInfo *reg = cfg_entry->reg;
  161. uint32_t writable_mask = 0;
  162. uint32_t throughable_mask = 0;
  163. /* modify emulate register */
  164. writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
  165. cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
  166. /* create value for writing to I/O device register */
  167. throughable_mask = ~reg->emu_mask & valid_mask;
  168. *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
  169. return 0;
  170. }
  171. /* XenPTRegInfo declaration
  172. * - only for emulated register (either a part or whole bit).
  173. * - for passthrough register that need special behavior (like interacting with
  174. * other component), set emu_mask to all 0 and specify r/w func properly.
  175. * - do NOT use ALL F for init_val, otherwise the tbl will not be registered.
  176. */
  177. /********************
  178. * Header Type0
  179. */
  180. static int xen_pt_vendor_reg_init(XenPCIPassthroughState *s,
  181. XenPTRegInfo *reg, uint32_t real_offset,
  182. uint32_t *data)
  183. {
  184. *data = s->real_device.vendor_id;
  185. return 0;
  186. }
  187. static int xen_pt_device_reg_init(XenPCIPassthroughState *s,
  188. XenPTRegInfo *reg, uint32_t real_offset,
  189. uint32_t *data)
  190. {
  191. *data = s->real_device.device_id;
  192. return 0;
  193. }
  194. static int xen_pt_status_reg_init(XenPCIPassthroughState *s,
  195. XenPTRegInfo *reg, uint32_t real_offset,
  196. uint32_t *data)
  197. {
  198. XenPTRegGroup *reg_grp_entry = NULL;
  199. XenPTReg *reg_entry = NULL;
  200. uint32_t reg_field = 0;
  201. /* find Header register group */
  202. reg_grp_entry = xen_pt_find_reg_grp(s, PCI_CAPABILITY_LIST);
  203. if (reg_grp_entry) {
  204. /* find Capabilities Pointer register */
  205. reg_entry = xen_pt_find_reg(reg_grp_entry, PCI_CAPABILITY_LIST);
  206. if (reg_entry) {
  207. /* check Capabilities Pointer register */
  208. if (reg_entry->data) {
  209. reg_field |= PCI_STATUS_CAP_LIST;
  210. } else {
  211. reg_field &= ~PCI_STATUS_CAP_LIST;
  212. }
  213. } else {
  214. xen_shutdown_fatal_error("Internal error: Couldn't find XenPTReg*"
  215. " for Capabilities Pointer register."
  216. " (%s)\n", __func__);
  217. return -1;
  218. }
  219. } else {
  220. xen_shutdown_fatal_error("Internal error: Couldn't find XenPTRegGroup"
  221. " for Header. (%s)\n", __func__);
  222. return -1;
  223. }
  224. *data = reg_field;
  225. return 0;
  226. }
  227. static int xen_pt_header_type_reg_init(XenPCIPassthroughState *s,
  228. XenPTRegInfo *reg, uint32_t real_offset,
  229. uint32_t *data)
  230. {
  231. /* read PCI_HEADER_TYPE */
  232. *data = reg->init_val | 0x80;
  233. return 0;
  234. }
  235. /* initialize Interrupt Pin register */
  236. static int xen_pt_irqpin_reg_init(XenPCIPassthroughState *s,
  237. XenPTRegInfo *reg, uint32_t real_offset,
  238. uint32_t *data)
  239. {
  240. *data = xen_pt_pci_read_intx(s);
  241. return 0;
  242. }
  243. /* Command register */
  244. static int xen_pt_cmd_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
  245. uint16_t *value, uint16_t valid_mask)
  246. {
  247. XenPTRegInfo *reg = cfg_entry->reg;
  248. uint16_t valid_emu_mask = 0;
  249. uint16_t emu_mask = reg->emu_mask;
  250. if (s->is_virtfn) {
  251. emu_mask |= PCI_COMMAND_MEMORY;
  252. }
  253. /* emulate word register */
  254. valid_emu_mask = emu_mask & valid_mask;
  255. *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask);
  256. return 0;
  257. }
  258. static int xen_pt_cmd_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
  259. uint16_t *val, uint16_t dev_value,
  260. uint16_t valid_mask)
  261. {
  262. XenPTRegInfo *reg = cfg_entry->reg;
  263. uint16_t writable_mask = 0;
  264. uint16_t throughable_mask = 0;
  265. uint16_t emu_mask = reg->emu_mask;
  266. if (s->is_virtfn) {
  267. emu_mask |= PCI_COMMAND_MEMORY;
  268. }
  269. /* modify emulate register */
  270. writable_mask = ~reg->ro_mask & valid_mask;
  271. cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
  272. /* create value for writing to I/O device register */
  273. throughable_mask = ~emu_mask & valid_mask;
  274. if (*val & PCI_COMMAND_INTX_DISABLE) {
  275. throughable_mask |= PCI_COMMAND_INTX_DISABLE;
  276. } else {
  277. if (s->machine_irq) {
  278. throughable_mask |= PCI_COMMAND_INTX_DISABLE;
  279. }
  280. }
  281. *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
  282. return 0;
  283. }
  284. /* BAR */
  285. #define XEN_PT_BAR_MEM_RO_MASK 0x0000000F /* BAR ReadOnly mask(Memory) */
  286. #define XEN_PT_BAR_MEM_EMU_MASK 0xFFFFFFF0 /* BAR emul mask(Memory) */
  287. #define XEN_PT_BAR_IO_RO_MASK 0x00000003 /* BAR ReadOnly mask(I/O) */
  288. #define XEN_PT_BAR_IO_EMU_MASK 0xFFFFFFFC /* BAR emul mask(I/O) */
  289. static bool is_64bit_bar(PCIIORegion *r)
  290. {
  291. return !!(r->type & PCI_BASE_ADDRESS_MEM_TYPE_64);
  292. }
  293. static uint64_t xen_pt_get_bar_size(PCIIORegion *r)
  294. {
  295. if (is_64bit_bar(r)) {
  296. uint64_t size64;
  297. size64 = (r + 1)->size;
  298. size64 <<= 32;
  299. size64 += r->size;
  300. return size64;
  301. }
  302. return r->size;
  303. }
  304. static XenPTBarFlag xen_pt_bar_reg_parse(XenPCIPassthroughState *s,
  305. XenPTRegInfo *reg)
  306. {
  307. PCIDevice *d = &s->dev;
  308. XenPTRegion *region = NULL;
  309. PCIIORegion *r;
  310. int index = 0;
  311. /* check 64bit BAR */
  312. index = xen_pt_bar_offset_to_index(reg->offset);
  313. if ((0 < index) && (index < PCI_ROM_SLOT)) {
  314. int type = s->real_device.io_regions[index - 1].type;
  315. if ((type & XEN_HOST_PCI_REGION_TYPE_MEM)
  316. && (type & XEN_HOST_PCI_REGION_TYPE_MEM_64)) {
  317. region = &s->bases[index - 1];
  318. if (region->bar_flag != XEN_PT_BAR_FLAG_UPPER) {
  319. return XEN_PT_BAR_FLAG_UPPER;
  320. }
  321. }
  322. }
  323. /* check unused BAR */
  324. r = &d->io_regions[index];
  325. if (!xen_pt_get_bar_size(r)) {
  326. return XEN_PT_BAR_FLAG_UNUSED;
  327. }
  328. /* for ExpROM BAR */
  329. if (index == PCI_ROM_SLOT) {
  330. return XEN_PT_BAR_FLAG_MEM;
  331. }
  332. /* check BAR I/O indicator */
  333. if (s->real_device.io_regions[index].type & XEN_HOST_PCI_REGION_TYPE_IO) {
  334. return XEN_PT_BAR_FLAG_IO;
  335. } else {
  336. return XEN_PT_BAR_FLAG_MEM;
  337. }
  338. }
  339. static inline uint32_t base_address_with_flags(XenHostPCIIORegion *hr)
  340. {
  341. if (hr->type & XEN_HOST_PCI_REGION_TYPE_IO) {
  342. return hr->base_addr | (hr->bus_flags & ~PCI_BASE_ADDRESS_IO_MASK);
  343. } else {
  344. return hr->base_addr | (hr->bus_flags & ~PCI_BASE_ADDRESS_MEM_MASK);
  345. }
  346. }
  347. static int xen_pt_bar_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg,
  348. uint32_t real_offset, uint32_t *data)
  349. {
  350. uint32_t reg_field = 0;
  351. int index;
  352. index = xen_pt_bar_offset_to_index(reg->offset);
  353. if (index < 0 || index >= PCI_NUM_REGIONS) {
  354. XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index);
  355. return -1;
  356. }
  357. /* set BAR flag */
  358. s->bases[index].bar_flag = xen_pt_bar_reg_parse(s, reg);
  359. if (s->bases[index].bar_flag == XEN_PT_BAR_FLAG_UNUSED) {
  360. reg_field = XEN_PT_INVALID_REG;
  361. }
  362. *data = reg_field;
  363. return 0;
  364. }
  365. static int xen_pt_bar_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
  366. uint32_t *value, uint32_t valid_mask)
  367. {
  368. XenPTRegInfo *reg = cfg_entry->reg;
  369. uint32_t valid_emu_mask = 0;
  370. uint32_t bar_emu_mask = 0;
  371. int index;
  372. /* get BAR index */
  373. index = xen_pt_bar_offset_to_index(reg->offset);
  374. if (index < 0 || index >= PCI_NUM_REGIONS) {
  375. XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index);
  376. return -1;
  377. }
  378. /* use fixed-up value from kernel sysfs */
  379. *value = base_address_with_flags(&s->real_device.io_regions[index]);
  380. /* set emulate mask depend on BAR flag */
  381. switch (s->bases[index].bar_flag) {
  382. case XEN_PT_BAR_FLAG_MEM:
  383. bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK;
  384. break;
  385. case XEN_PT_BAR_FLAG_IO:
  386. bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK;
  387. break;
  388. case XEN_PT_BAR_FLAG_UPPER:
  389. bar_emu_mask = XEN_PT_BAR_ALLF;
  390. break;
  391. default:
  392. break;
  393. }
  394. /* emulate BAR */
  395. valid_emu_mask = bar_emu_mask & valid_mask;
  396. *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask);
  397. return 0;
  398. }
  399. static int xen_pt_bar_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
  400. uint32_t *val, uint32_t dev_value,
  401. uint32_t valid_mask)
  402. {
  403. XenPTRegInfo *reg = cfg_entry->reg;
  404. XenPTRegion *base = NULL;
  405. PCIDevice *d = &s->dev;
  406. const PCIIORegion *r;
  407. uint32_t writable_mask = 0;
  408. uint32_t throughable_mask = 0;
  409. uint32_t bar_emu_mask = 0;
  410. uint32_t bar_ro_mask = 0;
  411. uint32_t r_size = 0;
  412. int index = 0;
  413. index = xen_pt_bar_offset_to_index(reg->offset);
  414. if (index < 0 || index >= PCI_NUM_REGIONS) {
  415. XEN_PT_ERR(d, "Internal error: Invalid BAR index [%d].\n", index);
  416. return -1;
  417. }
  418. r = &d->io_regions[index];
  419. base = &s->bases[index];
  420. r_size = xen_pt_get_emul_size(base->bar_flag, r->size);
  421. /* set emulate mask and read-only mask values depend on the BAR flag */
  422. switch (s->bases[index].bar_flag) {
  423. case XEN_PT_BAR_FLAG_MEM:
  424. bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK;
  425. if (!r_size) {
  426. /* low 32 bits mask for 64 bit bars */
  427. bar_ro_mask = XEN_PT_BAR_ALLF;
  428. } else {
  429. bar_ro_mask = XEN_PT_BAR_MEM_RO_MASK | (r_size - 1);
  430. }
  431. break;
  432. case XEN_PT_BAR_FLAG_IO:
  433. bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK;
  434. bar_ro_mask = XEN_PT_BAR_IO_RO_MASK | (r_size - 1);
  435. break;
  436. case XEN_PT_BAR_FLAG_UPPER:
  437. bar_emu_mask = XEN_PT_BAR_ALLF;
  438. bar_ro_mask = r_size ? r_size - 1 : 0;
  439. break;
  440. default:
  441. break;
  442. }
  443. /* modify emulate register */
  444. writable_mask = bar_emu_mask & ~bar_ro_mask & valid_mask;
  445. cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
  446. /* check whether we need to update the virtual region address or not */
  447. switch (s->bases[index].bar_flag) {
  448. case XEN_PT_BAR_FLAG_UPPER:
  449. case XEN_PT_BAR_FLAG_MEM:
  450. /* nothing to do */
  451. break;
  452. case XEN_PT_BAR_FLAG_IO:
  453. /* nothing to do */
  454. break;
  455. default:
  456. break;
  457. }
  458. /* create value for writing to I/O device register */
  459. throughable_mask = ~bar_emu_mask & valid_mask;
  460. *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
  461. return 0;
  462. }
  463. /* write Exp ROM BAR */
  464. static int xen_pt_exp_rom_bar_reg_write(XenPCIPassthroughState *s,
  465. XenPTReg *cfg_entry, uint32_t *val,
  466. uint32_t dev_value, uint32_t valid_mask)
  467. {
  468. XenPTRegInfo *reg = cfg_entry->reg;
  469. XenPTRegion *base = NULL;
  470. PCIDevice *d = (PCIDevice *)&s->dev;
  471. uint32_t writable_mask = 0;
  472. uint32_t throughable_mask = 0;
  473. pcibus_t r_size = 0;
  474. uint32_t bar_emu_mask = 0;
  475. uint32_t bar_ro_mask = 0;
  476. r_size = d->io_regions[PCI_ROM_SLOT].size;
  477. base = &s->bases[PCI_ROM_SLOT];
  478. /* align memory type resource size */
  479. r_size = xen_pt_get_emul_size(base->bar_flag, r_size);
  480. /* set emulate mask and read-only mask */
  481. bar_emu_mask = reg->emu_mask;
  482. bar_ro_mask = (reg->ro_mask | (r_size - 1)) & ~PCI_ROM_ADDRESS_ENABLE;
  483. /* modify emulate register */
  484. writable_mask = ~bar_ro_mask & valid_mask;
  485. cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
  486. /* create value for writing to I/O device register */
  487. throughable_mask = ~bar_emu_mask & valid_mask;
  488. *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
  489. return 0;
  490. }
  491. /* Header Type0 reg static information table */
  492. static XenPTRegInfo xen_pt_emu_reg_header0[] = {
  493. /* Vendor ID reg */
  494. {
  495. .offset = PCI_VENDOR_ID,
  496. .size = 2,
  497. .init_val = 0x0000,
  498. .ro_mask = 0xFFFF,
  499. .emu_mask = 0xFFFF,
  500. .init = xen_pt_vendor_reg_init,
  501. .u.w.read = xen_pt_word_reg_read,
  502. .u.w.write = xen_pt_word_reg_write,
  503. },
  504. /* Device ID reg */
  505. {
  506. .offset = PCI_DEVICE_ID,
  507. .size = 2,
  508. .init_val = 0x0000,
  509. .ro_mask = 0xFFFF,
  510. .emu_mask = 0xFFFF,
  511. .init = xen_pt_device_reg_init,
  512. .u.w.read = xen_pt_word_reg_read,
  513. .u.w.write = xen_pt_word_reg_write,
  514. },
  515. /* Command reg */
  516. {
  517. .offset = PCI_COMMAND,
  518. .size = 2,
  519. .init_val = 0x0000,
  520. .ro_mask = 0xF880,
  521. .emu_mask = 0x0740,
  522. .init = xen_pt_common_reg_init,
  523. .u.w.read = xen_pt_cmd_reg_read,
  524. .u.w.write = xen_pt_cmd_reg_write,
  525. },
  526. /* Capabilities Pointer reg */
  527. {
  528. .offset = PCI_CAPABILITY_LIST,
  529. .size = 1,
  530. .init_val = 0x00,
  531. .ro_mask = 0xFF,
  532. .emu_mask = 0xFF,
  533. .init = xen_pt_ptr_reg_init,
  534. .u.b.read = xen_pt_byte_reg_read,
  535. .u.b.write = xen_pt_byte_reg_write,
  536. },
  537. /* Status reg */
  538. /* use emulated Cap Ptr value to initialize,
  539. * so need to be declared after Cap Ptr reg
  540. */
  541. {
  542. .offset = PCI_STATUS,
  543. .size = 2,
  544. .init_val = 0x0000,
  545. .ro_mask = 0x06FF,
  546. .emu_mask = 0x0010,
  547. .init = xen_pt_status_reg_init,
  548. .u.w.read = xen_pt_word_reg_read,
  549. .u.w.write = xen_pt_word_reg_write,
  550. },
  551. /* Cache Line Size reg */
  552. {
  553. .offset = PCI_CACHE_LINE_SIZE,
  554. .size = 1,
  555. .init_val = 0x00,
  556. .ro_mask = 0x00,
  557. .emu_mask = 0xFF,
  558. .init = xen_pt_common_reg_init,
  559. .u.b.read = xen_pt_byte_reg_read,
  560. .u.b.write = xen_pt_byte_reg_write,
  561. },
  562. /* Latency Timer reg */
  563. {
  564. .offset = PCI_LATENCY_TIMER,
  565. .size = 1,
  566. .init_val = 0x00,
  567. .ro_mask = 0x00,
  568. .emu_mask = 0xFF,
  569. .init = xen_pt_common_reg_init,
  570. .u.b.read = xen_pt_byte_reg_read,
  571. .u.b.write = xen_pt_byte_reg_write,
  572. },
  573. /* Header Type reg */
  574. {
  575. .offset = PCI_HEADER_TYPE,
  576. .size = 1,
  577. .init_val = 0x00,
  578. .ro_mask = 0xFF,
  579. .emu_mask = 0x00,
  580. .init = xen_pt_header_type_reg_init,
  581. .u.b.read = xen_pt_byte_reg_read,
  582. .u.b.write = xen_pt_byte_reg_write,
  583. },
  584. /* Interrupt Line reg */
  585. {
  586. .offset = PCI_INTERRUPT_LINE,
  587. .size = 1,
  588. .init_val = 0x00,
  589. .ro_mask = 0x00,
  590. .emu_mask = 0xFF,
  591. .init = xen_pt_common_reg_init,
  592. .u.b.read = xen_pt_byte_reg_read,
  593. .u.b.write = xen_pt_byte_reg_write,
  594. },
  595. /* Interrupt Pin reg */
  596. {
  597. .offset = PCI_INTERRUPT_PIN,
  598. .size = 1,
  599. .init_val = 0x00,
  600. .ro_mask = 0xFF,
  601. .emu_mask = 0xFF,
  602. .init = xen_pt_irqpin_reg_init,
  603. .u.b.read = xen_pt_byte_reg_read,
  604. .u.b.write = xen_pt_byte_reg_write,
  605. },
  606. /* BAR 0 reg */
  607. /* mask of BAR need to be decided later, depends on IO/MEM type */
  608. {
  609. .offset = PCI_BASE_ADDRESS_0,
  610. .size = 4,
  611. .init_val = 0x00000000,
  612. .init = xen_pt_bar_reg_init,
  613. .u.dw.read = xen_pt_bar_reg_read,
  614. .u.dw.write = xen_pt_bar_reg_write,
  615. },
  616. /* BAR 1 reg */
  617. {
  618. .offset = PCI_BASE_ADDRESS_1,
  619. .size = 4,
  620. .init_val = 0x00000000,
  621. .init = xen_pt_bar_reg_init,
  622. .u.dw.read = xen_pt_bar_reg_read,
  623. .u.dw.write = xen_pt_bar_reg_write,
  624. },
  625. /* BAR 2 reg */
  626. {
  627. .offset = PCI_BASE_ADDRESS_2,
  628. .size = 4,
  629. .init_val = 0x00000000,
  630. .init = xen_pt_bar_reg_init,
  631. .u.dw.read = xen_pt_bar_reg_read,
  632. .u.dw.write = xen_pt_bar_reg_write,
  633. },
  634. /* BAR 3 reg */
  635. {
  636. .offset = PCI_BASE_ADDRESS_3,
  637. .size = 4,
  638. .init_val = 0x00000000,
  639. .init = xen_pt_bar_reg_init,
  640. .u.dw.read = xen_pt_bar_reg_read,
  641. .u.dw.write = xen_pt_bar_reg_write,
  642. },
  643. /* BAR 4 reg */
  644. {
  645. .offset = PCI_BASE_ADDRESS_4,
  646. .size = 4,
  647. .init_val = 0x00000000,
  648. .init = xen_pt_bar_reg_init,
  649. .u.dw.read = xen_pt_bar_reg_read,
  650. .u.dw.write = xen_pt_bar_reg_write,
  651. },
  652. /* BAR 5 reg */
  653. {
  654. .offset = PCI_BASE_ADDRESS_5,
  655. .size = 4,
  656. .init_val = 0x00000000,
  657. .init = xen_pt_bar_reg_init,
  658. .u.dw.read = xen_pt_bar_reg_read,
  659. .u.dw.write = xen_pt_bar_reg_write,
  660. },
  661. /* Expansion ROM BAR reg */
  662. {
  663. .offset = PCI_ROM_ADDRESS,
  664. .size = 4,
  665. .init_val = 0x00000000,
  666. .ro_mask = 0x000007FE,
  667. .emu_mask = 0xFFFFF800,
  668. .init = xen_pt_bar_reg_init,
  669. .u.dw.read = xen_pt_long_reg_read,
  670. .u.dw.write = xen_pt_exp_rom_bar_reg_write,
  671. },
  672. {
  673. .size = 0,
  674. },
  675. };
  676. /*********************************
  677. * Vital Product Data Capability
  678. */
  679. /* Vital Product Data Capability Structure reg static information table */
  680. static XenPTRegInfo xen_pt_emu_reg_vpd[] = {
  681. {
  682. .offset = PCI_CAP_LIST_NEXT,
  683. .size = 1,
  684. .init_val = 0x00,
  685. .ro_mask = 0xFF,
  686. .emu_mask = 0xFF,
  687. .init = xen_pt_ptr_reg_init,
  688. .u.b.read = xen_pt_byte_reg_read,
  689. .u.b.write = xen_pt_byte_reg_write,
  690. },
  691. {
  692. .size = 0,
  693. },
  694. };
  695. /**************************************
  696. * Vendor Specific Capability
  697. */
  698. /* Vendor Specific Capability Structure reg static information table */
  699. static XenPTRegInfo xen_pt_emu_reg_vendor[] = {
  700. {
  701. .offset = PCI_CAP_LIST_NEXT,
  702. .size = 1,
  703. .init_val = 0x00,
  704. .ro_mask = 0xFF,
  705. .emu_mask = 0xFF,
  706. .init = xen_pt_ptr_reg_init,
  707. .u.b.read = xen_pt_byte_reg_read,
  708. .u.b.write = xen_pt_byte_reg_write,
  709. },
  710. {
  711. .size = 0,
  712. },
  713. };
  714. /*****************************
  715. * PCI Express Capability
  716. */
  717. static inline uint8_t get_capability_version(XenPCIPassthroughState *s,
  718. uint32_t offset)
  719. {
  720. uint8_t flags = pci_get_byte(s->dev.config + offset + PCI_EXP_FLAGS);
  721. return flags & PCI_EXP_FLAGS_VERS;
  722. }
  723. static inline uint8_t get_device_type(XenPCIPassthroughState *s,
  724. uint32_t offset)
  725. {
  726. uint8_t flags = pci_get_byte(s->dev.config + offset + PCI_EXP_FLAGS);
  727. return (flags & PCI_EXP_FLAGS_TYPE) >> 4;
  728. }
  729. /* initialize Link Control register */
  730. static int xen_pt_linkctrl_reg_init(XenPCIPassthroughState *s,
  731. XenPTRegInfo *reg, uint32_t real_offset,
  732. uint32_t *data)
  733. {
  734. uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);
  735. uint8_t dev_type = get_device_type(s, real_offset - reg->offset);
  736. /* no need to initialize in case of Root Complex Integrated Endpoint
  737. * with cap_ver 1.x
  738. */
  739. if ((dev_type == PCI_EXP_TYPE_RC_END) && (cap_ver == 1)) {
  740. *data = XEN_PT_INVALID_REG;
  741. }
  742. *data = reg->init_val;
  743. return 0;
  744. }
  745. /* initialize Device Control 2 register */
  746. static int xen_pt_devctrl2_reg_init(XenPCIPassthroughState *s,
  747. XenPTRegInfo *reg, uint32_t real_offset,
  748. uint32_t *data)
  749. {
  750. uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);
  751. /* no need to initialize in case of cap_ver 1.x */
  752. if (cap_ver == 1) {
  753. *data = XEN_PT_INVALID_REG;
  754. }
  755. *data = reg->init_val;
  756. return 0;
  757. }
  758. /* initialize Link Control 2 register */
  759. static int xen_pt_linkctrl2_reg_init(XenPCIPassthroughState *s,
  760. XenPTRegInfo *reg, uint32_t real_offset,
  761. uint32_t *data)
  762. {
  763. uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);
  764. uint32_t reg_field = 0;
  765. /* no need to initialize in case of cap_ver 1.x */
  766. if (cap_ver == 1) {
  767. reg_field = XEN_PT_INVALID_REG;
  768. } else {
  769. /* set Supported Link Speed */
  770. uint8_t lnkcap = pci_get_byte(s->dev.config + real_offset - reg->offset
  771. + PCI_EXP_LNKCAP);
  772. reg_field |= PCI_EXP_LNKCAP_SLS & lnkcap;
  773. }
  774. *data = reg_field;
  775. return 0;
  776. }
/* PCI Express Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_pcie[] = {
    /* Next Pointer reg */
    {
        .offset     = PCI_CAP_LIST_NEXT,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Device Capabilities reg */
    {
        .offset     = PCI_EXP_DEVCAP,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = 0x1FFCFFFF,
        .emu_mask   = 0x10000000,
        .init       = xen_pt_common_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_long_reg_write,
    },
    /* Device Control reg */
    {
        .offset     = PCI_EXP_DEVCTL,
        .size       = 2,
        .init_val   = 0x2810,
        .ro_mask    = 0x8400,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Link Control reg: not present on RC integrated endpoints with
     * cap_ver 1.x (see xen_pt_linkctrl_reg_init) */
    {
        .offset     = PCI_EXP_LNKCTL,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xFC34,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_linkctrl_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Device Control 2 reg: 0x28 is the PCIe-spec offset of Device
     * Control 2 (PCI_EXP_DEVCTL2); only present for cap_ver >= 2 */
    {
        .offset     = 0x28,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xFFE0,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_devctrl2_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Link Control 2 reg: 0x30 is the PCIe-spec offset of Link Control 2
     * (PCI_EXP_LNKCTL2); only present for cap_ver >= 2 */
    {
        .offset     = 0x30,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xE040,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_linkctrl2_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* sentinel: .size == 0 terminates the table */
    {
        .size = 0,
    },
};
  849. /*********************************
  850. * Power Management Capability
  851. */
  852. /* read Power Management Control/Status register */
  853. static int xen_pt_pmcsr_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
  854. uint16_t *value, uint16_t valid_mask)
  855. {
  856. XenPTRegInfo *reg = cfg_entry->reg;
  857. uint16_t valid_emu_mask = reg->emu_mask;
  858. valid_emu_mask |= PCI_PM_CTRL_STATE_MASK | PCI_PM_CTRL_NO_SOFT_RESET;
  859. valid_emu_mask = valid_emu_mask & valid_mask;
  860. *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask);
  861. return 0;
  862. }
  863. /* write Power Management Control/Status register */
  864. static int xen_pt_pmcsr_reg_write(XenPCIPassthroughState *s,
  865. XenPTReg *cfg_entry, uint16_t *val,
  866. uint16_t dev_value, uint16_t valid_mask)
  867. {
  868. XenPTRegInfo *reg = cfg_entry->reg;
  869. uint16_t emu_mask = reg->emu_mask;
  870. uint16_t writable_mask = 0;
  871. uint16_t throughable_mask = 0;
  872. emu_mask |= PCI_PM_CTRL_STATE_MASK | PCI_PM_CTRL_NO_SOFT_RESET;
  873. /* modify emulate register */
  874. writable_mask = emu_mask & ~reg->ro_mask & valid_mask;
  875. cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
  876. /* create value for writing to I/O device register */
  877. throughable_mask = ~emu_mask & valid_mask;
  878. *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
  879. return 0;
  880. }
/* Power Management Capability reg static information table */
static XenPTRegInfo xen_pt_emu_reg_pm[] = {
    /* Next Pointer reg */
    {
        .offset     = PCI_CAP_LIST_NEXT,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Power Management Capabilities reg: entirely read-only */
    {
        .offset     = PCI_CAP_FLAGS,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xFFFF,
        .emu_mask   = 0xF9C8,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* PCI Power Management Control/Status reg: power state and
     * NO_SOFT_RESET are additionally emulated by the pmcsr accessors */
    {
        .offset     = PCI_PM_CTRL,
        .size       = 2,
        .init_val   = 0x0008,
        .ro_mask    = 0xE1FC,
        .emu_mask   = 0x8100,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_pmcsr_reg_read,
        .u.w.write  = xen_pt_pmcsr_reg_write,
    },
    /* sentinel: .size == 0 terminates the table */
    {
        .size = 0,
    },
};
  920. /********************************
  921. * MSI Capability
  922. */
  923. /* Helper */
  924. static bool xen_pt_msgdata_check_type(uint32_t offset, uint16_t flags)
  925. {
  926. /* check the offset whether matches the type or not */
  927. bool is_32 = (offset == PCI_MSI_DATA_32) && !(flags & PCI_MSI_FLAGS_64BIT);
  928. bool is_64 = (offset == PCI_MSI_DATA_64) && (flags & PCI_MSI_FLAGS_64BIT);
  929. return is_32 || is_64;
  930. }
/* Initialize the MSI Message Control register.
 *
 * Reads the physical device's current value, disables MSI on the host if
 * it was left enabled, and seeds the cached MSI state (flags, control
 * offset) used by the MSI write handlers.
 */
static int xen_pt_msgctrl_reg_init(XenPCIPassthroughState *s,
                                   XenPTRegInfo *reg, uint32_t real_offset,
                                   uint32_t *data)
{
    PCIDevice *d = &s->dev;
    XenPTMSI *msi = s->msi;
    uint16_t reg_field = 0;
    /* use I/O device register's value as initial value */
    reg_field = pci_get_word(d->config + real_offset);
    if (reg_field & PCI_MSI_FLAGS_ENABLE) {
        XEN_PT_LOG(&s->dev, "MSI already enabled, disabling it first\n");
        /* write to the real device, with ENABLE cleared */
        xen_host_pci_set_word(&s->real_device, real_offset,
                              reg_field & ~PCI_MSI_FLAGS_ENABLE);
    }
    /* remember the device's capability bits (64BIT, MASKBIT, ...) */
    msi->flags |= reg_field;
    msi->ctrl_offset = real_offset;
    msi->initialized = false;
    msi->mapped = false;
    *data = reg->init_val;
    return 0;
}
/* Write the MSI Message Control register.
 *
 * Tracks the guest's MSI enable bit: on the first enable, sets up and
 * binds the physical MSI (pirq).  MSI setup failures are swallowed (the
 * ENABLE bit is simply dropped) so guest MSI problems cannot crash QEMU.
 */
static int xen_pt_msgctrl_reg_write(XenPCIPassthroughState *s,
                                    XenPTReg *cfg_entry, uint16_t *val,
                                    uint16_t dev_value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTMSI *msi = s->msi;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = 0;
    uint16_t raw_val;
    /* Currently no support for multi-vector */
    if (*val & PCI_MSI_FLAGS_QSIZE) {
        XEN_PT_WARN(&s->dev, "Tries to set more than 1 vector ctrl %x\n", *val);
    }
    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
    /* cache everything except ENABLE, which is tracked separately below */
    msi->flags |= cfg_entry->data & ~PCI_MSI_FLAGS_ENABLE;
    /* create value for writing to I/O device register */
    raw_val = *val;
    throughable_mask = ~reg->emu_mask & valid_mask;
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
    /* update MSI */
    if (raw_val & PCI_MSI_FLAGS_ENABLE) {
        /* setup MSI pirq for the first time */
        if (!msi->initialized) {
            /* Init physical one */
            XEN_PT_LOG(&s->dev, "setup MSI\n");
            if (xen_pt_msi_setup(s)) {
                /* We do not broadcast the error to the framework code, so
                 * that MSI errors are contained in MSI emulation code and
                 * QEMU can go on running.
                 * Guest MSI would be actually not working.
                 */
                *val &= ~PCI_MSI_FLAGS_ENABLE;
                XEN_PT_WARN(&s->dev, "Can not map MSI.\n");
                return 0;
            }
            if (xen_pt_msi_update(s)) {
                *val &= ~PCI_MSI_FLAGS_ENABLE;
                XEN_PT_WARN(&s->dev, "Can not bind MSI\n");
                return 0;
            }
            msi->initialized = true;
            msi->mapped = true;
        }
        msi->flags |= PCI_MSI_FLAGS_ENABLE;
    } else {
        msi->flags &= ~PCI_MSI_FLAGS_ENABLE;
    }
    /* pass through MSI_ENABLE bit unmodified from the guest's write */
    *val &= ~PCI_MSI_FLAGS_ENABLE;
    *val |= raw_val & PCI_MSI_FLAGS_ENABLE;
    return 0;
}
  1007. /* initialize Message Upper Address register */
  1008. static int xen_pt_msgaddr64_reg_init(XenPCIPassthroughState *s,
  1009. XenPTRegInfo *reg, uint32_t real_offset,
  1010. uint32_t *data)
  1011. {
  1012. /* no need to initialize in case of 32 bit type */
  1013. if (!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) {
  1014. *data = XEN_PT_INVALID_REG;
  1015. } else {
  1016. *data = reg->init_val;
  1017. }
  1018. return 0;
  1019. }
  1020. /* this function will be called twice (for 32 bit and 64 bit type) */
  1021. /* initialize Message Data register */
  1022. static int xen_pt_msgdata_reg_init(XenPCIPassthroughState *s,
  1023. XenPTRegInfo *reg, uint32_t real_offset,
  1024. uint32_t *data)
  1025. {
  1026. uint32_t flags = s->msi->flags;
  1027. uint32_t offset = reg->offset;
  1028. /* check the offset whether matches the type or not */
  1029. if (xen_pt_msgdata_check_type(offset, flags)) {
  1030. *data = reg->init_val;
  1031. } else {
  1032. *data = XEN_PT_INVALID_REG;
  1033. }
  1034. return 0;
  1035. }
  1036. /* write Message Address register */
  1037. static int xen_pt_msgaddr32_reg_write(XenPCIPassthroughState *s,
  1038. XenPTReg *cfg_entry, uint32_t *val,
  1039. uint32_t dev_value, uint32_t valid_mask)
  1040. {
  1041. XenPTRegInfo *reg = cfg_entry->reg;
  1042. uint32_t writable_mask = 0;
  1043. uint32_t throughable_mask = 0;
  1044. uint32_t old_addr = cfg_entry->data;
  1045. /* modify emulate register */
  1046. writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
  1047. cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
  1048. s->msi->addr_lo = cfg_entry->data;
  1049. /* create value for writing to I/O device register */
  1050. throughable_mask = ~reg->emu_mask & valid_mask;
  1051. *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
  1052. /* update MSI */
  1053. if (cfg_entry->data != old_addr) {
  1054. if (s->msi->mapped) {
  1055. xen_pt_msi_update(s);
  1056. }
  1057. }
  1058. return 0;
  1059. }
  1060. /* write Message Upper Address register */
  1061. static int xen_pt_msgaddr64_reg_write(XenPCIPassthroughState *s,
  1062. XenPTReg *cfg_entry, uint32_t *val,
  1063. uint32_t dev_value, uint32_t valid_mask)
  1064. {
  1065. XenPTRegInfo *reg = cfg_entry->reg;
  1066. uint32_t writable_mask = 0;
  1067. uint32_t throughable_mask = 0;
  1068. uint32_t old_addr = cfg_entry->data;
  1069. /* check whether the type is 64 bit or not */
  1070. if (!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) {
  1071. XEN_PT_ERR(&s->dev,
  1072. "Can't write to the upper address without 64 bit support\n");
  1073. return -1;
  1074. }
  1075. /* modify emulate register */
  1076. writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
  1077. cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
  1078. /* update the msi_info too */
  1079. s->msi->addr_hi = cfg_entry->data;
  1080. /* create value for writing to I/O device register */
  1081. throughable_mask = ~reg->emu_mask & valid_mask;
  1082. *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
  1083. /* update MSI */
  1084. if (cfg_entry->data != old_addr) {
  1085. if (s->msi->mapped) {
  1086. xen_pt_msi_update(s);
  1087. }
  1088. }
  1089. return 0;
  1090. }
/* Write the MSI Message Data register.
 *
 * Registered twice (32-bit and 64-bit layout entries); a write to the
 * entry that does not match the device's addressing mode is an internal
 * error and fails with -1.  Updates the cached MSI data and rebinds the
 * MSI if the value changed while mapped.
 */
static int xen_pt_msgdata_reg_write(XenPCIPassthroughState *s,
                                    XenPTReg *cfg_entry, uint16_t *val,
                                    uint16_t dev_value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTMSI *msi = s->msi;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = 0;
    uint16_t old_data = cfg_entry->data;
    uint32_t offset = reg->offset;
    /* check the offset whether matches the type or not */
    if (!xen_pt_msgdata_check_type(offset, msi->flags)) {
        /* exit I/O emulator */
        XEN_PT_ERR(&s->dev, "the offset does not match the 32/64 bit type!\n");
        return -1;
    }
    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
    /* update the msi_info too */
    msi->data = cfg_entry->data;
    /* create value for writing to I/O device register */
    throughable_mask = ~reg->emu_mask & valid_mask;
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
    /* update MSI binding if the data changed while mapped */
    if (cfg_entry->data != old_data) {
        if (msi->mapped) {
            xen_pt_msi_update(s);
        }
    }
    return 0;
}
/* MSI Capability Structure reg static information table.
 * Address and Data registers are fully emulated (emu_mask 0xFF..) with
 * .no_wb set — presumably suppressing write-back of the emulated value
 * to the real device, since the physical MSI address/data are programmed
 * by Xen via pirq binding instead; confirm against XenPTRegInfo in
 * xen_pt.h.
 */
static XenPTRegInfo xen_pt_emu_reg_msi[] = {
    /* Next Pointer reg */
    {
        .offset     = PCI_CAP_LIST_NEXT,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Message Control reg */
    {
        .offset     = PCI_MSI_FLAGS,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xFF8E,
        .emu_mask   = 0x007F,
        .init       = xen_pt_msgctrl_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_msgctrl_reg_write,
    },
    /* Message Address reg */
    {
        .offset     = PCI_MSI_ADDRESS_LO,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = 0x00000003,
        .emu_mask   = 0xFFFFFFFF,
        .no_wb      = 1,
        .init       = xen_pt_common_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_msgaddr32_reg_write,
    },
    /* Message Upper Address reg (if PCI_MSI_FLAGS_64BIT set) */
    {
        .offset     = PCI_MSI_ADDRESS_HI,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = 0x00000000,
        .emu_mask   = 0xFFFFFFFF,
        .no_wb      = 1,
        .init       = xen_pt_msgaddr64_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_msgaddr64_reg_write,
    },
    /* Message Data reg (16 bits of data for 32-bit devices) */
    {
        .offset     = PCI_MSI_DATA_32,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0x0000,
        .emu_mask   = 0xFFFF,
        .no_wb      = 1,
        .init       = xen_pt_msgdata_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_msgdata_reg_write,
    },
    /* Message Data reg (16 bits of data for 64-bit devices) */
    {
        .offset     = PCI_MSI_DATA_64,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0x0000,
        .emu_mask   = 0xFFFF,
        .no_wb      = 1,
        .init       = xen_pt_msgdata_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_msgdata_reg_write,
    },
    /* sentinel: .size == 0 terminates the table */
    {
        .size = 0,
    },
};
  1201. /**************************************
  1202. * MSI-X Capability
  1203. */
/* Initialize the MSI-X Message Control register.
 *
 * If MSI-X was left enabled on the physical device, disable it there
 * before handing control to the guest, and record the control register's
 * offset for later use by the MSI-X emulation.
 */
static int xen_pt_msixctrl_reg_init(XenPCIPassthroughState *s,
                                    XenPTRegInfo *reg, uint32_t real_offset,
                                    uint32_t *data)
{
    PCIDevice *d = &s->dev;
    uint16_t reg_field = 0;
    /* use I/O device register's value as initial value */
    reg_field = pci_get_word(d->config + real_offset);
    if (reg_field & PCI_MSIX_FLAGS_ENABLE) {
        XEN_PT_LOG(d, "MSIX already enabled, disabling it first\n");
        /* write to the real device, with ENABLE cleared */
        xen_host_pci_set_word(&s->real_device, real_offset,
                              reg_field & ~PCI_MSIX_FLAGS_ENABLE);
    }
    s->msix->ctrl_offset = real_offset;
    *data = reg->init_val;
    return 0;
}
  1222. static int xen_pt_msixctrl_reg_write(XenPCIPassthroughState *s,
  1223. XenPTReg *cfg_entry, uint16_t *val,
  1224. uint16_t dev_value, uint16_t valid_mask)
  1225. {
  1226. XenPTRegInfo *reg = cfg_entry->reg;
  1227. uint16_t writable_mask = 0;
  1228. uint16_t throughable_mask = 0;
  1229. int debug_msix_enabled_old;
  1230. /* modify emulate register */
  1231. writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
  1232. cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
  1233. /* create value for writing to I/O device register */
  1234. throughable_mask = ~reg->emu_mask & valid_mask;
  1235. *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
  1236. /* update MSI-X */
  1237. if ((*val & PCI_MSIX_FLAGS_ENABLE)
  1238. && !(*val & PCI_MSIX_FLAGS_MASKALL)) {
  1239. xen_pt_msix_update(s);
  1240. }
  1241. debug_msix_enabled_old = s->msix->enabled;
  1242. s->msix->enabled = !!(*val & PCI_MSIX_FLAGS_ENABLE);
  1243. if (s->msix->enabled != debug_msix_enabled_old) {
  1244. XEN_PT_LOG(&s->dev, "%s MSI-X\n",
  1245. s->msix->enabled ? "enable" : "disable");
  1246. }
  1247. return 0;
  1248. }
/* MSI-X Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_msix[] = {
    /* Next Pointer reg */
    {
        .offset     = PCI_CAP_LIST_NEXT,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Message Control reg: PCI_MSI_FLAGS is the same offset (0x02) as
     * the MSI-X Message Control register */
    {
        .offset     = PCI_MSI_FLAGS,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0x3FFF,
        .emu_mask   = 0x0000,
        .init       = xen_pt_msixctrl_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_msixctrl_reg_write,
    },
    /* sentinel: .size == 0 terminates the table */
    {
        .size = 0,
    },
};
  1277. /****************************
  1278. * Capabilities
  1279. */
  1280. /* capability structure register group size functions */
  1281. static int xen_pt_reg_grp_size_init(XenPCIPassthroughState *s,
  1282. const XenPTRegGroupInfo *grp_reg,
  1283. uint32_t base_offset, uint8_t *size)
  1284. {
  1285. *size = grp_reg->grp_size;
  1286. return 0;
  1287. }
  1288. /* get Vendor Specific Capability Structure register group size */
  1289. static int xen_pt_vendor_size_init(XenPCIPassthroughState *s,
  1290. const XenPTRegGroupInfo *grp_reg,
  1291. uint32_t base_offset, uint8_t *size)
  1292. {
  1293. *size = pci_get_byte(s->dev.config + base_offset + 0x02);
  1294. return 0;
  1295. }
/* Compute the PCI Express Capability Structure group size.
 *
 * The size depends on the capability version and device/port type; only
 * endpoint types are supported for passthrough.  Returns -1 on an
 * unsupported type or capability version.
 */
static int xen_pt_pcie_size_init(XenPCIPassthroughState *s,
                                 const XenPTRegGroupInfo *grp_reg,
                                 uint32_t base_offset, uint8_t *size)
{
    PCIDevice *d = &s->dev;
    uint8_t version = get_capability_version(s, base_offset);
    uint8_t type = get_device_type(s, base_offset);
    uint8_t pcie_size = 0;

    /* calculate size depend on capability version and device/port type */
    /* in case of PCI Express Base Specification Rev 1.x */
    if (version == 1) {
        /* The PCI Express Capabilities, Device Capabilities, and Device
         * Status/Control registers are required for all PCI Express devices.
         * The Link Capabilities and Link Status/Control are required for all
         * Endpoints that are not Root Complex Integrated Endpoints. Endpoints
         * are not required to implement registers other than those listed
         * above and terminate the capability structure.
         */
        switch (type) {
        case PCI_EXP_TYPE_ENDPOINT:
        case PCI_EXP_TYPE_LEG_END:
            pcie_size = 0x14;
            break;
        case PCI_EXP_TYPE_RC_END:
            /* has no link */
            pcie_size = 0x0C;
            break;
        /* only EndPoint passthrough is supported */
        case PCI_EXP_TYPE_ROOT_PORT:
        case PCI_EXP_TYPE_UPSTREAM:
        case PCI_EXP_TYPE_DOWNSTREAM:
        case PCI_EXP_TYPE_PCI_BRIDGE:
        case PCI_EXP_TYPE_PCIE_BRIDGE:
        case PCI_EXP_TYPE_RC_EC:
        default:
            XEN_PT_ERR(d, "Unsupported device/port type %#x.\n", type);
            return -1;
        }
    }
    /* in case of PCI Express Base Specification Rev 2.0 */
    else if (version == 2) {
        switch (type) {
        case PCI_EXP_TYPE_ENDPOINT:
        case PCI_EXP_TYPE_LEG_END:
        case PCI_EXP_TYPE_RC_END:
            /* For Functions that do not implement the registers,
             * these spaces must be hardwired to 0b.
             */
            pcie_size = 0x3C;
            break;
        /* only EndPoint passthrough is supported */
        case PCI_EXP_TYPE_ROOT_PORT:
        case PCI_EXP_TYPE_UPSTREAM:
        case PCI_EXP_TYPE_DOWNSTREAM:
        case PCI_EXP_TYPE_PCI_BRIDGE:
        case PCI_EXP_TYPE_PCIE_BRIDGE:
        case PCI_EXP_TYPE_RC_EC:
        default:
            XEN_PT_ERR(d, "Unsupported device/port type %#x.\n", type);
            return -1;
        }
    } else {
        XEN_PT_ERR(d, "Unsupported capability version %#x.\n", version);
        return -1;
    }

    *size = pcie_size;
    return 0;
}
/* Compute the MSI Capability Structure group size and allocate the MSI
 * state tracked for this device.
 *
 * Base size is 0xa (control + 32-bit address + data); 64-bit addressing
 * adds 4 bytes, and per-vector masking adds 10 more (mask + pending).
 */
static int xen_pt_msi_size_init(XenPCIPassthroughState *s,
                                const XenPTRegGroupInfo *grp_reg,
                                uint32_t base_offset, uint8_t *size)
{
    PCIDevice *d = &s->dev;
    uint16_t msg_ctrl = 0;
    uint8_t msi_size = 0xa;
    msg_ctrl = pci_get_word(d->config + (base_offset + PCI_MSI_FLAGS));
    /* 64-bit address capable: upper address dword present */
    if (msg_ctrl & PCI_MSI_FLAGS_64BIT) {
        msi_size += 4;
    }
    /* per-vector masking capable: mask and pending registers present */
    if (msg_ctrl & PCI_MSI_FLAGS_MASKBIT) {
        msi_size += 10;
    }
    s->msi = g_new0(XenPTMSI, 1);
    s->msi->pirq = XEN_PT_UNASSIGNED_PIRQ;
    *size = msi_size;
    return 0;
}
  1386. /* get MSI-X Capability Structure register group size */
  1387. static int xen_pt_msix_size_init(XenPCIPassthroughState *s,
  1388. const XenPTRegGroupInfo *grp_reg,
  1389. uint32_t base_offset, uint8_t *size)
  1390. {
  1391. int rc = 0;
  1392. rc = xen_pt_msix_init(s, base_offset);
  1393. if (rc < 0) {
  1394. XEN_PT_ERR(&s->dev, "Internal error: Invalid xen_pt_msix_init.\n");
  1395. return rc;
  1396. }
  1397. *size = grp_reg->grp_size;
  1398. return 0;
  1399. }
/* Master table of config-space register groups.
 * grp_id 0xFF marks the type-0 header (not a capability); grp_size 0xFF
 * means "variable, determined by size_init".  HARDWIRED groups are hidden
 * from the guest (reads return zero); EMU groups use their emu_regs table.
 */
static const XenPTRegGroupInfo xen_pt_emu_reg_grps[] = {
    /* Header Type0 reg group */
    {
        .grp_id      = 0xFF,
        .grp_type    = XEN_PT_GRP_TYPE_EMU,
        .grp_size    = 0x40,
        .size_init   = xen_pt_reg_grp_size_init,
        .emu_regs    = xen_pt_emu_reg_header0,
    },
    /* PCI PowerManagement Capability reg group */
    {
        .grp_id      = PCI_CAP_ID_PM,
        .grp_type    = XEN_PT_GRP_TYPE_EMU,
        .grp_size    = PCI_PM_SIZEOF,
        .size_init   = xen_pt_reg_grp_size_init,
        .emu_regs    = xen_pt_emu_reg_pm,
    },
    /* AGP Capability Structure reg group */
    {
        .grp_id      = PCI_CAP_ID_AGP,
        .grp_type    = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size    = 0x30,
        .size_init   = xen_pt_reg_grp_size_init,
    },
    /* Vital Product Data Capability Structure reg group */
    {
        .grp_id      = PCI_CAP_ID_VPD,
        .grp_type    = XEN_PT_GRP_TYPE_EMU,
        .grp_size    = 0x08,
        .size_init   = xen_pt_reg_grp_size_init,
        .emu_regs    = xen_pt_emu_reg_vpd,
    },
    /* Slot Identification reg group */
    {
        .grp_id      = PCI_CAP_ID_SLOTID,
        .grp_type    = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size    = 0x04,
        .size_init   = xen_pt_reg_grp_size_init,
    },
    /* MSI Capability Structure reg group (variable size) */
    {
        .grp_id      = PCI_CAP_ID_MSI,
        .grp_type    = XEN_PT_GRP_TYPE_EMU,
        .grp_size    = 0xFF,
        .size_init   = xen_pt_msi_size_init,
        .emu_regs    = xen_pt_emu_reg_msi,
    },
    /* PCI-X Capabilities List Item reg group */
    {
        .grp_id      = PCI_CAP_ID_PCIX,
        .grp_type    = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size    = 0x18,
        .size_init   = xen_pt_reg_grp_size_init,
    },
    /* Vendor Specific Capability Structure reg group (variable size) */
    {
        .grp_id      = PCI_CAP_ID_VNDR,
        .grp_type    = XEN_PT_GRP_TYPE_EMU,
        .grp_size    = 0xFF,
        .size_init   = xen_pt_vendor_size_init,
        .emu_regs    = xen_pt_emu_reg_vendor,
    },
    /* SHPC Capability List Item reg group */
    {
        .grp_id      = PCI_CAP_ID_SHPC,
        .grp_type    = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size    = 0x08,
        .size_init   = xen_pt_reg_grp_size_init,
    },
    /* Subsystem ID and Subsystem Vendor ID Capability List Item reg group */
    {
        .grp_id      = PCI_CAP_ID_SSVID,
        .grp_type    = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size    = 0x08,
        .size_init   = xen_pt_reg_grp_size_init,
    },
    /* AGP 8x Capability Structure reg group */
    {
        .grp_id      = PCI_CAP_ID_AGP3,
        .grp_type    = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size    = 0x30,
        .size_init   = xen_pt_reg_grp_size_init,
    },
    /* PCI Express Capability Structure reg group (variable size) */
    {
        .grp_id      = PCI_CAP_ID_EXP,
        .grp_type    = XEN_PT_GRP_TYPE_EMU,
        .grp_size    = 0xFF,
        .size_init   = xen_pt_pcie_size_init,
        .emu_regs    = xen_pt_emu_reg_pcie,
    },
    /* MSI-X Capability Structure reg group */
    {
        .grp_id      = PCI_CAP_ID_MSIX,
        .grp_type    = XEN_PT_GRP_TYPE_EMU,
        .grp_size    = 0x0C,
        .size_init   = xen_pt_msix_size_init,
        .emu_regs    = xen_pt_emu_reg_msix,
    },
    /* sentinel: .grp_size == 0 terminates the table */
    {
        .grp_size = 0,
    },
};
/* Initialize a Capabilities Pointer or Next Pointer register.
 *
 * Walks the device's capability list starting from the byte at
 * real_offset, skipping hidden and hardwired capabilities, and stores the
 * offset of the first guest-visible (emulated) capability — or 0 if the
 * end of the list is reached.
 *
 * NOTE(review): a malformed capability list containing a cycle would make
 * this loop spin forever — the walk is not bounded like find_cap_offset's
 * PCI_CAP_MAX limit; confirm whether config-space sanitization elsewhere
 * rules that out.
 */
static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s,
                               XenPTRegInfo *reg, uint32_t real_offset,
                               uint32_t *data)
{
    int i;
    uint8_t *config = s->dev.config;
    uint32_t reg_field = pci_get_byte(config + real_offset);
    uint8_t cap_id = 0;
    /* find capability offset */
    while (reg_field) {
        for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) {
            /* skip capabilities hidden from this device */
            if (xen_pt_hide_dev_cap(&s->real_device,
                                    xen_pt_emu_reg_grps[i].grp_id)) {
                continue;
            }
            cap_id = pci_get_byte(config + reg_field + PCI_CAP_LIST_ID);
            if (xen_pt_emu_reg_grps[i].grp_id == cap_id) {
                if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) {
                    goto out;
                }
                /* ignore the 0 hardwired capability, find next one */
                break;
            }
        }
        /* next capability */
        reg_field = pci_get_byte(config + reg_field + PCI_CAP_LIST_NEXT);
    }
out:
    *data = reg_field;
    return 0;
}
  1535. /*************
  1536. * Main
  1537. */
  1538. static uint8_t find_cap_offset(XenPCIPassthroughState *s, uint8_t cap)
  1539. {
  1540. uint8_t id;
  1541. unsigned max_cap = PCI_CAP_MAX;
  1542. uint8_t pos = PCI_CAPABILITY_LIST;
  1543. uint8_t status = 0;
  1544. if (xen_host_pci_get_byte(&s->real_device, PCI_STATUS, &status)) {
  1545. return 0;
  1546. }
  1547. if ((status & PCI_STATUS_CAP_LIST) == 0) {
  1548. return 0;
  1549. }
  1550. while (max_cap--) {
  1551. if (xen_host_pci_get_byte(&s->real_device, pos, &pos)) {
  1552. break;
  1553. }
  1554. if (pos < PCI_CONFIG_HEADER_SIZE) {
  1555. break;
  1556. }
  1557. pos &= ~3;
  1558. if (xen_host_pci_get_byte(&s->real_device,
  1559. pos + PCI_CAP_LIST_ID, &id)) {
  1560. break;
  1561. }
  1562. if (id == 0xff) {
  1563. break;
  1564. }
  1565. if (id == cap) {
  1566. return pos;
  1567. }
  1568. pos += PCI_CAP_LIST_NEXT;
  1569. }
  1570. return 0;
  1571. }
  1572. static int xen_pt_config_reg_init(XenPCIPassthroughState *s,
  1573. XenPTRegGroup *reg_grp, XenPTRegInfo *reg)
  1574. {
  1575. XenPTReg *reg_entry;
  1576. uint32_t data = 0;
  1577. int rc = 0;
  1578. reg_entry = g_new0(XenPTReg, 1);
  1579. reg_entry->reg = reg;
  1580. if (reg->init) {
  1581. /* initialize emulate register */
  1582. rc = reg->init(s, reg_entry->reg,
  1583. reg_grp->base_offset + reg->offset, &data);
  1584. if (rc < 0) {
  1585. free(reg_entry);
  1586. return rc;
  1587. }
  1588. if (data == XEN_PT_INVALID_REG) {
  1589. /* free unused BAR register entry */
  1590. free(reg_entry);
  1591. return 0;
  1592. }
  1593. /* set register value */
  1594. reg_entry->data = data;
  1595. }
  1596. /* list add register entry */
  1597. QLIST_INSERT_HEAD(&reg_grp->reg_tbl_list, reg_entry, entries);
  1598. return 0;
  1599. }
/* Build the full emulated config-space model for the passthrough device.
 *
 * Iterates the xen_pt_emu_reg_grps table: the type-0 header group
 * (grp_id 0xFF) is always present at offset 0; capability groups are
 * included only if present on the physical device and not hidden.
 * On any failure everything built so far is torn down via
 * xen_pt_config_delete() and the error is returned.
 */
int xen_pt_config_init(XenPCIPassthroughState *s)
{
    int i, rc;
    QLIST_INIT(&s->reg_grps);
    for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) {
        uint32_t reg_grp_offset = 0;
        XenPTRegGroup *reg_grp_entry = NULL;
        /* capability groups need locating on the real device;
         * the header group (0xFF) is always at offset 0 */
        if (xen_pt_emu_reg_grps[i].grp_id != 0xFF) {
            if (xen_pt_hide_dev_cap(&s->real_device,
                                    xen_pt_emu_reg_grps[i].grp_id)) {
                continue;
            }
            reg_grp_offset = find_cap_offset(s, xen_pt_emu_reg_grps[i].grp_id);
            if (!reg_grp_offset) {
                continue;
            }
        }
        reg_grp_entry = g_new0(XenPTRegGroup, 1);
        QLIST_INIT(&reg_grp_entry->reg_tbl_list);
        /* insert before size_init so cleanup on error frees this group too */
        QLIST_INSERT_HEAD(&s->reg_grps, reg_grp_entry, entries);
        reg_grp_entry->base_offset = reg_grp_offset;
        reg_grp_entry->reg_grp = xen_pt_emu_reg_grps + i;
        if (xen_pt_emu_reg_grps[i].size_init) {
            /* get register group size */
            rc = xen_pt_emu_reg_grps[i].size_init(s, reg_grp_entry->reg_grp,
                                                  reg_grp_offset,
                                                  &reg_grp_entry->size);
            if (rc < 0) {
                xen_pt_config_delete(s);
                return rc;
            }
        }
        if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) {
            if (xen_pt_emu_reg_grps[i].emu_regs) {
                int j = 0;
                XenPTRegInfo *regs = xen_pt_emu_reg_grps[i].emu_regs;
                /* initialize capability register */
                for (j = 0; regs->size != 0; j++, regs++) {
                    /* initialize capability register */
                    rc = xen_pt_config_reg_init(s, reg_grp_entry, regs);
                    if (rc < 0) {
                        xen_pt_config_delete(s);
                        return rc;
                    }
                }
            }
        }
    }
    return 0;
}
/* Tear down the emulated config-space model: free the MSI/MSI-X state
 * and every register group with all its register entries.
 * Safe to call on a partially built model (used on init-error paths).
 */
void xen_pt_config_delete(XenPCIPassthroughState *s)
{
    struct XenPTRegGroup *reg_group, *next_grp;
    struct XenPTReg *reg, *next_reg;
    /* free MSI/MSI-X info table */
    if (s->msix) {
        xen_pt_msix_delete(s);
    }
    if (s->msi) {
        g_free(s->msi);
    }
    /* free all register group entry */
    QLIST_FOREACH_SAFE(reg_group, &s->reg_grps, entries, next_grp) {
        /* free all register entry */
        QLIST_FOREACH_SAFE(reg, &reg_group->reg_tbl_list, entries, next_reg) {
            QLIST_REMOVE(reg, entries);
            g_free(reg);
        }
        QLIST_REMOVE(reg_group, entries);
        g_free(reg_group);
    }
}