2
0

xen_pt_config_init.c 57 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869
  1. /*
  2. * Copyright (c) 2007, Neocleus Corporation.
  3. * Copyright (c) 2007, Intel Corporation.
  4. *
  5. * This work is licensed under the terms of the GNU GPL, version 2. See
  6. * the COPYING file in the top-level directory.
  7. *
  8. * Alex Novik <alex@neocleus.com>
  9. * Allen Kay <allen.m.kay@intel.com>
  10. * Guy Zana <guy@neocleus.com>
  11. *
  12. * This file implements direct PCI assignment to a HVM guest
  13. */
  14. #include "qemu-timer.h"
  15. #include "xen_backend.h"
  16. #include "xen_pt.h"
  17. #define XEN_PT_MERGE_VALUE(value, data, val_mask) \
  18. (((value) & (val_mask)) | ((data) & ~(val_mask)))
  19. #define XEN_PT_INVALID_REG 0xFFFFFFFF /* invalid register value */
  20. /* prototype */
  21. static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg,
  22. uint32_t real_offset, uint32_t *data);
  23. /* helper */
  24. /* A return value of 1 means the capability should NOT be exposed to guest. */
  25. static int xen_pt_hide_dev_cap(const XenHostPCIDevice *d, uint8_t grp_id)
  26. {
  27. switch (grp_id) {
  28. case PCI_CAP_ID_EXP:
  29. /* The PCI Express Capability Structure of the VF of Intel 82599 10GbE
  30. * Controller looks trivial, e.g., the PCI Express Capabilities
  31. * Register is 0. We should not try to expose it to guest.
  32. *
  33. * The datasheet is available at
  34. * http://download.intel.com/design/network/datashts/82599_datasheet.pdf
  35. *
  36. * See 'Table 9.7. VF PCIe Configuration Space' of the datasheet, the
  37. * PCI Express Capability Structure of the VF of Intel 82599 10GbE
  38. * Controller looks trivial, e.g., the PCI Express Capabilities
  39. * Register is 0, so the Capability Version is 0 and
  40. * xen_pt_pcie_size_init() would fail.
  41. */
  42. if (d->vendor_id == PCI_VENDOR_ID_INTEL &&
  43. d->device_id == PCI_DEVICE_ID_INTEL_82599_SFP_VF) {
  44. return 1;
  45. }
  46. break;
  47. }
  48. return 0;
  49. }
  50. /* find emulate register group entry */
  51. XenPTRegGroup *xen_pt_find_reg_grp(XenPCIPassthroughState *s, uint32_t address)
  52. {
  53. XenPTRegGroup *entry = NULL;
  54. /* find register group entry */
  55. QLIST_FOREACH(entry, &s->reg_grps, entries) {
  56. /* check address */
  57. if ((entry->base_offset <= address)
  58. && ((entry->base_offset + entry->size) > address)) {
  59. return entry;
  60. }
  61. }
  62. /* group entry not found */
  63. return NULL;
  64. }
  65. /* find emulate register entry */
  66. XenPTReg *xen_pt_find_reg(XenPTRegGroup *reg_grp, uint32_t address)
  67. {
  68. XenPTReg *reg_entry = NULL;
  69. XenPTRegInfo *reg = NULL;
  70. uint32_t real_offset = 0;
  71. /* find register entry */
  72. QLIST_FOREACH(reg_entry, &reg_grp->reg_tbl_list, entries) {
  73. reg = reg_entry->reg;
  74. real_offset = reg_grp->base_offset + reg->offset;
  75. /* check address */
  76. if ((real_offset <= address)
  77. && ((real_offset + reg->size) > address)) {
  78. return reg_entry;
  79. }
  80. }
  81. return NULL;
  82. }
  83. /****************
  84. * general register functions
  85. */
  86. /* register initialization function */
  87. static int xen_pt_common_reg_init(XenPCIPassthroughState *s,
  88. XenPTRegInfo *reg, uint32_t real_offset,
  89. uint32_t *data)
  90. {
  91. *data = reg->init_val;
  92. return 0;
  93. }
  94. /* Read register functions */
  95. static int xen_pt_byte_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
  96. uint8_t *value, uint8_t valid_mask)
  97. {
  98. XenPTRegInfo *reg = cfg_entry->reg;
  99. uint8_t valid_emu_mask = 0;
  100. /* emulate byte register */
  101. valid_emu_mask = reg->emu_mask & valid_mask;
  102. *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask);
  103. return 0;
  104. }
  105. static int xen_pt_word_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
  106. uint16_t *value, uint16_t valid_mask)
  107. {
  108. XenPTRegInfo *reg = cfg_entry->reg;
  109. uint16_t valid_emu_mask = 0;
  110. /* emulate word register */
  111. valid_emu_mask = reg->emu_mask & valid_mask;
  112. *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask);
  113. return 0;
  114. }
  115. static int xen_pt_long_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
  116. uint32_t *value, uint32_t valid_mask)
  117. {
  118. XenPTRegInfo *reg = cfg_entry->reg;
  119. uint32_t valid_emu_mask = 0;
  120. /* emulate long register */
  121. valid_emu_mask = reg->emu_mask & valid_mask;
  122. *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask);
  123. return 0;
  124. }
  125. /* Write register functions */
  126. static int xen_pt_byte_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
  127. uint8_t *val, uint8_t dev_value,
  128. uint8_t valid_mask)
  129. {
  130. XenPTRegInfo *reg = cfg_entry->reg;
  131. uint8_t writable_mask = 0;
  132. uint8_t throughable_mask = 0;
  133. /* modify emulate register */
  134. writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
  135. cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
  136. /* create value for writing to I/O device register */
  137. throughable_mask = ~reg->emu_mask & valid_mask;
  138. *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
  139. return 0;
  140. }
  141. static int xen_pt_word_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
  142. uint16_t *val, uint16_t dev_value,
  143. uint16_t valid_mask)
  144. {
  145. XenPTRegInfo *reg = cfg_entry->reg;
  146. uint16_t writable_mask = 0;
  147. uint16_t throughable_mask = 0;
  148. /* modify emulate register */
  149. writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
  150. cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
  151. /* create value for writing to I/O device register */
  152. throughable_mask = ~reg->emu_mask & valid_mask;
  153. *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
  154. return 0;
  155. }
  156. static int xen_pt_long_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
  157. uint32_t *val, uint32_t dev_value,
  158. uint32_t valid_mask)
  159. {
  160. XenPTRegInfo *reg = cfg_entry->reg;
  161. uint32_t writable_mask = 0;
  162. uint32_t throughable_mask = 0;
  163. /* modify emulate register */
  164. writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
  165. cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
  166. /* create value for writing to I/O device register */
  167. throughable_mask = ~reg->emu_mask & valid_mask;
  168. *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
  169. return 0;
  170. }
  171. /* XenPTRegInfo declaration
  172. * - only for emulated register (either a part or whole bit).
  173. * - for passthrough register that need special behavior (like interacting with
  174. * other component), set emu_mask to all 0 and specify r/w func properly.
  175. * - do NOT use ALL F for init_val, otherwise the tbl will not be registered.
  176. */
  177. /********************
  178. * Header Type0
  179. */
  180. static int xen_pt_vendor_reg_init(XenPCIPassthroughState *s,
  181. XenPTRegInfo *reg, uint32_t real_offset,
  182. uint32_t *data)
  183. {
  184. *data = s->real_device.vendor_id;
  185. return 0;
  186. }
  187. static int xen_pt_device_reg_init(XenPCIPassthroughState *s,
  188. XenPTRegInfo *reg, uint32_t real_offset,
  189. uint32_t *data)
  190. {
  191. *data = s->real_device.device_id;
  192. return 0;
  193. }
  194. static int xen_pt_status_reg_init(XenPCIPassthroughState *s,
  195. XenPTRegInfo *reg, uint32_t real_offset,
  196. uint32_t *data)
  197. {
  198. XenPTRegGroup *reg_grp_entry = NULL;
  199. XenPTReg *reg_entry = NULL;
  200. uint32_t reg_field = 0;
  201. /* find Header register group */
  202. reg_grp_entry = xen_pt_find_reg_grp(s, PCI_CAPABILITY_LIST);
  203. if (reg_grp_entry) {
  204. /* find Capabilities Pointer register */
  205. reg_entry = xen_pt_find_reg(reg_grp_entry, PCI_CAPABILITY_LIST);
  206. if (reg_entry) {
  207. /* check Capabilities Pointer register */
  208. if (reg_entry->data) {
  209. reg_field |= PCI_STATUS_CAP_LIST;
  210. } else {
  211. reg_field &= ~PCI_STATUS_CAP_LIST;
  212. }
  213. } else {
  214. xen_shutdown_fatal_error("Internal error: Couldn't find XenPTReg*"
  215. " for Capabilities Pointer register."
  216. " (%s)\n", __func__);
  217. return -1;
  218. }
  219. } else {
  220. xen_shutdown_fatal_error("Internal error: Couldn't find XenPTRegGroup"
  221. " for Header. (%s)\n", __func__);
  222. return -1;
  223. }
  224. *data = reg_field;
  225. return 0;
  226. }
  227. static int xen_pt_header_type_reg_init(XenPCIPassthroughState *s,
  228. XenPTRegInfo *reg, uint32_t real_offset,
  229. uint32_t *data)
  230. {
  231. /* read PCI_HEADER_TYPE */
  232. *data = reg->init_val | 0x80;
  233. return 0;
  234. }
  235. /* initialize Interrupt Pin register */
  236. static int xen_pt_irqpin_reg_init(XenPCIPassthroughState *s,
  237. XenPTRegInfo *reg, uint32_t real_offset,
  238. uint32_t *data)
  239. {
  240. *data = xen_pt_pci_read_intx(s);
  241. return 0;
  242. }
  243. /* Command register */
  244. static int xen_pt_cmd_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
  245. uint16_t *value, uint16_t valid_mask)
  246. {
  247. XenPTRegInfo *reg = cfg_entry->reg;
  248. uint16_t valid_emu_mask = 0;
  249. uint16_t emu_mask = reg->emu_mask;
  250. if (s->is_virtfn) {
  251. emu_mask |= PCI_COMMAND_MEMORY;
  252. }
  253. /* emulate word register */
  254. valid_emu_mask = emu_mask & valid_mask;
  255. *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask);
  256. return 0;
  257. }
  258. static int xen_pt_cmd_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
  259. uint16_t *val, uint16_t dev_value,
  260. uint16_t valid_mask)
  261. {
  262. XenPTRegInfo *reg = cfg_entry->reg;
  263. uint16_t writable_mask = 0;
  264. uint16_t throughable_mask = 0;
  265. uint16_t emu_mask = reg->emu_mask;
  266. if (s->is_virtfn) {
  267. emu_mask |= PCI_COMMAND_MEMORY;
  268. }
  269. /* modify emulate register */
  270. writable_mask = ~reg->ro_mask & valid_mask;
  271. cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
  272. /* create value for writing to I/O device register */
  273. throughable_mask = ~emu_mask & valid_mask;
  274. if (*val & PCI_COMMAND_INTX_DISABLE) {
  275. throughable_mask |= PCI_COMMAND_INTX_DISABLE;
  276. } else {
  277. if (s->machine_irq) {
  278. throughable_mask |= PCI_COMMAND_INTX_DISABLE;
  279. }
  280. }
  281. *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
  282. return 0;
  283. }
  284. /* BAR */
  285. #define XEN_PT_BAR_MEM_RO_MASK 0x0000000F /* BAR ReadOnly mask(Memory) */
  286. #define XEN_PT_BAR_MEM_EMU_MASK 0xFFFFFFF0 /* BAR emul mask(Memory) */
  287. #define XEN_PT_BAR_IO_RO_MASK 0x00000003 /* BAR ReadOnly mask(I/O) */
  288. #define XEN_PT_BAR_IO_EMU_MASK 0xFFFFFFFC /* BAR emul mask(I/O) */
/* Classify a BAR register: upper half of a 64-bit memory BAR, unused,
 * I/O, or memory.  Called at init time from xen_pt_bar_reg_init().
 * NOTE(review): 'index' is assumed valid here — the caller validates the
 * offset before invoking us; a negative index would read out of bounds.
 */
static XenPTBarFlag xen_pt_bar_reg_parse(XenPCIPassthroughState *s,
                                         XenPTRegInfo *reg)
{
    PCIDevice *d = &s->dev;
    XenPTRegion *region = NULL;
    PCIIORegion *r;
    int index = 0;

    /* check 64bit BAR: if the PREVIOUS BAR is a 64-bit memory BAR and has
     * not itself already been marked as an upper half, this BAR is the
     * upper 32 bits of that 64-bit BAR.
     */
    index = xen_pt_bar_offset_to_index(reg->offset);
    if ((0 < index) && (index < PCI_ROM_SLOT)) {
        int type = s->real_device.io_regions[index - 1].type;

        if ((type & XEN_HOST_PCI_REGION_TYPE_MEM)
            && (type & XEN_HOST_PCI_REGION_TYPE_MEM_64)) {
            region = &s->bases[index - 1];
            if (region->bar_flag != XEN_PT_BAR_FLAG_UPPER) {
                return XEN_PT_BAR_FLAG_UPPER;
            }
        }
    }

    /* check unused BAR: a zero-sized region means the BAR is not
     * implemented by the device */
    r = &d->io_regions[index];
    if (r->size == 0) {
        return XEN_PT_BAR_FLAG_UNUSED;
    }

    /* for ExpROM BAR: always memory */
    if (index == PCI_ROM_SLOT) {
        return XEN_PT_BAR_FLAG_MEM;
    }

    /* check BAR I/O indicator */
    if (s->real_device.io_regions[index].type & XEN_HOST_PCI_REGION_TYPE_IO) {
        return XEN_PT_BAR_FLAG_IO;
    } else {
        return XEN_PT_BAR_FLAG_MEM;
    }
}
  324. static inline uint32_t base_address_with_flags(XenHostPCIIORegion *hr)
  325. {
  326. if (hr->type & XEN_HOST_PCI_REGION_TYPE_IO) {
  327. return hr->base_addr | (hr->bus_flags & ~PCI_BASE_ADDRESS_IO_MASK);
  328. } else {
  329. return hr->base_addr | (hr->bus_flags & ~PCI_BASE_ADDRESS_MEM_MASK);
  330. }
  331. }
  332. static int xen_pt_bar_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg,
  333. uint32_t real_offset, uint32_t *data)
  334. {
  335. uint32_t reg_field = 0;
  336. int index;
  337. index = xen_pt_bar_offset_to_index(reg->offset);
  338. if (index < 0 || index >= PCI_NUM_REGIONS) {
  339. XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index);
  340. return -1;
  341. }
  342. /* set BAR flag */
  343. s->bases[index].bar_flag = xen_pt_bar_reg_parse(s, reg);
  344. if (s->bases[index].bar_flag == XEN_PT_BAR_FLAG_UNUSED) {
  345. reg_field = XEN_PT_INVALID_REG;
  346. }
  347. *data = reg_field;
  348. return 0;
  349. }
  350. static int xen_pt_bar_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
  351. uint32_t *value, uint32_t valid_mask)
  352. {
  353. XenPTRegInfo *reg = cfg_entry->reg;
  354. uint32_t valid_emu_mask = 0;
  355. uint32_t bar_emu_mask = 0;
  356. int index;
  357. /* get BAR index */
  358. index = xen_pt_bar_offset_to_index(reg->offset);
  359. if (index < 0 || index >= PCI_NUM_REGIONS) {
  360. XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index);
  361. return -1;
  362. }
  363. /* use fixed-up value from kernel sysfs */
  364. *value = base_address_with_flags(&s->real_device.io_regions[index]);
  365. /* set emulate mask depend on BAR flag */
  366. switch (s->bases[index].bar_flag) {
  367. case XEN_PT_BAR_FLAG_MEM:
  368. bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK;
  369. break;
  370. case XEN_PT_BAR_FLAG_IO:
  371. bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK;
  372. break;
  373. case XEN_PT_BAR_FLAG_UPPER:
  374. bar_emu_mask = XEN_PT_BAR_ALLF;
  375. break;
  376. default:
  377. break;
  378. }
  379. /* emulate BAR */
  380. valid_emu_mask = bar_emu_mask & valid_mask;
  381. *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask);
  382. return 0;
  383. }
/* Write handler for a BAR register.
 * The size-aligned low bits and type bits are read-only; writable emulated
 * bits update cfg_entry->data; non-emulated bits pass through to the real
 * device via *val.  For the upper half of a 64-bit BAR, a guest write of
 * anything but 0 or all-ones (the sizing probe pattern) is warned about
 * and not mapped.
 */
static int xen_pt_bar_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
                                uint32_t *val, uint32_t dev_value,
                                uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTRegion *base = NULL;
    PCIDevice *d = &s->dev;
    const PCIIORegion *r;
    uint32_t writable_mask = 0;
    uint32_t throughable_mask = 0;
    uint32_t bar_emu_mask = 0;
    uint32_t bar_ro_mask = 0;
    uint32_t r_size = 0;
    int index = 0;

    index = xen_pt_bar_offset_to_index(reg->offset);
    if (index < 0 || index >= PCI_NUM_REGIONS) {
        XEN_PT_ERR(d, "Internal error: Invalid BAR index [%d].\n", index);
        return -1;
    }

    r = &d->io_regions[index];
    base = &s->bases[index];
    /* round the region size up to the emulated granularity */
    r_size = xen_pt_get_emul_size(base->bar_flag, r->size);

    /* set emulate mask and read-only mask values depend on the BAR flag;
     * (r_size - 1) makes the low, size-aligned address bits read-only */
    switch (s->bases[index].bar_flag) {
    case XEN_PT_BAR_FLAG_MEM:
        bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK;
        bar_ro_mask = XEN_PT_BAR_MEM_RO_MASK | (r_size - 1);
        break;
    case XEN_PT_BAR_FLAG_IO:
        bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK;
        bar_ro_mask = XEN_PT_BAR_IO_RO_MASK | (r_size - 1);
        break;
    case XEN_PT_BAR_FLAG_UPPER:
        bar_emu_mask = XEN_PT_BAR_ALLF;
        bar_ro_mask = 0; /* all upper 32bit are R/W */
        break;
    default:
        break;
    }

    /* modify emulate register */
    writable_mask = bar_emu_mask & ~bar_ro_mask & valid_mask;
    cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);

    /* check whether we need to update the virtual region address or not */
    switch (s->bases[index].bar_flag) {
    case XEN_PT_BAR_FLAG_MEM:
        /* nothing to do */
        break;
    case XEN_PT_BAR_FLAG_IO:
        /* nothing to do */
        break;
    case XEN_PT_BAR_FLAG_UPPER:
        if (cfg_entry->data) {
            if (cfg_entry->data != (XEN_PT_BAR_ALLF & ~bar_ro_mask)) {
                /* not 0 and not the sizing probe: refuse to map */
                XEN_PT_WARN(d, "Guest attempt to set high MMIO Base Address. "
                            "Ignore mapping. "
                            "(offset: 0x%02x, high address: 0x%08x)\n",
                            reg->offset, cfg_entry->data);
            }
        }
        break;
    default:
        break;
    }

    /* create value for writing to I/O device register */
    throughable_mask = ~bar_emu_mask & valid_mask;
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    return 0;
}
  452. /* write Exp ROM BAR */
/* Write handler for the Expansion ROM BAR.
 * Like a memory BAR write, but PCI_ROM_ADDRESS_ENABLE stays writable
 * (it is masked out of the read-only mask) so the guest can toggle ROM
 * decoding; the size-aligned address bits remain read-only.
 */
static int xen_pt_exp_rom_bar_reg_write(XenPCIPassthroughState *s,
                                        XenPTReg *cfg_entry, uint32_t *val,
                                        uint32_t dev_value, uint32_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTRegion *base = NULL;
    PCIDevice *d = (PCIDevice *)&s->dev;
    uint32_t writable_mask = 0;
    uint32_t throughable_mask = 0;
    pcibus_t r_size = 0;
    uint32_t bar_emu_mask = 0;
    uint32_t bar_ro_mask = 0;

    r_size = d->io_regions[PCI_ROM_SLOT].size;
    base = &s->bases[PCI_ROM_SLOT];
    /* align memory type resource size */
    r_size = xen_pt_get_emul_size(base->bar_flag, r_size);

    /* set emulate mask and read-only mask; keep ROM enable bit writable */
    bar_emu_mask = reg->emu_mask;
    bar_ro_mask = (reg->ro_mask | (r_size - 1)) & ~PCI_ROM_ADDRESS_ENABLE;

    /* modify emulate register */
    writable_mask = ~bar_ro_mask & valid_mask;
    cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);

    /* create value for writing to I/O device register */
    throughable_mask = ~bar_emu_mask & valid_mask;
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    return 0;
}
  480. /* Header Type0 reg static information table */
static XenPTRegInfo xen_pt_emu_reg_header0[] = {
    /* Vendor ID reg: fully emulated, read-only; seeded from host device */
    {
        .offset     = PCI_VENDOR_ID,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xFFFF,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_vendor_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Device ID reg: fully emulated, read-only; seeded from host device */
    {
        .offset     = PCI_DEVICE_ID,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xFFFF,
        .emu_mask   = 0xFFFF,
        .init       = xen_pt_device_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Command reg: custom handlers for INTX disable / VF memory enable */
    {
        .offset     = PCI_COMMAND,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0xF880,
        .emu_mask   = 0x0740,
        .init       = xen_pt_common_reg_init,
        .u.w.read   = xen_pt_cmd_reg_read,
        .u.w.write  = xen_pt_cmd_reg_write,
    },
    /* Capabilities Pointer reg: emulated pointer into the virtual cap list */
    {
        .offset     = PCI_CAPABILITY_LIST,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Status reg */
    /* use emulated Cap Ptr value to initialize,
     * so need to be declared after Cap Ptr reg
     */
    {
        .offset     = PCI_STATUS,
        .size       = 2,
        .init_val   = 0x0000,
        .ro_mask    = 0x06FF,
        .emu_mask   = 0x0010,
        .init       = xen_pt_status_reg_init,
        .u.w.read   = xen_pt_word_reg_read,
        .u.w.write  = xen_pt_word_reg_write,
    },
    /* Cache Line Size reg: fully emulated, guest-writable */
    {
        .offset     = PCI_CACHE_LINE_SIZE,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0x00,
        .emu_mask   = 0xFF,
        .init       = xen_pt_common_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Latency Timer reg: fully emulated, guest-writable */
    {
        .offset     = PCI_LATENCY_TIMER,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0x00,
        .emu_mask   = 0xFF,
        .init       = xen_pt_common_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Header Type reg: read-only; init forces the multifunction bit */
    {
        .offset     = PCI_HEADER_TYPE,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0x00,
        .init       = xen_pt_header_type_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Interrupt Line reg: fully emulated, guest-writable scratch byte */
    {
        .offset     = PCI_INTERRUPT_LINE,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0x00,
        .emu_mask   = 0xFF,
        .init       = xen_pt_common_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* Interrupt Pin reg: read-only; seeded from the host INTx pin */
    {
        .offset     = PCI_INTERRUPT_PIN,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_irqpin_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* BAR 0 reg */
    /* mask of BAR need to be decided later, depends on IO/MEM type */
    {
        .offset     = PCI_BASE_ADDRESS_0,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 1 reg */
    {
        .offset     = PCI_BASE_ADDRESS_1,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 2 reg */
    {
        .offset     = PCI_BASE_ADDRESS_2,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 3 reg */
    {
        .offset     = PCI_BASE_ADDRESS_3,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 4 reg */
    {
        .offset     = PCI_BASE_ADDRESS_4,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* BAR 5 reg */
    {
        .offset     = PCI_BASE_ADDRESS_5,
        .size       = 4,
        .init_val   = 0x00000000,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_bar_reg_read,
        .u.dw.write = xen_pt_bar_reg_write,
    },
    /* Expansion ROM BAR reg: dedicated write handler keeps the enable bit
     * writable while the address bits follow the ROM region size */
    {
        .offset     = PCI_ROM_ADDRESS,
        .size       = 4,
        .init_val   = 0x00000000,
        .ro_mask    = 0x000007FE,
        .emu_mask   = 0xFFFFF800,
        .init       = xen_pt_bar_reg_init,
        .u.dw.read  = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_exp_rom_bar_reg_write,
    },
    /* terminator: size 0 ends the table */
    {
        .size = 0,
    },
};
  665. /*********************************
  666. * Vital Product Data Capability
  667. */
  668. /* Vital Product Data Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_vpd[] = {
    /* Next Pointer reg: emulated so the virtual capability chain can be
     * rewritten independently of the host layout */
    {
        .offset     = PCI_CAP_LIST_NEXT,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* terminator: size 0 ends the table */
    {
        .size = 0,
    },
};
  684. /**************************************
  685. * Vendor Specific Capability
  686. */
  687. /* Vendor Specific Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_vendor[] = {
    /* Next Pointer reg: emulated so the virtual capability chain can be
     * rewritten independently of the host layout */
    {
        .offset     = PCI_CAP_LIST_NEXT,
        .size       = 1,
        .init_val   = 0x00,
        .ro_mask    = 0xFF,
        .emu_mask   = 0xFF,
        .init       = xen_pt_ptr_reg_init,
        .u.b.read   = xen_pt_byte_reg_read,
        .u.b.write  = xen_pt_byte_reg_write,
    },
    /* terminator: size 0 ends the table */
    {
        .size = 0,
    },
};
  703. /*****************************
  704. * PCI Express Capability
  705. */
  706. static inline uint8_t get_capability_version(XenPCIPassthroughState *s,
  707. uint32_t offset)
  708. {
  709. uint8_t flags = pci_get_byte(s->dev.config + offset + PCI_EXP_FLAGS);
  710. return flags & PCI_EXP_FLAGS_VERS;
  711. }
  712. static inline uint8_t get_device_type(XenPCIPassthroughState *s,
  713. uint32_t offset)
  714. {
  715. uint8_t flags = pci_get_byte(s->dev.config + offset + PCI_EXP_FLAGS);
  716. return (flags & PCI_EXP_FLAGS_TYPE) >> 4;
  717. }
  718. /* initialize Link Control register */
  719. static int xen_pt_linkctrl_reg_init(XenPCIPassthroughState *s,
  720. XenPTRegInfo *reg, uint32_t real_offset,
  721. uint32_t *data)
  722. {
  723. uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);
  724. uint8_t dev_type = get_device_type(s, real_offset - reg->offset);
  725. /* no need to initialize in case of Root Complex Integrated Endpoint
  726. * with cap_ver 1.x
  727. */
  728. if ((dev_type == PCI_EXP_TYPE_RC_END) && (cap_ver == 1)) {
  729. *data = XEN_PT_INVALID_REG;
  730. }
  731. *data = reg->init_val;
  732. return 0;
  733. }
  734. /* initialize Device Control 2 register */
  735. static int xen_pt_devctrl2_reg_init(XenPCIPassthroughState *s,
  736. XenPTRegInfo *reg, uint32_t real_offset,
  737. uint32_t *data)
  738. {
  739. uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);
  740. /* no need to initialize in case of cap_ver 1.x */
  741. if (cap_ver == 1) {
  742. *data = XEN_PT_INVALID_REG;
  743. }
  744. *data = reg->init_val;
  745. return 0;
  746. }
  747. /* initialize Link Control 2 register */
  748. static int xen_pt_linkctrl2_reg_init(XenPCIPassthroughState *s,
  749. XenPTRegInfo *reg, uint32_t real_offset,
  750. uint32_t *data)
  751. {
  752. uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);
  753. uint32_t reg_field = 0;
  754. /* no need to initialize in case of cap_ver 1.x */
  755. if (cap_ver == 1) {
  756. reg_field = XEN_PT_INVALID_REG;
  757. } else {
  758. /* set Supported Link Speed */
  759. uint8_t lnkcap = pci_get_byte(s->dev.config + real_offset - reg->offset
  760. + PCI_EXP_LNKCAP);
  761. reg_field |= PCI_EXP_LNKCAP_SLS & lnkcap;
  762. }
  763. *data = reg_field;
  764. return 0;
  765. }
  766. /* PCI Express Capability Structure reg static information table */
/* PCI Express Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_pcie[] = {
    /* Next Pointer reg */
    {
        .offset = PCI_CAP_LIST_NEXT,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0xFF,
        .emu_mask = 0xFF,
        .init = xen_pt_ptr_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* Device Capabilities reg */
    {
        .offset = PCI_EXP_DEVCAP,
        .size = 4,
        .init_val = 0x00000000,
        .ro_mask = 0x1FFCFFFF,
        .emu_mask = 0x10000000, /* bit 28 -- presumably FLR capable; confirm */
        .init = xen_pt_common_reg_init,
        .u.dw.read = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_long_reg_write,
    },
    /* Device Control reg */
    {
        .offset = PCI_EXP_DEVCTL,
        .size = 2,
        .init_val = 0x2810,
        .ro_mask = 0x8400,
        .emu_mask = 0xFFFF,
        .init = xen_pt_common_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* Link Control reg */
    {
        .offset = PCI_EXP_LNKCTL,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0xFC34,
        .emu_mask = 0xFFFF,
        .init = xen_pt_linkctrl_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* Device Control 2 reg (cap version 2 only; see
     * xen_pt_devctrl2_reg_init) */
    {
        .offset = 0x28,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0xFFE0,
        .emu_mask = 0xFFFF,
        .init = xen_pt_devctrl2_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* Link Control 2 reg (cap version 2 only; see
     * xen_pt_linkctrl2_reg_init) */
    {
        .offset = 0x30,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0xE040,
        .emu_mask = 0xFFFF,
        .init = xen_pt_linkctrl2_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* terminator */
    {
        .size = 0,
    },
};
  838. /*********************************
  839. * Power Management Capability
  840. */
  841. /* read Power Management Control/Status register */
  842. static int xen_pt_pmcsr_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
  843. uint16_t *value, uint16_t valid_mask)
  844. {
  845. XenPTRegInfo *reg = cfg_entry->reg;
  846. uint16_t valid_emu_mask = reg->emu_mask;
  847. valid_emu_mask |= PCI_PM_CTRL_STATE_MASK | PCI_PM_CTRL_NO_SOFT_RESET;
  848. valid_emu_mask = valid_emu_mask & valid_mask;
  849. *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask);
  850. return 0;
  851. }
  852. /* write Power Management Control/Status register */
  853. static int xen_pt_pmcsr_reg_write(XenPCIPassthroughState *s,
  854. XenPTReg *cfg_entry, uint16_t *val,
  855. uint16_t dev_value, uint16_t valid_mask)
  856. {
  857. XenPTRegInfo *reg = cfg_entry->reg;
  858. uint16_t emu_mask = reg->emu_mask;
  859. uint16_t writable_mask = 0;
  860. uint16_t throughable_mask = 0;
  861. emu_mask |= PCI_PM_CTRL_STATE_MASK | PCI_PM_CTRL_NO_SOFT_RESET;
  862. /* modify emulate register */
  863. writable_mask = emu_mask & ~reg->ro_mask & valid_mask;
  864. cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
  865. /* create value for writing to I/O device register */
  866. throughable_mask = ~emu_mask & valid_mask;
  867. *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
  868. return 0;
  869. }
  870. /* Power Management Capability reg static information table */
/* Power Management Capability reg static information table */
static XenPTRegInfo xen_pt_emu_reg_pm[] = {
    /* Next Pointer reg */
    {
        .offset = PCI_CAP_LIST_NEXT,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0xFF,
        .emu_mask = 0xFF,
        .init = xen_pt_ptr_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* Power Management Capabilities reg (entirely read-only) */
    {
        .offset = PCI_CAP_FLAGS,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0xFFFF,
        .emu_mask = 0xF9C8,
        .init = xen_pt_common_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_word_reg_write,
    },
    /* PCI Power Management Control/Status reg; the read/write handlers
     * additionally emulate the power state and No_Soft_Reset bits */
    {
        .offset = PCI_PM_CTRL,
        .size = 2,
        .init_val = 0x0008,
        .ro_mask = 0xE1FC,
        .emu_mask = 0x8100,
        .init = xen_pt_common_reg_init,
        .u.w.read = xen_pt_pmcsr_reg_read,
        .u.w.write = xen_pt_pmcsr_reg_write,
    },
    /* terminator */
    {
        .size = 0,
    },
};
  909. /********************************
  910. * MSI Capability
  911. */
  912. /* Helper */
  913. static bool xen_pt_msgdata_check_type(uint32_t offset, uint16_t flags)
  914. {
  915. /* check the offset whether matches the type or not */
  916. bool is_32 = (offset == PCI_MSI_DATA_32) && !(flags & PCI_MSI_FLAGS_64BIT);
  917. bool is_64 = (offset == PCI_MSI_DATA_64) && (flags & PCI_MSI_FLAGS_64BIT);
  918. return is_32 || is_64;
  919. }
  920. /* Message Control register */
/* Message Control register
 *
 * Seeds the per-device MSI state from the physical device: if the host
 * device was left with MSI enabled, disable it first, then cache the
 * capability flags and control offset for later use by the write path.
 */
static int xen_pt_msgctrl_reg_init(XenPCIPassthroughState *s,
                                   XenPTRegInfo *reg, uint32_t real_offset,
                                   uint32_t *data)
{
    PCIDevice *d = &s->dev;
    XenPTMSI *msi = s->msi;
    uint16_t reg_field = 0;

    /* use I/O device register's value as initial value */
    reg_field = pci_get_word(d->config + real_offset);

    if (reg_field & PCI_MSI_FLAGS_ENABLE) {
        XEN_PT_LOG(&s->dev, "MSI already enabled, disabling it first\n");
        xen_host_pci_set_word(&s->real_device, real_offset,
                              reg_field & ~PCI_MSI_FLAGS_ENABLE);
    }
    /* remember the device's capability bits (64-bit, per-vector mask, ...) */
    msi->flags |= reg_field;
    msi->ctrl_offset = real_offset;
    msi->initialized = false;
    msi->mapped = false;

    /* the guest-visible register starts from the table's init_val */
    *data = reg->init_val;
    return 0;
}
/* write Message Control register
 *
 * Updates the emulated register and, on an enable transition, lazily
 * performs the one-time physical MSI setup (pirq mapping and binding).
 * Setup failures are deliberately not propagated to the framework: the
 * enable bit is stripped instead so QEMU keeps running with guest MSI
 * non-functional.
 */
static int xen_pt_msgctrl_reg_write(XenPCIPassthroughState *s,
                                    XenPTReg *cfg_entry, uint16_t *val,
                                    uint16_t dev_value, uint16_t valid_mask)
{
    XenPTRegInfo *reg = cfg_entry->reg;
    XenPTMSI *msi = s->msi;
    uint16_t writable_mask = 0;
    uint16_t throughable_mask = 0;
    uint16_t raw_val;

    /* Currently no support for multi-vector */
    if (*val & PCI_MSI_FLAGS_QSIZE) {
        XEN_PT_WARN(&s->dev, "Tries to set more than 1 vector ctrl %x\n", *val);
    }

    /* modify emulate register */
    writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
    cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
    /* cache every flag except the enable bit, which is tracked below */
    msi->flags |= cfg_entry->data & ~PCI_MSI_FLAGS_ENABLE;

    /* create value for writing to I/O device register; keep the guest's
     * raw enable request around since *val is about to be merged */
    raw_val = *val;
    throughable_mask = ~reg->emu_mask & valid_mask;
    *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);

    /* update MSI */
    if (raw_val & PCI_MSI_FLAGS_ENABLE) {
        /* setup MSI pirq for the first time */
        if (!msi->initialized) {
            /* Init physical one */
            XEN_PT_LOG(&s->dev, "setup MSI\n");
            if (xen_pt_msi_setup(s)) {
                /* We do not broadcast the error to the framework code, so
                 * that MSI errors are contained in MSI emulation code and
                 * QEMU can go on running.
                 * Guest MSI would be actually not working.
                 */
                *val &= ~PCI_MSI_FLAGS_ENABLE;
                XEN_PT_WARN(&s->dev, "Can not map MSI.\n");
                return 0;
            }
            if (xen_pt_msi_update(s)) {
                *val &= ~PCI_MSI_FLAGS_ENABLE;
                XEN_PT_WARN(&s->dev, "Can not bind MSI\n");
                return 0;
            }
            msi->initialized = true;
            msi->mapped = true;
        }
        msi->flags |= PCI_MSI_FLAGS_ENABLE;
    } else {
        msi->flags &= ~PCI_MSI_FLAGS_ENABLE;
    }

    /* pass through MSI_ENABLE bit as the guest requested it */
    *val &= ~PCI_MSI_FLAGS_ENABLE;
    *val |= raw_val & PCI_MSI_FLAGS_ENABLE;

    return 0;
}
  996. /* initialize Message Upper Address register */
  997. static int xen_pt_msgaddr64_reg_init(XenPCIPassthroughState *s,
  998. XenPTRegInfo *reg, uint32_t real_offset,
  999. uint32_t *data)
  1000. {
  1001. /* no need to initialize in case of 32 bit type */
  1002. if (!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) {
  1003. *data = XEN_PT_INVALID_REG;
  1004. } else {
  1005. *data = reg->init_val;
  1006. }
  1007. return 0;
  1008. }
  1009. /* this function will be called twice (for 32 bit and 64 bit type) */
  1010. /* initialize Message Data register */
  1011. static int xen_pt_msgdata_reg_init(XenPCIPassthroughState *s,
  1012. XenPTRegInfo *reg, uint32_t real_offset,
  1013. uint32_t *data)
  1014. {
  1015. uint32_t flags = s->msi->flags;
  1016. uint32_t offset = reg->offset;
  1017. /* check the offset whether matches the type or not */
  1018. if (xen_pt_msgdata_check_type(offset, flags)) {
  1019. *data = reg->init_val;
  1020. } else {
  1021. *data = XEN_PT_INVALID_REG;
  1022. }
  1023. return 0;
  1024. }
  1025. /* write Message Address register */
  1026. static int xen_pt_msgaddr32_reg_write(XenPCIPassthroughState *s,
  1027. XenPTReg *cfg_entry, uint32_t *val,
  1028. uint32_t dev_value, uint32_t valid_mask)
  1029. {
  1030. XenPTRegInfo *reg = cfg_entry->reg;
  1031. uint32_t writable_mask = 0;
  1032. uint32_t throughable_mask = 0;
  1033. uint32_t old_addr = cfg_entry->data;
  1034. /* modify emulate register */
  1035. writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
  1036. cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
  1037. s->msi->addr_lo = cfg_entry->data;
  1038. /* create value for writing to I/O device register */
  1039. throughable_mask = ~reg->emu_mask & valid_mask;
  1040. *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
  1041. /* update MSI */
  1042. if (cfg_entry->data != old_addr) {
  1043. if (s->msi->mapped) {
  1044. xen_pt_msi_update(s);
  1045. }
  1046. }
  1047. return 0;
  1048. }
  1049. /* write Message Upper Address register */
  1050. static int xen_pt_msgaddr64_reg_write(XenPCIPassthroughState *s,
  1051. XenPTReg *cfg_entry, uint32_t *val,
  1052. uint32_t dev_value, uint32_t valid_mask)
  1053. {
  1054. XenPTRegInfo *reg = cfg_entry->reg;
  1055. uint32_t writable_mask = 0;
  1056. uint32_t throughable_mask = 0;
  1057. uint32_t old_addr = cfg_entry->data;
  1058. /* check whether the type is 64 bit or not */
  1059. if (!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) {
  1060. XEN_PT_ERR(&s->dev,
  1061. "Can't write to the upper address without 64 bit support\n");
  1062. return -1;
  1063. }
  1064. /* modify emulate register */
  1065. writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
  1066. cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
  1067. /* update the msi_info too */
  1068. s->msi->addr_hi = cfg_entry->data;
  1069. /* create value for writing to I/O device register */
  1070. throughable_mask = ~reg->emu_mask & valid_mask;
  1071. *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
  1072. /* update MSI */
  1073. if (cfg_entry->data != old_addr) {
  1074. if (s->msi->mapped) {
  1075. xen_pt_msi_update(s);
  1076. }
  1077. }
  1078. return 0;
  1079. }
  1080. /* this function will be called twice (for 32 bit and 64 bit type) */
  1081. /* write Message Data register */
  1082. static int xen_pt_msgdata_reg_write(XenPCIPassthroughState *s,
  1083. XenPTReg *cfg_entry, uint16_t *val,
  1084. uint16_t dev_value, uint16_t valid_mask)
  1085. {
  1086. XenPTRegInfo *reg = cfg_entry->reg;
  1087. XenPTMSI *msi = s->msi;
  1088. uint16_t writable_mask = 0;
  1089. uint16_t throughable_mask = 0;
  1090. uint16_t old_data = cfg_entry->data;
  1091. uint32_t offset = reg->offset;
  1092. /* check the offset whether matches the type or not */
  1093. if (!xen_pt_msgdata_check_type(offset, msi->flags)) {
  1094. /* exit I/O emulator */
  1095. XEN_PT_ERR(&s->dev, "the offset does not match the 32/64 bit type!\n");
  1096. return -1;
  1097. }
  1098. /* modify emulate register */
  1099. writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
  1100. cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
  1101. /* update the msi_info too */
  1102. msi->data = cfg_entry->data;
  1103. /* create value for writing to I/O device register */
  1104. throughable_mask = ~reg->emu_mask & valid_mask;
  1105. *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
  1106. /* update MSI */
  1107. if (cfg_entry->data != old_data) {
  1108. if (msi->mapped) {
  1109. xen_pt_msi_update(s);
  1110. }
  1111. }
  1112. return 0;
  1113. }
  1114. /* MSI Capability Structure reg static information table */
/* MSI Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_msi[] = {
    /* Next Pointer reg */
    {
        .offset = PCI_CAP_LIST_NEXT,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0xFF,
        .emu_mask = 0xFF,
        .init = xen_pt_ptr_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* Message Control reg */
    {
        .offset = PCI_MSI_FLAGS,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0xFF8E,
        .emu_mask = 0x007F,
        .init = xen_pt_msgctrl_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_msgctrl_reg_write,
    },
    /* Message Address reg; no_wb: the write handler forwards the value
     * itself, so the generic path must not write it back to hardware */
    {
        .offset = PCI_MSI_ADDRESS_LO,
        .size = 4,
        .init_val = 0x00000000,
        .ro_mask = 0x00000003,
        .emu_mask = 0xFFFFFFFF,
        .no_wb = 1,
        .init = xen_pt_common_reg_init,
        .u.dw.read = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_msgaddr32_reg_write,
    },
    /* Message Upper Address reg (if PCI_MSI_FLAGS_64BIT set) */
    {
        .offset = PCI_MSI_ADDRESS_HI,
        .size = 4,
        .init_val = 0x00000000,
        .ro_mask = 0x00000000,
        .emu_mask = 0xFFFFFFFF,
        .no_wb = 1,
        .init = xen_pt_msgaddr64_reg_init,
        .u.dw.read = xen_pt_long_reg_read,
        .u.dw.write = xen_pt_msgaddr64_reg_write,
    },
    /* Message Data reg (16 bits of data for 32-bit devices) */
    {
        .offset = PCI_MSI_DATA_32,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0x0000,
        .emu_mask = 0xFFFF,
        .no_wb = 1,
        .init = xen_pt_msgdata_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_msgdata_reg_write,
    },
    /* Message Data reg (16 bits of data for 64-bit devices) */
    {
        .offset = PCI_MSI_DATA_64,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0x0000,
        .emu_mask = 0xFFFF,
        .no_wb = 1,
        .init = xen_pt_msgdata_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_msgdata_reg_write,
    },
    /* terminator */
    {
        .size = 0,
    },
};
  1190. /**************************************
  1191. * MSI-X Capability
  1192. */
  1193. /* Message Control register for MSI-X */
  1194. static int xen_pt_msixctrl_reg_init(XenPCIPassthroughState *s,
  1195. XenPTRegInfo *reg, uint32_t real_offset,
  1196. uint32_t *data)
  1197. {
  1198. PCIDevice *d = &s->dev;
  1199. uint16_t reg_field = 0;
  1200. /* use I/O device register's value as initial value */
  1201. reg_field = pci_get_word(d->config + real_offset);
  1202. if (reg_field & PCI_MSIX_FLAGS_ENABLE) {
  1203. XEN_PT_LOG(d, "MSIX already enabled, disabling it first\n");
  1204. xen_host_pci_set_word(&s->real_device, real_offset,
  1205. reg_field & ~PCI_MSIX_FLAGS_ENABLE);
  1206. }
  1207. s->msix->ctrl_offset = real_offset;
  1208. *data = reg->init_val;
  1209. return 0;
  1210. }
  1211. static int xen_pt_msixctrl_reg_write(XenPCIPassthroughState *s,
  1212. XenPTReg *cfg_entry, uint16_t *val,
  1213. uint16_t dev_value, uint16_t valid_mask)
  1214. {
  1215. XenPTRegInfo *reg = cfg_entry->reg;
  1216. uint16_t writable_mask = 0;
  1217. uint16_t throughable_mask = 0;
  1218. int debug_msix_enabled_old;
  1219. /* modify emulate register */
  1220. writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
  1221. cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
  1222. /* create value for writing to I/O device register */
  1223. throughable_mask = ~reg->emu_mask & valid_mask;
  1224. *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
  1225. /* update MSI-X */
  1226. if ((*val & PCI_MSIX_FLAGS_ENABLE)
  1227. && !(*val & PCI_MSIX_FLAGS_MASKALL)) {
  1228. xen_pt_msix_update(s);
  1229. }
  1230. debug_msix_enabled_old = s->msix->enabled;
  1231. s->msix->enabled = !!(*val & PCI_MSIX_FLAGS_ENABLE);
  1232. if (s->msix->enabled != debug_msix_enabled_old) {
  1233. XEN_PT_LOG(&s->dev, "%s MSI-X\n",
  1234. s->msix->enabled ? "enable" : "disable");
  1235. }
  1236. return 0;
  1237. }
  1238. /* MSI-X Capability Structure reg static information table */
/* MSI-X Capability Structure reg static information table */
static XenPTRegInfo xen_pt_emu_reg_msix[] = {
    /* Next Pointer reg */
    {
        .offset = PCI_CAP_LIST_NEXT,
        .size = 1,
        .init_val = 0x00,
        .ro_mask = 0xFF,
        .emu_mask = 0xFF,
        .init = xen_pt_ptr_reg_init,
        .u.b.read = xen_pt_byte_reg_read,
        .u.b.write = xen_pt_byte_reg_write,
    },
    /* Message Control reg (PCI_MSI_FLAGS is reused here: the MSI-X
     * control word sits at the same offset within its capability) */
    {
        .offset = PCI_MSI_FLAGS,
        .size = 2,
        .init_val = 0x0000,
        .ro_mask = 0x3FFF,
        .emu_mask = 0x0000,
        .init = xen_pt_msixctrl_reg_init,
        .u.w.read = xen_pt_word_reg_read,
        .u.w.write = xen_pt_msixctrl_reg_write,
    },
    /* terminator */
    {
        .size = 0,
    },
};
  1266. /****************************
  1267. * Capabilities
  1268. */
  1269. /* capability structure register group size functions */
/* default group-size handler: the size is the fixed grp_size from the
 * static group table */
static int xen_pt_reg_grp_size_init(XenPCIPassthroughState *s,
                                    const XenPTRegGroupInfo *grp_reg,
                                    uint32_t base_offset, uint8_t *size)
{
    *size = grp_reg->grp_size;
    return 0;
}
  1277. /* get Vendor Specific Capability Structure register group size */
  1278. static int xen_pt_vendor_size_init(XenPCIPassthroughState *s,
  1279. const XenPTRegGroupInfo *grp_reg,
  1280. uint32_t base_offset, uint8_t *size)
  1281. {
  1282. *size = pci_get_byte(s->dev.config + base_offset + 0x02);
  1283. return 0;
  1284. }
  1285. /* get PCI Express Capability Structure register group size */
/* get PCI Express Capability Structure register group size
 *
 * The size depends on the capability version and the device/port type.
 * Only endpoint types are supported for passthrough; ports and bridges
 * are rejected with -1.
 */
static int xen_pt_pcie_size_init(XenPCIPassthroughState *s,
                                 const XenPTRegGroupInfo *grp_reg,
                                 uint32_t base_offset, uint8_t *size)
{
    PCIDevice *d = &s->dev;
    uint8_t version = get_capability_version(s, base_offset);
    uint8_t type = get_device_type(s, base_offset);
    uint8_t pcie_size = 0;

    /* calculate size depend on capability version and device/port type */
    /* in case of PCI Express Base Specification Rev 1.x */
    if (version == 1) {
        /* The PCI Express Capabilities, Device Capabilities, and Device
         * Status/Control registers are required for all PCI Express devices.
         * The Link Capabilities and Link Status/Control are required for all
         * Endpoints that are not Root Complex Integrated Endpoints. Endpoints
         * are not required to implement registers other than those listed
         * above and terminate the capability structure.
         */
        switch (type) {
        case PCI_EXP_TYPE_ENDPOINT:
        case PCI_EXP_TYPE_LEG_END:
            pcie_size = 0x14;
            break;
        case PCI_EXP_TYPE_RC_END:
            /* has no link */
            pcie_size = 0x0C;
            break;
        /* only EndPoint passthrough is supported */
        case PCI_EXP_TYPE_ROOT_PORT:
        case PCI_EXP_TYPE_UPSTREAM:
        case PCI_EXP_TYPE_DOWNSTREAM:
        case PCI_EXP_TYPE_PCI_BRIDGE:
        case PCI_EXP_TYPE_PCIE_BRIDGE:
        case PCI_EXP_TYPE_RC_EC:
        default:
            XEN_PT_ERR(d, "Unsupported device/port type %#x.\n", type);
            return -1;
        }
    }
    /* in case of PCI Express Base Specification Rev 2.0 */
    else if (version == 2) {
        switch (type) {
        case PCI_EXP_TYPE_ENDPOINT:
        case PCI_EXP_TYPE_LEG_END:
        case PCI_EXP_TYPE_RC_END:
            /* For Functions that do not implement the registers,
             * these spaces must be hardwired to 0b, so v2 capabilities
             * always span the full structure.
             */
            pcie_size = 0x3C;
            break;
        /* only EndPoint passthrough is supported */
        case PCI_EXP_TYPE_ROOT_PORT:
        case PCI_EXP_TYPE_UPSTREAM:
        case PCI_EXP_TYPE_DOWNSTREAM:
        case PCI_EXP_TYPE_PCI_BRIDGE:
        case PCI_EXP_TYPE_PCIE_BRIDGE:
        case PCI_EXP_TYPE_RC_EC:
        default:
            XEN_PT_ERR(d, "Unsupported device/port type %#x.\n", type);
            return -1;
        }
    } else {
        XEN_PT_ERR(d, "Unsupported capability version %#x.\n", version);
        return -1;
    }

    *size = pcie_size;
    return 0;
}
  1354. /* get MSI Capability Structure register group size */
/* get MSI Capability Structure register group size
 *
 * Also allocates the per-device MSI bookkeeping state (s->msi) as a
 * side effect.
 */
static int xen_pt_msi_size_init(XenPCIPassthroughState *s,
                                const XenPTRegGroupInfo *grp_reg,
                                uint32_t base_offset, uint8_t *size)
{
    PCIDevice *d = &s->dev;
    uint16_t msg_ctrl = 0;
    uint8_t msi_size = 0xa; /* base layout: control + 32-bit address + data */

    msg_ctrl = pci_get_word(d->config + (base_offset + PCI_MSI_FLAGS));

    /* 64-bit addressing adds the upper-address dword */
    if (msg_ctrl & PCI_MSI_FLAGS_64BIT) {
        msi_size += 4;
    }
    /* per-vector masking adds the mask/pending registers
     * (10 bytes here -- presumably includes realignment; confirm against
     * the MSI capability layout in the PCI spec) */
    if (msg_ctrl & PCI_MSI_FLAGS_MASKBIT) {
        msi_size += 10;
    }

    s->msi = g_new0(XenPTMSI, 1);
    s->msi->pirq = XEN_PT_UNASSIGNED_PIRQ;

    *size = msi_size;
    return 0;
}
  1375. /* get MSI-X Capability Structure register group size */
  1376. static int xen_pt_msix_size_init(XenPCIPassthroughState *s,
  1377. const XenPTRegGroupInfo *grp_reg,
  1378. uint32_t base_offset, uint8_t *size)
  1379. {
  1380. int rc = 0;
  1381. rc = xen_pt_msix_init(s, base_offset);
  1382. if (rc < 0) {
  1383. XEN_PT_ERR(&s->dev, "Internal error: Invalid xen_pt_msix_init.\n");
  1384. return rc;
  1385. }
  1386. *size = grp_reg->grp_size;
  1387. return 0;
  1388. }
/* Master table of register groups: the type-0 header plus one entry per
 * supported capability.  EMU groups carry a per-register table;
 * HARDWIRED groups are hidden from the guest (reads as zero).  A
 * grp_size of 0xFF means "determined at runtime by size_init". */
static const XenPTRegGroupInfo xen_pt_emu_reg_grps[] = {
    /* Header Type0 reg group: grp_id 0xFF is a sentinel meaning "no
     * capability ID, always present at offset 0" */
    {
        .grp_id = 0xFF,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0x40,
        .size_init = xen_pt_reg_grp_size_init,
        .emu_regs = xen_pt_emu_reg_header0,
    },
    /* PCI PowerManagement Capability reg group */
    {
        .grp_id = PCI_CAP_ID_PM,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = PCI_PM_SIZEOF,
        .size_init = xen_pt_reg_grp_size_init,
        .emu_regs = xen_pt_emu_reg_pm,
    },
    /* AGP Capability Structure reg group */
    {
        .grp_id = PCI_CAP_ID_AGP,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size = 0x30,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* Vital Product Data Capability Structure reg group */
    {
        .grp_id = PCI_CAP_ID_VPD,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0x08,
        .size_init = xen_pt_reg_grp_size_init,
        .emu_regs = xen_pt_emu_reg_vpd,
    },
    /* Slot Identification reg group */
    {
        .grp_id = PCI_CAP_ID_SLOTID,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size = 0x04,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* MSI Capability Structure reg group (size depends on device) */
    {
        .grp_id = PCI_CAP_ID_MSI,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0xFF,
        .size_init = xen_pt_msi_size_init,
        .emu_regs = xen_pt_emu_reg_msi,
    },
    /* PCI-X Capabilities List Item reg group */
    {
        .grp_id = PCI_CAP_ID_PCIX,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size = 0x18,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* Vendor Specific Capability Structure reg group (size read from
     * the capability's own length byte) */
    {
        .grp_id = PCI_CAP_ID_VNDR,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0xFF,
        .size_init = xen_pt_vendor_size_init,
        .emu_regs = xen_pt_emu_reg_vendor,
    },
    /* SHPC Capability List Item reg group */
    {
        .grp_id = PCI_CAP_ID_SHPC,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size = 0x08,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* Subsystem ID and Subsystem Vendor ID Capability List Item reg group */
    {
        .grp_id = PCI_CAP_ID_SSVID,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size = 0x08,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* AGP 8x Capability Structure reg group */
    {
        .grp_id = PCI_CAP_ID_AGP3,
        .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
        .grp_size = 0x30,
        .size_init = xen_pt_reg_grp_size_init,
    },
    /* PCI Express Capability Structure reg group (size depends on
     * capability version and device type) */
    {
        .grp_id = PCI_CAP_ID_EXP,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0xFF,
        .size_init = xen_pt_pcie_size_init,
        .emu_regs = xen_pt_emu_reg_pcie,
    },
    /* MSI-X Capability Structure reg group */
    {
        .grp_id = PCI_CAP_ID_MSIX,
        .grp_type = XEN_PT_GRP_TYPE_EMU,
        .grp_size = 0x0C,
        .size_init = xen_pt_msix_size_init,
        .emu_regs = xen_pt_emu_reg_msix,
    },
    /* terminator */
    {
        .grp_size = 0,
    },
};
  1492. /* initialize Capabilities Pointer or Next Pointer register */
/* initialize Capabilities Pointer or Next Pointer register
 *
 * Walks the physical device's capability chain starting at the pointer
 * stored at real_offset, and returns (in *data) the offset of the first
 * capability that is emulated and not hidden.  Hardwired groups and
 * hidden capabilities are skipped, so the guest-visible chain links
 * only emulated capabilities; 0 terminates the list.
 */
static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s,
                               XenPTRegInfo *reg, uint32_t real_offset,
                               uint32_t *data)
{
    int i;
    uint8_t *config = s->dev.config;
    uint32_t reg_field = pci_get_byte(config + real_offset);
    uint8_t cap_id = 0;

    /* find capability offset */
    while (reg_field) {
        for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) {
            if (xen_pt_hide_dev_cap(&s->real_device,
                                    xen_pt_emu_reg_grps[i].grp_id)) {
                /* this capability must stay hidden from the guest */
                continue;
            }

            cap_id = pci_get_byte(config + reg_field + PCI_CAP_LIST_ID);

            if (xen_pt_emu_reg_grps[i].grp_id == cap_id) {
                if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) {
                    /* emulated capability found: expose this offset */
                    goto out;
                }
                /* ignore the 0 hardwired capability, find next one */
                break;
            }
        }

        /* next capability */
        reg_field = pci_get_byte(config + reg_field + PCI_CAP_LIST_NEXT);
    }

out:
    *data = reg_field;
    return 0;
}
  1524. /*************
  1525. * Main
  1526. */
/* find the config-space offset of standard capability `cap` on the
 * physical device, or 0 when absent or unreadable.
 *
 * The walk is bounded by PCI_CAP_MAX iterations to guard against
 * malformed (looping) capability chains.
 */
static uint8_t find_cap_offset(XenPCIPassthroughState *s, uint8_t cap)
{
    uint8_t id;
    unsigned max_cap = PCI_CAP_MAX;
    uint8_t pos = PCI_CAPABILITY_LIST;
    uint8_t status = 0;

    if (xen_host_pci_get_byte(&s->real_device, PCI_STATUS, &status)) {
        return 0;
    }
    if ((status & PCI_STATUS_CAP_LIST) == 0) {
        /* device implements no capability list at all */
        return 0;
    }

    while (max_cap--) {
        /* `pos` holds the offset of a next-pointer byte; load the
         * pointer it contains */
        if (xen_host_pci_get_byte(&s->real_device, pos, &pos)) {
            break;
        }
        if (pos < PCI_CONFIG_HEADER_SIZE) {
            /* pointers into the standard header terminate the chain */
            break;
        }

        pos &= ~3; /* capability structures are dword-aligned */
        if (xen_host_pci_get_byte(&s->real_device,
                                  pos + PCI_CAP_LIST_ID, &id)) {
            break;
        }
        if (id == 0xff) {
            /* all-ones read: device not responding */
            break;
        }

        if (id == cap) {
            return pos;
        }

        /* advance to this capability's next-pointer byte */
        pos += PCI_CAP_LIST_NEXT;
    }
    return 0;
}
  1561. static int xen_pt_config_reg_init(XenPCIPassthroughState *s,
  1562. XenPTRegGroup *reg_grp, XenPTRegInfo *reg)
  1563. {
  1564. XenPTReg *reg_entry;
  1565. uint32_t data = 0;
  1566. int rc = 0;
  1567. reg_entry = g_new0(XenPTReg, 1);
  1568. reg_entry->reg = reg;
  1569. if (reg->init) {
  1570. /* initialize emulate register */
  1571. rc = reg->init(s, reg_entry->reg,
  1572. reg_grp->base_offset + reg->offset, &data);
  1573. if (rc < 0) {
  1574. free(reg_entry);
  1575. return rc;
  1576. }
  1577. if (data == XEN_PT_INVALID_REG) {
  1578. /* free unused BAR register entry */
  1579. free(reg_entry);
  1580. return 0;
  1581. }
  1582. /* set register value */
  1583. reg_entry->data = data;
  1584. }
  1585. /* list add register entry */
  1586. QLIST_INSERT_HEAD(&reg_grp->reg_tbl_list, reg_entry, entries);
  1587. return 0;
  1588. }
/* build the per-device list of emulated register groups and registers
 * by scanning xen_pt_emu_reg_grps against the physical device's
 * capabilities.
 *
 * Returns 0 on success; on any failure the partially-built state is
 * torn down via xen_pt_config_delete() and the negative error code is
 * returned.
 */
int xen_pt_config_init(XenPCIPassthroughState *s)
{
    int i, rc;

    QLIST_INIT(&s->reg_grps);

    for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) {
        uint32_t reg_grp_offset = 0;
        XenPTRegGroup *reg_grp_entry = NULL;

        /* grp_id 0xFF marks the type-0 header group: no capability ID,
         * always present at offset 0 */
        if (xen_pt_emu_reg_grps[i].grp_id != 0xFF) {
            if (xen_pt_hide_dev_cap(&s->real_device,
                                    xen_pt_emu_reg_grps[i].grp_id)) {
                continue;
            }

            reg_grp_offset = find_cap_offset(s, xen_pt_emu_reg_grps[i].grp_id);
            if (!reg_grp_offset) {
                /* the physical device lacks this capability */
                continue;
            }
        }

        reg_grp_entry = g_new0(XenPTRegGroup, 1);
        QLIST_INIT(&reg_grp_entry->reg_tbl_list);
        QLIST_INSERT_HEAD(&s->reg_grps, reg_grp_entry, entries);

        reg_grp_entry->base_offset = reg_grp_offset;
        reg_grp_entry->reg_grp = xen_pt_emu_reg_grps + i;
        if (xen_pt_emu_reg_grps[i].size_init) {
            /* get register group size */
            rc = xen_pt_emu_reg_grps[i].size_init(s, reg_grp_entry->reg_grp,
                                                  reg_grp_offset,
                                                  &reg_grp_entry->size);
            if (rc < 0) {
                xen_pt_config_delete(s);
                return rc;
            }
        }

        if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) {
            if (xen_pt_emu_reg_grps[i].emu_regs) {
                int j = 0;
                XenPTRegInfo *regs = xen_pt_emu_reg_grps[i].emu_regs;
                /* initialize every emulated register of this group */
                for (j = 0; regs->size != 0; j++, regs++) {
                    rc = xen_pt_config_reg_init(s, reg_grp_entry, regs);
                    if (rc < 0) {
                        xen_pt_config_delete(s);
                        return rc;
                    }
                }
            }
        }
    }

    return 0;
}
  1639. /* delete all emulate register */
  1640. void xen_pt_config_delete(XenPCIPassthroughState *s)
  1641. {
  1642. struct XenPTRegGroup *reg_group, *next_grp;
  1643. struct XenPTReg *reg, *next_reg;
  1644. /* free MSI/MSI-X info table */
  1645. if (s->msix) {
  1646. xen_pt_msix_delete(s);
  1647. }
  1648. if (s->msi) {
  1649. g_free(s->msi);
  1650. }
  1651. /* free all register group entry */
  1652. QLIST_FOREACH_SAFE(reg_group, &s->reg_grps, entries, next_grp) {
  1653. /* free all register entry */
  1654. QLIST_FOREACH_SAFE(reg, &reg_group->reg_tbl_list, entries, next_reg) {
  1655. QLIST_REMOVE(reg, entries);
  1656. g_free(reg);
  1657. }
  1658. QLIST_REMOVE(reg_group, entries);
  1659. g_free(reg_group);
  1660. }
  1661. }