amd_iommu.c

/*
 * QEMU emulation of AMD IOMMU (AMD-Vi)
 *
 * Copyright (C) 2011 Eduard - Gabriel Munteanu
 * Copyright (C) 2015, 2016 David Kiarie Kahurani
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Cache implementation inspired by hw/i386/intel_iommu.c
 */

#include "qemu/osdep.h"
#include "hw/i386/pc.h"
#include "hw/pci/msi.h"
#include "hw/pci/pci_bus.h"
#include "migration/vmstate.h"
#include "amd_iommu.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "hw/i386/apic_internal.h"
#include "trace.h"
#include "hw/i386/apic-msidef.h"

/* used AMD-Vi MMIO registers */
const char *amdvi_mmio_low[] = {
    "AMDVI_MMIO_DEVTAB_BASE",
    "AMDVI_MMIO_CMDBUF_BASE",
    "AMDVI_MMIO_EVTLOG_BASE",
    "AMDVI_MMIO_CONTROL",
    "AMDVI_MMIO_EXCL_BASE",
    "AMDVI_MMIO_EXCL_LIMIT",
    "AMDVI_MMIO_EXT_FEATURES",
    "AMDVI_MMIO_PPR_BASE",
    "UNHANDLED"
};

const char *amdvi_mmio_high[] = {
    "AMDVI_MMIO_COMMAND_HEAD",
    "AMDVI_MMIO_COMMAND_TAIL",
    "AMDVI_MMIO_EVTLOG_HEAD",
    "AMDVI_MMIO_EVTLOG_TAIL",
    "AMDVI_MMIO_STATUS",
    "AMDVI_MMIO_PPR_HEAD",
    "AMDVI_MMIO_PPR_TAIL",
    "UNHANDLED"
};

struct AMDVIAddressSpace {
    uint8_t bus_num;            /* bus number                           */
    uint8_t devfn;              /* device function                      */
    AMDVIState *iommu_state;    /* AMDVI - one per machine              */
    MemoryRegion root;          /* AMDVI Root memory map region         */
    IOMMUMemoryRegion iommu;    /* Device's address translation region  */
    MemoryRegion iommu_ir;      /* Device's interrupt remapping region  */
    AddressSpace as;            /* device's corresponding address space */
};

/* AMDVI cache entry */
typedef struct AMDVIIOTLBEntry {
    uint16_t domid;             /* assigned domain id  */
    uint16_t devid;             /* device owning entry */
    uint64_t perms;             /* access permissions  */
    uint64_t translated_addr;   /* translated address  */
    uint64_t page_mask;         /* physical page size  */
} AMDVIIOTLBEntry;
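
/*
 * The IOTLB hash table is keyed by
 * (gpa >> AMDVI_PAGE_SHIFT_4K) | ((uint64_t)devid << AMDVI_DEVID_SHIFT),
 * i.e. one entry per 4K guest frame per requester ID; see
 * amdvi_iotlb_lookup() and amdvi_update_iotlb() below.
 */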

/* configure MMIO registers at startup/reset */
static void amdvi_set_quad(AMDVIState *s, hwaddr addr, uint64_t val,
                           uint64_t romask, uint64_t w1cmask)
{
    stq_le_p(&s->mmior[addr], val);
    stq_le_p(&s->romask[addr], romask);
    stq_le_p(&s->w1cmask[addr], w1cmask);
}

static uint16_t amdvi_readw(AMDVIState *s, hwaddr addr)
{
    return lduw_le_p(&s->mmior[addr]);
}

static uint32_t amdvi_readl(AMDVIState *s, hwaddr addr)
{
    return ldl_le_p(&s->mmior[addr]);
}

static uint64_t amdvi_readq(AMDVIState *s, hwaddr addr)
{
    return ldq_le_p(&s->mmior[addr]);
}

/* internal write */
static void amdvi_writeq_raw(AMDVIState *s, uint64_t val, hwaddr addr)
{
    stq_le_p(&s->mmior[addr], val);
}

/* external write */
static void amdvi_writew(AMDVIState *s, hwaddr addr, uint16_t val)
{
    uint16_t romask = lduw_le_p(&s->romask[addr]);
    uint16_t w1cmask = lduw_le_p(&s->w1cmask[addr]);
    uint16_t oldval = lduw_le_p(&s->mmior[addr]);
    stw_le_p(&s->mmior[addr],
            ((oldval & romask) | (val & ~romask)) & ~(val & w1cmask));
}

static void amdvi_writel(AMDVIState *s, hwaddr addr, uint32_t val)
{
    uint32_t romask = ldl_le_p(&s->romask[addr]);
    uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]);
    uint32_t oldval = ldl_le_p(&s->mmior[addr]);
    stl_le_p(&s->mmior[addr],
            ((oldval & romask) | (val & ~romask)) & ~(val & w1cmask));
}

static void amdvi_writeq(AMDVIState *s, hwaddr addr, uint64_t val)
{
    uint64_t romask = ldq_le_p(&s->romask[addr]);
    uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]);
    uint64_t oldval = ldq_le_p(&s->mmior[addr]);
    stq_le_p(&s->mmior[addr],
            ((oldval & romask) | (val & ~romask)) & ~(val & w1cmask));
}
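
/*
 * In the three masked writers above, bits set in romask keep their current
 * (read-only) value, bits clear in romask take the guest-written value, and
 * writing 1 to a bit set in w1cmask clears that bit (write-1-to-clear),
 * matching the usual AMD-Vi register semantics.
 */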

/* test whether the given mask bits are set in a 64-bit register */
static bool amdvi_test_mask(AMDVIState *s, hwaddr addr, uint64_t val)
{
    return amdvi_readq(s, addr) & val;
}

/* OR a 64-bit register with a 64-bit value storing result in the register */
static void amdvi_assign_orq(AMDVIState *s, hwaddr addr, uint64_t val)
{
    amdvi_writeq_raw(s, amdvi_readq(s, addr) | val, addr);
}

/* AND a 64-bit register with a 64-bit value storing result in the register */
static void amdvi_assign_andq(AMDVIState *s, hwaddr addr, uint64_t val)
{
    amdvi_writeq_raw(s, amdvi_readq(s, addr) & val, addr);
}

static void amdvi_generate_msi_interrupt(AMDVIState *s)
{
    MSIMessage msg = {};
    MemTxAttrs attrs = {
        .requester_id = pci_requester_id(&s->pci.dev)
    };

    if (msi_enabled(&s->pci.dev)) {
        msg = msi_get_message(&s->pci.dev, 0);
        address_space_stl_le(&address_space_memory, msg.address, msg.data,
                             attrs, NULL);
    }
}
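
/*
 * The MSI is raised by storing the 32-bit message data to the message
 * address in the system memory address space, tagged with the IOMMU's own
 * requester ID; the APIC MMIO region mapped at that address then delivers
 * it as an interrupt.
 */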

static void amdvi_log_event(AMDVIState *s, uint64_t *evt)
{
    /* event logging not enabled */
    if (!s->evtlog_enabled || amdvi_test_mask(s, AMDVI_MMIO_STATUS,
        AMDVI_MMIO_STATUS_EVT_OVF)) {
        return;
    }

    /* event log buffer full */
    if (s->evtlog_tail >= s->evtlog_len) {
        amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_EVT_OVF);
        /* generate interrupt */
        amdvi_generate_msi_interrupt(s);
        return;
    }

    if (dma_memory_write(&address_space_memory, s->evtlog + s->evtlog_tail,
                         evt, AMDVI_EVENT_LEN)) {
        trace_amdvi_evntlog_fail(s->evtlog, s->evtlog_tail);
    }

    s->evtlog_tail += AMDVI_EVENT_LEN;
    amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_COMP_INT);
    amdvi_generate_msi_interrupt(s);
}

static void amdvi_setevent_bits(uint64_t *buffer, uint64_t value, int start,
                                int length)
{
    int index = start / 64, bitpos = start % 64;
    uint64_t mask = MAKE_64BIT_MASK(start, length);
    buffer[index] &= ~mask;
    buffer[index] |= (value << bitpos) & mask;
}

/*
 * AMDVi event structure
 *    0:15   -> DeviceID
 *    55:63  -> event type + miscellaneous info
 *    63:127 -> related address
 */
static void amdvi_encode_event(uint64_t *evt, uint16_t devid, uint64_t addr,
                               uint16_t info)
{
    amdvi_setevent_bits(evt, devid, 0, 16);
    amdvi_setevent_bits(evt, info, 55, 8);
    amdvi_setevent_bits(evt, addr, 63, 64);
}

/* log an error encountered during a page walk
 *
 * @addr: virtual address in translation request
 */
static void amdvi_page_fault(AMDVIState *s, uint16_t devid,
                             hwaddr addr, uint16_t info)
{
    uint64_t evt[4];

    info |= AMDVI_EVENT_IOPF_I | AMDVI_EVENT_IOPF;
    amdvi_encode_event(evt, devid, addr, info);
    amdvi_log_event(s, evt);
    pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS,
                               PCI_STATUS_SIG_TARGET_ABORT);
}

/*
 * log a master abort accessing device table
 *  @devtab : address of device table entry
 *  @info : error flags
 */
static void amdvi_log_devtab_error(AMDVIState *s, uint16_t devid,
                                   hwaddr devtab, uint16_t info)
{
    uint64_t evt[4];

    info |= AMDVI_EVENT_DEV_TAB_HW_ERROR;
    amdvi_encode_event(evt, devid, devtab, info);
    amdvi_log_event(s, evt);
    pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS,
                               PCI_STATUS_SIG_TARGET_ABORT);
}

/* log an event trying to access command buffer
 *   @addr : address that couldn't be accessed
 */
static void amdvi_log_command_error(AMDVIState *s, hwaddr addr)
{
    uint64_t evt[4], info = AMDVI_EVENT_COMMAND_HW_ERROR;

    amdvi_encode_event(evt, 0, addr, info);
    amdvi_log_event(s, evt);
    pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS,
                               PCI_STATUS_SIG_TARGET_ABORT);
}

/* log an illegal command event
 *   @addr : address of illegal command
 */
static void amdvi_log_illegalcom_error(AMDVIState *s, uint16_t info,
                                       hwaddr addr)
{
    uint64_t evt[4];

    info |= AMDVI_EVENT_ILLEGAL_COMMAND_ERROR;
    amdvi_encode_event(evt, 0, addr, info);
    amdvi_log_event(s, evt);
}

/* log an error accessing device table
 *
 *  @devid : device owning the table entry
 *  @devtab : address of device table entry
 *  @info : error flags
 */
static void amdvi_log_illegaldevtab_error(AMDVIState *s, uint16_t devid,
                                          hwaddr addr, uint16_t info)
{
    uint64_t evt[4];

    info |= AMDVI_EVENT_ILLEGAL_DEVTAB_ENTRY;
    amdvi_encode_event(evt, devid, addr, info);
    amdvi_log_event(s, evt);
}

/* log an error accessing a PTE entry
 *   @addr : address that couldn't be accessed
 */
static void amdvi_log_pagetab_error(AMDVIState *s, uint16_t devid,
                                    hwaddr addr, uint16_t info)
{
    uint64_t evt[4];

    info |= AMDVI_EVENT_PAGE_TAB_HW_ERROR;
    amdvi_encode_event(evt, devid, addr, info);
    amdvi_log_event(s, evt);
    pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS,
                               PCI_STATUS_SIG_TARGET_ABORT);
}

static gboolean amdvi_uint64_equal(gconstpointer v1, gconstpointer v2)
{
    return *((const uint64_t *)v1) == *((const uint64_t *)v2);
}

static guint amdvi_uint64_hash(gconstpointer v)
{
    return (guint)*(const uint64_t *)v;
}

static AMDVIIOTLBEntry *amdvi_iotlb_lookup(AMDVIState *s, hwaddr addr,
                                           uint64_t devid)
{
    uint64_t key = (addr >> AMDVI_PAGE_SHIFT_4K) |
                   ((uint64_t)(devid) << AMDVI_DEVID_SHIFT);
    return g_hash_table_lookup(s->iotlb, &key);
}

static void amdvi_iotlb_reset(AMDVIState *s)
{
    assert(s->iotlb);
    trace_amdvi_iotlb_reset();
    g_hash_table_remove_all(s->iotlb);
}

static gboolean amdvi_iotlb_remove_by_devid(gpointer key, gpointer value,
                                            gpointer user_data)
{
    AMDVIIOTLBEntry *entry = (AMDVIIOTLBEntry *)value;
    uint16_t devid = *(uint16_t *)user_data;
    return entry->devid == devid;
}

static void amdvi_iotlb_remove_page(AMDVIState *s, hwaddr addr,
                                    uint64_t devid)
{
    uint64_t key = (addr >> AMDVI_PAGE_SHIFT_4K) |
                   ((uint64_t)(devid) << AMDVI_DEVID_SHIFT);
    g_hash_table_remove(s->iotlb, &key);
}

static void amdvi_update_iotlb(AMDVIState *s, uint16_t devid,
                               uint64_t gpa, IOMMUTLBEntry to_cache,
                               uint16_t domid)
{
    AMDVIIOTLBEntry *entry = g_new(AMDVIIOTLBEntry, 1);
    uint64_t *key = g_new(uint64_t, 1);
    uint64_t gfn = gpa >> AMDVI_PAGE_SHIFT_4K;

    /* don't cache erroneous translations */
    if (to_cache.perm != IOMMU_NONE) {
        trace_amdvi_cache_update(domid, PCI_BUS_NUM(devid), PCI_SLOT(devid),
                                 PCI_FUNC(devid), gpa, to_cache.translated_addr);

        if (g_hash_table_size(s->iotlb) >= AMDVI_IOTLB_MAX_SIZE) {
            amdvi_iotlb_reset(s);
        }

        entry->domid = domid;
        entry->perms = to_cache.perm;
        entry->translated_addr = to_cache.translated_addr;
        entry->page_mask = to_cache.addr_mask;
        *key = gfn | ((uint64_t)(devid) << AMDVI_DEVID_SHIFT);
        g_hash_table_replace(s->iotlb, key, entry);
    }
}

static void amdvi_completion_wait(AMDVIState *s, uint64_t *cmd)
{
    /* pad the last 3 bits */
    hwaddr addr = cpu_to_le64(extract64(cmd[0], 3, 49)) << 3;
    uint64_t data = cpu_to_le64(cmd[1]);

    if (extract64(cmd[0], 51, 8)) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }
    if (extract64(cmd[0], 0, 1)) {
        if (dma_memory_write(&address_space_memory, addr, &data,
            AMDVI_COMPLETION_DATA_SIZE)) {
            trace_amdvi_completion_wait_fail(addr);
        }
    }
    /* set completion interrupt */
    if (extract64(cmd[0], 1, 1)) {
        amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_COMP_INT);
        /* generate interrupt */
        amdvi_generate_msi_interrupt(s);
    }
    trace_amdvi_completion_wait(addr, data);
}
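
/*
 * COMPLETION_WAIT: if the S bit (bit 0) is set, the 64-bit completion data
 * from the second quadword is stored to the 8-byte-aligned completion store
 * address; if the I bit (bit 1) is set, the completion interrupt status bit
 * is raised and an MSI is generated.
 */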

/* log error without aborting since linux seems to be using reserved bits */
static void amdvi_inval_devtab_entry(AMDVIState *s, uint64_t *cmd)
{
    uint16_t devid = cpu_to_le16((uint16_t)extract64(cmd[0], 0, 16));

    /* this command should invalidate internal caches, of which there are none */
    if (extract64(cmd[0], 15, 16) || cmd[1]) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }
    trace_amdvi_devtab_inval(PCI_BUS_NUM(devid), PCI_SLOT(devid),
                             PCI_FUNC(devid));
}

static void amdvi_complete_ppr(AMDVIState *s, uint64_t *cmd)
{
    if (extract64(cmd[0], 15, 16) || extract64(cmd[0], 19, 8) ||
        extract64(cmd[1], 0, 2) || extract64(cmd[1], 3, 29)
        || extract64(cmd[1], 47, 16)) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }
    trace_amdvi_ppr_exec();
}

static void amdvi_inval_all(AMDVIState *s, uint64_t *cmd)
{
    if (extract64(cmd[0], 0, 60) || cmd[1]) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }

    amdvi_iotlb_reset(s);
    trace_amdvi_all_inval();
}

static gboolean amdvi_iotlb_remove_by_domid(gpointer key, gpointer value,
                                            gpointer user_data)
{
    AMDVIIOTLBEntry *entry = (AMDVIIOTLBEntry *)value;
    uint16_t domid = *(uint16_t *)user_data;
    return entry->domid == domid;
}

/* we don't have devid - we can't remove pages by address */
static void amdvi_inval_pages(AMDVIState *s, uint64_t *cmd)
{
    uint16_t domid = cpu_to_le16((uint16_t)extract64(cmd[0], 32, 16));

    if (extract64(cmd[0], 20, 12) || extract64(cmd[0], 16, 12) ||
        extract64(cmd[0], 3, 10)) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }

    g_hash_table_foreach_remove(s->iotlb, amdvi_iotlb_remove_by_domid,
                                &domid);
    trace_amdvi_pages_inval(domid);
}

static void amdvi_prefetch_pages(AMDVIState *s, uint64_t *cmd)
{
    if (extract64(cmd[0], 16, 8) || extract64(cmd[0], 20, 8) ||
        extract64(cmd[1], 1, 1) || extract64(cmd[1], 3, 1) ||
        extract64(cmd[1], 5, 7)) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }

    trace_amdvi_prefetch_pages();
}

static void amdvi_inval_inttable(AMDVIState *s, uint64_t *cmd)
{
    if (extract64(cmd[0], 16, 16) || cmd[1]) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
        return;
    }

    trace_amdvi_intr_inval();
}

/* FIXME: Try to work with the specified size instead of all the pages
 * when the S bit is on
 */
static void iommu_inval_iotlb(AMDVIState *s, uint64_t *cmd)
{
    uint16_t devid = extract64(cmd[0], 0, 16);

    if (extract64(cmd[1], 1, 1) || extract64(cmd[1], 3, 9)) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
        return;
    }

    if (extract64(cmd[1], 0, 1)) {
        g_hash_table_foreach_remove(s->iotlb, amdvi_iotlb_remove_by_devid,
                                    &devid);
    } else {
        amdvi_iotlb_remove_page(s, cpu_to_le64(extract64(cmd[1], 12, 52)) << 12,
                                cpu_to_le16(extract64(cmd[1], 0, 16)));
    }
    trace_amdvi_iotlb_inval();
}

/* not honouring reserved bits is regarded as an illegal command */
static void amdvi_cmdbuf_exec(AMDVIState *s)
{
    uint64_t cmd[2];

    if (dma_memory_read(&address_space_memory, s->cmdbuf + s->cmdbuf_head,
                        cmd, AMDVI_COMMAND_SIZE)) {
        trace_amdvi_command_read_fail(s->cmdbuf, s->cmdbuf_head);
        amdvi_log_command_error(s, s->cmdbuf + s->cmdbuf_head);
        return;
    }

    switch (extract64(cmd[0], 60, 4)) {
    case AMDVI_CMD_COMPLETION_WAIT:
        amdvi_completion_wait(s, cmd);
        break;
    case AMDVI_CMD_INVAL_DEVTAB_ENTRY:
        amdvi_inval_devtab_entry(s, cmd);
        break;
    case AMDVI_CMD_INVAL_AMDVI_PAGES:
        amdvi_inval_pages(s, cmd);
        break;
    case AMDVI_CMD_INVAL_IOTLB_PAGES:
        iommu_inval_iotlb(s, cmd);
        break;
    case AMDVI_CMD_INVAL_INTR_TABLE:
        amdvi_inval_inttable(s, cmd);
        break;
    case AMDVI_CMD_PREFETCH_AMDVI_PAGES:
        amdvi_prefetch_pages(s, cmd);
        break;
    case AMDVI_CMD_COMPLETE_PPR_REQUEST:
        amdvi_complete_ppr(s, cmd);
        break;
    case AMDVI_CMD_INVAL_AMDVI_ALL:
        amdvi_inval_all(s, cmd);
        break;
    default:
        trace_amdvi_unhandled_command(extract64(cmd[1], 60, 4));
        /* log illegal command */
        amdvi_log_illegalcom_error(s, extract64(cmd[1], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }
}

static void amdvi_cmdbuf_run(AMDVIState *s)
{
    if (!s->cmdbuf_enabled) {
        trace_amdvi_command_error(amdvi_readq(s, AMDVI_MMIO_CONTROL));
        return;
    }

    /* check if there is work to do. */
    while (s->cmdbuf_head != s->cmdbuf_tail) {
        trace_amdvi_command_exec(s->cmdbuf_head, s->cmdbuf_tail, s->cmdbuf);
        amdvi_cmdbuf_exec(s);
        s->cmdbuf_head += AMDVI_COMMAND_SIZE;
        amdvi_writeq_raw(s, s->cmdbuf_head, AMDVI_MMIO_COMMAND_HEAD);

        /* wrap head pointer */
        if (s->cmdbuf_head >= s->cmdbuf_len * AMDVI_COMMAND_SIZE) {
            s->cmdbuf_head = 0;
        }
    }
}
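
/*
 * Commands are consumed as a ring: each entry is AMDVI_COMMAND_SIZE (16)
 * bytes, the head offset chases the guest-written tail, the mirrored MMIO
 * head register is updated after every command, and the head wraps once it
 * reaches cmdbuf_len entries.
 */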

static void amdvi_mmio_trace(hwaddr addr, unsigned size)
{
    uint8_t index = (addr & ~0x2000) / 8;

    if ((addr & 0x2000)) {
        /* high table */
        index = index >= AMDVI_MMIO_REGS_HIGH ? AMDVI_MMIO_REGS_HIGH : index;
        trace_amdvi_mmio_read(amdvi_mmio_high[index], addr, size, addr & ~0x07);
    } else {
        index = index >= AMDVI_MMIO_REGS_LOW ? AMDVI_MMIO_REGS_LOW : index;
        trace_amdvi_mmio_read(amdvi_mmio_low[index], addr, size, addr & ~0x07);
    }
}

static uint64_t amdvi_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    AMDVIState *s = opaque;
    uint64_t val = -1;

    if (addr + size > AMDVI_MMIO_SIZE) {
        trace_amdvi_mmio_read_invalid(AMDVI_MMIO_SIZE, addr, size);
        return (uint64_t)-1;
    }

    if (size == 2) {
        val = amdvi_readw(s, addr);
    } else if (size == 4) {
        val = amdvi_readl(s, addr);
    } else if (size == 8) {
        val = amdvi_readq(s, addr);
    }
    amdvi_mmio_trace(addr, size);

    return val;
}

static void amdvi_handle_control_write(AMDVIState *s)
{
    unsigned long control = amdvi_readq(s, AMDVI_MMIO_CONTROL);
    s->enabled = !!(control & AMDVI_MMIO_CONTROL_AMDVIEN);

    s->ats_enabled = !!(control & AMDVI_MMIO_CONTROL_HTTUNEN);
    s->evtlog_enabled = s->enabled && !!(control &
                        AMDVI_MMIO_CONTROL_EVENTLOGEN);

    s->evtlog_intr = !!(control & AMDVI_MMIO_CONTROL_EVENTINTEN);
    s->completion_wait_intr = !!(control & AMDVI_MMIO_CONTROL_COMWAITINTEN);
    s->cmdbuf_enabled = s->enabled && !!(control &
                        AMDVI_MMIO_CONTROL_CMDBUFLEN);
    s->ga_enabled = !!(control & AMDVI_MMIO_CONTROL_GAEN);

    /* update the flags depending on the control register */
    if (s->cmdbuf_enabled) {
        amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_CMDBUF_RUN);
    } else {
        amdvi_assign_andq(s, AMDVI_MMIO_STATUS, ~AMDVI_MMIO_STATUS_CMDBUF_RUN);
    }
    if (s->evtlog_enabled) {
        amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_EVT_RUN);
    } else {
        amdvi_assign_andq(s, AMDVI_MMIO_STATUS, ~AMDVI_MMIO_STATUS_EVT_RUN);
    }

    trace_amdvi_control_status(control);
    amdvi_cmdbuf_run(s);
}

static inline void amdvi_handle_devtab_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_DEVICE_TABLE);
    s->devtab = (val & AMDVI_MMIO_DEVTAB_BASE_MASK);

    /* set device table length */
    s->devtab_len = ((val & AMDVI_MMIO_DEVTAB_SIZE_MASK) + 1 *
                    (AMDVI_MMIO_DEVTAB_SIZE_UNIT /
                     AMDVI_MMIO_DEVTAB_ENTRY_SIZE));
}

static inline void amdvi_handle_cmdhead_write(AMDVIState *s)
{
    s->cmdbuf_head = amdvi_readq(s, AMDVI_MMIO_COMMAND_HEAD)
                     & AMDVI_MMIO_CMDBUF_HEAD_MASK;
    amdvi_cmdbuf_run(s);
}

static inline void amdvi_handle_cmdbase_write(AMDVIState *s)
{
    s->cmdbuf = amdvi_readq(s, AMDVI_MMIO_COMMAND_BASE)
                & AMDVI_MMIO_CMDBUF_BASE_MASK;
    s->cmdbuf_len = 1UL << (amdvi_readq(s, AMDVI_MMIO_CMDBUF_SIZE_BYTE)
                    & AMDVI_MMIO_CMDBUF_SIZE_MASK);
    s->cmdbuf_head = s->cmdbuf_tail = 0;
}

static inline void amdvi_handle_cmdtail_write(AMDVIState *s)
{
    s->cmdbuf_tail = amdvi_readq(s, AMDVI_MMIO_COMMAND_TAIL)
                     & AMDVI_MMIO_CMDBUF_TAIL_MASK;
    amdvi_cmdbuf_run(s);
}

static inline void amdvi_handle_excllim_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_EXCL_LIMIT);
    s->excl_limit = (val & AMDVI_MMIO_EXCL_LIMIT_MASK) |
                    AMDVI_MMIO_EXCL_LIMIT_LOW;
}

static inline void amdvi_handle_evtbase_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_EVENT_BASE);
    s->evtlog = val & AMDVI_MMIO_EVTLOG_BASE_MASK;
    s->evtlog_len = 1UL << (amdvi_readq(s, AMDVI_MMIO_EVTLOG_SIZE_BYTE)
                    & AMDVI_MMIO_EVTLOG_SIZE_MASK);
}

static inline void amdvi_handle_evttail_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_EVENT_TAIL);
    s->evtlog_tail = val & AMDVI_MMIO_EVTLOG_TAIL_MASK;
}

static inline void amdvi_handle_evthead_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_EVENT_HEAD);
    s->evtlog_head = val & AMDVI_MMIO_EVTLOG_HEAD_MASK;
}

static inline void amdvi_handle_pprbase_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_PPR_BASE);
    s->ppr_log = val & AMDVI_MMIO_PPRLOG_BASE_MASK;
    s->pprlog_len = 1UL << (amdvi_readq(s, AMDVI_MMIO_PPRLOG_SIZE_BYTE)
                    & AMDVI_MMIO_PPRLOG_SIZE_MASK);
}

static inline void amdvi_handle_pprhead_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_PPR_HEAD);
    s->pprlog_head = val & AMDVI_MMIO_PPRLOG_HEAD_MASK;
}

static inline void amdvi_handle_pprtail_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_PPR_TAIL);
    s->pprlog_tail = val & AMDVI_MMIO_PPRLOG_TAIL_MASK;
}

/* FIXME: something might go wrong if system software writes these registers
 * in chunks of one byte. Linux writes in chunks of 4 bytes, so this
 * currently works with Linux, but it will definitely be broken if software
 * reads/writes 8 bytes.
 */
static void amdvi_mmio_reg_write(AMDVIState *s, unsigned size, uint64_t val,
                                 hwaddr addr)
{
    if (size == 2) {
        amdvi_writew(s, addr, val);
    } else if (size == 4) {
        amdvi_writel(s, addr, val);
    } else if (size == 8) {
        amdvi_writeq(s, addr, val);
    }
}

static void amdvi_mmio_write(void *opaque, hwaddr addr, uint64_t val,
                             unsigned size)
{
    AMDVIState *s = opaque;
    unsigned long offset = addr & 0x07;

    if (addr + size > AMDVI_MMIO_SIZE) {
        trace_amdvi_mmio_write("error: addr outside region: max ",
                               (uint64_t)AMDVI_MMIO_SIZE, size, val, offset);
        return;
    }

    amdvi_mmio_trace(addr, size);
    switch (addr & ~0x07) {
    case AMDVI_MMIO_CONTROL:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_control_write(s);
        break;
    case AMDVI_MMIO_DEVICE_TABLE:
        amdvi_mmio_reg_write(s, size, val, addr);
        /* set device table address
         * This also suffers from inability to tell whether software
         * is done writing
         */
        if (offset || (size == 8)) {
            amdvi_handle_devtab_write(s);
        }
        break;
    case AMDVI_MMIO_COMMAND_HEAD:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_cmdhead_write(s);
        break;
    case AMDVI_MMIO_COMMAND_BASE:
        amdvi_mmio_reg_write(s, size, val, addr);
        /* FIXME - make sure system software has finished writing, in case
         * it writes in chunks of less than 8 bytes, in a robust way. For
         * now, this hack works for the Linux driver.
         */
        if (offset || (size == 8)) {
            amdvi_handle_cmdbase_write(s);
        }
        break;
    case AMDVI_MMIO_COMMAND_TAIL:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_cmdtail_write(s);
        break;
    case AMDVI_MMIO_EVENT_BASE:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_evtbase_write(s);
        break;
    case AMDVI_MMIO_EVENT_HEAD:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_evthead_write(s);
        break;
    case AMDVI_MMIO_EVENT_TAIL:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_evttail_write(s);
        break;
    case AMDVI_MMIO_EXCL_LIMIT:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_excllim_write(s);
        break;
    /* PPR log base - unused for now */
    case AMDVI_MMIO_PPR_BASE:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_pprbase_write(s);
        break;
    /* PPR log head - also unused for now */
    case AMDVI_MMIO_PPR_HEAD:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_pprhead_write(s);
        break;
    /* PPR log tail - unused for now */
    case AMDVI_MMIO_PPR_TAIL:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_pprtail_write(s);
        break;
    }
}

static inline uint64_t amdvi_get_perms(uint64_t entry)
{
    return (entry & (AMDVI_DEV_PERM_READ | AMDVI_DEV_PERM_WRITE)) >>
           AMDVI_DEV_PERM_SHIFT;
}

/* validate that reserved bits are honoured */
static bool amdvi_validate_dte(AMDVIState *s, uint16_t devid,
                               uint64_t *dte)
{
    if ((dte[0] & AMDVI_DTE_LOWER_QUAD_RESERVED)
        || (dte[1] & AMDVI_DTE_MIDDLE_QUAD_RESERVED)
        || (dte[2] & AMDVI_DTE_UPPER_QUAD_RESERVED) || dte[3]) {
        amdvi_log_illegaldevtab_error(s, devid,
                                      s->devtab +
                                      devid * AMDVI_DEVTAB_ENTRY_SIZE, 0);
        return false;
    }

    return true;
}

/* get a device table entry given the devid */
static bool amdvi_get_dte(AMDVIState *s, int devid, uint64_t *entry)
{
    uint32_t offset = devid * AMDVI_DEVTAB_ENTRY_SIZE;

    if (dma_memory_read(&address_space_memory, s->devtab + offset, entry,
        AMDVI_DEVTAB_ENTRY_SIZE)) {
        trace_amdvi_dte_get_fail(s->devtab, offset);
        /* log error accessing dte */
        amdvi_log_devtab_error(s, devid, s->devtab + offset, 0);
        return false;
    }

    *entry = le64_to_cpu(*entry);
    if (!amdvi_validate_dte(s, devid, entry)) {
        trace_amdvi_invalid_dte(entry[0]);
        return false;
    }

    return true;
}

/* get pte translation mode */
static inline uint8_t get_pte_translation_mode(uint64_t pte)
{
    return (pte >> AMDVI_DEV_MODE_RSHIFT) & AMDVI_DEV_MODE_MASK;
}

static inline uint64_t pte_override_page_mask(uint64_t pte)
{
    uint8_t page_mask = 12;
    uint64_t addr = (pte & AMDVI_DEV_PT_ROOT_MASK) ^ AMDVI_DEV_PT_ROOT_MASK;
    /* find the first zero bit */
    while (addr & 1) {
        page_mask++;
        addr = addr >> 1;
    }

    return ~((1ULL << page_mask) - 1);
}

static inline uint64_t pte_get_page_mask(uint64_t oldlevel)
{
    return ~((1UL << ((oldlevel * 9) + 3)) - 1);
}

static inline uint64_t amdvi_get_pte_entry(AMDVIState *s, uint64_t pte_addr,
                                           uint16_t devid)
{
    uint64_t pte;

    if (dma_memory_read(&address_space_memory, pte_addr, &pte, sizeof(pte))) {
        trace_amdvi_get_pte_hwerror(pte_addr);
        amdvi_log_pagetab_error(s, devid, pte_addr, 0);
        pte = 0;
        return pte;
    }

    pte = le64_to_cpu(pte);
    return pte;
}
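
/*
 * Walk the I/O page table for one IOVA. The DTE mode field gives the
 * starting level; each level consumes 9 IOVA bits (512 entries per table).
 * A next-level value of 7 in a PTE marks a page whose size is encoded in
 * the low bits of its address (see pte_override_page_mask()); otherwise the
 * page size is derived from the level at which the walk stopped.
 */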
static void amdvi_page_walk(AMDVIAddressSpace *as, uint64_t *dte,
                            IOMMUTLBEntry *ret, unsigned perms,
                            hwaddr addr)
{
    unsigned level, present, pte_perms, oldlevel;
    uint64_t pte = dte[0], pte_addr, page_mask;

    /* make sure the DTE has TV = 1 */
    if (pte & AMDVI_DEV_TRANSLATION_VALID) {
        level = get_pte_translation_mode(pte);
        if (level >= 7) {
            trace_amdvi_mode_invalid(level, addr);
            return;
        }
        if (level == 0) {
            goto no_remap;
        }

        /* we are at the leaf page table or page table encodes a huge page */
        while (level > 0) {
            pte_perms = amdvi_get_perms(pte);
            present = pte & 1;
            if (!present || perms != (perms & pte_perms)) {
                amdvi_page_fault(as->iommu_state, as->devfn, addr, perms);
                trace_amdvi_page_fault(addr);
                return;
            }

            /* go to the next lower level */
            pte_addr = pte & AMDVI_DEV_PT_ROOT_MASK;
            /* add offset and load pte */
            pte_addr += ((addr >> (3 + 9 * level)) & 0x1FF) << 3;
            pte = amdvi_get_pte_entry(as->iommu_state, pte_addr, as->devfn);
            if (!pte) {
                return;
            }
            oldlevel = level;
            level = get_pte_translation_mode(pte);

            if (level == 0x7) {
                break;
            }
        }

        if (level == 0x7) {
            page_mask = pte_override_page_mask(pte);
        } else {
            page_mask = pte_get_page_mask(oldlevel);
        }

        /* get access permissions from pte */
        ret->iova = addr & page_mask;
        ret->translated_addr = (pte & AMDVI_DEV_PT_ROOT_MASK) & page_mask;
        ret->addr_mask = ~page_mask;
        ret->perm = amdvi_get_perms(pte);
        return;
    }
no_remap:
    ret->iova = addr & AMDVI_PAGE_MASK_4K;
    ret->translated_addr = addr & AMDVI_PAGE_MASK_4K;
    ret->addr_mask = ~AMDVI_PAGE_MASK_4K;
    ret->perm = amdvi_get_perms(pte);
}

static void amdvi_do_translate(AMDVIAddressSpace *as, hwaddr addr,
                               bool is_write, IOMMUTLBEntry *ret)
{
    AMDVIState *s = as->iommu_state;
    uint16_t devid = PCI_BUILD_BDF(as->bus_num, as->devfn);
    AMDVIIOTLBEntry *iotlb_entry = amdvi_iotlb_lookup(s, addr, devid);
    uint64_t entry[4];

    if (iotlb_entry) {
        trace_amdvi_iotlb_hit(PCI_BUS_NUM(devid), PCI_SLOT(devid),
                              PCI_FUNC(devid), addr, iotlb_entry->translated_addr);
        ret->iova = addr & ~iotlb_entry->page_mask;
        ret->translated_addr = iotlb_entry->translated_addr;
        ret->addr_mask = iotlb_entry->page_mask;
        ret->perm = iotlb_entry->perms;
        return;
    }

    if (!amdvi_get_dte(s, devid, entry)) {
        return;
    }

    /* devices with V = 0 are not translated */
    if (!(entry[0] & AMDVI_DEV_VALID)) {
        goto out;
    }

    amdvi_page_walk(as, entry, ret,
                    is_write ? AMDVI_PERM_WRITE : AMDVI_PERM_READ, addr);

    amdvi_update_iotlb(s, devid, addr, *ret,
                       entry[1] & AMDVI_DEV_DOMID_ID_MASK);
    return;

out:
    ret->iova = addr & AMDVI_PAGE_MASK_4K;
    ret->translated_addr = addr & AMDVI_PAGE_MASK_4K;
    ret->addr_mask = ~AMDVI_PAGE_MASK_4K;
    ret->perm = IOMMU_RW;
}

static inline bool amdvi_is_interrupt_addr(hwaddr addr)
{
    return addr >= AMDVI_INT_ADDR_FIRST && addr <= AMDVI_INT_ADDR_LAST;
}
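
/*
 * Addresses in the 0xFEE00000 MSI window (AMDVI_INT_ADDR_FIRST ..
 * AMDVI_INT_ADDR_LAST) are not DMA-translated; amdvi_translate() below maps
 * them write-only so device writes there are treated as interrupt messages.
 */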

static IOMMUTLBEntry amdvi_translate(IOMMUMemoryRegion *iommu, hwaddr addr,
                                     IOMMUAccessFlags flag, int iommu_idx)
{
    AMDVIAddressSpace *as = container_of(iommu, AMDVIAddressSpace, iommu);
    AMDVIState *s = as->iommu_state;
    IOMMUTLBEntry ret = {
        .target_as = &address_space_memory,
        .iova = addr,
        .translated_addr = 0,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE
    };

    if (!s->enabled) {
        /* AMDVI disabled - corresponds to iommu=off not
         * failure to provide any parameter
         */
        ret.iova = addr & AMDVI_PAGE_MASK_4K;
        ret.translated_addr = addr & AMDVI_PAGE_MASK_4K;
        ret.addr_mask = ~AMDVI_PAGE_MASK_4K;
        ret.perm = IOMMU_RW;
        return ret;
    } else if (amdvi_is_interrupt_addr(addr)) {
        ret.iova = addr & AMDVI_PAGE_MASK_4K;
        ret.translated_addr = addr & AMDVI_PAGE_MASK_4K;
        ret.addr_mask = ~AMDVI_PAGE_MASK_4K;
        ret.perm = IOMMU_WO;
        return ret;
    }

    amdvi_do_translate(as, addr, flag & IOMMU_WO, &ret);
    trace_amdvi_translation_result(as->bus_num, PCI_SLOT(as->devfn),
                                   PCI_FUNC(as->devfn), addr, ret.translated_addr);
    return ret;
}

static int amdvi_get_irte(AMDVIState *s, MSIMessage *origin, uint64_t *dte,
                          union irte *irte, uint16_t devid)
{
    uint64_t irte_root, offset;

    irte_root = dte[2] & AMDVI_IR_PHYS_ADDR_MASK;
    offset = (origin->data & AMDVI_IRTE_OFFSET) << 2;

    trace_amdvi_ir_irte(irte_root, offset);

    if (dma_memory_read(&address_space_memory, irte_root + offset,
                        irte, sizeof(*irte))) {
        trace_amdvi_ir_err("failed to get irte");
        return -AMDVI_IR_GET_IRTE;
    }

    trace_amdvi_ir_irte_val(irte->val);

    return 0;
}

static int amdvi_int_remap_legacy(AMDVIState *iommu,
                                  MSIMessage *origin,
                                  MSIMessage *translated,
                                  uint64_t *dte,
                                  X86IOMMUIrq *irq,
                                  uint16_t sid)
{
    int ret;
    union irte irte;

    /* get interrupt remapping table */
    ret = amdvi_get_irte(iommu, origin, dte, &irte, sid);
    if (ret < 0) {
        return ret;
    }

    if (!irte.fields.valid) {
        trace_amdvi_ir_target_abort("RemapEn is disabled");
        return -AMDVI_IR_TARGET_ABORT;
    }

    if (irte.fields.guest_mode) {
        error_report_once("guest mode is not zero");
        return -AMDVI_IR_ERR;
    }

    if (irte.fields.int_type > AMDVI_IOAPIC_INT_TYPE_ARBITRATED) {
        error_report_once("reserved int_type");
        return -AMDVI_IR_ERR;
    }

    irq->delivery_mode = irte.fields.int_type;
    irq->vector = irte.fields.vector;
    irq->dest_mode = irte.fields.dm;
    irq->redir_hint = irte.fields.rq_eoi;
    irq->dest = irte.fields.destination;

    return 0;
}

static int amdvi_get_irte_ga(AMDVIState *s, MSIMessage *origin, uint64_t *dte,
                             struct irte_ga *irte, uint16_t devid)
{
    uint64_t irte_root, offset;

    irte_root = dte[2] & AMDVI_IR_PHYS_ADDR_MASK;
    offset = (origin->data & AMDVI_IRTE_OFFSET) << 4;

    trace_amdvi_ir_irte(irte_root, offset);

    if (dma_memory_read(&address_space_memory, irte_root + offset,
                        irte, sizeof(*irte))) {
        trace_amdvi_ir_err("failed to get irte_ga");
        return -AMDVI_IR_GET_IRTE;
    }

    trace_amdvi_ir_irte_ga_val(irte->hi.val, irte->lo.val);

    return 0;
}

static int amdvi_int_remap_ga(AMDVIState *iommu,
                              MSIMessage *origin,
                              MSIMessage *translated,
                              uint64_t *dte,
                              X86IOMMUIrq *irq,
                              uint16_t sid)
{
    int ret;
    struct irte_ga irte;

    /* get interrupt remapping table */
    ret = amdvi_get_irte_ga(iommu, origin, dte, &irte, sid);
    if (ret < 0) {
        return ret;
    }

    if (!irte.lo.fields_remap.valid) {
        trace_amdvi_ir_target_abort("RemapEn is disabled");
        return -AMDVI_IR_TARGET_ABORT;
    }

    if (irte.lo.fields_remap.guest_mode) {
        error_report_once("guest mode is not zero");
        return -AMDVI_IR_ERR;
    }

    if (irte.lo.fields_remap.int_type > AMDVI_IOAPIC_INT_TYPE_ARBITRATED) {
        error_report_once("reserved int_type is set");
        return -AMDVI_IR_ERR;
    }

    irq->delivery_mode = irte.lo.fields_remap.int_type;
    irq->vector = irte.hi.fields.vector;
    irq->dest_mode = irte.lo.fields_remap.dm;
    irq->redir_hint = irte.lo.fields_remap.rq_eoi;
    irq->dest = irte.lo.fields_remap.destination;

    return 0;
}

static int __amdvi_int_remap_msi(AMDVIState *iommu,
                                 MSIMessage *origin,
                                 MSIMessage *translated,
                                 uint64_t *dte,
                                 X86IOMMUIrq *irq,
                                 uint16_t sid)
{
    int ret;
    uint8_t int_ctl;

    int_ctl = (dte[2] >> AMDVI_IR_INTCTL_SHIFT) & 3;
    trace_amdvi_ir_intctl(int_ctl);

    switch (int_ctl) {
    case AMDVI_IR_INTCTL_PASS:
        memcpy(translated, origin, sizeof(*origin));
        return 0;
    case AMDVI_IR_INTCTL_REMAP:
        break;
    case AMDVI_IR_INTCTL_ABORT:
        trace_amdvi_ir_target_abort("int_ctl abort");
        return -AMDVI_IR_TARGET_ABORT;
    default:
        trace_amdvi_ir_err("int_ctl reserved");
        return -AMDVI_IR_ERR;
    }

    if (iommu->ga_enabled) {
        ret = amdvi_int_remap_ga(iommu, origin, translated, dte, irq, sid);
    } else {
        ret = amdvi_int_remap_legacy(iommu, origin, translated, dte, irq, sid);
    }

    return ret;
}

/* Interrupt remapping for MSI/MSI-X entry */
static int amdvi_int_remap_msi(AMDVIState *iommu,
                               MSIMessage *origin,
                               MSIMessage *translated,
                               uint16_t sid)
{
    int ret = 0;
    uint64_t pass = 0;
    uint64_t dte[4] = { 0 };
    X86IOMMUIrq irq = { 0 };
    uint8_t dest_mode, delivery_mode;

    assert(origin && translated);

    /*
     * When IOMMU is enabled, interrupt remap request will come either from
     * IO-APIC or PCI device. If interrupt is from PCI device then it will
     * have a valid requester id but if the interrupt is from IO-APIC
     * then requester id will be invalid.
     */
    if (sid == X86_IOMMU_SID_INVALID) {
        sid = AMDVI_IOAPIC_SB_DEVID;
    }

    trace_amdvi_ir_remap_msi_req(origin->address, origin->data, sid);

    /* check if device table entry is set before we go further. */
    if (!iommu || !iommu->devtab_len) {
        memcpy(translated, origin, sizeof(*origin));
        goto out;
    }

    if (!amdvi_get_dte(iommu, sid, dte)) {
        return -AMDVI_IR_ERR;
    }

    /* Check if IR is enabled in DTE */
    if (!(dte[2] & AMDVI_IR_REMAP_ENABLE)) {
        memcpy(translated, origin, sizeof(*origin));
        goto out;
    }

    /* validate that we are configured with intremap=on */
    if (!x86_iommu_ir_supported(X86_IOMMU_DEVICE(iommu))) {
        trace_amdvi_err("Interrupt remapping is enabled in the guest but "
                        "not in the host. Use intremap=on to enable interrupt "
                        "remapping in amd-iommu.");
        return -AMDVI_IR_ERR;
    }

    if (origin->address & AMDVI_MSI_ADDR_HI_MASK) {
        trace_amdvi_err("MSI address high 32 bits non-zero when "
                        "Interrupt Remapping enabled.");
        return -AMDVI_IR_ERR;
    }

    if ((origin->address & AMDVI_MSI_ADDR_LO_MASK) != APIC_DEFAULT_ADDRESS) {
        trace_amdvi_err("MSI is not from IOAPIC.");
        return -AMDVI_IR_ERR;
    }

    /*
     * The MSI data register [10:8] are used to get the upstream interrupt type.
     *
     * See MSI/MSI-X format:
     * https://pdfs.semanticscholar.org/presentation/9420/c279e942eca568157711ef5c92b800c40a79.pdf
     * (page 5)
     */
    delivery_mode = (origin->data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 7;

    switch (delivery_mode) {
    case AMDVI_IOAPIC_INT_TYPE_FIXED:
    case AMDVI_IOAPIC_INT_TYPE_ARBITRATED:
        trace_amdvi_ir_delivery_mode("fixed/arbitrated");
        ret = __amdvi_int_remap_msi(iommu, origin, translated, dte, &irq, sid);
        if (ret < 0) {
            goto remap_fail;
        } else {
            /* Translate IRQ to MSI messages */
            x86_iommu_irq_to_msi_message(&irq, translated);
            goto out;
        }
        break;
    case AMDVI_IOAPIC_INT_TYPE_SMI:
        error_report("SMI is not supported!");
        ret = -AMDVI_IR_ERR;
        break;
    case AMDVI_IOAPIC_INT_TYPE_NMI:
        pass = dte[3] & AMDVI_DEV_NMI_PASS_MASK;
        trace_amdvi_ir_delivery_mode("nmi");
        break;
    case AMDVI_IOAPIC_INT_TYPE_INIT:
        pass = dte[3] & AMDVI_DEV_INT_PASS_MASK;
        trace_amdvi_ir_delivery_mode("init");
        break;
    case AMDVI_IOAPIC_INT_TYPE_EINT:
        pass = dte[3] & AMDVI_DEV_EINT_PASS_MASK;
        trace_amdvi_ir_delivery_mode("eint");
        break;
    default:
        trace_amdvi_ir_delivery_mode("unsupported delivery_mode");
        ret = -AMDVI_IR_ERR;
        break;
    }

    if (ret < 0) {
        goto remap_fail;
    }

    /*
     * The MSI address register bit[2] is used to get the destination
     * mode. The dest_mode 1 is valid for fixed and arbitrated interrupts
     * only.
     */
    dest_mode = (origin->address >> MSI_ADDR_DEST_MODE_SHIFT) & 1;
    if (dest_mode) {
        trace_amdvi_ir_err("invalid dest_mode");
        ret = -AMDVI_IR_ERR;
        goto remap_fail;
    }

    if (pass) {
        memcpy(translated, origin, sizeof(*origin));
    } else {
        trace_amdvi_ir_err("passthrough is not enabled");
        ret = -AMDVI_IR_ERR;
        goto remap_fail;
    }

out:
    trace_amdvi_ir_remap_msi(origin->address, origin->data,
                             translated->address, translated->data);
    return 0;

remap_fail:
    return ret;
}

static int amdvi_int_remap(X86IOMMUState *iommu,
                           MSIMessage *origin,
                           MSIMessage *translated,
                           uint16_t sid)
{
    return amdvi_int_remap_msi(AMD_IOMMU_DEVICE(iommu), origin,
                               translated, sid);
}

static MemTxResult amdvi_mem_ir_write(void *opaque, hwaddr addr,
                                      uint64_t value, unsigned size,
                                      MemTxAttrs attrs)
{
    int ret;
    MSIMessage from = { 0, 0 }, to = { 0, 0 };
    uint16_t sid = AMDVI_IOAPIC_SB_DEVID;

    from.address = (uint64_t) addr + AMDVI_INT_ADDR_FIRST;
    from.data = (uint32_t) value;

    trace_amdvi_mem_ir_write_req(addr, value, size);

    if (!attrs.unspecified) {
        /* We have explicit Source ID */
        sid = attrs.requester_id;
    }

    ret = amdvi_int_remap_msi(opaque, &from, &to, sid);
    if (ret < 0) {
        /* TODO: log the event using IOMMU log event interface */
        error_report_once("failed to remap interrupt from devid 0x%x", sid);
        return MEMTX_ERROR;
    }

    apic_get_class()->send_msi(&to);

    trace_amdvi_mem_ir_write(to.address, to.data);
    return MEMTX_OK;
}

static MemTxResult amdvi_mem_ir_read(void *opaque, hwaddr addr,
                                     uint64_t *data, unsigned size,
                                     MemTxAttrs attrs)
{
    return MEMTX_OK;
}
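
/*
 * Reads from the interrupt remapping window carry no information; they are
 * simply completed so a stray read does not fault the guest.
 */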

static const MemoryRegionOps amdvi_ir_ops = {
    .read_with_attrs = amdvi_mem_ir_read,
    .write_with_attrs = amdvi_mem_ir_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    }
};

static AddressSpace *amdvi_host_dma_iommu(PCIBus *bus, void *opaque, int devfn)
{
    char name[128];
    AMDVIState *s = opaque;
    AMDVIAddressSpace **iommu_as, *amdvi_dev_as;
    int bus_num = pci_bus_num(bus);

    iommu_as = s->address_spaces[bus_num];

    /* allocate memory during the first run */
    if (!iommu_as) {
        iommu_as = g_malloc0(sizeof(AMDVIAddressSpace *) * PCI_DEVFN_MAX);
        s->address_spaces[bus_num] = iommu_as;
    }

    /* set up AMD-Vi region */
    if (!iommu_as[devfn]) {
        snprintf(name, sizeof(name), "amd_iommu_devfn_%d", devfn);

        iommu_as[devfn] = g_malloc0(sizeof(AMDVIAddressSpace));
        iommu_as[devfn]->bus_num = (uint8_t)bus_num;
        iommu_as[devfn]->devfn = (uint8_t)devfn;
        iommu_as[devfn]->iommu_state = s;

        amdvi_dev_as = iommu_as[devfn];

        /*
         * Memory region relationships look like (Address range shows
         * only lower 32 bits to make it short in length...):
         *
         * |-----------------+-------------------+----------|
         * | Name            | Address range     | Priority |
         * |-----------------+-------------------+----------+
         * | amdvi_root      | 00000000-ffffffff |        0 |
         * | amdvi_iommu     | 00000000-ffffffff |        1 |
         * | amdvi_iommu_ir  | fee00000-feefffff |       64 |
         * |-----------------+-------------------+----------|
         */
        memory_region_init_iommu(&amdvi_dev_as->iommu,
                                 sizeof(amdvi_dev_as->iommu),
                                 TYPE_AMD_IOMMU_MEMORY_REGION,
                                 OBJECT(s),
                                 "amd_iommu", UINT64_MAX);
        memory_region_init(&amdvi_dev_as->root, OBJECT(s),
                           "amdvi_root", UINT64_MAX);
        address_space_init(&amdvi_dev_as->as, &amdvi_dev_as->root, name);
        memory_region_init_io(&amdvi_dev_as->iommu_ir, OBJECT(s),
                              &amdvi_ir_ops, s, "amd_iommu_ir",
                              AMDVI_INT_ADDR_SIZE);
        memory_region_add_subregion_overlap(&amdvi_dev_as->root,
                                            AMDVI_INT_ADDR_FIRST,
                                            &amdvi_dev_as->iommu_ir,
                                            64);
        memory_region_add_subregion_overlap(&amdvi_dev_as->root, 0,
                                            MEMORY_REGION(&amdvi_dev_as->iommu),
                                            1);
    }
    return &iommu_as[devfn]->as;
}

static const MemoryRegionOps mmio_mem_ops = {
    .read = amdvi_mmio_read,
    .write = amdvi_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    }
};

static int amdvi_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu,
                                           IOMMUNotifierFlag old,
                                           IOMMUNotifierFlag new,
                                           Error **errp)
{
    AMDVIAddressSpace *as = container_of(iommu, AMDVIAddressSpace, iommu);

    if (new & IOMMU_NOTIFIER_MAP) {
        error_setg(errp,
                   "device %02x.%02x.%x requires iommu notifier which is not "
                   "currently supported", as->bus_num, PCI_SLOT(as->devfn),
                   PCI_FUNC(as->devfn));
        return -EINVAL;
    }
    return 0;
}
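
/*
 * Because MAP notifiers are rejected above, users that need to shadow the
 * IOMMU mappings (e.g. VFIO device assignment) cannot currently sit behind
 * this emulated AMD-Vi.
 */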

static void amdvi_init(AMDVIState *s)
{
    amdvi_iotlb_reset(s);

    s->devtab_len = 0;
    s->cmdbuf_len = 0;
    s->cmdbuf_head = 0;
    s->cmdbuf_tail = 0;
    s->evtlog_head = 0;
    s->evtlog_tail = 0;
    s->excl_enabled = false;
    s->excl_allow = false;
    s->mmio_enabled = false;
    s->enabled = false;
    s->ats_enabled = false;
    s->cmdbuf_enabled = false;

    /* reset MMIO */
    memset(s->mmior, 0, AMDVI_MMIO_SIZE);
    amdvi_set_quad(s, AMDVI_MMIO_EXT_FEATURES, AMDVI_EXT_FEATURES,
            0xffffffffffffffef, 0);
    amdvi_set_quad(s, AMDVI_MMIO_STATUS, 0, 0x98, 0x67);

    /* reset device ident */
    pci_config_set_vendor_id(s->pci.dev.config, PCI_VENDOR_ID_AMD);
    pci_config_set_prog_interface(s->pci.dev.config, 00);
    pci_config_set_device_id(s->pci.dev.config, s->devid);
    pci_config_set_class(s->pci.dev.config, 0x0806);

    /* reset AMDVI specific capabilities, all r/o */
    pci_set_long(s->pci.dev.config + s->capab_offset, AMDVI_CAPAB_FEATURES);
    pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_BAR_LOW,
                 s->mmio.addr & ~(0xffff0000));
    pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_BAR_HIGH,
                (s->mmio.addr & ~(0xffff)) >> 16);
    pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_RANGE,
                 0xff000000);
    pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_MISC, 0);
    pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_MISC,
            AMDVI_MAX_PH_ADDR | AMDVI_MAX_GVA_ADDR | AMDVI_MAX_VA_ADDR);
}

static void amdvi_reset(DeviceState *dev)
{
    AMDVIState *s = AMD_IOMMU_DEVICE(dev);

    msi_reset(&s->pci.dev);
    amdvi_init(s);
}

static void amdvi_realize(DeviceState *dev, Error **err)
{
    int ret = 0;
    AMDVIState *s = AMD_IOMMU_DEVICE(dev);
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(dev);
    MachineState *ms = MACHINE(qdev_get_machine());
    PCMachineState *pcms = PC_MACHINE(ms);
    X86MachineState *x86ms = X86_MACHINE(ms);
    PCIBus *bus = pcms->bus;

    s->iotlb = g_hash_table_new_full(amdvi_uint64_hash,
                                     amdvi_uint64_equal, g_free, g_free);

    /* This device should take care of IOMMU PCI properties */
    x86_iommu->type = TYPE_AMD;
    qdev_set_parent_bus(DEVICE(&s->pci), &bus->qbus);
    object_property_set_bool(OBJECT(&s->pci), true, "realized", err);
    ret = pci_add_capability(&s->pci.dev, AMDVI_CAPAB_ID_SEC, 0,
                             AMDVI_CAPAB_SIZE, err);
    if (ret < 0) {
        return;
    }
    s->capab_offset = ret;

    ret = pci_add_capability(&s->pci.dev, PCI_CAP_ID_MSI, 0,
                             AMDVI_CAPAB_REG_SIZE, err);
    if (ret < 0) {
        return;
    }
    ret = pci_add_capability(&s->pci.dev, PCI_CAP_ID_HT, 0,
                             AMDVI_CAPAB_REG_SIZE, err);
    if (ret < 0) {
        return;
    }

    /* Pseudo address space under root PCI bus. */
    x86ms->ioapic_as = amdvi_host_dma_iommu(bus, s, AMDVI_IOAPIC_SB_DEVID);

    /* set up MMIO */
    memory_region_init_io(&s->mmio, OBJECT(s), &mmio_mem_ops, s, "amdvi-mmio",
                          AMDVI_MMIO_SIZE);

    sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->mmio);
    sysbus_mmio_map(SYS_BUS_DEVICE(s), 0, AMDVI_BASE_ADDR);
    pci_setup_iommu(bus, amdvi_host_dma_iommu, s);
    s->devid = object_property_get_int(OBJECT(&s->pci), "addr", err);
    msi_init(&s->pci.dev, 0, 1, true, false, err);
    amdvi_init(s);
}

static const VMStateDescription vmstate_amdvi = {
    .name = "amd-iommu",
    .unmigratable = 1
};

static void amdvi_instance_init(Object *klass)
{
    AMDVIState *s = AMD_IOMMU_DEVICE(klass);

    object_initialize(&s->pci, sizeof(s->pci), TYPE_AMD_IOMMU_PCI);
}

static void amdvi_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    X86IOMMUClass *dc_class = X86_IOMMU_CLASS(klass);

    dc->reset = amdvi_reset;
    dc->vmsd = &vmstate_amdvi;
    dc->hotpluggable = false;
    dc_class->realize = amdvi_realize;
    dc_class->int_remap = amdvi_int_remap;
    /* Supported by the pc-q35-* machine types */
    dc->user_creatable = true;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    dc->desc = "AMD IOMMU (AMD-Vi) DMA Remapping device";
}

static const TypeInfo amdvi = {
    .name = TYPE_AMD_IOMMU_DEVICE,
    .parent = TYPE_X86_IOMMU_DEVICE,
    .instance_size = sizeof(AMDVIState),
    .instance_init = amdvi_instance_init,
    .class_init = amdvi_class_init
};

static const TypeInfo amdviPCI = {
    .name = "AMDVI-PCI",
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(AMDVIPCIState),
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { },
    },
};

static void amdvi_iommu_memory_region_class_init(ObjectClass *klass, void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = amdvi_translate;
    imrc->notify_flag_changed = amdvi_iommu_notify_flag_changed;
}

static const TypeInfo amdvi_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_AMD_IOMMU_MEMORY_REGION,
    .class_init = amdvi_iommu_memory_region_class_init,
};

static void amdviPCI_register_types(void)
{
    type_register_static(&amdviPCI);
    type_register_static(&amdvi);
    type_register_static(&amdvi_iommu_memory_region_info);
}

type_init(amdviPCI_register_types);