/* cxl_type3.c */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/error-report.h"
#include "qapi/qapi-commands-cxl.h"
#include "hw/mem/memory-device.h"
#include "hw/mem/pc-dimm.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/pmem.h"
#include "qemu/range.h"
#include "qemu/rcu.h"
#include "sysemu/hostmem.h"
#include "sysemu/numa.h"
#include "hw/cxl/cxl.h"
#include "hw/pci/msix.h"

#define DWORD_BYTE 4

/* Default CDAT entries for a memory region */
enum {
    CT3_CDAT_DSMAS,
    CT3_CDAT_DSLBIS0,
    CT3_CDAT_DSLBIS1,
    CT3_CDAT_DSLBIS2,
    CT3_CDAT_DSLBIS3,
    CT3_CDAT_DSEMTS,
    CT3_CDAT_NUM_ENTRIES
};
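
/*
 * Build the default CDAT (Coherent Device Attribute Table) entries for one
 * memory region: a DSMAS (device scoped memory affinity structure), four
 * DSLBIS entries (read/write latency and bandwidth) and a DSEMTS (EFI
 * memory type). The latency/bandwidth figures are placeholders, not
 * measurements of any real device.
 */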
static int ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table,
                                         int dsmad_handle, MemoryRegion *mr)
{
    g_autofree CDATDsmas *dsmas = NULL;
    g_autofree CDATDslbis *dslbis0 = NULL;
    g_autofree CDATDslbis *dslbis1 = NULL;
    g_autofree CDATDslbis *dslbis2 = NULL;
    g_autofree CDATDslbis *dslbis3 = NULL;
    g_autofree CDATDsemts *dsemts = NULL;

    dsmas = g_malloc(sizeof(*dsmas));
    if (!dsmas) {
        return -ENOMEM;
    }
    *dsmas = (CDATDsmas) {
        .header = {
            .type = CDAT_TYPE_DSMAS,
            .length = sizeof(*dsmas),
        },
        .DSMADhandle = dsmad_handle,
        .flags = CDAT_DSMAS_FLAG_NV,
        .DPA_base = 0,
        .DPA_length = int128_get64(mr->size),
    };

    /* For now, no memory side cache, plausible numbers */
    dslbis0 = g_malloc(sizeof(*dslbis0));
    if (!dslbis0) {
        return -ENOMEM;
    }
    *dslbis0 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis0),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_READ_LATENCY,
        .entry_base_unit = 10000, /* 10ns base */
        .entry[0] = 15, /* 150ns */
    };

    dslbis1 = g_malloc(sizeof(*dslbis1));
    if (!dslbis1) {
        return -ENOMEM;
    }
    *dslbis1 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis1),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_WRITE_LATENCY,
        .entry_base_unit = 10000,
        .entry[0] = 25, /* 250ns */
    };

    dslbis2 = g_malloc(sizeof(*dslbis2));
    if (!dslbis2) {
        return -ENOMEM;
    }
    *dslbis2 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis2),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_READ_BANDWIDTH,
        .entry_base_unit = 1000, /* GB/s */
        .entry[0] = 16,
    };

    dslbis3 = g_malloc(sizeof(*dslbis3));
    if (!dslbis3) {
        return -ENOMEM;
    }
    *dslbis3 = (CDATDslbis) {
        .header = {
            .type = CDAT_TYPE_DSLBIS,
            .length = sizeof(*dslbis3),
        },
        .handle = dsmad_handle,
        .flags = HMAT_LB_MEM_MEMORY,
        .data_type = HMAT_LB_DATA_WRITE_BANDWIDTH,
        .entry_base_unit = 1000, /* GB/s */
        .entry[0] = 16,
    };

    dsemts = g_malloc(sizeof(*dsemts));
    if (!dsemts) {
        return -ENOMEM;
    }
    *dsemts = (CDATDsemts) {
        .header = {
            .type = CDAT_TYPE_DSEMTS,
            .length = sizeof(*dsemts),
        },
        .DSMAS_handle = dsmad_handle,
        /* Reserved - the non-volatile flag from the DSMAS is what matters */
        .EFI_memory_type_attr = 2,
        .DPA_offset = 0,
        .DPA_length = int128_get64(mr->size),
    };

    /* Header always at start of structure */
    cdat_table[CT3_CDAT_DSMAS] = g_steal_pointer(&dsmas);
    cdat_table[CT3_CDAT_DSLBIS0] = g_steal_pointer(&dslbis0);
    cdat_table[CT3_CDAT_DSLBIS1] = g_steal_pointer(&dslbis1);
    cdat_table[CT3_CDAT_DSLBIS2] = g_steal_pointer(&dslbis2);
    cdat_table[CT3_CDAT_DSLBIS3] = g_steal_pointer(&dslbis3);
    cdat_table[CT3_CDAT_DSEMTS] = g_steal_pointer(&dsemts);

    return 0;
}
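
/*
 * Callback used by the common CDAT code to build this device's table.
 * Returns the number of entries on success, 0 if there is no backing
 * memory, or a negative errno on failure.
 */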
static int ct3_build_cdat_table(CDATSubHeader ***cdat_table, void *priv)
{
    g_autofree CDATSubHeader **table = NULL;
    MemoryRegion *nonvolatile_mr;
    CXLType3Dev *ct3d = priv;
    int dsmad_handle = 0;
    int rc;

    if (!ct3d->hostmem) {
        return 0;
    }

    nonvolatile_mr = host_memory_backend_get_memory(ct3d->hostmem);
    if (!nonvolatile_mr) {
        return -EINVAL;
    }

    table = g_malloc0(CT3_CDAT_NUM_ENTRIES * sizeof(*table));
    if (!table) {
        return -ENOMEM;
    }

    rc = ct3_build_cdat_entries_for_mr(table, dsmad_handle++, nonvolatile_mr);
    if (rc < 0) {
        return rc;
    }

    *cdat_table = g_steal_pointer(&table);

    return CT3_CDAT_NUM_ENTRIES;
}
static void ct3_free_cdat_table(CDATSubHeader **cdat_table, int num, void *priv)
{
    int i;

    for (i = 0; i < num; i++) {
        g_free(cdat_table[i]);
    }
    g_free(cdat_table);
}
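
/*
 * Service a DOE (Data Object Exchange) CDAT table access request. Each
 * response carries one CDAT entry plus the handle of the next one, so the
 * host walks the table with repeated requests until it receives
 * CXL_DOE_TAB_ENT_MAX as the next entry handle.
 */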
static bool cxl_doe_cdat_rsp(DOECap *doe_cap)
{
    CDATObject *cdat = &CXL_TYPE3(doe_cap->pdev)->cxl_cstate.cdat;
    uint16_t ent;
    void *base;
    uint32_t len;
    CDATReq *req = pcie_doe_get_write_mbox_ptr(doe_cap);
    CDATRsp rsp;

    assert(cdat->entry_len);

    /* Discard if request length mismatched */
    if (pcie_doe_get_obj_len(req) <
        DIV_ROUND_UP(sizeof(CDATReq), DWORD_BYTE)) {
        return false;
    }

    ent = req->entry_handle;
    base = cdat->entry[ent].base;
    len = cdat->entry[ent].length;

    rsp = (CDATRsp) {
        .header = {
            .vendor_id = CXL_VENDOR_ID,
            .data_obj_type = CXL_DOE_TABLE_ACCESS,
            .reserved = 0x0,
            .length = DIV_ROUND_UP((sizeof(rsp) + len), DWORD_BYTE),
        },
        .rsp_code = CXL_DOE_TAB_RSP,
        .table_type = CXL_DOE_TAB_TYPE_CDAT,
        .entry_handle = (ent < cdat->entry_len - 1) ?
                        ent + 1 : CXL_DOE_TAB_ENT_MAX,
    };

    memcpy(doe_cap->read_mbox, &rsp, sizeof(rsp));
    memcpy(doe_cap->read_mbox + DIV_ROUND_UP(sizeof(rsp), DWORD_BYTE),
           base, len);

    doe_cap->read_mbox_len += rsp.header.length;

    return true;
}
static uint32_t ct3d_config_read(PCIDevice *pci_dev, uint32_t addr, int size)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
    uint32_t val;

    if (pcie_doe_read_config(&ct3d->doe_cdat, addr, size, &val)) {
        return val;
    }

    return pci_default_read_config(pci_dev, addr, size);
}

static void ct3d_config_write(PCIDevice *pci_dev, uint32_t addr, uint32_t val,
                              int size)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);

    pcie_doe_write_config(&ct3d->doe_cdat, addr, val, size);
    pci_default_write_config(pci_dev, addr, val, size);
    pcie_aer_write_config(pci_dev, addr, val, size);
}

/*
 * Null value of all Fs suggested by IEEE RA guidelines for use of
 * EU, OUI and CID
 */
#define UI64_NULL ~(0ULL)
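
/*
 * Populate the DVSEC (Designated Vendor-Specific Extended Capability)
 * structures advertised in config space: the CXL device DVSEC, the
 * register locator, GPF (Global Persistent Flush) timings and the
 * Flex Bus port capability.
 */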
static void build_dvsecs(CXLType3Dev *ct3d)
{
    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
    uint8_t *dvsec;

    dvsec = (uint8_t *)&(CXLDVSECDevice){
        .cap = 0x1e,
        .ctrl = 0x2,
        .status2 = 0x2,
        .range1_size_hi = ct3d->hostmem->size >> 32,
        .range1_size_lo = (2 << 5) | (2 << 2) | 0x3 |
                          (ct3d->hostmem->size & 0xF0000000),
        .range1_base_hi = 0,
        .range1_base_lo = 0,
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               PCIE_CXL_DEVICE_DVSEC_LENGTH,
                               PCIE_CXL_DEVICE_DVSEC,
                               PCIE_CXL2_DEVICE_DVSEC_REVID, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECRegisterLocator){
        .rsvd = 0,
        .reg0_base_lo = RBI_COMPONENT_REG | CXL_COMPONENT_REG_BAR_IDX,
        .reg0_base_hi = 0,
        .reg1_base_lo = RBI_CXL_DEVICE_REG | CXL_DEVICE_REG_BAR_IDX,
        .reg1_base_hi = 0,
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               REG_LOC_DVSEC_LENGTH, REG_LOC_DVSEC,
                               REG_LOC_DVSEC_REVID, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECDeviceGPF){
        .phase2_duration = 0x603, /* 3 seconds */
        .phase2_power = 0x33, /* 0x33 milliwatts */
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               GPF_DEVICE_DVSEC_LENGTH, GPF_DEVICE_DVSEC,
                               GPF_DEVICE_DVSEC_REVID, dvsec);

    dvsec = (uint8_t *)&(CXLDVSECPortFlexBus){
        .cap = 0x26, /* 68B, IO, Mem, non-MLD */
        .ctrl = 0x02, /* IO always enabled */
        .status = 0x26, /* same as capabilities */
        .rcvd_mod_ts_data_phase1 = 0xef, /* WTF? */
    };
    cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
                               PCIE_FLEXBUS_PORT_DVSEC_LENGTH_2_0,
                               PCIE_FLEXBUS_PORT_DVSEC,
                               PCIE_FLEXBUS_PORT_DVSEC_REVID_2_0, dvsec);
}
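
/*
 * Commit an HDM decoder: clear the COMMIT and ERR bits and report the
 * decoder as COMMITTED. Only decoder 0 is implemented so far.
 */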
static void hdm_decoder_commit(CXLType3Dev *ct3d, int which)
{
    ComponentRegisters *cregs = &ct3d->cxl_cstate.crb;
    uint32_t *cache_mem = cregs->cache_mem_registers;

    assert(which == 0);

    /* TODO: Sanity checks that the decoder is possible */
    ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, COMMIT, 0);
    ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, ERR, 0);
    ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, COMMITTED, 1);
}
static int ct3d_qmp_uncor_err_to_cxl(CxlUncorErrorType qmp_err)
{
    switch (qmp_err) {
    case CXL_UNCOR_ERROR_TYPE_CACHE_DATA_PARITY:
        return CXL_RAS_UNC_ERR_CACHE_DATA_PARITY;
    case CXL_UNCOR_ERROR_TYPE_CACHE_ADDRESS_PARITY:
        return CXL_RAS_UNC_ERR_CACHE_ADDRESS_PARITY;
    case CXL_UNCOR_ERROR_TYPE_CACHE_BE_PARITY:
        return CXL_RAS_UNC_ERR_CACHE_BE_PARITY;
    case CXL_UNCOR_ERROR_TYPE_CACHE_DATA_ECC:
        return CXL_RAS_UNC_ERR_CACHE_DATA_ECC;
    case CXL_UNCOR_ERROR_TYPE_MEM_DATA_PARITY:
        return CXL_RAS_UNC_ERR_MEM_DATA_PARITY;
    case CXL_UNCOR_ERROR_TYPE_MEM_ADDRESS_PARITY:
        return CXL_RAS_UNC_ERR_MEM_ADDRESS_PARITY;
    case CXL_UNCOR_ERROR_TYPE_MEM_BE_PARITY:
        return CXL_RAS_UNC_ERR_MEM_BE_PARITY;
    case CXL_UNCOR_ERROR_TYPE_MEM_DATA_ECC:
        return CXL_RAS_UNC_ERR_MEM_DATA_ECC;
    case CXL_UNCOR_ERROR_TYPE_REINIT_THRESHOLD:
        return CXL_RAS_UNC_ERR_REINIT_THRESHOLD;
    case CXL_UNCOR_ERROR_TYPE_RSVD_ENCODING:
        return CXL_RAS_UNC_ERR_RSVD_ENCODING;
    case CXL_UNCOR_ERROR_TYPE_POISON_RECEIVED:
        return CXL_RAS_UNC_ERR_POISON_RECEIVED;
    case CXL_UNCOR_ERROR_TYPE_RECEIVER_OVERFLOW:
        return CXL_RAS_UNC_ERR_RECEIVER_OVERFLOW;
    case CXL_UNCOR_ERROR_TYPE_INTERNAL:
        return CXL_RAS_UNC_ERR_INTERNAL;
    case CXL_UNCOR_ERROR_TYPE_CXL_IDE_TX:
        return CXL_RAS_UNC_ERR_CXL_IDE_TX;
    case CXL_UNCOR_ERROR_TYPE_CXL_IDE_RX:
        return CXL_RAS_UNC_ERR_CXL_IDE_RX;
    default:
        return -EINVAL;
    }
}

static int ct3d_qmp_cor_err_to_cxl(CxlCorErrorType qmp_err)
{
    switch (qmp_err) {
    case CXL_COR_ERROR_TYPE_CACHE_DATA_ECC:
        return CXL_RAS_COR_ERR_CACHE_DATA_ECC;
    case CXL_COR_ERROR_TYPE_MEM_DATA_ECC:
        return CXL_RAS_COR_ERR_MEM_DATA_ECC;
    case CXL_COR_ERROR_TYPE_CRC_THRESHOLD:
        return CXL_RAS_COR_ERR_CRC_THRESHOLD;
    case CXL_COR_ERROR_TYPE_RETRY_THRESHOLD:
        return CXL_RAS_COR_ERR_RETRY_THRESHOLD;
    case CXL_COR_ERROR_TYPE_CACHE_POISON_RECEIVED:
        return CXL_RAS_COR_ERR_CACHE_POISON_RECEIVED;
    case CXL_COR_ERROR_TYPE_MEM_POISON_RECEIVED:
        return CXL_RAS_COR_ERR_MEM_POISON_RECEIVED;
    case CXL_COR_ERROR_TYPE_PHYSICAL:
        return CXL_RAS_COR_ERR_PHYSICAL;
    default:
        return -EINVAL;
    }
}
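
/*
 * MMIO write handler for the cache/mem component registers. Most registers
 * take plain writes, but the HDM decoder control register triggers a
 * commit, and the RAS status registers have RW1C (write-one-to-clear)
 * semantics tied to the recorded error list and the first error pointer.
 */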
static void ct3d_reg_write(void *opaque, hwaddr offset, uint64_t value,
                           unsigned size)
{
    CXLComponentState *cxl_cstate = opaque;
    ComponentRegisters *cregs = &cxl_cstate->crb;
    CXLType3Dev *ct3d = container_of(cxl_cstate, CXLType3Dev, cxl_cstate);
    uint32_t *cache_mem = cregs->cache_mem_registers;
    bool should_commit = false;
    int which_hdm = -1;

    assert(size == 4);
    g_assert(offset < CXL2_COMPONENT_CM_REGION_SIZE);

    switch (offset) {
    case A_CXL_HDM_DECODER0_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        which_hdm = 0;
        break;
    case A_CXL_RAS_UNC_ERR_STATUS:
    {
        uint32_t capctrl = ldl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL);
        uint32_t fe = FIELD_EX32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                                 FIRST_ERROR_POINTER);
        CXLError *cxl_err;
        uint32_t unc_err;

        /*
         * If a single bit is written that corresponds to the first error
         * pointer being cleared, update the status and header log.
         */
        if (!QTAILQ_EMPTY(&ct3d->error_list)) {
            if ((1 << fe) ^ value) {
                CXLError *cxl_next;
                /*
                 * Software is using the wrong flow for multiple header
                 * recording. Following the behavior in PCIe r6.0 and
                 * assuming multiple header support. Implementation defined
                 * choice to clear all matching records if more than one bit
                 * is set - which corresponds closest to the behavior of
                 * hardware not capable of multiple header recording.
                 */
                QTAILQ_FOREACH_SAFE(cxl_err, &ct3d->error_list, node,
                                    cxl_next) {
                    if ((1 << cxl_err->type) & value) {
                        QTAILQ_REMOVE(&ct3d->error_list, cxl_err, node);
                        g_free(cxl_err);
                    }
                }
            } else {
                /* Done with previous FE, so drop from list */
                cxl_err = QTAILQ_FIRST(&ct3d->error_list);
                QTAILQ_REMOVE(&ct3d->error_list, cxl_err, node);
                g_free(cxl_err);
            }

            /*
             * If there is another FE, then put that in place and update
             * the header log
             */
            if (!QTAILQ_EMPTY(&ct3d->error_list)) {
                uint32_t *header_log = &cache_mem[R_CXL_RAS_ERR_HEADER0];
                int i;

                cxl_err = QTAILQ_FIRST(&ct3d->error_list);
                for (i = 0; i < CXL_RAS_ERR_HEADER_NUM; i++) {
                    stl_le_p(header_log + i, cxl_err->header[i]);
                }
                capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                                     FIRST_ERROR_POINTER, cxl_err->type);
            } else {
                /*
                 * If there are no more errors, then follow the
                 * recommendation of PCIe spec r6.0 6.2.4.2 to set the first
                 * error pointer to a status bit that will never be used.
                 */
                capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                                     FIRST_ERROR_POINTER,
                                     CXL_RAS_UNC_ERR_CXL_UNUSED);
            }
            stl_le_p((uint8_t *)cache_mem + A_CXL_RAS_ERR_CAP_CTRL, capctrl);
        }
        unc_err = 0;
        QTAILQ_FOREACH(cxl_err, &ct3d->error_list, node) {
            unc_err |= 1 << cxl_err->type;
        }
        stl_le_p((uint8_t *)cache_mem + offset, unc_err);

        return;
    }
    case A_CXL_RAS_COR_ERR_STATUS:
    {
        uint32_t rw1c = value;
        uint32_t temp = ldl_le_p((uint8_t *)cache_mem + offset);
        temp &= ~rw1c;
        stl_le_p((uint8_t *)cache_mem + offset, temp);
        return;
    }
    default:
        break;
    }

    stl_le_p((uint8_t *)cache_mem + offset, value);
    if (should_commit) {
        hdm_decoder_commit(ct3d, which_hdm);
    }
}
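
/*
 * Validate the memdev and lsa host memory backend properties and
 * initialize the device's DPA (device physical address) space over the
 * persistent memory region.
 */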
static bool cxl_setup_memory(CXLType3Dev *ct3d, Error **errp)
{
    DeviceState *ds = DEVICE(ct3d);
    MemoryRegion *mr;
    char *name;

    if (!ct3d->hostmem) {
        error_setg(errp, "memdev property must be set");
        return false;
    }

    mr = host_memory_backend_get_memory(ct3d->hostmem);
    if (!mr) {
        error_setg(errp, "memdev property must be set");
        return false;
    }
    memory_region_set_nonvolatile(mr, true);
    memory_region_set_enabled(mr, true);
    host_memory_backend_set_mapped(ct3d->hostmem, true);

    if (ds->id) {
        name = g_strdup_printf("cxl-type3-dpa-space:%s", ds->id);
    } else {
        name = g_strdup("cxl-type3-dpa-space");
    }
    address_space_init(&ct3d->hostmem_as, mr, name);
    g_free(name);

    ct3d->cxl_dstate.pmem_size = ct3d->hostmem->size;

    if (!ct3d->lsa) {
        error_setg(errp, "lsa property must be set");
        return false;
    }

    return true;
}

static DOEProtocol doe_cdat_prot[] = {
    { CXL_VENDOR_ID, CXL_DOE_TABLE_ACCESS, cxl_doe_cdat_rsp },
    { }
};
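
/*
 * Realize: set up config space (PCIe endpoint capability, optional serial
 * number, DVSECs), map the component and device register BARs, and
 * initialize MSI-X, the CDAT DOE mailbox and AER.
 */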
static void ct3_realize(PCIDevice *pci_dev, Error **errp)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
    ComponentRegisters *regs = &cxl_cstate->crb;
    MemoryRegion *mr = &regs->component_registers;
    uint8_t *pci_conf = pci_dev->config;
    unsigned short msix_num = 1;
    int i, rc;

    QTAILQ_INIT(&ct3d->error_list);

    if (!cxl_setup_memory(ct3d, errp)) {
        return;
    }

    pci_config_set_prog_interface(pci_conf, 0x10);

    pcie_endpoint_cap_init(pci_dev, 0x80);
    if (ct3d->sn != UI64_NULL) {
        pcie_dev_ser_num_init(pci_dev, 0x100, ct3d->sn);
        cxl_cstate->dvsec_offset = 0x100 + 0x0c;
    } else {
        cxl_cstate->dvsec_offset = 0x100;
    }

    ct3d->cxl_cstate.pdev = pci_dev;
    build_dvsecs(ct3d);

    regs->special_ops = g_new0(MemoryRegionOps, 1);
    regs->special_ops->write = ct3d_reg_write;

    cxl_component_register_block_init(OBJECT(pci_dev), cxl_cstate,
                                      TYPE_CXL_TYPE3);

    pci_register_bar(
        pci_dev, CXL_COMPONENT_REG_BAR_IDX,
        PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64, mr);

    cxl_device_register_block_init(OBJECT(pci_dev), &ct3d->cxl_dstate);
    pci_register_bar(pci_dev, CXL_DEVICE_REG_BAR_IDX,
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64,
                     &ct3d->cxl_dstate.device_registers);

    /* MSI(-X) Initialization */
    rc = msix_init_exclusive_bar(pci_dev, msix_num, 4, NULL);
    if (rc) {
        goto err_address_space_free;
    }
    for (i = 0; i < msix_num; i++) {
        msix_vector_use(pci_dev, i);
    }

    /* DOE Initialization */
    pcie_doe_init(pci_dev, &ct3d->doe_cdat, 0x190, doe_cdat_prot, true, 0);

    cxl_cstate->cdat.build_cdat_table = ct3_build_cdat_table;
    cxl_cstate->cdat.free_cdat_table = ct3_free_cdat_table;
    cxl_cstate->cdat.private = ct3d;
    cxl_doe_cdat_init(cxl_cstate, errp);

    pcie_cap_deverr_init(pci_dev);
    /* Leave a bit of room for expansion */
    rc = pcie_aer_init(pci_dev, PCI_ERR_VER, 0x200, PCI_ERR_SIZEOF, NULL);
    if (rc) {
        goto err_release_cdat;
    }

    return;

err_release_cdat:
    cxl_doe_cdat_release(cxl_cstate);
    g_free(regs->special_ops);
err_address_space_free:
    address_space_destroy(&ct3d->hostmem_as);
    return;
}
static void ct3_exit(PCIDevice *pci_dev)
{
    CXLType3Dev *ct3d = CXL_TYPE3(pci_dev);
    CXLComponentState *cxl_cstate = &ct3d->cxl_cstate;
    ComponentRegisters *regs = &cxl_cstate->crb;

    pcie_aer_exit(pci_dev);
    cxl_doe_cdat_release(cxl_cstate);
    g_free(regs->special_ops);
    address_space_destroy(&ct3d->hostmem_as);
}
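
/*
 * Translate a host physical address (HPA) to a device physical address
 * (DPA) using the interleave parameters programmed into HDM decoder 0.
 * With an interleave granularity of 2^(8 + ig) bytes and 2^iw interleave
 * ways, the bits that select the target device are squeezed out of the
 * offset:
 *   dpa = (hpa_offset & low_mask) | ((hpa_offset & high_mask) >> iw)
 */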
/* TODO: Support multiple HDM decoders and DPA skip */
static bool cxl_type3_dpa(CXLType3Dev *ct3d, hwaddr host_addr, uint64_t *dpa)
{
    uint32_t *cache_mem = ct3d->cxl_cstate.crb.cache_mem_registers;
    uint64_t decoder_base, decoder_size, hpa_offset;
    uint32_t hdm0_ctrl;
    int ig, iw;

    decoder_base = (((uint64_t)cache_mem[R_CXL_HDM_DECODER0_BASE_HI] << 32) |
                    cache_mem[R_CXL_HDM_DECODER0_BASE_LO]);
    if ((uint64_t)host_addr < decoder_base) {
        return false;
    }

    hpa_offset = (uint64_t)host_addr - decoder_base;

    decoder_size = ((uint64_t)cache_mem[R_CXL_HDM_DECODER0_SIZE_HI] << 32) |
                   cache_mem[R_CXL_HDM_DECODER0_SIZE_LO];
    if (hpa_offset >= decoder_size) {
        return false;
    }

    hdm0_ctrl = cache_mem[R_CXL_HDM_DECODER0_CTRL];
    iw = FIELD_EX32(hdm0_ctrl, CXL_HDM_DECODER0_CTRL, IW);
    ig = FIELD_EX32(hdm0_ctrl, CXL_HDM_DECODER0_CTRL, IG);

    *dpa = (MAKE_64BIT_MASK(0, 8 + ig) & hpa_offset) |
           ((MAKE_64BIT_MASK(8 + ig + iw, 64 - 8 - ig - iw) & hpa_offset)
            >> iw);

    return true;
}
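
/*
 * Memory accesses routed to the device by the host bridge. Reads that miss
 * the decoder or fall outside the backing region report an error; writes
 * in the same situation are silently dropped (MEMTX_OK), as a posted write
 * has nowhere to report failure.
 */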
MemTxResult cxl_type3_read(PCIDevice *d, hwaddr host_addr, uint64_t *data,
                           unsigned size, MemTxAttrs attrs)
{
    CXLType3Dev *ct3d = CXL_TYPE3(d);
    uint64_t dpa_offset;
    MemoryRegion *mr;

    /* TODO support volatile region */
    mr = host_memory_backend_get_memory(ct3d->hostmem);
    if (!mr) {
        return MEMTX_ERROR;
    }

    if (!cxl_type3_dpa(ct3d, host_addr, &dpa_offset)) {
        return MEMTX_ERROR;
    }

    if (dpa_offset > int128_get64(mr->size)) {
        return MEMTX_ERROR;
    }

    return address_space_read(&ct3d->hostmem_as, dpa_offset, attrs, data, size);
}

MemTxResult cxl_type3_write(PCIDevice *d, hwaddr host_addr, uint64_t data,
                            unsigned size, MemTxAttrs attrs)
{
    CXLType3Dev *ct3d = CXL_TYPE3(d);
    uint64_t dpa_offset;
    MemoryRegion *mr;

    mr = host_memory_backend_get_memory(ct3d->hostmem);
    if (!mr) {
        return MEMTX_OK;
    }

    if (!cxl_type3_dpa(ct3d, host_addr, &dpa_offset)) {
        return MEMTX_OK;
    }

    if (dpa_offset > int128_get64(mr->size)) {
        return MEMTX_OK;
    }

    return address_space_write(&ct3d->hostmem_as, dpa_offset, attrs,
                               &data, size);
}
static void ct3d_reset(DeviceState *dev)
{
    CXLType3Dev *ct3d = CXL_TYPE3(dev);
    uint32_t *reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
    uint32_t *write_msk = ct3d->cxl_cstate.crb.cache_mem_regs_write_mask;

    cxl_component_register_init_common(reg_state, write_msk, CXL2_TYPE3_DEVICE);
    cxl_device_register_init_common(&ct3d->cxl_dstate);
}
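
/*
 * An illustrative command line for instantiating this device; ids, paths
 * and sizes are examples only (see docs/system/devices/cxl.rst for the
 * full topology):
 *
 *  -object memory-backend-file,id=cxl-mem1,share=on,mem-path=/tmp/cxltest.raw,size=256M
 *  -object memory-backend-file,id=cxl-lsa1,share=on,mem-path=/tmp/lsa.raw,size=256M
 *  -device pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1
 *  -device cxl-rp,port=0,bus=cxl.1,id=root_port13,chassis=0,slot=2
 *  -device cxl-type3,bus=root_port13,memdev=cxl-mem1,lsa=cxl-lsa1,id=cxl-pmem0
 */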
static Property ct3_props[] = {
    DEFINE_PROP_LINK("memdev", CXLType3Dev, hostmem, TYPE_MEMORY_BACKEND,
                     HostMemoryBackend *),
    DEFINE_PROP_LINK("lsa", CXLType3Dev, lsa, TYPE_MEMORY_BACKEND,
                     HostMemoryBackend *),
    DEFINE_PROP_UINT64("sn", CXLType3Dev, sn, UI64_NULL),
    DEFINE_PROP_STRING("cdat", CXLType3Dev, cxl_cstate.cdat.filename),
    DEFINE_PROP_END_OF_LIST(),
};
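
/*
 * LSA (Label Storage Area) accessors, exposed to the mailbox command
 * handlers through the CXLType3Class vtable set up in ct3_class_init()
 * below. The LSA lives in a host memory backend so that, when the backend
 * is file backed, labels can survive across guest restarts.
 */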
static uint64_t get_lsa_size(CXLType3Dev *ct3d)
{
    MemoryRegion *mr;

    mr = host_memory_backend_get_memory(ct3d->lsa);
    return memory_region_size(mr);
}

static void validate_lsa_access(MemoryRegion *mr, uint64_t size,
                                uint64_t offset)
{
    assert(offset + size <= memory_region_size(mr));
    assert(offset + size > offset);
}

static uint64_t get_lsa(CXLType3Dev *ct3d, void *buf, uint64_t size,
                        uint64_t offset)
{
    MemoryRegion *mr;
    void *lsa;

    mr = host_memory_backend_get_memory(ct3d->lsa);
    validate_lsa_access(mr, size, offset);

    lsa = memory_region_get_ram_ptr(mr) + offset;
    memcpy(buf, lsa, size);

    return size;
}

static void set_lsa(CXLType3Dev *ct3d, const void *buf, uint64_t size,
                    uint64_t offset)
{
    MemoryRegion *mr;
    void *lsa;

    mr = host_memory_backend_get_memory(ct3d->lsa);
    validate_lsa_access(mr, size, offset);

    lsa = memory_region_get_ram_ptr(mr) + offset;
    memcpy(lsa, buf, size);
    memory_region_set_dirty(mr, offset, size);

    /*
     * Just like the PMEM, if the guest is not allowed to exit gracefully,
     * label updates will get lost.
     */
}
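
/*
 * QMP error injection interface. An illustrative invocation, assuming a
 * device created with id=cxl-pmem0 as in the command-line example above
 * (the id and header payload here are hypothetical):
 *
 * { "execute": "cxl-inject-uncorrectable-errors",
 *   "arguments": {
 *     "path": "/machine/peripheral/cxl-pmem0",
 *     "errors": [ { "type": "cache-data-parity", "header": [ 0, 1, 2 ] } ]
 *   }
 * }
 */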
/* For uncorrectable errors include support for multiple header recording */
void qmp_cxl_inject_uncorrectable_errors(const char *path,
                                         CXLUncorErrorRecordList *errors,
                                         Error **errp)
{
    Object *obj = object_resolve_path(path, NULL);
    static PCIEAERErr err = {};
    CXLType3Dev *ct3d;
    CXLError *cxl_err;
    uint32_t *reg_state;
    uint32_t unc_err;
    bool first;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }

    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }

    err.status = PCI_ERR_UNC_INTN;
    err.source_id = pci_requester_id(PCI_DEVICE(obj));
    err.flags = 0;

    ct3d = CXL_TYPE3(obj);

    first = QTAILQ_EMPTY(&ct3d->error_list);
    reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
    while (errors) {
        uint32List *header = errors->value->header;
        uint8_t header_count = 0;
        int cxl_err_code;

        cxl_err_code = ct3d_qmp_uncor_err_to_cxl(errors->value->type);
        if (cxl_err_code < 0) {
            error_setg(errp, "Unknown error code");
            return;
        }

        /* If the error is masked, nothing to do here */
        if (!((1 << cxl_err_code) &
              ~ldl_le_p(reg_state + R_CXL_RAS_UNC_ERR_MASK))) {
            errors = errors->next;
            continue;
        }

        cxl_err = g_malloc0(sizeof(*cxl_err));
        if (!cxl_err) {
            return;
        }

        cxl_err->type = cxl_err_code;
        while (header && header_count < 32) {
            cxl_err->header[header_count++] = header->value;
            header = header->next;
        }
        /* The copy loop stops at 32 entries; anything left over is too long */
        if (header) {
            error_setg(errp, "Header must be 32 DWORD or less");
            g_free(cxl_err);
            return;
        }
        QTAILQ_INSERT_TAIL(&ct3d->error_list, cxl_err, node);

        errors = errors->next;
    }

    if (first && !QTAILQ_EMPTY(&ct3d->error_list)) {
        uint32_t *cache_mem = ct3d->cxl_cstate.crb.cache_mem_registers;
        uint32_t capctrl = ldl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL);
        uint32_t *header_log = &cache_mem[R_CXL_RAS_ERR_HEADER0];
        int i;

        cxl_err = QTAILQ_FIRST(&ct3d->error_list);
        for (i = 0; i < CXL_RAS_ERR_HEADER_NUM; i++) {
            stl_le_p(header_log + i, cxl_err->header[i]);
        }

        capctrl = FIELD_DP32(capctrl, CXL_RAS_ERR_CAP_CTRL,
                             FIRST_ERROR_POINTER, cxl_err->type);
        stl_le_p(cache_mem + R_CXL_RAS_ERR_CAP_CTRL, capctrl);
    }

    unc_err = 0;
    QTAILQ_FOREACH(cxl_err, &ct3d->error_list, node) {
        unc_err |= (1 << cxl_err->type);
    }
    if (!unc_err) {
        return;
    }

    stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_STATUS, unc_err);
    pcie_aer_inject_error(PCI_DEVICE(obj), &err);
}
void qmp_cxl_inject_correctable_error(const char *path, CxlCorErrorType type,
                                      Error **errp)
{
    static PCIEAERErr err = {};
    Object *obj = object_resolve_path(path, NULL);
    CXLType3Dev *ct3d;
    uint32_t *reg_state;
    uint32_t cor_err;
    int cxl_err_type;

    if (!obj) {
        error_setg(errp, "Unable to resolve path");
        return;
    }

    if (!object_dynamic_cast(obj, TYPE_CXL_TYPE3)) {
        error_setg(errp, "Path does not point to a CXL type 3 device");
        return;
    }

    err.status = PCI_ERR_COR_INTERNAL;
    err.source_id = pci_requester_id(PCI_DEVICE(obj));
    err.flags = PCIE_AER_ERR_IS_CORRECTABLE;

    ct3d = CXL_TYPE3(obj);
    reg_state = ct3d->cxl_cstate.crb.cache_mem_registers;
    cor_err = ldl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS);

    cxl_err_type = ct3d_qmp_cor_err_to_cxl(type);
    if (cxl_err_type < 0) {
        error_setg(errp, "Invalid COR error");
        return;
    }
    /* If the error is masked, nothing to do here */
    if (!((1 << cxl_err_type) & ~ldl_le_p(reg_state + R_CXL_RAS_COR_ERR_MASK))) {
        return;
    }

    cor_err |= (1 << cxl_err_type);
    stl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS, cor_err);

    pcie_aer_inject_error(PCI_DEVICE(obj), &err);
}
static void ct3_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
    CXLType3Class *cvc = CXL_TYPE3_CLASS(oc);

    pc->realize = ct3_realize;
    pc->exit = ct3_exit;
    pc->class_id = PCI_CLASS_MEMORY_CXL;
    pc->vendor_id = PCI_VENDOR_ID_INTEL;
    pc->device_id = 0xd93; /* LVF for now */
    pc->revision = 1;

    pc->config_write = ct3d_config_write;
    pc->config_read = ct3d_config_read;

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "CXL PMEM Device (Type 3)";
    dc->reset = ct3d_reset;
    device_class_set_props(dc, ct3_props);

    cvc->get_lsa_size = get_lsa_size;
    cvc->get_lsa = get_lsa;
    cvc->set_lsa = set_lsa;
}

static const TypeInfo ct3d_info = {
    .name = TYPE_CXL_TYPE3,
    .parent = TYPE_PCI_DEVICE,
    .class_size = sizeof(struct CXLType3Class),
    .class_init = ct3_class_init,
    .instance_size = sizeof(CXLType3Dev),
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CXL_DEVICE },
        { INTERFACE_PCIE_DEVICE },
        {}
    },
};

static void ct3d_registers(void)
{
    type_register_static(&ct3d_info);
}

type_init(ct3d_registers);