/*
 * CXL Utility library for components
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "hw/pci/pci.h"
#include "hw/cxl/cxl.h"

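/*
 * Read handler for the cache/mem region of the component register block.
 * Accesses are either delegated to a device specific hook (special_ops) or
 * served directly from the cache_mem_registers shadow array. 8 byte accesses
 * are not implemented and read as zero.
 */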
static uint64_t cxl_cache_mem_read_reg(void *opaque, hwaddr offset,
                                       unsigned size)
{
    CXLComponentState *cxl_cstate = opaque;
    ComponentRegisters *cregs = &cxl_cstate->crb;

    if (size == 8) {
        qemu_log_mask(LOG_UNIMP,
                      "CXL 8 byte cache mem registers not implemented\n");
        return 0;
    }

    if (cregs->special_ops && cregs->special_ops->read) {
        return cregs->special_ops->read(cxl_cstate, offset, size);
    } else {
        return cregs->cache_mem_registers[offset / sizeof(*cregs->cache_mem_registers)];
    }
}

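/*
 * Minimal HDM decoder write handling: latch the written value and, if the
 * guest set the COMMIT bit in decoder 0's control register, clear COMMIT and
 * ERR and report the decoder as COMMITTED. The update is batched in a memory
 * region transaction so any resulting address space changes apply together.
 */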
static void dumb_hdm_handler(CXLComponentState *cxl_cstate, hwaddr offset,
                             uint32_t value)
{
    ComponentRegisters *cregs = &cxl_cstate->crb;
    uint32_t *cache_mem = cregs->cache_mem_registers;
    bool should_commit = false;

    switch (offset) {
    case A_CXL_HDM_DECODER0_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        break;
    default:
        break;
    }

    memory_region_transaction_begin();
    stl_le_p((uint8_t *)cache_mem + offset, value);
    if (should_commit) {
        ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, COMMIT, 0);
        ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, ERR, 0);
        ARRAY_FIELD_DP32(cache_mem, CXL_HDM_DECODER0_CTRL, COMMITTED, 1);
    }
    memory_region_transaction_commit();
}

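/*
 * Write handler for the cache/mem region. The incoming value is filtered
 * through the per-register write mask so read-only bits keep their current
 * contents, then either passed to a device specific hook or, for the HDM
 * decoder register range, to dumb_hdm_handler. All other registers are
 * stored directly in the shadow array.
 */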
static void cxl_cache_mem_write_reg(void *opaque, hwaddr offset, uint64_t value,
                                    unsigned size)
{
    CXLComponentState *cxl_cstate = opaque;
    ComponentRegisters *cregs = &cxl_cstate->crb;
    uint32_t mask;

    if (size == 8) {
        qemu_log_mask(LOG_UNIMP,
                      "CXL 8 byte cache mem registers not implemented\n");
        return;
    }
    mask = cregs->cache_mem_regs_write_mask[offset / sizeof(*cregs->cache_mem_regs_write_mask)];
    value &= mask;
    /* RO bits should remain constant. Done by reading existing value */
    value |= ~mask & cregs->cache_mem_registers[offset / sizeof(*cregs->cache_mem_registers)];
    if (cregs->special_ops && cregs->special_ops->write) {
        cregs->special_ops->write(cxl_cstate, offset, value, size);
        return;
    }

    if (offset >= A_CXL_HDM_DECODER_CAPABILITY &&
        offset <= A_CXL_HDM_DECODER0_TARGET_LIST_HI) {
        dumb_hdm_handler(cxl_cstate, offset, value);
    } else {
        cregs->cache_mem_registers[offset / sizeof(*cregs->cache_mem_registers)] = value;
    }
}

/*
 * 8.2.3
 *   The access restrictions specified in Section 8.2.2 also apply to CXL 2.0
 *   Component Registers.
 *
 * 8.2.2
 *   • A 32 bit register shall be accessed as a 4 byte quantity. Partial
 *     reads are not permitted.
 *   • A 64 bit register shall be accessed as an 8 byte quantity. Partial
 *     reads are not permitted.
 *
 * As the spec is defined today, only 4 byte registers exist.
 */
static const MemoryRegionOps cache_mem_ops = {
    .read = cxl_cache_mem_read_reg,
    .write = cxl_cache_mem_write_reg,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

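/*
 * Build the component register block as a container memory region with two
 * subregions: the I/O region (no ops registered) followed by the cache/mem
 * region backed by cache_mem_ops.
 */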
void cxl_component_register_block_init(Object *obj,
                                       CXLComponentState *cxl_cstate,
                                       const char *type)
{
    ComponentRegisters *cregs = &cxl_cstate->crb;

    memory_region_init(&cregs->component_registers, obj, type,
                       CXL2_COMPONENT_BLOCK_SIZE);

    /* The I/O registers control the link, which we don't care about in QEMU */
    memory_region_init_io(&cregs->io, obj, NULL, cregs, ".io",
                          CXL2_COMPONENT_IO_REGION_SIZE);
    memory_region_init_io(&cregs->cache_mem, obj, &cache_mem_ops, cregs,
                          ".cache_mem", CXL2_COMPONENT_CM_REGION_SIZE);

    memory_region_add_subregion(&cregs->component_registers, 0, &cregs->io);
    memory_region_add_subregion(&cregs->component_registers,
                                CXL2_COMPONENT_IO_REGION_SIZE,
                                &cregs->cache_mem);
}

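/* Set reset values and write masks for the RAS capability registers */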
static void ras_init_common(uint32_t *reg_state, uint32_t *write_msk)
{
    /*
     * Error status is RW1C, but since no bits are ever set here, it can
     * be handled as RO.
     */
    stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_STATUS, 0);
    stl_le_p(write_msk + R_CXL_RAS_UNC_ERR_STATUS, 0x1cfff);
    /* Bits 12-13 and 17-31 reserved in CXL 2.0 */
    stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_MASK, 0x1cfff);
    stl_le_p(write_msk + R_CXL_RAS_UNC_ERR_MASK, 0x1cfff);
    stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_SEVERITY, 0x1cfff);
    stl_le_p(write_msk + R_CXL_RAS_UNC_ERR_SEVERITY, 0x1cfff);
    stl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS, 0);
    stl_le_p(write_msk + R_CXL_RAS_COR_ERR_STATUS, 0x7f);
    stl_le_p(reg_state + R_CXL_RAS_COR_ERR_MASK, 0x7f);
    stl_le_p(write_msk + R_CXL_RAS_COR_ERR_MASK, 0x7f);
    /* CXL switches and devices must set this */
    stl_le_p(reg_state + R_CXL_RAS_ERR_CAP_CTRL, 0x200);
}

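/*
 * Advertise a single HDM decoder and set up write masks for its base, size,
 * control and target list registers. The target list low register is less
 * writable for device types than for the other component types.
 */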
static void hdm_init_common(uint32_t *reg_state, uint32_t *write_msk,
                            enum reg_type type)
{
    int decoder_count = 1;
    int i;

    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, DECODER_COUNT,
                     cxl_decoder_count_enc(decoder_count));
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, TARGET_COUNT, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, INTERLEAVE_256B, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, INTERLEAVE_4K, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, POISON_ON_ERR_CAP, 0);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_GLOBAL_CONTROL,
                     HDM_DECODER_ENABLE, 0);
    write_msk[R_CXL_HDM_DECODER_GLOBAL_CONTROL] = 0x3;
    for (i = 0; i < decoder_count; i++) {
        write_msk[R_CXL_HDM_DECODER0_BASE_LO + i * 0x20] = 0xf0000000;
        write_msk[R_CXL_HDM_DECODER0_BASE_HI + i * 0x20] = 0xffffffff;
        write_msk[R_CXL_HDM_DECODER0_SIZE_LO + i * 0x20] = 0xf0000000;
        write_msk[R_CXL_HDM_DECODER0_SIZE_HI + i * 0x20] = 0xffffffff;
        write_msk[R_CXL_HDM_DECODER0_CTRL + i * 0x20] = 0x13ff;
        if (type == CXL2_DEVICE ||
            type == CXL2_TYPE3_DEVICE ||
            type == CXL2_LOGICAL_DEVICE) {
            write_msk[R_CXL_HDM_DECODER0_TARGET_LIST_LO + i * 0x20] = 0xf0000000;
        } else {
            write_msk[R_CXL_HDM_DECODER0_TARGET_LIST_LO + i * 0x20] = 0xffffffff;
        }
        write_msk[R_CXL_HDM_DECODER0_TARGET_LIST_HI + i * 0x20] = 0xffffffff;
    }
}

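/*
 * Populate the CXL capability header and the per-capability headers of a
 * component's cache/mem register block. Which capabilities are present is
 * derived from the component type via the caps count chosen below.
 */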
void cxl_component_register_init_common(uint32_t *reg_state, uint32_t *write_msk,
                                        enum reg_type type)
{
    int caps = 0;

    /*
     * In CXL 2.0 the capabilities required for each CXL component are such that,
     * with the ordering chosen here, a single number can be used to define
     * which capabilities should be provided.
     */
    switch (type) {
    case CXL2_DOWNSTREAM_PORT:
    case CXL2_DEVICE:
        /* RAS, Link */
        caps = 2;
        break;
    case CXL2_UPSTREAM_PORT:
    case CXL2_TYPE3_DEVICE:
    case CXL2_LOGICAL_DEVICE:
        /* + HDM */
        caps = 3;
        break;
    case CXL2_ROOT_PORT:
        /* + Extended Security, + Snoop */
        caps = 5;
        break;
    default:
        abort();
    }

    memset(reg_state, 0, CXL2_COMPONENT_CM_REGION_SIZE);

    /* CXL Capability Header Register */
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, ID, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, VERSION, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, CACHE_MEM_VERSION, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, ARRAY_SIZE, caps);

#define init_cap_reg(reg, id, version)                                        \
    QEMU_BUILD_BUG_ON(CXL_##reg##_REGISTERS_OFFSET == 0);                     \
    do {                                                                      \
        int which = R_CXL_##reg##_CAPABILITY_HEADER;                          \
        reg_state[which] = FIELD_DP32(reg_state[which],                       \
                                      CXL_##reg##_CAPABILITY_HEADER, ID, id); \
        reg_state[which] =                                                    \
            FIELD_DP32(reg_state[which], CXL_##reg##_CAPABILITY_HEADER,       \
                       VERSION, version);                                     \
        reg_state[which] =                                                    \
            FIELD_DP32(reg_state[which], CXL_##reg##_CAPABILITY_HEADER, PTR,  \
                       CXL_##reg##_REGISTERS_OFFSET);                         \
    } while (0)

    init_cap_reg(RAS, 2, 2);
    ras_init_common(reg_state, write_msk);

    init_cap_reg(LINK, 4, 2);

    if (caps < 3) {
        return;
    }

    init_cap_reg(HDM, 5, 1);
    hdm_init_common(reg_state, write_msk, type);

    if (caps < 5) {
        return;
    }

    init_cap_reg(EXTSEC, 6, 1);
    init_cap_reg(SNOOP, 8, 1);

#undef init_cap_reg
}

/*
 * Helper to create a DVSEC header for a CXL entity. The caller is responsible
 * for tracking the valid offset.
 *
 * This function will build the DVSEC header on behalf of the caller and then
 * copy in the remaining data for the vendor specific bits.
 * It will also set up appropriate write masks.
 */
void cxl_component_create_dvsec(CXLComponentState *cxl,
                                enum reg_type cxl_dev_type, uint16_t length,
                                uint16_t type, uint8_t rev, uint8_t *body)
{
    PCIDevice *pdev = cxl->pdev;
    uint16_t offset = cxl->dvsec_offset;
    uint8_t *wmask = pdev->wmask;

    assert(offset >= PCI_CFG_SPACE_SIZE &&
           ((offset + length) < PCI_CFG_SPACE_EXP_SIZE));
    assert((length & 0xf000) == 0);
    assert((rev & ~0xf) == 0);

    /* Create the DVSEC in the MCFG space */
    pcie_add_capability(pdev, PCI_EXT_CAP_ID_DVSEC, 1, offset, length);
    pci_set_long(pdev->config + offset + PCIE_DVSEC_HEADER1_OFFSET,
                 (length << 20) | (rev << 16) | CXL_VENDOR_ID);
    pci_set_word(pdev->config + offset + PCIE_DVSEC_ID_OFFSET, type);
    memcpy(pdev->config + offset + sizeof(DVSECHeader),
           body + sizeof(DVSECHeader),
           length - sizeof(DVSECHeader));

    /* Configure write masks */
    switch (type) {
    case PCIE_CXL_DEVICE_DVSEC:
        /* Control is RW Lock - so needs explicit blocking when lock is set */
        wmask[offset + offsetof(CXLDVSECDevice, ctrl)] = 0xFD;
        wmask[offset + offsetof(CXLDVSECDevice, ctrl) + 1] = 0x4F;
        /* Status is RW1CS */
        wmask[offset + offsetof(CXLDVSECDevice, ctrl2)] = 0x0F;
        /* Lock is RW Once */
        wmask[offset + offsetof(CXLDVSECDevice, lock)] = 0x01;
        /* range1/2_base_high/low is RW Lock */
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi) + 3] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_lo) + 3] = 0xF0;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi) + 3] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_lo) + 3] = 0xF0;
        break;
    case NON_CXL_FUNCTION_MAP_DVSEC:
        break; /* Not yet implemented */
    case EXTENSIONS_PORT_DVSEC:
        wmask[offset + offsetof(CXLDVSECPortExtensions, control)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortExtensions, control) + 1] = 0x40;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_bus_base)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_bus_limit)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_base)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_base) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_limit)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_limit) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high) + 3] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high) + 3] = 0xFF;
        break;
    case GPF_PORT_DVSEC:
        wmask[offset + offsetof(CXLDVSECPortGPF, phase1_ctrl)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortGPF, phase1_ctrl) + 1] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortGPF, phase2_ctrl)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortGPF, phase2_ctrl) + 1] = 0x0F;
        break;
    case GPF_DEVICE_DVSEC:
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_duration)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_duration) + 1] = 0x0F;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power) + 3] = 0xFF;
        break;
    case PCIE_FLEXBUS_PORT_DVSEC:
        switch (cxl_dev_type) {
        case CXL2_ROOT_PORT:
            /* No MLD */
            wmask[offset + offsetof(CXLDVSECPortFlexBus, ctrl)] = 0xbd;
            break;
        case CXL2_DOWNSTREAM_PORT:
            wmask[offset + offsetof(CXLDVSECPortFlexBus, ctrl)] = 0xfd;
            break;
        default: /* Registers are RO for other component types */
            break;
        }
        /* There are rw1cs bits in the status register but they are never set currently */
        break;
    }

    /* Update state for future DVSEC additions */
    range_init_nofail(&cxl->dvsecs[type], cxl->dvsec_offset, length);
    cxl->dvsec_offset += length;
}

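/* Encode a number of interleave ways into its CXL register field encoding */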
uint8_t cxl_interleave_ways_enc(int iw, Error **errp)
{
    switch (iw) {
    case 1: return 0x0;
    case 2: return 0x1;
    case 4: return 0x2;
    case 8: return 0x3;
    case 16: return 0x4;
    case 3: return 0x8;
    case 6: return 0x9;
    case 12: return 0xa;
    default:
        error_setg(errp, "Interleave ways: %d not supported", iw);
        return 0;
    }
}

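/* Encode an interleave granularity in bytes into its CXL register field encoding */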
uint8_t cxl_interleave_granularity_enc(uint64_t gran, Error **errp)
{
    switch (gran) {
    case 256: return 0;
    case 512: return 1;
    case 1024: return 2;
    case 2048: return 3;
    case 4096: return 4;
    case 8192: return 5;
    case 16384: return 6;
    default:
        error_setg(errp, "Interleave granularity: %" PRIu64 " invalid", gran);
        return 0;
    }
}