/*
 * CXL Utility library for components
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "hw/pci/pci.h"
#include "hw/cxl/cxl.h"

/* CXL r3.1 Section 8.2.4.20.1 CXL HDM Decoder Capability Register */
int cxl_decoder_count_enc(int count)
{
    switch (count) {
    case 1: return 0x0;
    case 2: return 0x1;
    case 4: return 0x2;
    case 6: return 0x3;
    case 8: return 0x4;
    case 10: return 0x5;
    /* Switches and Host Bridges may have more than 10 decoders */
    case 12: return 0x6;
    case 14: return 0x7;
    case 16: return 0x8;
    case 20: return 0x9;
    case 24: return 0xa;
    case 28: return 0xb;
    case 32: return 0xc;
    }
    return 0;
}

int cxl_decoder_count_dec(int enc_cnt)
{
    switch (enc_cnt) {
    case 0x0: return 1;
    case 0x1: return 2;
    case 0x2: return 4;
    case 0x3: return 6;
    case 0x4: return 8;
    case 0x5: return 10;
    /* Switches and Host Bridges may have more than 10 decoders */
    case 0x6: return 12;
    case 0x7: return 14;
    case 0x8: return 16;
    case 0x9: return 20;
    case 0xa: return 24;
    case 0xb: return 28;
    case 0xc: return 32;
    }
    return 0;
}
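
/*
 * The two helpers above are inverses over the encodable values, e.g.
 * cxl_decoder_count_dec(cxl_decoder_count_enc(10)) == 10. Counts outside
 * the tables fall through to the 0 return, so callers are expected to pass
 * one of the listed values.
 */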

hwaddr cxl_decode_ig(int ig)
{
    return 1ULL << (ig + 8);
}
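
/*
 * The encoded interleave granularity ig corresponds to 2^(ig + 8) bytes:
 * cxl_decode_ig(0) == 256 and cxl_decode_ig(6) == 16384. This is the
 * inverse of cxl_interleave_granularity_enc() at the bottom of this file.
 */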

static uint64_t cxl_cache_mem_read_reg(void *opaque, hwaddr offset,
                                       unsigned size)
{
    CXLComponentState *cxl_cstate = opaque;
    ComponentRegisters *cregs = &cxl_cstate->crb;

    switch (size) {
    case 4:
        if (cregs->special_ops && cregs->special_ops->read) {
            return cregs->special_ops->read(cxl_cstate, offset, 4);
        } else {
            QEMU_BUILD_BUG_ON(sizeof(*cregs->cache_mem_registers) != 4);
            return cregs->cache_mem_registers[offset / 4];
        }
    case 8:
        qemu_log_mask(LOG_UNIMP,
                      "CXL 8 byte cache mem registers not implemented\n");
        return 0;
    default:
        /*
         * In line with specification limitations on access sizes, this
         * routine is not called with other sizes.
         */
        g_assert_not_reached();
    }
}
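
/*
 * Minimal HDM decoder commit emulation: a write that sets COMMIT in a
 * decoder control register is immediately reflected as COMMITTED (with ERR
 * cleared), and clearing COMMIT clears COMMITTED. No real decode
 * programming is modelled beyond storing the value.
 */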
static void dumb_hdm_handler(CXLComponentState *cxl_cstate, hwaddr offset,
                             uint32_t value)
{
    ComponentRegisters *cregs = &cxl_cstate->crb;
    uint32_t *cache_mem = cregs->cache_mem_registers;
    bool should_commit = false;
    bool should_uncommit = false;

    switch (offset) {
    case A_CXL_HDM_DECODER0_CTRL:
    case A_CXL_HDM_DECODER1_CTRL:
    case A_CXL_HDM_DECODER2_CTRL:
    case A_CXL_HDM_DECODER3_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        should_uncommit = !should_commit;
        break;
    default:
        break;
    }

    if (should_commit) {
        value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, ERR, 0);
        value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, COMMITTED, 1);
    } else if (should_uncommit) {
        value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, ERR, 0);
        value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, COMMITTED, 0);
    }
    stl_le_p((uint8_t *)cache_mem + offset, value);
}

static void cxl_cache_mem_write_reg(void *opaque, hwaddr offset, uint64_t value,
                                    unsigned size)
{
    CXLComponentState *cxl_cstate = opaque;
    ComponentRegisters *cregs = &cxl_cstate->crb;
    uint32_t mask;

    switch (size) {
    case 4: {
        QEMU_BUILD_BUG_ON(sizeof(*cregs->cache_mem_regs_write_mask) != 4);
        QEMU_BUILD_BUG_ON(sizeof(*cregs->cache_mem_registers) != 4);
        mask = cregs->cache_mem_regs_write_mask[offset / 4];
        value &= mask;
        /* RO bits should remain constant. Done by reading existing value */
        value |= ~mask & cregs->cache_mem_registers[offset / 4];
        if (cregs->special_ops && cregs->special_ops->write) {
            cregs->special_ops->write(cxl_cstate, offset, value, size);
            return;
        }

        if (offset >= A_CXL_HDM_DECODER_CAPABILITY &&
            offset <= A_CXL_HDM_DECODER3_TARGET_LIST_HI) {
            dumb_hdm_handler(cxl_cstate, offset, value);
        } else {
            cregs->cache_mem_registers[offset / 4] = value;
        }
        return;
    }
    case 8:
        qemu_log_mask(LOG_UNIMP,
                      "CXL 8 byte cache mem registers not implemented\n");
        return;
    default:
        /*
         * In line with specification limitations on access sizes, this
         * routine is not called with other sizes.
         */
        g_assert_not_reached();
    }
}

/*
 * CXL r3.1 Section 8.2.3: Component Register Layout and Definition
 *   The access restrictions specified in Section 8.2.2 also apply to CXL 2.0
 *   Component Registers.
 *
 * CXL r3.1 Section 8.2.2: Accessing Component Registers
 *   • A 32 bit register shall be accessed as a 4 byte quantity. Partial
 *     reads are not permitted.
 *   • A 64 bit register shall be accessed as an 8 byte quantity. Partial
 *     reads are not permitted.
 *
 * As the spec stands today, only 4 byte registers exist.
 */
static const MemoryRegionOps cache_mem_ops = {
    .read = cxl_cache_mem_read_reg,
    .write = cxl_cache_mem_write_reg,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

void cxl_component_register_block_init(Object *obj,
                                       CXLComponentState *cxl_cstate,
                                       const char *type)
{
    ComponentRegisters *cregs = &cxl_cstate->crb;

    memory_region_init(&cregs->component_registers, obj, type,
                       CXL2_COMPONENT_BLOCK_SIZE);

    /* The io registers control the link, which QEMU doesn't model */
    memory_region_init_io(&cregs->io, obj, NULL, NULL, ".io",
                          CXL2_COMPONENT_IO_REGION_SIZE);
    memory_region_init_io(&cregs->cache_mem, obj, &cache_mem_ops, cxl_cstate,
                          ".cache_mem", CXL2_COMPONENT_CM_REGION_SIZE);

    memory_region_add_subregion(&cregs->component_registers, 0, &cregs->io);
    memory_region_add_subregion(&cregs->component_registers,
                                CXL2_COMPONENT_IO_REGION_SIZE,
                                &cregs->cache_mem);
}
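
/*
 * Typical usage (a sketch; the actual call sites live in device models such
 * as hw/mem/cxl_type3.c, not in this file): a CXL PCIe device initializes
 * the block and then exposes it through a 64-bit memory BAR, e.g.
 *
 *     cxl_component_register_block_init(OBJECT(pci_dev), cxl_cstate,
 *                                       TYPE_CXL_TYPE3);
 *     pci_register_bar(pci_dev, CXL_COMPONENT_REG_BAR_IDX,
 *                      PCI_BASE_ADDRESS_SPACE_MEMORY |
 *                      PCI_BASE_ADDRESS_MEM_TYPE_64,
 *                      &cxl_cstate->crb.component_registers);
 */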

static void ras_init_common(uint32_t *reg_state, uint32_t *write_msk)
{
    /*
     * Error status is RW1C, but since no bits are ever set, it can be
     * handled as RO.
     */
    stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_STATUS, 0);
    stl_le_p(write_msk + R_CXL_RAS_UNC_ERR_STATUS, 0x1cfff);
    /* Bits 12-13 and 17-31 reserved in CXL 2.0 */
    stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_MASK, 0x1cfff);
    stl_le_p(write_msk + R_CXL_RAS_UNC_ERR_MASK, 0x1cfff);
    stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_SEVERITY, 0x1cfff);
    stl_le_p(write_msk + R_CXL_RAS_UNC_ERR_SEVERITY, 0x1cfff);
    stl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS, 0);
    stl_le_p(write_msk + R_CXL_RAS_COR_ERR_STATUS, 0x7f);
    stl_le_p(reg_state + R_CXL_RAS_COR_ERR_MASK, 0x7f);
    stl_le_p(write_msk + R_CXL_RAS_COR_ERR_MASK, 0x7f);
    /* CXL switches and devices must set this */
    stl_le_p(reg_state + R_CXL_RAS_ERR_CAP_CTRL, 0x200);
}
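
/*
 * An aside on the masks: 0x1cfff sets bits 0-11 and 14-16, i.e. everything
 * except the bits the comment above calls out as reserved (12-13 and
 * 17-31); 0x7f likewise covers the correctable error bits 0-6.
 */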

static void hdm_init_common(uint32_t *reg_state, uint32_t *write_msk,
                            enum reg_type type)
{
    int decoder_count = CXL_HDM_DECODER_COUNT;
    int hdm_inc = R_CXL_HDM_DECODER1_BASE_LO - R_CXL_HDM_DECODER0_BASE_LO;
    int i;

    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, DECODER_COUNT,
                     cxl_decoder_count_enc(decoder_count));
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, TARGET_COUNT, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, INTERLEAVE_256B, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, INTERLEAVE_4K, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY,
                     POISON_ON_ERR_CAP, 0);
    if (type == CXL2_TYPE3_DEVICE) {
        ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, 3_6_12_WAY, 1);
        ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, 16_WAY, 1);
    } else {
        ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, 3_6_12_WAY, 0);
        ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, 16_WAY, 0);
    }
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, UIO, 0);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY,
                     UIO_DECODER_COUNT, 0);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, MEMDATA_NXM_CAP, 0);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY,
                     SUPPORTED_COHERENCY_MODEL, 0); /* Unknown */
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_GLOBAL_CONTROL,
                     HDM_DECODER_ENABLE, 0);
    write_msk[R_CXL_HDM_DECODER_GLOBAL_CONTROL] = 0x3;
    for (i = 0; i < decoder_count; i++) {
        write_msk[R_CXL_HDM_DECODER0_BASE_LO + i * hdm_inc] = 0xf0000000;
        write_msk[R_CXL_HDM_DECODER0_BASE_HI + i * hdm_inc] = 0xffffffff;
        write_msk[R_CXL_HDM_DECODER0_SIZE_LO + i * hdm_inc] = 0xf0000000;
        write_msk[R_CXL_HDM_DECODER0_SIZE_HI + i * hdm_inc] = 0xffffffff;
        write_msk[R_CXL_HDM_DECODER0_CTRL + i * hdm_inc] = 0x13ff;
        if (type == CXL2_DEVICE ||
            type == CXL2_TYPE3_DEVICE ||
            type == CXL2_LOGICAL_DEVICE) {
            write_msk[R_CXL_HDM_DECODER0_TARGET_LIST_LO + i * hdm_inc] =
                0xf0000000;
        } else {
            write_msk[R_CXL_HDM_DECODER0_TARGET_LIST_LO + i * hdm_inc] =
                0xffffffff;
        }
        write_msk[R_CXL_HDM_DECODER0_TARGET_LIST_HI + i * hdm_inc] = 0xffffffff;
    }
}
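
/*
 * An explanatory aside on the loop above: the 0xf0000000 masks leave only
 * bits 31:28 of the low base/size registers writable, matching the 256 MiB
 * alignment the CXL spec requires for HDM decoder base and size. hdm_inc is
 * the register stride between decoder 0 and decoder 1, so the same layout
 * repeats for each decoder.
 */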

void cxl_component_register_init_common(uint32_t *reg_state,
                                        uint32_t *write_msk,
                                        enum reg_type type)
{
    int caps = 0;

    /*
     * In CXL 2.0 the capabilities required for each CXL component are such
     * that, with the ordering chosen here, a single number can be used to
     * define which capabilities should be provided.
     */
    switch (type) {
    case CXL2_DOWNSTREAM_PORT:
    case CXL2_DEVICE:
        /* RAS, Link */
        caps = 2;
        break;
    case CXL2_UPSTREAM_PORT:
    case CXL2_TYPE3_DEVICE:
    case CXL2_LOGICAL_DEVICE:
        /* + HDM */
        caps = 3;
        break;
    case CXL2_ROOT_PORT:
    case CXL2_RC:
        /* + Extended Security, + Snoop */
        caps = 5;
        break;
    default:
        abort();
    }

    memset(reg_state, 0, CXL2_COMPONENT_CM_REGION_SIZE);

    /* CXL Capability Header Register */
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, ID, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, VERSION,
                     CXL_CAPABILITY_VERSION);
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, CACHE_MEM_VERSION, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, ARRAY_SIZE, caps);

#define init_cap_reg(reg, id, version)                                        \
    do {                                                                      \
        int which = R_CXL_##reg##_CAPABILITY_HEADER;                          \
        reg_state[which] = FIELD_DP32(reg_state[which],                       \
                                      CXL_##reg##_CAPABILITY_HEADER, ID, id); \
        reg_state[which] =                                                    \
            FIELD_DP32(reg_state[which], CXL_##reg##_CAPABILITY_HEADER,       \
                       VERSION, version);                                     \
        reg_state[which] =                                                    \
            FIELD_DP32(reg_state[which], CXL_##reg##_CAPABILITY_HEADER, PTR,  \
                       CXL_##reg##_REGISTERS_OFFSET);                         \
    } while (0)

    switch (type) {
    case CXL2_DEVICE:
    case CXL2_TYPE3_DEVICE:
    case CXL2_LOGICAL_DEVICE:
    case CXL2_ROOT_PORT:
    case CXL2_UPSTREAM_PORT:
    case CXL2_DOWNSTREAM_PORT:
        init_cap_reg(RAS, 2, CXL_RAS_CAPABILITY_VERSION);
        ras_init_common(reg_state, write_msk);
        break;
    default:
        break;
    }

    init_cap_reg(LINK, 4, CXL_LINK_CAPABILITY_VERSION);

    if (caps < 3) {
        return;
    }

    if (type != CXL2_ROOT_PORT) {
        init_cap_reg(HDM, 5, CXL_HDM_CAPABILITY_VERSION);
        hdm_init_common(reg_state, write_msk, type);
    }

    if (caps < 5) {
        return;
    }

    init_cap_reg(EXTSEC, 6, CXL_EXTSEC_CAP_VERSION);
    init_cap_reg(SNOOP, 8, CXL_SNOOP_CAP_VERSION);

#undef init_cap_reg
}
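
/*
 * Typical pairing (a sketch; the actual call sites are in the device
 * models): once cxl_component_register_block_init() has set up the memory
 * regions, a device fills in the register state and write masks, e.g.
 *
 *     cxl_component_register_init_common(cregs->cache_mem_registers,
 *                                        cregs->cache_mem_regs_write_mask,
 *                                        CXL2_TYPE3_DEVICE);
 */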

/*
 * Helper to create a DVSEC header for a CXL entity. The caller is responsible
 * for tracking the valid offset.
 *
 * This function will build the DVSEC header on behalf of the caller and then
 * copy in the remaining data for the vendor specific bits.
 * It will also set up appropriate write masks.
 */
void cxl_component_create_dvsec(CXLComponentState *cxl,
                                enum reg_type cxl_dev_type, uint16_t length,
                                uint16_t type, uint8_t rev, uint8_t *body)
{
    PCIDevice *pdev = cxl->pdev;
    uint16_t offset = cxl->dvsec_offset;
    uint8_t *wmask = pdev->wmask;

    assert(offset >= PCI_CFG_SPACE_SIZE &&
           ((offset + length) < PCI_CFG_SPACE_EXP_SIZE));
    assert((length & 0xf000) == 0);
    assert((rev & ~0xf) == 0);

    /* Create the DVSEC in the MCFG space */
    pcie_add_capability(pdev, PCI_EXT_CAP_ID_DVSEC, 1, offset, length);
    pci_set_long(pdev->config + offset + PCIE_DVSEC_HEADER1_OFFSET,
                 (length << 20) | (rev << 16) | CXL_VENDOR_ID);
    pci_set_word(pdev->config + offset + PCIE_DVSEC_ID_OFFSET, type);
    memcpy(pdev->config + offset + sizeof(DVSECHeader),
           body + sizeof(DVSECHeader),
           length - sizeof(DVSECHeader));

    /* Configure write masks */
    switch (type) {
    case PCIE_CXL_DEVICE_DVSEC:
        /* Cntrl RW Lock - so needs explicit blocking when lock is set */
        wmask[offset + offsetof(CXLDVSECDevice, ctrl)] = 0xFD;
        wmask[offset + offsetof(CXLDVSECDevice, ctrl) + 1] = 0x4F;
        /* Status is RW1CS */
        wmask[offset + offsetof(CXLDVSECDevice, ctrl2)] = 0x0F;
        /* Lock is RW Once */
        wmask[offset + offsetof(CXLDVSECDevice, lock)] = 0x01;
        /* range1/2_base_high/low is RW Lock */
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi) + 3] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_lo) + 3] = 0xF0;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi) + 3] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_lo) + 3] = 0xF0;
        break;
    case NON_CXL_FUNCTION_MAP_DVSEC:
        break; /* Not yet implemented */
    case EXTENSIONS_PORT_DVSEC:
        wmask[offset + offsetof(CXLDVSECPortExt, control)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortExt, control) + 1] = 0x40;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_bus_base)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_bus_limit)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_memory_base)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_memory_base) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_memory_limit)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_memory_limit) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_base)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_base) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_limit)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_limit) + 1] =
            0xFF;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_base_high)] =
            0xFF;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_base_high) + 1] =
            0xFF;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_base_high) + 2] =
            0xFF;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_base_high) + 3] =
            0xFF;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_limit_high)] =
            0xFF;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_limit_high) + 1] =
            0xFF;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_limit_high) + 2] =
            0xFF;
        wmask[offset + offsetof(CXLDVSECPortExt, alt_prefetch_limit_high) + 3] =
            0xFF;
        break;
    case GPF_PORT_DVSEC:
        wmask[offset + offsetof(CXLDVSECPortGPF, phase1_ctrl)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortGPF, phase1_ctrl) + 1] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortGPF, phase2_ctrl)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortGPF, phase2_ctrl) + 1] = 0x0F;
        break;
    case GPF_DEVICE_DVSEC:
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_duration)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_duration) + 1] = 0x0F;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power) + 3] = 0xFF;
        break;
    case PCIE_FLEXBUS_PORT_DVSEC:
        switch (cxl_dev_type) {
        case CXL2_ROOT_PORT:
            /* No MLD */
            wmask[offset + offsetof(CXLDVSECPortFlexBus, ctrl)] = 0xbd;
            break;
        case CXL2_DOWNSTREAM_PORT:
            wmask[offset + offsetof(CXLDVSECPortFlexBus, ctrl)] = 0xfd;
            break;
        default: /* Registers are RO for other component types */
            break;
        }
        /* There are rw1cs bits in the status register but never set */
        break;
    }

    /* Update state for future DVSEC additions */
    range_init_nofail(&cxl->dvsecs[type], cxl->dvsec_offset, length);
    cxl->dvsec_offset += length;
}
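
/*
 * Illustrative caller (a sketch; real call sites live in the device models,
 * and the length macro name is assumed to follow the pattern in
 * hw/cxl/cxl_pci.h): a Type 3 device builds the vendor specific body and
 * hands it over, with rev fitting in 4 bits per the assert above, e.g.
 *
 *     CXLDVSECDevice dvsec = { 0 };   (body; header bytes are written here)
 *     cxl_component_create_dvsec(cxl_cstate, CXL2_TYPE3_DEVICE,
 *                                PCIE_CXL_DEVICE_DVSEC_LENGTH,
 *                                PCIE_CXL_DEVICE_DVSEC,
 *                                rev, (uint8_t *)&dvsec);
 */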

/* CXL r3.1 Section 8.2.4.20.7 CXL HDM Decoder n Control Register */
uint8_t cxl_interleave_ways_enc(int iw, Error **errp)
{
    switch (iw) {
    case 1: return 0x0;
    case 2: return 0x1;
    case 4: return 0x2;
    case 8: return 0x3;
    case 16: return 0x4;
    case 3: return 0x8;
    case 6: return 0x9;
    case 12: return 0xa;
    default:
        error_setg(errp, "Interleave ways: %d not supported", iw);
        return 0;
    }
}

int cxl_interleave_ways_dec(uint8_t iw_enc, Error **errp)
{
    switch (iw_enc) {
    case 0x0: return 1;
    case 0x1: return 2;
    case 0x2: return 4;
    case 0x3: return 8;
    case 0x4: return 16;
    case 0x8: return 3;
    case 0x9: return 6;
    case 0xa: return 12;
    default:
        error_setg(errp, "Encoded interleave ways: %d not supported", iw_enc);
        return 0;
    }
}
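
/*
 * An aside, derived from the tables above: power-of-2 interleave ways use
 * encodings 0x0-0x4 while the 3, 6 and 12 way options sit at 0x8-0xa, so
 * the two helpers round-trip only over the listed values; anything else
 * reports an Error and returns 0.
 */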

uint8_t cxl_interleave_granularity_enc(uint64_t gran, Error **errp)
{
    switch (gran) {
    case 256: return 0;
    case 512: return 1;
    case 1024: return 2;
    case 2048: return 3;
    case 4096: return 4;
    case 8192: return 5;
    case 16384: return 6;
    default:
        error_setg(errp, "Interleave granularity: %" PRIu64 " invalid", gran);
        return 0;
    }
}
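
/*
 * An aside: this is the inverse of cxl_decode_ig() above. For each
 * supported granularity g, cxl_decode_ig(cxl_interleave_granularity_enc(g,
 * &err)) == g, since the encoding is simply log2(g) - 8.
 */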