/*
 * CXL Utility library for devices
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "hw/cxl/cxl.h"

/*
 * Device registers have no restrictions per the spec, and so fall back to the
 * default memory mapped register rules in CXL r3.1 Section 8.2:
 *   Software shall use CXL.io Memory Read and Write to access memory mapped
 *   registers defined in this section. Unless otherwise specified, software
 *   shall restrict the access width based on the following:
 *   • A 32 bit register shall be accessed as a 1 Byte, 2 Bytes or 4 Bytes
 *     quantity.
 *   • A 64 bit register shall be accessed as a 1 Byte, 2 Bytes, 4 Bytes or 8
 *     Bytes quantity.
 *   • The address shall be a multiple of the access width, e.g. when
 *     accessing a register as a 4 Byte quantity, the address shall be a
 *     multiple of 4.
 *   • The accesses shall map to contiguous bytes. If these rules are not
 *     followed, the behavior is undefined.
 */

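/*
 * The capability array registers are only defined as 32 or 64 bit
 * quantities, and caps_ops.impl below restricts accesses to 4 or 8 bytes,
 * so only those two widths can reach this handler.
 */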
static uint64_t caps_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate = opaque;

    switch (size) {
    case 4:
        return cxl_dstate->caps_reg_state32[offset / size];
    case 8:
        return cxl_dstate->caps_reg_state64[offset / size];
    default:
        g_assert_not_reached();
    }
}

static uint64_t dev_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate = opaque;

    switch (size) {
    case 1:
        return cxl_dstate->dev_reg_state[offset];
    case 2:
        return cxl_dstate->dev_reg_state16[offset / size];
    case 4:
        return cxl_dstate->dev_reg_state32[offset / size];
    case 8:
        return cxl_dstate->dev_reg_state64[offset / size];
    default:
        g_assert_not_reached();
    }
}

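/*
 * The mailbox may belong to either a Type 3 device or a switch mailbox CCI;
 * resolve the owning device's register state with a QOM dynamic cast.
 */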
static uint64_t mailbox_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate;
    CXLCCI *cci = opaque;

    if (object_dynamic_cast(OBJECT(cci->intf), TYPE_CXL_TYPE3)) {
        cxl_dstate = &CXL_TYPE3(cci->intf)->cxl_dstate;
    } else if (object_dynamic_cast(OBJECT(cci->intf),
                                   TYPE_CXL_SWITCH_MAILBOX_CCI)) {
        cxl_dstate = &CXL_SWITCH_MAILBOX_CCI(cci->intf)->cxl_dstate;
    } else {
        return 0;
    }

    switch (size) {
    case 1:
        return cxl_dstate->mbox_reg_state[offset];
    case 2:
        return cxl_dstate->mbox_reg_state16[offset / size];
    case 4:
        return cxl_dstate->mbox_reg_state32[offset / size];
    case 8:
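        /*
         * The background command status register is synthesized from the
         * CCI's background operation state on every read.
         */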
        if (offset == A_CXL_DEV_BG_CMD_STS) {
            uint64_t bg_status_reg;
            bg_status_reg = FIELD_DP64(0, CXL_DEV_BG_CMD_STS, OP,
                                       cci->bg.opcode);
            bg_status_reg = FIELD_DP64(bg_status_reg, CXL_DEV_BG_CMD_STS,
                                       PERCENTAGE_COMP, cci->bg.complete_pct);
            bg_status_reg = FIELD_DP64(bg_status_reg, CXL_DEV_BG_CMD_STS,
                                       RET_CODE, cci->bg.ret_code);
            /* endian? */
            cxl_dstate->mbox_reg_state64[offset / size] = bg_status_reg;
        }
        if (offset == A_CXL_DEV_MAILBOX_STS) {
            uint64_t status_reg = cxl_dstate->mbox_reg_state64[offset / size];
            if (cci->bg.complete_pct) {
                status_reg = FIELD_DP64(status_reg, CXL_DEV_MAILBOX_STS, BG_OP,
                                        0);
                cxl_dstate->mbox_reg_state64[offset / size] = status_reg;
            }
        }
        return cxl_dstate->mbox_reg_state64[offset / size];
    default:
        g_assert_not_reached();
    }
}

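/*
 * Only the recognised 32-bit mailbox registers are stored; anything else is
 * logged as unimplemented and the write is ignored.
 */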
static void mailbox_mem_writel(uint32_t *reg_state, hwaddr offset,
                               uint64_t value)
{
    switch (offset) {
    case A_CXL_DEV_MAILBOX_CTRL:
        /* fallthrough */
    case A_CXL_DEV_MAILBOX_CAP:
        /* RO register */
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 32-bit access to 0x%" PRIx64 " (WI)\n",
                      __func__, offset);
        return;
    }

    reg_state[offset / sizeof(*reg_state)] = value;
}

static void mailbox_mem_writeq(uint64_t *reg_state, hwaddr offset,
                               uint64_t value)
{
    switch (offset) {
    case A_CXL_DEV_MAILBOX_CMD:
        break;
    case A_CXL_DEV_BG_CMD_STS:
        break;
    case A_CXL_DEV_MAILBOX_STS:
        /* Read only register, will get updated by the state machine */
        return;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%" PRIx64 " (WI)\n",
                      __func__, offset);
        return;
    }

    reg_state[offset / sizeof(*reg_state)] = value;
}

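/*
 * Payload area writes go straight to the register backing store; other
 * writes are dispatched by width, and a write that sets the doorbell kicks
 * off synchronous command processing below.
 */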
static void mailbox_reg_write(void *opaque, hwaddr offset, uint64_t value,
                              unsigned size)
{
    CXLDeviceState *cxl_dstate;
    CXLCCI *cci = opaque;

    if (object_dynamic_cast(OBJECT(cci->intf), TYPE_CXL_TYPE3)) {
        cxl_dstate = &CXL_TYPE3(cci->intf)->cxl_dstate;
    } else if (object_dynamic_cast(OBJECT(cci->intf),
                                   TYPE_CXL_SWITCH_MAILBOX_CCI)) {
        cxl_dstate = &CXL_SWITCH_MAILBOX_CCI(cci->intf)->cxl_dstate;
    } else {
        return;
    }

    if (offset >= A_CXL_DEV_CMD_PAYLOAD) {
        memcpy(cxl_dstate->mbox_reg_state + offset, &value, size);
        return;
    }

    switch (size) {
    case 4:
        mailbox_mem_writel(cxl_dstate->mbox_reg_state32, offset, value);
        break;
    case 8:
        mailbox_mem_writeq(cxl_dstate->mbox_reg_state64, offset, value);
        break;
    default:
        g_assert_not_reached();
    }

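    /*
     * A set doorbell means the host has submitted a command: decode it,
     * process it synchronously, then clear the doorbell to signal
     * completion.
     */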
    if (ARRAY_FIELD_EX32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CTRL,
                         DOORBELL)) {
        uint64_t command_reg =
            cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_CMD];
        uint8_t cmd_set = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD,
                                     COMMAND_SET);
        uint8_t cmd = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD, COMMAND);
        size_t len_in = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD, LENGTH);
        uint8_t *pl = cxl_dstate->mbox_reg_state + A_CXL_DEV_CMD_PAYLOAD;
        /*
         * Copy taken to avoid need for individual command handlers to care
         * about aliasing.
         */
        g_autofree uint8_t *pl_in_copy = NULL;
        size_t len_out = 0;
        uint64_t status_reg;
        bool bg_started = false;
        int rc;

        pl_in_copy = g_memdup2(pl, len_in);
        if (len_in == 0 || pl_in_copy) {
            /* Avoid stale data - including from earlier cmds */
            memset(pl, 0, CXL_MAILBOX_MAX_PAYLOAD_SIZE);
            rc = cxl_process_cci_message(cci, cmd_set, cmd, len_in, pl_in_copy,
                                         &len_out, pl, &bg_started);
        } else {
            rc = CXL_MBOX_INTERNAL_ERROR;
        }

        /* Set bg and the return code */
        status_reg = FIELD_DP64(0, CXL_DEV_MAILBOX_STS, BG_OP,
                                bg_started ? 1 : 0);
        status_reg = FIELD_DP64(status_reg, CXL_DEV_MAILBOX_STS, ERRNO, rc);
        /* Set the return length */
        command_reg = FIELD_DP64(0, CXL_DEV_MAILBOX_CMD, COMMAND_SET, cmd_set);
        command_reg = FIELD_DP64(command_reg, CXL_DEV_MAILBOX_CMD,
                                 COMMAND, cmd);
        command_reg = FIELD_DP64(command_reg, CXL_DEV_MAILBOX_CMD,
                                 LENGTH, len_out);

        cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_CMD] = command_reg;
        cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_STS] = status_reg;
        /* Tell the host we're done */
        ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CTRL,
                         DOORBELL, 0);
    }
}

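/*
 * The memory device status block contains a single read-only 64-bit
 * register, so the offset is ignored and mdev_ops.impl below pins accesses
 * to 8 bytes.
 */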
static uint64_t mdev_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    CXLDeviceState *cxl_dstate = opaque;

    return cxl_dstate->memdev_status;
}

static void ro_reg_write(void *opaque, hwaddr offset, uint64_t value,
                         unsigned size)
{
    /* Many register sets are read only */
}

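/*
 * For each region below, .valid describes the accesses the host is allowed
 * to issue, while .impl describes what the read/write callbacks implement;
 * QEMU's memory core splits or combines host accesses to match .impl.
 */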
static const MemoryRegionOps mdev_ops = {
    .read = mdev_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static const MemoryRegionOps mailbox_ops = {
    .read = mailbox_reg_read,
    .write = mailbox_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static const MemoryRegionOps dev_ops = {
    .read = dev_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

static const MemoryRegionOps caps_ops = {
    .read = caps_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

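/*
 * The device register block is a container region with the capability
 * array, device status, mailbox and memory device registers mapped at
 * their spec-defined offsets.
 */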
void cxl_device_register_block_init(Object *obj, CXLDeviceState *cxl_dstate,
                                    CXLCCI *cci)
{
    /* This will be a BAR, so needs to be rounded up to pow2 for PCI spec */
    memory_region_init(&cxl_dstate->device_registers, obj, "device-registers",
                       pow2ceil(CXL_MMIO_SIZE));

    memory_region_init_io(&cxl_dstate->caps, obj, &caps_ops, cxl_dstate,
                          "cap-array", CXL_CAPS_SIZE);
    memory_region_init_io(&cxl_dstate->device, obj, &dev_ops, cxl_dstate,
                          "device-status", CXL_DEVICE_STATUS_REGISTERS_LENGTH);
    memory_region_init_io(&cxl_dstate->mailbox, obj, &mailbox_ops, cci,
                          "mailbox", CXL_MAILBOX_REGISTERS_LENGTH);
    memory_region_init_io(&cxl_dstate->memory_device, obj, &mdev_ops,
                          cxl_dstate, "memory device caps",
                          CXL_MEMORY_DEVICE_REGISTERS_LENGTH);

    memory_region_add_subregion(&cxl_dstate->device_registers, 0,
                                &cxl_dstate->caps);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_DEVICE_STATUS_REGISTERS_OFFSET,
                                &cxl_dstate->device);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_MAILBOX_REGISTERS_OFFSET,
                                &cxl_dstate->mailbox);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_MEMORY_DEVICE_REGISTERS_OFFSET,
                                &cxl_dstate->memory_device);
}

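/*
 * Event status is tracked both in a cached copy and in its mirror in the
 * device status registers, keeping reads through dev_reg_read coherent.
 */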
void cxl_event_set_status(CXLDeviceState *cxl_dstate, CXLEventLogType log_type,
                          bool available)
{
    if (available) {
        cxl_dstate->event_status |= (1 << log_type);
    } else {
        cxl_dstate->event_status &= ~(1 << log_type);
    }

    ARRAY_FIELD_DP64(cxl_dstate->dev_reg_state64, CXL_DEV_EVENT_STATUS,
                     EVENT_STATUS, cxl_dstate->event_status);
}

static void device_reg_init_common(CXLDeviceState *cxl_dstate)
{
    CXLEventLogType log;

    for (log = 0; log < CXL_EVENT_TYPE_MAX; log++) {
        cxl_event_set_status(cxl_dstate, log, false);
    }
}

static void mailbox_reg_init_common(CXLDeviceState *cxl_dstate, int msi_n)
{
    /* 2048 byte payload size, advertised in the register as log2(size) */
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     PAYLOAD_SIZE, CXL_MAILBOX_PAYLOAD_SHIFT);
    cxl_dstate->payload_size = CXL_MAILBOX_MAX_PAYLOAD_SIZE;
    /* irq support */
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     BG_INT_CAP, 1);
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     MSI_N, msi_n);
    cxl_dstate->mbox_msi_n = msi_n;
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     MBOX_READY_TIME, 0); /* Not reported */
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     TYPE, 0); /* Inferred from class code */
}

static void memdev_reg_init_common(CXLDeviceState *cxl_dstate)
{
    uint64_t memdev_status_reg;

    memdev_status_reg = FIELD_DP64(0, CXL_MEM_DEV_STS, MEDIA_STATUS, 1);
    memdev_status_reg = FIELD_DP64(memdev_status_reg, CXL_MEM_DEV_STS,
                                   MBOX_READY, 1);
    cxl_dstate->memdev_status = memdev_status_reg;
}

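/*
 * A Type 3 device advertises three capabilities - Device Status, Mailbox and
 * Memory Device - so cap_count must match the cxl_device_cap_init() calls
 * below.
 */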
void cxl_device_register_init_t3(CXLType3Dev *ct3d, int msi_n)
{
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    uint64_t *cap_h = cxl_dstate->caps_reg_state64;
    const int cap_count = 3;

    /* CXL Device Capabilities Array Register */
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_ID, 0);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_VERSION, 1);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_COUNT, cap_count);

    cxl_device_cap_init(cxl_dstate, DEVICE_STATUS, 1,
                        CXL_DEVICE_STATUS_VERSION);
    device_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MAILBOX, 2, CXL_DEV_MAILBOX_VERSION);
    mailbox_reg_init_common(cxl_dstate, msi_n);

    cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000,
                        CXL_MEM_DEV_STATUS_VERSION);
    memdev_reg_init_common(cxl_dstate);

    cxl_initialize_mailbox_t3(&ct3d->cci, DEVICE(ct3d),
                              CXL_MAILBOX_MAX_PAYLOAD_SIZE);
}

void cxl_device_register_init_swcci(CSWMBCCIDev *sw, int msi_n)
{
    CXLDeviceState *cxl_dstate = &sw->cxl_dstate;
    uint64_t *cap_h = cxl_dstate->caps_reg_state64;
    const int cap_count = 3;

    /* CXL Device Capabilities Array Register */
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_ID, 0);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_VERSION, 1);
    ARRAY_FIELD_DP64(cap_h, CXL_DEV_CAP_ARRAY, CAP_COUNT, cap_count);

    cxl_device_cap_init(cxl_dstate, DEVICE_STATUS, 1, 2);
    device_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MAILBOX, 2, 1);
    mailbox_reg_init_common(cxl_dstate, msi_n);

    cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000, 1);
    memdev_reg_init_common(cxl_dstate);
}

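/*
 * Until the host sets a timestamp, reads return 0; afterwards the result is
 * the host-set value plus the virtual-clock time elapsed since it was set.
 */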
uint64_t cxl_device_get_timestamp(CXLDeviceState *cxl_dstate)
{
    uint64_t time, delta;
    uint64_t final_time = 0;

    if (cxl_dstate->timestamp.set) {
        /* Find the delta from the last time the host set the time. */
        time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        delta = time - cxl_dstate->timestamp.last_set;
        final_time = cxl_dstate->timestamp.host_set + delta;
    }

    return final_time;
}