/* cxl-device-utils.c */
/*
 * CXL Utility library for devices
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "hw/cxl/cxl.h"
/*
 * Device registers have no restrictions per the spec, and so fall back to the
 * default memory mapped register rules in 8.2:
 *   Software shall use CXL.io Memory Read and Write to access memory mapped
 *   registers defined in this section. Unless otherwise specified, software
 *   shall restrict the access width based on the following:
 *   • A 32 bit register shall be accessed as a 1 Byte, 2 Bytes or 4 Bytes
 *     quantity.
 *   • A 64 bit register shall be accessed as a 1 Byte, 2 Bytes, 4 Bytes or
 *     8 Bytes quantity.
 *   • The address shall be a multiple of the access width, e.g. when
 *     accessing a register as a 4 Byte quantity, the address shall be a
 *     multiple of 4.
 *   • The accesses shall map to contiguous bytes. If these rules are not
 *     followed, the behavior is undefined.
 */
  28. static uint64_t caps_reg_read(void *opaque, hwaddr offset, unsigned size)
  29. {
  30. CXLDeviceState *cxl_dstate = opaque;
  31. if (size == 4) {
  32. return cxl_dstate->caps_reg_state32[offset / sizeof(*cxl_dstate->caps_reg_state32)];
  33. } else {
  34. return cxl_dstate->caps_reg_state64[offset / sizeof(*cxl_dstate->caps_reg_state64)];
  35. }
  36. }
  37. static uint64_t dev_reg_read(void *opaque, hwaddr offset, unsigned size)
  38. {
  39. return 0;
  40. }
  41. static uint64_t mailbox_reg_read(void *opaque, hwaddr offset, unsigned size)
  42. {
  43. CXLDeviceState *cxl_dstate = opaque;
  44. switch (size) {
  45. case 1:
  46. return cxl_dstate->mbox_reg_state[offset];
  47. case 2:
  48. return cxl_dstate->mbox_reg_state16[offset / size];
  49. case 4:
  50. return cxl_dstate->mbox_reg_state32[offset / size];
  51. case 8:
  52. return cxl_dstate->mbox_reg_state64[offset / size];
  53. default:
  54. g_assert_not_reached();
  55. }
  56. }
  57. static void mailbox_mem_writel(uint32_t *reg_state, hwaddr offset,
  58. uint64_t value)
  59. {
  60. switch (offset) {
  61. case A_CXL_DEV_MAILBOX_CTRL:
  62. /* fallthrough */
  63. case A_CXL_DEV_MAILBOX_CAP:
  64. /* RO register */
  65. break;
  66. default:
  67. qemu_log_mask(LOG_UNIMP,
  68. "%s Unexpected 32-bit access to 0x%" PRIx64 " (WI)\n",
  69. __func__, offset);
  70. return;
  71. }
  72. reg_state[offset / sizeof(*reg_state)] = value;
  73. }
  74. static void mailbox_mem_writeq(uint64_t *reg_state, hwaddr offset,
  75. uint64_t value)
  76. {
  77. switch (offset) {
  78. case A_CXL_DEV_MAILBOX_CMD:
  79. break;
  80. case A_CXL_DEV_BG_CMD_STS:
  81. /* BG not supported */
  82. /* fallthrough */
  83. case A_CXL_DEV_MAILBOX_STS:
  84. /* Read only register, will get updated by the state machine */
  85. return;
  86. default:
  87. qemu_log_mask(LOG_UNIMP,
  88. "%s Unexpected 64-bit access to 0x%" PRIx64 " (WI)\n",
  89. __func__, offset);
  90. return;
  91. }
  92. reg_state[offset / sizeof(*reg_state)] = value;
  93. }
/*
 * Guest write entry point for the mailbox register block.
 *
 * Writes landing in the command payload area are copied verbatim into the
 * byte view of the register file and return immediately (no doorbell
 * check).  Writes below the payload area are dispatched by width to the
 * 32-/64-bit helpers, which filter out read-only registers.  After such a
 * write, a set doorbell bit in the control register means the guest has
 * submitted a command, so the mailbox state machine is kicked.
 */
static void mailbox_reg_write(void *opaque, hwaddr offset, uint64_t value,
                              unsigned size)
{
    CXLDeviceState *cxl_dstate = opaque;

    /* Payload area: raw byte copy of the written data. */
    if (offset >= A_CXL_DEV_CMD_PAYLOAD) {
        memcpy(cxl_dstate->mbox_reg_state + offset, &value, size);
        return;
    }

    switch (size) {
    case 4:
        mailbox_mem_writel(cxl_dstate->mbox_reg_state32, offset, value);
        break;
    case 8:
        mailbox_mem_writeq(cxl_dstate->mbox_reg_state64, offset, value);
        break;
    default:
        /*
         * NOTE(review): mailbox_ops.impl allows 1- and 2-byte accesses,
         * which would land here and abort — confirm such widths cannot
         * reach the sub-payload registers.
         */
        g_assert_not_reached();
    }

    /* Doorbell set => a command was submitted; process it now. */
    if (ARRAY_FIELD_EX32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CTRL,
                         DOORBELL)) {
        cxl_process_mailbox(cxl_dstate);
    }
}
  117. static uint64_t mdev_reg_read(void *opaque, hwaddr offset, unsigned size)
  118. {
  119. uint64_t retval = 0;
  120. retval = FIELD_DP64(retval, CXL_MEM_DEV_STS, MEDIA_STATUS, 1);
  121. retval = FIELD_DP64(retval, CXL_MEM_DEV_STS, MBOX_READY, 1);
  122. return retval;
  123. }
/* Shared write handler for read-only register blocks: discard the write. */
static void ro_reg_write(void *opaque, hwaddr offset, uint64_t value,
                         unsigned size)
{
    /* Many register sets are read only */
}
/*
 * Memory device status registers: read-only, implemented as a single
 * 64-bit register (guest accesses of 1-8 bytes are accepted and converted
 * by the memory core to the 8-byte impl width).
 */
static const MemoryRegionOps mdev_ops = {
    .read = mdev_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
/* Mailbox registers: guest-writable, any access width from 1 to 8 bytes. */
static const MemoryRegionOps mailbox_ops = {
    .read = mailbox_reg_read,
    .write = mailbox_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
/* Device status registers: read-only; reads currently return zero. */
static const MemoryRegionOps dev_ops = {
    .read = dev_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
/*
 * Capability array registers: read-only; implemented at 4/8-byte widths
 * (smaller guest accesses are widened by the memory core).
 */
static const MemoryRegionOps caps_ops = {
    .read = caps_reg_read,
    .write = ro_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};
/*
 * Build the device register block as one container region with a
 * subregion per register set (capability array, device status, mailbox,
 * memory device status), each mapped at its spec-defined offset.
 */
void cxl_device_register_block_init(Object *obj, CXLDeviceState *cxl_dstate)
{
    /* This will be a BAR, so needs to be rounded up to pow2 for PCI spec */
    memory_region_init(&cxl_dstate->device_registers, obj, "device-registers",
                       pow2ceil(CXL_MMIO_SIZE));

    memory_region_init_io(&cxl_dstate->caps, obj, &caps_ops, cxl_dstate,
                          "cap-array", CXL_CAPS_SIZE);
    memory_region_init_io(&cxl_dstate->device, obj, &dev_ops, cxl_dstate,
                          "device-status", CXL_DEVICE_STATUS_REGISTERS_LENGTH);
    memory_region_init_io(&cxl_dstate->mailbox, obj, &mailbox_ops, cxl_dstate,
                          "mailbox", CXL_MAILBOX_REGISTERS_LENGTH);
    memory_region_init_io(&cxl_dstate->memory_device, obj, &mdev_ops,
                          cxl_dstate, "memory device caps",
                          CXL_MEMORY_DEVICE_REGISTERS_LENGTH);

    /* Capability array sits at offset 0 of the block. */
    memory_region_add_subregion(&cxl_dstate->device_registers, 0,
                                &cxl_dstate->caps);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_DEVICE_STATUS_REGISTERS_OFFSET,
                                &cxl_dstate->device);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_MAILBOX_REGISTERS_OFFSET,
                                &cxl_dstate->mailbox);
    memory_region_add_subregion(&cxl_dstate->device_registers,
                                CXL_MEMORY_DEVICE_REGISTERS_OFFSET,
                                &cxl_dstate->memory_device);
}
/* No device-status register state to initialize yet. */
static void device_reg_init_common(CXLDeviceState *cxl_dstate) { }
/*
 * Initialize the mailbox capability register and cached payload size.
 * The SHIFT value written into PAYLOAD_SIZE suggests the field is encoded
 * as log2 of the byte size — NOTE(review): confirm against the register
 * field definition in cxl.h / the CXL spec.
 */
static void mailbox_reg_init_common(CXLDeviceState *cxl_dstate)
{
    /* 2048 payload size, with no interrupt or background support */
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CAP,
                     PAYLOAD_SIZE, CXL_MAILBOX_PAYLOAD_SHIFT);
    cxl_dstate->payload_size = CXL_MAILBOX_MAX_PAYLOAD_SIZE;
}
/* No memory-device register state to initialize yet. */
static void memdev_reg_init_common(CXLDeviceState *cxl_dstate) { }
/*
 * Populate the capability array header and the per-capability headers
 * (device status, mailbox, memory device), run each set's common init,
 * then initialize the mailbox command machinery.
 */
void cxl_device_register_init_common(CXLDeviceState *cxl_dstate)
{
    uint64_t *cap_hdrs = cxl_dstate->caps_reg_state64;
    /* Three capabilities are advertised below. */
    const int cap_count = 3;

    /* CXL Device Capabilities Array Register */
    ARRAY_FIELD_DP64(cap_hdrs, CXL_DEV_CAP_ARRAY, CAP_ID, 0);
    ARRAY_FIELD_DP64(cap_hdrs, CXL_DEV_CAP_ARRAY, CAP_VERSION, 1);
    ARRAY_FIELD_DP64(cap_hdrs, CXL_DEV_CAP_ARRAY, CAP_COUNT, cap_count);

    cxl_device_cap_init(cxl_dstate, DEVICE_STATUS, 1);
    device_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MAILBOX, 2);
    mailbox_reg_init_common(cxl_dstate);

    cxl_device_cap_init(cxl_dstate, MEMORY_DEVICE, 0x4000);
    memdev_reg_init_common(cxl_dstate);

    cxl_initialize_mailbox(cxl_dstate);
}