cxl-mailbox-utils.c

/*
 * CXL Utility library for mailbox interface
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "hw/cxl/cxl.h"
#include "hw/cxl/cxl_events.h"
#include "hw/pci/pci.h"
#include "qemu/cutils.h"
#include "qemu/log.h"
#include "qemu/units.h"
#include "qemu/uuid.h"

#define CXL_CAPACITY_MULTIPLIER   (256 * MiB)

/*
 * How to add a new command, example. The command set FOO, with cmd BAR.
 *  1. Add the command set and cmd to the enum.
 *     FOO    = 0x7f,
 *          #define BAR 0
 *  2. Declare the handler
 *     static CXLRetCode cmd_foo_bar(struct cxl_cmd *cmd,
 *                                   CXLDeviceState *cxl_dstate, uint16_t *len)
 *  3. Add the command to the cxl_cmd_set[][]
 *     [FOO][BAR] = { "FOO_BAR", cmd_foo_bar, x, y },
 *  4. Implement your handler
 *     cmd_foo_bar(...) { ... return CXL_MBOX_SUCCESS; }
 *
 * Writing the handler:
 *    The handler is provided with the &struct cxl_cmd, the &CXLDeviceState,
 *    and the in/out length of the payload. The handler is responsible for
 *    consuming the payload from cmd->payload and operating upon it as
 *    necessary. It must then fill the output data into cmd->payload
 *    (overwriting what was there), set the length, and return a valid
 *    return code.
 *
 * XXX: The handler need not worry about endianness. The payload is read out of
 * a register interface that already deals with it.
 */

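/*
 * For illustration only, a fully assembled version of the FOO/BAR sketch
 * above might look like the following. FOO, BAR and cmd_foo_bar are
 * hypothetical and do not exist in this file:
 *
 *     static CXLRetCode cmd_foo_bar(struct cxl_cmd *cmd,
 *                                   CXLDeviceState *cxl_dstate, uint16_t *len)
 *     {
 *         uint32_t answer = 42;              (some device state to report)
 *
 *         stl_le_p(cmd->payload, answer);    (fill the output payload)
 *         *len = sizeof(answer);             (report the output length)
 *         return CXL_MBOX_SUCCESS;
 *     }
 *
 *     [FOO][BAR] = { "FOO_BAR", cmd_foo_bar, 0, 0 },
 *
 * where the input length of 0 means the command takes no input payload.
 */
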
enum {
    EVENTS      = 0x01,
        #define GET_RECORDS            0x0
        #define CLEAR_RECORDS          0x1
        #define GET_INTERRUPT_POLICY   0x2
        #define SET_INTERRUPT_POLICY   0x3
    FIRMWARE_UPDATE = 0x02,
        #define GET_INFO               0x0
    TIMESTAMP   = 0x03,
        #define GET                    0x0
        #define SET                    0x1
    LOGS        = 0x04,
        #define GET_SUPPORTED          0x0
        #define GET_LOG                0x1
    IDENTIFY    = 0x40,
        #define MEMORY_DEVICE          0x0
    CCLS        = 0x41,
        #define GET_PARTITION_INFO     0x0
        #define GET_LSA                0x2
        #define SET_LSA                0x3
    MEDIA_AND_POISON = 0x43,
        #define GET_POISON_LIST        0x0
        #define INJECT_POISON          0x1
        #define CLEAR_POISON           0x2
};

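/*
 * Each command set value above forms the upper byte, and each #define the
 * lower byte, of the 16-bit opcode ((set << 8) | cmd) that
 * cxl_process_mailbox() dispatches on and that cxl_initialize_mailbox()
 * records in the Command Effects Log.
 */
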
struct cxl_cmd;
typedef CXLRetCode (*opcode_handler)(struct cxl_cmd *cmd,
                                     CXLDeviceState *cxl_dstate, uint16_t *len);

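/*
 * Command table entry (see cxl_cmd_set[][] below): @in is the expected input
 * payload length in bytes (~0 accepts a variable length), @effect is the
 * Command Effects value reported in the CEL, and @payload points at the
 * mailbox payload registers while the handler runs.
 */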
struct cxl_cmd {
    const char *name;
    opcode_handler handler;
    ssize_t in;
    uint16_t effect; /* Reported in CEL */
    uint8_t *payload;
};

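/*
 * Get Event Records: the one-byte input selects which event log to read.
 * The same payload buffer is reused for the output, so the record count is
 * capped at what fits in the mailbox payload (and at 0xFFFF).
 */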
static CXLRetCode cmd_events_get_records(struct cxl_cmd *cmd,
                                         CXLDeviceState *cxlds,
                                         uint16_t *len)
{
    CXLGetEventPayload *pl;
    uint8_t log_type;
    int max_recs;

    if (cmd->in < sizeof(log_type)) {
        return CXL_MBOX_INVALID_INPUT;
    }

    log_type = *((uint8_t *)cmd->payload);

    pl = (CXLGetEventPayload *)cmd->payload;
    memset(pl, 0, sizeof(*pl));

    max_recs = (cxlds->payload_size - CXL_EVENT_PAYLOAD_HDR_SIZE) /
                CXL_EVENT_RECORD_SIZE;
    if (max_recs > 0xFFFF) {
        max_recs = 0xFFFF;
    }

    return cxl_event_get_records(cxlds, pl, log_type, max_recs, len);
}

static CXLRetCode cmd_events_clear_records(struct cxl_cmd *cmd,
                                           CXLDeviceState *cxlds,
                                           uint16_t *len)
{
    CXLClearEventPayload *pl;

    pl = (CXLClearEventPayload *)cmd->payload;
    *len = 0;
    return cxl_event_clear_records(cxlds, pl);
}

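/*
 * Get/Set Event Interrupt Policy: only the MSI/MSI-X interrupt mode is
 * modelled here. Set enables a log's interrupt only when MSI/MSI-X is
 * requested, and Get reports the configured vector for each enabled log.
 */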
static CXLRetCode cmd_events_get_interrupt_policy(struct cxl_cmd *cmd,
                                                  CXLDeviceState *cxlds,
                                                  uint16_t *len)
{
    CXLEventInterruptPolicy *policy;
    CXLEventLog *log;

    policy = (CXLEventInterruptPolicy *)cmd->payload;
    memset(policy, 0, sizeof(*policy));

    log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO];
    if (log->irq_enabled) {
        policy->info_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN];
    if (log->irq_enabled) {
        policy->warn_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL];
    if (log->irq_enabled) {
        policy->failure_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL];
    if (log->irq_enabled) {
        policy->fatal_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP];
    if (log->irq_enabled) {
        /* Dynamic Capacity borrows the same vector as info */
        policy->dyn_cap_settings = CXL_INT_MSI_MSIX;
    }

    *len = sizeof(*policy);
    return CXL_MBOX_SUCCESS;
}

static CXLRetCode cmd_events_set_interrupt_policy(struct cxl_cmd *cmd,
                                                  CXLDeviceState *cxlds,
                                                  uint16_t *len)
{
    CXLEventInterruptPolicy *policy;
    CXLEventLog *log;

    if (*len < CXL_EVENT_INT_SETTING_MIN_LEN) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    policy = (CXLEventInterruptPolicy *)cmd->payload;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO];
    log->irq_enabled = (policy->info_settings & CXL_EVENT_INT_MODE_MASK) ==
                        CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN];
    log->irq_enabled = (policy->warn_settings & CXL_EVENT_INT_MODE_MASK) ==
                        CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL];
    log->irq_enabled = (policy->failure_settings & CXL_EVENT_INT_MODE_MASK) ==
                        CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL];
    log->irq_enabled = (policy->fatal_settings & CXL_EVENT_INT_MODE_MASK) ==
                        CXL_INT_MSI_MSIX;

    /* DCD is optional */
    if (*len < sizeof(*policy)) {
        return CXL_MBOX_SUCCESS;
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP];
    log->irq_enabled = (policy->dyn_cap_settings & CXL_EVENT_INT_MODE_MASK) ==
                        CXL_INT_MSI_MSIX;

    *len = sizeof(*policy);
    return CXL_MBOX_SUCCESS;
}

/* 8.2.9.2.1 */
static CXLRetCode cmd_firmware_update_get_info(struct cxl_cmd *cmd,
                                               CXLDeviceState *cxl_dstate,
                                               uint16_t *len)
{
    struct {
        uint8_t slots_supported;
        uint8_t slot_info;
        uint8_t caps;
        uint8_t rsvd[0xd];
        char fw_rev1[0x10];
        char fw_rev2[0x10];
        char fw_rev3[0x10];
        char fw_rev4[0x10];
    } QEMU_PACKED *fw_info;
    QEMU_BUILD_BUG_ON(sizeof(*fw_info) != 0x50);

    if ((cxl_dstate->vmem_size < CXL_CAPACITY_MULTIPLIER) ||
        (cxl_dstate->pmem_size < CXL_CAPACITY_MULTIPLIER)) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    fw_info = (void *)cmd->payload;
    memset(fw_info, 0, sizeof(*fw_info));

    fw_info->slots_supported = 2;
    fw_info->slot_info = BIT(0) | BIT(3);
    fw_info->caps = 0;
    pstrcpy(fw_info->fw_rev1, sizeof(fw_info->fw_rev1), "BWFW VERSION 0");

    *len = sizeof(*fw_info);
    return CXL_MBOX_SUCCESS;
}

/* 8.2.9.3.1 */
static CXLRetCode cmd_timestamp_get(struct cxl_cmd *cmd,
                                    CXLDeviceState *cxl_dstate,
                                    uint16_t *len)
{
    uint64_t final_time = cxl_device_get_timestamp(cxl_dstate);

    stq_le_p(cmd->payload, final_time);
    *len = 8;

    return CXL_MBOX_SUCCESS;
}

/* 8.2.9.3.2 */
static CXLRetCode cmd_timestamp_set(struct cxl_cmd *cmd,
                                    CXLDeviceState *cxl_dstate,
                                    uint16_t *len)
{
    cxl_dstate->timestamp.set = true;
    cxl_dstate->timestamp.last_set = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

    cxl_dstate->timestamp.host_set = le64_to_cpu(*(uint64_t *)cmd->payload);

    *len = 0;
    return CXL_MBOX_SUCCESS;
}

/* CXL 3.0 8.2.9.5.2.1 Command Effects Log (CEL) */
static const QemuUUID cel_uuid = {
    .data = UUID(0x0da9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79,
                 0x96, 0xb1, 0x62, 0x3b, 0x3f, 0x17)
};

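/*
 * Only the Command Effects Log is exposed, so Get Supported Logs reports a
 * single entry whose size is four bytes per CEL entry, matching the opcode
 * and effect values recorded by cxl_initialize_mailbox().
 */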
/* 8.2.9.4.1 */
static CXLRetCode cmd_logs_get_supported(struct cxl_cmd *cmd,
                                         CXLDeviceState *cxl_dstate,
                                         uint16_t *len)
{
    struct {
        uint16_t entries;
        uint8_t rsvd[6];
        struct {
            QemuUUID uuid;
            uint32_t size;
        } log_entries[1];
    } QEMU_PACKED *supported_logs = (void *)cmd->payload;
    QEMU_BUILD_BUG_ON(sizeof(*supported_logs) != 0x1c);

    supported_logs->entries = 1;
    supported_logs->log_entries[0].uuid = cel_uuid;
    supported_logs->log_entries[0].size = 4 * cxl_dstate->cel_size;

    *len = sizeof(*supported_logs);
    return CXL_MBOX_SUCCESS;
}

/* 8.2.9.4.2 */
static CXLRetCode cmd_logs_get_log(struct cxl_cmd *cmd,
                                   CXLDeviceState *cxl_dstate,
                                   uint16_t *len)
{
    struct {
        QemuUUID uuid;
        uint32_t offset;
        uint32_t length;
    } QEMU_PACKED QEMU_ALIGNED(16) *get_log = (void *)cmd->payload;

    /*
     * 8.2.9.4.2
     *   The device shall return Invalid Parameter if the Offset or Length
     *   fields attempt to access beyond the size of the log as reported by
     *   Get Supported Logs.
     *
     * XXX: Spec is wrong, "Invalid Parameter" isn't a thing.
     * XXX: Spec doesn't address what to do about an incorrect UUID.
     *
     * The CEL buffer is large enough to fit all commands in the emulation, so
     * the only possible failure would be if the mailbox itself isn't big
     * enough.
     */
    if (get_log->offset + get_log->length > cxl_dstate->payload_size) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (!qemu_uuid_is_equal(&get_log->uuid, &cel_uuid)) {
        return CXL_MBOX_UNSUPPORTED;
    }

    /* Store the length before the payload is overwritten by the copy below */
    *len = get_log->length;

    memmove(cmd->payload, cxl_dstate->cel_log + get_log->offset,
            get_log->length);

    return CXL_MBOX_SUCCESS;
}

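/*
 * Identify Memory Device: capacities are reported in units of
 * CXL_CAPACITY_MULTIPLIER (256 MiB), which is why the volatile and
 * persistent sizes must be multiples of it.
 */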
/* 8.2.9.5.1.1 */
static CXLRetCode cmd_identify_memory_device(struct cxl_cmd *cmd,
                                             CXLDeviceState *cxl_dstate,
                                             uint16_t *len)
{
    struct {
        char fw_revision[0x10];
        uint64_t total_capacity;
        uint64_t volatile_capacity;
        uint64_t persistent_capacity;
        uint64_t partition_align;
        uint16_t info_event_log_size;
        uint16_t warning_event_log_size;
        uint16_t failure_event_log_size;
        uint16_t fatal_event_log_size;
        uint32_t lsa_size;
        uint8_t poison_list_max_mer[3];
        uint16_t inject_poison_limit;
        uint8_t poison_caps;
        uint8_t qos_telemetry_caps;
    } QEMU_PACKED *id;
    QEMU_BUILD_BUG_ON(sizeof(*id) != 0x43);

    CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);

    if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER))) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    id = (void *)cmd->payload;
    memset(id, 0, sizeof(*id));

    snprintf(id->fw_revision, 0x10, "BWFW VERSION %02d", 0);

    stq_le_p(&id->total_capacity,
             cxl_dstate->mem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&id->persistent_capacity,
             cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&id->volatile_capacity,
             cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER);
    stl_le_p(&id->lsa_size, cvc->get_lsa_size(ct3d));
    /* 256 poison records */
    st24_le_p(id->poison_list_max_mer, 256);
    /* No limit - so limited by main poison record limit */
    stw_le_p(&id->inject_poison_limit, 0);

    *len = sizeof(*id);
    return CXL_MBOX_SUCCESS;
}

static CXLRetCode cmd_ccls_get_partition_info(struct cxl_cmd *cmd,
                                              CXLDeviceState *cxl_dstate,
                                              uint16_t *len)
{
    struct {
        uint64_t active_vmem;
        uint64_t active_pmem;
        uint64_t next_vmem;
        uint64_t next_pmem;
    } QEMU_PACKED *part_info = (void *)cmd->payload;
    QEMU_BUILD_BUG_ON(sizeof(*part_info) != 0x20);

    if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER))) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    stq_le_p(&part_info->active_vmem,
             cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER);
    /*
     * When both next_vmem and next_pmem are 0, there is no pending change to
     * partitioning.
     */
    stq_le_p(&part_info->next_vmem, 0);
    stq_le_p(&part_info->active_pmem,
             cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&part_info->next_pmem, 0);

    *len = sizeof(*part_info);
    return CXL_MBOX_SUCCESS;
}

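/*
 * Get/Set LSA operate on the Label Storage Area and delegate the actual
 * storage access to the CXLType3Class get_lsa()/set_lsa() callbacks; the
 * handlers here only bounds-check offsets against get_lsa_size().
 */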
static CXLRetCode cmd_ccls_get_lsa(struct cxl_cmd *cmd,
                                   CXLDeviceState *cxl_dstate,
                                   uint16_t *len)
{
    struct {
        uint32_t offset;
        uint32_t length;
    } QEMU_PACKED *get_lsa;
    CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    uint32_t offset, length;

    get_lsa = (void *)cmd->payload;
    offset = get_lsa->offset;
    length = get_lsa->length;

    if (offset + length > cvc->get_lsa_size(ct3d)) {
        *len = 0;
        return CXL_MBOX_INVALID_INPUT;
    }

    *len = cvc->get_lsa(ct3d, get_lsa, length, offset);
    return CXL_MBOX_SUCCESS;
}

static CXLRetCode cmd_ccls_set_lsa(struct cxl_cmd *cmd,
                                   CXLDeviceState *cxl_dstate,
                                   uint16_t *len)
{
    struct set_lsa_pl {
        uint32_t offset;
        uint32_t rsvd;
        uint8_t data[];
    } QEMU_PACKED;
    struct set_lsa_pl *set_lsa_payload = (void *)cmd->payload;
    CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    const size_t hdr_len = offsetof(struct set_lsa_pl, data);
    uint16_t plen = *len;

    *len = 0;
    if (!plen) {
        return CXL_MBOX_SUCCESS;
    }

    if (set_lsa_payload->offset + plen > cvc->get_lsa_size(ct3d) + hdr_len) {
        return CXL_MBOX_INVALID_INPUT;
    }
    plen -= hdr_len;

    cvc->set_lsa(ct3d, set_lsa_payload->data, plen, set_lsa_payload->offset);
    return CXL_MBOX_SUCCESS;
}

/*
 * This is very inefficient, but good enough for now!
 * Also the payload will always fit, so there is no need to handle the MORE
 * flag and make this stateful. We may want to allow longer poison lists to
 * aid testing of that kernel functionality.
 */
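/*
 * In the output payload each record carries the 64-byte-aligned DPA with the
 * poison type in its low three bits, and a length expressed in units of
 * 64-byte cache lines; the query range in the input uses the same units.
 */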
static CXLRetCode cmd_media_get_poison_list(struct cxl_cmd *cmd,
                                            CXLDeviceState *cxl_dstate,
                                            uint16_t *len)
{
    struct get_poison_list_pl {
        uint64_t pa;
        uint64_t length;
    } QEMU_PACKED;

    struct get_poison_list_out_pl {
        uint8_t flags;
        uint8_t rsvd1;
        uint64_t overflow_timestamp;
        uint16_t count;
        uint8_t rsvd2[0x14];
        struct {
            uint64_t addr;
            uint32_t length;
            uint32_t resv;
        } QEMU_PACKED records[];
    } QEMU_PACKED;

    struct get_poison_list_pl *in = (void *)cmd->payload;
    struct get_poison_list_out_pl *out = (void *)cmd->payload;
    CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate);
    uint16_t record_count = 0, i = 0;
    uint64_t query_start, query_length;
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLPoison *ent;
    uint16_t out_pl_len;

    query_start = ldq_le_p(&in->pa);
    /* 64 byte alignment required */
    if (query_start & 0x3f) {
        return CXL_MBOX_INVALID_INPUT;
    }
    query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE;

    QLIST_FOREACH(ent, poison_list, node) {
        /* Check for no overlap */
        if (ent->start >= query_start + query_length ||
            ent->start + ent->length <= query_start) {
            continue;
        }
        record_count++;
    }
    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
    assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);

    memset(out, 0, out_pl_len);
    QLIST_FOREACH(ent, poison_list, node) {
        uint64_t start, stop;

        /* Check for no overlap */
        if (ent->start >= query_start + query_length ||
            ent->start + ent->length <= query_start) {
            continue;
        }

        /* Deal with overlap */
        start = MAX(ROUND_DOWN(ent->start, 64ull), query_start);
        stop = MIN(ROUND_DOWN(ent->start, 64ull) + ent->length,
                   query_start + query_length);
        stq_le_p(&out->records[i].addr, start | (ent->type & 0x7));
        stl_le_p(&out->records[i].length, (stop - start) / CXL_CACHE_LINE_SIZE);
        i++;
    }
    if (ct3d->poison_list_overflowed) {
        out->flags = (1 << 1);
        stq_le_p(&out->overflow_timestamp, ct3d->poison_list_overflow_ts);
    }
    stw_le_p(&out->count, record_count);
    *len = out_pl_len;
    return CXL_MBOX_SUCCESS;
}

static CXLRetCode cmd_media_inject_poison(struct cxl_cmd *cmd,
                                          CXLDeviceState *cxl_dstate,
                                          uint16_t *len_unused)
{
    CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate);
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLPoison *ent;
    struct inject_poison_pl {
        uint64_t dpa;
    };
    struct inject_poison_pl *in = (void *)cmd->payload;
    uint64_t dpa = ldq_le_p(&in->dpa);
    CXLPoison *p;

    QLIST_FOREACH(ent, poison_list, node) {
        if (dpa >= ent->start &&
            dpa + CXL_CACHE_LINE_SIZE <= ent->start + ent->length) {
            return CXL_MBOX_SUCCESS;
        }
    }

    if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
        return CXL_MBOX_INJECT_POISON_LIMIT;
    }
    p = g_new0(CXLPoison, 1);

    p->length = CXL_CACHE_LINE_SIZE;
    p->start = dpa;
    p->type = CXL_POISON_TYPE_INJECTED;

    /*
     * Possible todo: Merge with existing entry if next to it and if same type
     */
    QLIST_INSERT_HEAD(poison_list, p, node);
    ct3d->poison_list_cnt++;

    return CXL_MBOX_SUCCESS;
}

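/*
 * Clear Poison removes one 64-byte cache line from the poison list and, when
 * the type 3 device class provides set_cacheline, rewrites the underlying
 * data. Because the cleared line may sit in the middle of a larger tracked
 * extent, the matching entry is removed and up to two fragments (before and
 * after the cleared line) are re-inserted in its place.
 */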
static CXLRetCode cmd_media_clear_poison(struct cxl_cmd *cmd,
                                         CXLDeviceState *cxl_dstate,
                                         uint16_t *len_unused)
{
    CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate);
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    struct clear_poison_pl {
        uint64_t dpa;
        uint8_t data[64];
    };
    CXLPoison *ent;
    uint64_t dpa;

    struct clear_poison_pl *in = (void *)cmd->payload;

    dpa = ldq_le_p(&in->dpa);
    if (dpa + CXL_CACHE_LINE_SIZE > cxl_dstate->mem_size) {
        return CXL_MBOX_INVALID_PA;
    }

    /* Clearing a region with no poison is not an error so always do so */
    if (cvc->set_cacheline) {
        if (!cvc->set_cacheline(ct3d, dpa, in->data)) {
            return CXL_MBOX_INTERNAL_ERROR;
        }
    }

    QLIST_FOREACH(ent, poison_list, node) {
        /*
         * Test for containment within an entry. Simpler than the general case
         * as we are clearing 64 bytes and the entries are 64-byte aligned.
         */
        if ((dpa >= ent->start) && (dpa < ent->start + ent->length)) {
            break;
        }
    }
    if (!ent) {
        return CXL_MBOX_SUCCESS;
    }

    QLIST_REMOVE(ent, node);
    ct3d->poison_list_cnt--;

    if (dpa > ent->start) {
        CXLPoison *frag;
        /* Cannot overflow as replacing existing entry */
        frag = g_new0(CXLPoison, 1);

        frag->start = ent->start;
        frag->length = dpa - ent->start;
        frag->type = ent->type;

        QLIST_INSERT_HEAD(poison_list, frag, node);
        ct3d->poison_list_cnt++;
    }

    if (dpa + CXL_CACHE_LINE_SIZE < ent->start + ent->length) {
        CXLPoison *frag;

        if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
            cxl_set_poison_list_overflowed(ct3d);
        } else {
            frag = g_new0(CXLPoison, 1);

            frag->start = dpa + CXL_CACHE_LINE_SIZE;
            frag->length = ent->start + ent->length - frag->start;
            frag->type = ent->type;
            QLIST_INSERT_HEAD(poison_list, frag, node);
            ct3d->poison_list_cnt++;
        }
    }
    /* Any fragments have been added; free the original entry */
    g_free(ent);

    return CXL_MBOX_SUCCESS;
}

#define IMMEDIATE_CONFIG_CHANGE (1 << 1)
#define IMMEDIATE_DATA_CHANGE (1 << 2)
#define IMMEDIATE_POLICY_CHANGE (1 << 3)
#define IMMEDIATE_LOG_CHANGE (1 << 4)

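/*
 * Each entry below is { name, handler, expected input payload length in
 * bytes (~0 accepts any length), command effects flags }; the effects flags
 * end up in the Command Effects Log entry for that opcode.
 */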
static struct cxl_cmd cxl_cmd_set[256][256] = {
    [EVENTS][GET_RECORDS] = { "EVENTS_GET_RECORDS",
        cmd_events_get_records, 1, 0 },
    [EVENTS][CLEAR_RECORDS] = { "EVENTS_CLEAR_RECORDS",
        cmd_events_clear_records, ~0, IMMEDIATE_LOG_CHANGE },
    [EVENTS][GET_INTERRUPT_POLICY] = { "EVENTS_GET_INTERRUPT_POLICY",
        cmd_events_get_interrupt_policy, 0, 0 },
    [EVENTS][SET_INTERRUPT_POLICY] = { "EVENTS_SET_INTERRUPT_POLICY",
        cmd_events_set_interrupt_policy,
        ~0, IMMEDIATE_CONFIG_CHANGE },
    [FIRMWARE_UPDATE][GET_INFO] = { "FIRMWARE_UPDATE_GET_INFO",
        cmd_firmware_update_get_info, 0, 0 },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set,
                         8, IMMEDIATE_POLICY_CHANGE },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED",
        cmd_logs_get_supported, 0, 0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [IDENTIFY][MEMORY_DEVICE] = { "IDENTIFY_MEMORY_DEVICE",
        cmd_identify_memory_device, 0, 0 },
    [CCLS][GET_PARTITION_INFO] = { "CCLS_GET_PARTITION_INFO",
        cmd_ccls_get_partition_info, 0, 0 },
    [CCLS][GET_LSA] = { "CCLS_GET_LSA", cmd_ccls_get_lsa, 8, 0 },
    [CCLS][SET_LSA] = { "CCLS_SET_LSA", cmd_ccls_set_lsa,
        ~0, IMMEDIATE_CONFIG_CHANGE | IMMEDIATE_DATA_CHANGE },
    [MEDIA_AND_POISON][GET_POISON_LIST] = { "MEDIA_AND_POISON_GET_POISON_LIST",
        cmd_media_get_poison_list, 16, 0 },
    [MEDIA_AND_POISON][INJECT_POISON] = { "MEDIA_AND_POISON_INJECT_POISON",
        cmd_media_inject_poison, 8, 0 },
    [MEDIA_AND_POISON][CLEAR_POISON] = { "MEDIA_AND_POISON_CLEAR_POISON",
        cmd_media_clear_poison, 72, 0 },
};

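/*
 * Mailbox dispatch: decode the command set, command and payload length from
 * the command register, validate the length against the table entry, run the
 * handler directly on the payload registers, then publish the return code and
 * output length and clear the doorbell so the host can collect the result.
 */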
void cxl_process_mailbox(CXLDeviceState *cxl_dstate)
{
    uint16_t ret = CXL_MBOX_SUCCESS;
    struct cxl_cmd *cxl_cmd;
    uint64_t status_reg;
    opcode_handler h;
    uint64_t command_reg = cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_CMD];

    uint8_t set = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD, COMMAND_SET);
    uint8_t cmd = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD, COMMAND);
    uint16_t len = FIELD_EX64(command_reg, CXL_DEV_MAILBOX_CMD, LENGTH);
    cxl_cmd = &cxl_cmd_set[set][cmd];
    h = cxl_cmd->handler;
    if (h) {
        if (len == cxl_cmd->in || cxl_cmd->in == ~0) {
            cxl_cmd->payload = cxl_dstate->mbox_reg_state +
                A_CXL_DEV_CMD_PAYLOAD;
            ret = (*h)(cxl_cmd, cxl_dstate, &len);
            assert(len <= cxl_dstate->payload_size);
        } else {
            ret = CXL_MBOX_INVALID_PAYLOAD_LENGTH;
        }
    } else {
        qemu_log_mask(LOG_UNIMP, "Command %04xh not implemented\n",
                      set << 8 | cmd);
        ret = CXL_MBOX_UNSUPPORTED;
    }

    /* Set the return code */
    status_reg = FIELD_DP64(0, CXL_DEV_MAILBOX_STS, ERRNO, ret);

    /* Set the return length */
    command_reg = FIELD_DP64(command_reg, CXL_DEV_MAILBOX_CMD, COMMAND_SET, 0);
    command_reg = FIELD_DP64(command_reg, CXL_DEV_MAILBOX_CMD, COMMAND, 0);
    command_reg = FIELD_DP64(command_reg, CXL_DEV_MAILBOX_CMD, LENGTH, len);

    cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_CMD] = command_reg;
    cxl_dstate->mbox_reg_state64[R_CXL_DEV_MAILBOX_STS] = status_reg;

    /* Tell the host we're done */
    ARRAY_FIELD_DP32(cxl_dstate->mbox_reg_state32, CXL_DEV_MAILBOX_CTRL,
                     DOORBELL, 0);
}

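/*
 * Walk the static command table once at initialization and record every
 * implemented opcode and its effects flags in the Command Effects Log,
 * which LOGS_GET_LOG then serves back to the host.
 */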
void cxl_initialize_mailbox(CXLDeviceState *cxl_dstate)
{
    for (int set = 0; set < 256; set++) {
        for (int cmd = 0; cmd < 256; cmd++) {
            if (cxl_cmd_set[set][cmd].handler) {
                struct cxl_cmd *c = &cxl_cmd_set[set][cmd];
                struct cel_log *log =
                    &cxl_dstate->cel_log[cxl_dstate->cel_size];

                log->opcode = (set << 8) | cmd;
                log->effect = c->effect;
                cxl_dstate->cel_size++;
            }
        }
    }
}