cxl-mailbox-utils.c

/*
 * CXL Utility library for mailbox interface
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/cxl/cxl.h"
#include "hw/cxl/cxl_events.h"
#include "hw/cxl/cxl_mailbox.h"
#include "hw/pci/pci.h"
#include "hw/pci-bridge/cxl_upstream_port.h"
#include "qemu/cutils.h"
#include "qemu/log.h"
#include "qemu/units.h"
#include "qemu/uuid.h"
#include "system/hostmem.h"
#include "qemu/range.h"

#define CXL_CAPACITY_MULTIPLIER   (256 * MiB)
#define CXL_DC_EVENT_LOG_SIZE 8
#define CXL_NUM_EXTENTS_SUPPORTED 512
#define CXL_NUM_TAGS_SUPPORTED 0
/*
 * How to add a new command, by example: the command set FOO, with cmd BAR.
 *  1. Add the command set and cmd to the enum.
 *     FOO = 0x7f,
 *     #define BAR 0
 *  2. Implement the handler
 *     static CXLRetCode cmd_foo_bar(struct cxl_cmd *cmd,
 *                                   CXLDeviceState *cxl_dstate, uint16_t *len)
 *     { ... return CXL_MBOX_SUCCESS; }
 *  3. Add the command to the cxl_cmd_set[][]
 *     [FOO][BAR] = { "FOO_BAR", cmd_foo_bar, x, y },
 *
 * Writing the handler:
 * The handler is provided with the &struct cxl_cmd, the &CXLDeviceState, and
 * the in/out length of the payload. The handler is responsible for consuming
 * the payload from cmd->payload and operating upon it as necessary. It must
 * then fill the output data into cmd->payload (overwriting what was there),
 * set the length, and return a valid return code.
 *
 * XXX: The handler need not worry about endianness. The payload is read out of
 * a register interface that already deals with it.
 */
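/*
 * Illustrative sketch only (not one of the commands below): what steps 1-3
 * might look like for a hypothetical FOO/BAR command that echoes a single
 * byte back to the host, using the handler signature the rest of this file
 * uses:
 *
 *     FOO = 0x7f,
 *     #define BAR 0x0
 *
 *     static CXLRetCode cmd_foo_bar(const struct cxl_cmd *cmd,
 *                                   uint8_t *payload_in, size_t len_in,
 *                                   uint8_t *payload_out, size_t *len_out,
 *                                   CXLCCI *cci)
 *     {
 *         if (len_in < 1) {
 *             return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
 *         }
 *         payload_out[0] = payload_in[0];
 *         *len_out = 1;
 *         return CXL_MBOX_SUCCESS;
 *     }
 *
 *     [FOO][BAR] = { "FOO_BAR", cmd_foo_bar, x, y },
 */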
enum {
    INFOSTAT    = 0x00,
        #define IS_IDENTIFY   0x1
        #define BACKGROUND_OPERATION_STATUS    0x2
    EVENTS      = 0x01,
        #define GET_RECORDS   0x0
        #define CLEAR_RECORDS     0x1
        #define GET_INTERRUPT_POLICY   0x2
        #define SET_INTERRUPT_POLICY   0x3
    FIRMWARE_UPDATE = 0x02,
        #define GET_INFO      0x0
        #define TRANSFER      0x1
        #define ACTIVATE      0x2
    TIMESTAMP   = 0x03,
        #define GET           0x0
        #define SET           0x1
    LOGS        = 0x04,
        #define GET_SUPPORTED 0x0
        #define GET_LOG       0x1
    FEATURES    = 0x05,
        #define GET_SUPPORTED 0x0
        #define GET_FEATURE   0x1
        #define SET_FEATURE   0x2
    IDENTIFY    = 0x40,
        #define MEMORY_DEVICE 0x0
    CCLS        = 0x41,
        #define GET_PARTITION_INFO     0x0
        #define GET_LSA       0x2
        #define SET_LSA       0x3
    SANITIZE    = 0x44,
        #define OVERWRITE     0x0
        #define SECURE_ERASE  0x1
    PERSISTENT_MEM = 0x45,
        #define GET_SECURITY_STATE     0x0
    MEDIA_AND_POISON = 0x43,
        #define GET_POISON_LIST        0x0
        #define INJECT_POISON          0x1
        #define CLEAR_POISON           0x2
        #define GET_SCAN_MEDIA_CAPABILITIES 0x3
        #define SCAN_MEDIA             0x4
        #define GET_SCAN_MEDIA_RESULTS 0x5
    DCD_CONFIG  = 0x48,
        #define GET_DC_CONFIG          0x0
        #define GET_DYN_CAP_EXT_LIST   0x1
        #define ADD_DYN_CAP_RSP        0x2
        #define RELEASE_DYN_CAP        0x3
    PHYSICAL_SWITCH = 0x51,
        #define IDENTIFY_SWITCH_DEVICE      0x0
        #define GET_PHYSICAL_PORT_STATE     0x1
    TUNNEL = 0x53,
        #define MANAGEMENT_COMMAND     0x0
};
/* CCI Message Format CXL r3.1 Figure 7-19 */
typedef struct CXLCCIMessage {
    uint8_t category;
#define CXL_CCI_CAT_REQ 0
#define CXL_CCI_CAT_RSP 1
    uint8_t tag;
    uint8_t resv1;
    uint8_t command;
    uint8_t command_set;
    uint8_t pl_length[3];
    uint16_t rc;
    uint16_t vendor_specific;
    uint8_t payload[];
} QEMU_PACKED CXLCCIMessage;
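/*
 * Example (illustration only, for some CXLCCIMessage *msg): pl_length is a
 * 24-bit little-endian field, so it is not covered by the usual fixed-width
 * accessors. It is assembled by hand on read and stored with st24_le_p() on
 * write, as the tunnel handler below does:
 *
 *     size_t pl = msg->pl_length[2] << 16 |
 *                 msg->pl_length[1] << 8 | msg->pl_length[0];
 *     st24_le_p(msg->pl_length, pl);
 */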
/* This command is only defined to an MLD FM Owned LD or an MHD */
static CXLRetCode cmd_tunnel_management_cmd(const struct cxl_cmd *cmd,
                                            uint8_t *payload_in,
                                            size_t len_in,
                                            uint8_t *payload_out,
                                            size_t *len_out,
                                            CXLCCI *cci)
{
    PCIDevice *tunnel_target;
    CXLCCI *target_cci;
    struct {
        uint8_t port_or_ld_id;
        uint8_t target_type;
        uint16_t size;
        CXLCCIMessage ccimessage;
    } QEMU_PACKED *in;
    struct {
        uint16_t resp_len;
        uint8_t resv[2];
        CXLCCIMessage ccimessage;
    } QEMU_PACKED *out;
    size_t pl_length, length_out;
    bool bg_started;
    int rc;

    if (cmd->in < sizeof(*in)) {
        return CXL_MBOX_INVALID_INPUT;
    }

    in = (void *)payload_in;
    out = (void *)payload_out;

    if (len_in < sizeof(*in)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    /* Enough room for minimum sized message - no payload */
    if (in->size < sizeof(in->ccimessage)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    /* Length of input payload should be in->size + a wrapping tunnel header */
    if (in->size != len_in - offsetof(typeof(*out), ccimessage)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    if (in->ccimessage.category != CXL_CCI_CAT_REQ) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (in->target_type != 0) {
        qemu_log_mask(LOG_UNIMP,
                      "Tunneled Command sent to non existent FM-LD");
        return CXL_MBOX_INVALID_INPUT;
    }

    /*
     * The target of a tunnel unfortunately depends on the type of CCI
     * reading the message.
     * If in a switch, it is the port number.
     * If in an MLD, it is the LD number.
     * If in an MHD, the target type indicates where we are going.
     */
    if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        CXLType3Dev *ct3d = CXL_TYPE3(cci->d);

        if (in->port_or_ld_id != 0) {
            /* Only pretending to have one for now! */
            return CXL_MBOX_INVALID_INPUT;
        }
        target_cci = &ct3d->ld0_cci;
    } else if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) {
        CXLUpstreamPort *usp = CXL_USP(cci->d);

        tunnel_target = pcie_find_port_by_pn(&PCI_BRIDGE(usp)->sec_bus,
                                             in->port_or_ld_id);
        if (!tunnel_target) {
            return CXL_MBOX_INVALID_INPUT;
        }
        tunnel_target =
            pci_bridge_get_sec_bus(PCI_BRIDGE(tunnel_target))->devices[0];
        if (!tunnel_target) {
            return CXL_MBOX_INVALID_INPUT;
        }
        if (object_dynamic_cast(OBJECT(tunnel_target), TYPE_CXL_TYPE3)) {
            CXLType3Dev *ct3d = CXL_TYPE3(tunnel_target);

            /* Tunneled VDMs always land on FM Owned LD */
            target_cci = &ct3d->vdm_fm_owned_ld_mctp_cci;
        } else {
            return CXL_MBOX_INVALID_INPUT;
        }
    } else {
        return CXL_MBOX_INVALID_INPUT;
    }

    pl_length = in->ccimessage.pl_length[2] << 16 |
        in->ccimessage.pl_length[1] << 8 | in->ccimessage.pl_length[0];
    rc = cxl_process_cci_message(target_cci,
                                 in->ccimessage.command_set,
                                 in->ccimessage.command,
                                 pl_length, in->ccimessage.payload,
                                 &length_out, out->ccimessage.payload,
                                 &bg_started);
    /* Payload should be in place. The rest of the CCI header needs filling */
    out->resp_len = length_out + sizeof(CXLCCIMessage);
    st24_le_p(out->ccimessage.pl_length, length_out);
    out->ccimessage.rc = rc;
    out->ccimessage.category = CXL_CCI_CAT_RSP;
    out->ccimessage.command = in->ccimessage.command;
    out->ccimessage.command_set = in->ccimessage.command_set;
    out->ccimessage.tag = in->ccimessage.tag;
    *len_out = length_out + sizeof(*out);

    return CXL_MBOX_SUCCESS;
}
static CXLRetCode cmd_events_get_records(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in, size_t len_in,
                                         uint8_t *payload_out, size_t *len_out,
                                         CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLGetEventPayload *pl;
    uint8_t log_type;
    int max_recs;

    if (cmd->in < sizeof(log_type)) {
        return CXL_MBOX_INVALID_INPUT;
    }

    log_type = payload_in[0];

    pl = (CXLGetEventPayload *)payload_out;

    max_recs = (cxlds->payload_size - CXL_EVENT_PAYLOAD_HDR_SIZE) /
               CXL_EVENT_RECORD_SIZE;
    if (max_recs > 0xFFFF) {
        max_recs = 0xFFFF;
    }

    return cxl_event_get_records(cxlds, pl, log_type, max_recs, len_out);
}
static CXLRetCode cmd_events_clear_records(const struct cxl_cmd *cmd,
                                           uint8_t *payload_in,
                                           size_t len_in,
                                           uint8_t *payload_out,
                                           size_t *len_out,
                                           CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLClearEventPayload *pl;

    pl = (CXLClearEventPayload *)payload_in;

    if (len_in < sizeof(*pl) ||
        len_in < sizeof(*pl) + sizeof(*pl->handle) * pl->nr_recs) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    *len_out = 0;
    return cxl_event_clear_records(cxlds, pl);
}
static CXLRetCode cmd_events_get_interrupt_policy(const struct cxl_cmd *cmd,
                                                  uint8_t *payload_in,
                                                  size_t len_in,
                                                  uint8_t *payload_out,
                                                  size_t *len_out,
                                                  CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLEventInterruptPolicy *policy;
    CXLEventLog *log;

    policy = (CXLEventInterruptPolicy *)payload_out;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO];
    if (log->irq_enabled) {
        policy->info_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN];
    if (log->irq_enabled) {
        policy->warn_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL];
    if (log->irq_enabled) {
        policy->failure_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL];
    if (log->irq_enabled) {
        policy->fatal_settings = CXL_EVENT_INT_SETTING(log->irq_vec);
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP];
    if (log->irq_enabled) {
        /* Dynamic Capacity borrows the same vector as info */
        policy->dyn_cap_settings = CXL_INT_MSI_MSIX;
    }

    *len_out = sizeof(*policy);
    return CXL_MBOX_SUCCESS;
}
static CXLRetCode cmd_events_set_interrupt_policy(const struct cxl_cmd *cmd,
                                                  uint8_t *payload_in,
                                                  size_t len_in,
                                                  uint8_t *payload_out,
                                                  size_t *len_out,
                                                  CXLCCI *cci)
{
    CXLDeviceState *cxlds = &CXL_TYPE3(cci->d)->cxl_dstate;
    CXLEventInterruptPolicy *policy;
    CXLEventLog *log;

    if (len_in < CXL_EVENT_INT_SETTING_MIN_LEN) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    policy = (CXLEventInterruptPolicy *)payload_in;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_INFO];
    log->irq_enabled = (policy->info_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_WARN];
    log->irq_enabled = (policy->warn_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FAIL];
    log->irq_enabled = (policy->failure_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    log = &cxlds->event_logs[CXL_EVENT_TYPE_FATAL];
    log->irq_enabled = (policy->fatal_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    /* DCD is optional */
    if (len_in < sizeof(*policy)) {
        return CXL_MBOX_SUCCESS;
    }

    log = &cxlds->event_logs[CXL_EVENT_TYPE_DYNAMIC_CAP];
    log->irq_enabled = (policy->dyn_cap_settings & CXL_EVENT_INT_MODE_MASK) ==
                       CXL_INT_MSI_MSIX;

    *len_out = 0;
    return CXL_MBOX_SUCCESS;
}
/* CXL r3.1 section 8.2.9.1.1: Identify (Opcode 0001h) */
static CXLRetCode cmd_infostat_identify(const struct cxl_cmd *cmd,
                                        uint8_t *payload_in,
                                        size_t len_in,
                                        uint8_t *payload_out,
                                        size_t *len_out,
                                        CXLCCI *cci)
{
    PCIDeviceClass *class = PCI_DEVICE_GET_CLASS(cci->d);
    struct {
        uint16_t pcie_vid;
        uint16_t pcie_did;
        uint16_t pcie_subsys_vid;
        uint16_t pcie_subsys_id;
        uint64_t sn;
        uint8_t max_message_size;
        uint8_t component_type;
    } QEMU_PACKED *is_identify;
    QEMU_BUILD_BUG_ON(sizeof(*is_identify) != 18);

    is_identify = (void *)payload_out;
    is_identify->pcie_vid = class->vendor_id;
    is_identify->pcie_did = class->device_id;
    if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_USP)) {
        is_identify->sn = CXL_USP(cci->d)->sn;
        /* Subsystem info not defined for a USP */
        is_identify->pcie_subsys_vid = 0;
        is_identify->pcie_subsys_id = 0;
        is_identify->component_type = 0x0; /* Switch */
    } else if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        PCIDevice *pci_dev = PCI_DEVICE(cci->d);

        is_identify->sn = CXL_TYPE3(cci->d)->sn;
        /*
         * We can't always use class->subsystem_vendor_id as
         * it is not set if the defaults are used.
         */
        is_identify->pcie_subsys_vid =
            pci_get_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID);
        is_identify->pcie_subsys_id =
            pci_get_word(pci_dev->config + PCI_SUBSYSTEM_ID);
        is_identify->component_type = 0x3; /* Type 3 */
    }

    /* TODO: Allow this to vary across different CCIs */
    is_identify->max_message_size = 9; /* 512 bytes - MCTP_CXL_MAILBOX_BYTES */
    *len_out = sizeof(*is_identify);
    return CXL_MBOX_SUCCESS;
}
static void cxl_set_dsp_active_bm(PCIBus *b, PCIDevice *d,
                                  void *private)
{
    uint8_t *bm = private;

    if (object_dynamic_cast(OBJECT(d), TYPE_CXL_DSP)) {
        uint8_t port = PCIE_PORT(d)->port;

        bm[port / 8] |= 1 << (port % 8);
    }
}
/* CXL r3.1 Section 7.6.7.1.1: Identify Switch Device (Opcode 5100h) */
static CXLRetCode cmd_identify_switch_device(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    PCIEPort *usp = PCIE_PORT(cci->d);
    PCIBus *bus = &PCI_BRIDGE(cci->d)->sec_bus;
    int num_phys_ports = pcie_count_ds_ports(bus);

    struct cxl_fmapi_ident_switch_dev_resp_pl {
        uint8_t ingress_port_id;
        uint8_t rsvd;
        uint8_t num_physical_ports;
        uint8_t num_vcss;
        uint8_t active_port_bitmask[0x20];
        uint8_t active_vcs_bitmask[0x20];
        uint16_t total_vppbs;
        uint16_t bound_vppbs;
        uint8_t num_hdm_decoders_per_usp;
    } QEMU_PACKED *out;
    QEMU_BUILD_BUG_ON(sizeof(*out) != 0x49);

    out = (struct cxl_fmapi_ident_switch_dev_resp_pl *)payload_out;
    *out = (struct cxl_fmapi_ident_switch_dev_resp_pl) {
        .num_physical_ports = num_phys_ports + 1, /* 1 USP */
        .num_vcss = 1, /* Multiple VCSs not yet supported - potentially tricky */
        .active_vcs_bitmask[0] = 0x1,
        .total_vppbs = num_phys_ports + 1,
        .bound_vppbs = num_phys_ports + 1,
        .num_hdm_decoders_per_usp = 4,
    };

    /* Depends on the CCI type */
    if (object_dynamic_cast(OBJECT(cci->intf), TYPE_PCIE_PORT)) {
        out->ingress_port_id = PCIE_PORT(cci->intf)->port;
    } else {
        /* MCTP? */
        out->ingress_port_id = 0;
    }

    pci_for_each_device_under_bus(bus, cxl_set_dsp_active_bm,
                                  out->active_port_bitmask);
    out->active_port_bitmask[usp->port / 8] |= (1 << usp->port % 8);

    *len_out = sizeof(*out);

    return CXL_MBOX_SUCCESS;
}
/* CXL r3.1 Section 7.6.7.1.2: Get Physical Port State (Opcode 5101h) */
static CXLRetCode cmd_get_physical_port_state(const struct cxl_cmd *cmd,
                                              uint8_t *payload_in,
                                              size_t len_in,
                                              uint8_t *payload_out,
                                              size_t *len_out,
                                              CXLCCI *cci)
{
    /* CXL r3.1 Table 7-17: Get Physical Port State Request Payload */
    struct cxl_fmapi_get_phys_port_state_req_pl {
        uint8_t num_ports;
        uint8_t ports[];
    } QEMU_PACKED *in;

    /*
     * CXL r3.1 Table 7-19: Get Physical Port State Port Information Block
     * Format
     */
    struct cxl_fmapi_port_state_info_block {
        uint8_t port_id;
        uint8_t config_state;
        uint8_t connected_device_cxl_version;
        uint8_t rsv1;
        uint8_t connected_device_type;
        uint8_t port_cxl_version_bitmask;
        uint8_t max_link_width;
        uint8_t negotiated_link_width;
        uint8_t supported_link_speeds_vector;
        uint8_t max_link_speed;
        uint8_t current_link_speed;
        uint8_t ltssm_state;
        uint8_t first_lane_num;
        uint16_t link_state;
        uint8_t supported_ld_count;
    } QEMU_PACKED;

    /* CXL r3.1 Table 7-18: Get Physical Port State Response Payload */
    struct cxl_fmapi_get_phys_port_state_resp_pl {
        uint8_t num_ports;
        uint8_t rsv1[3];
        struct cxl_fmapi_port_state_info_block ports[];
    } QEMU_PACKED *out;
    PCIBus *bus = &PCI_BRIDGE(cci->d)->sec_bus;
    PCIEPort *usp = PCIE_PORT(cci->d);
    size_t pl_size;
    int i;

    in = (struct cxl_fmapi_get_phys_port_state_req_pl *)payload_in;
    out = (struct cxl_fmapi_get_phys_port_state_resp_pl *)payload_out;

    if (len_in < sizeof(*in)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
    /* Check if what was requested can fit */
    if (sizeof(*out) + sizeof(*out->ports) * in->num_ports > cci->payload_max) {
        return CXL_MBOX_INVALID_INPUT;
    }

    /* For success there should be a match for each requested port */
    out->num_ports = in->num_ports;
    for (i = 0; i < in->num_ports; i++) {
        struct cxl_fmapi_port_state_info_block *port;
        /* First try to match on downstream port */
        PCIDevice *port_dev;
        uint16_t lnkcap, lnkcap2, lnksta;

        port = &out->ports[i];

        port_dev = pcie_find_port_by_pn(bus, in->ports[i]);
        if (port_dev) { /* DSP */
            PCIDevice *ds_dev = pci_bridge_get_sec_bus(PCI_BRIDGE(port_dev))
                ->devices[0];

            port->config_state = 3;
            if (ds_dev) {
                if (object_dynamic_cast(OBJECT(ds_dev), TYPE_CXL_TYPE3)) {
                    port->connected_device_type = 5; /* Assume MLD for now */
                } else {
                    port->connected_device_type = 1;
                }
            } else {
                port->connected_device_type = 0;
            }
            port->supported_ld_count = 3;
        } else if (usp->port == in->ports[i]) { /* USP */
            port_dev = PCI_DEVICE(usp);
            port->config_state = 4;
            port->connected_device_type = 0;
        } else {
            return CXL_MBOX_INVALID_INPUT;
        }

        port->port_id = in->ports[i];
        /* Information on status of this port in lnksta, lnkcap */
        if (!port_dev->exp.exp_cap) {
            return CXL_MBOX_INTERNAL_ERROR;
        }
        lnksta = port_dev->config_read(port_dev,
                                       port_dev->exp.exp_cap + PCI_EXP_LNKSTA,
                                       sizeof(lnksta));
        lnkcap = port_dev->config_read(port_dev,
                                       port_dev->exp.exp_cap + PCI_EXP_LNKCAP,
                                       sizeof(lnkcap));
        lnkcap2 = port_dev->config_read(port_dev,
                                        port_dev->exp.exp_cap + PCI_EXP_LNKCAP2,
                                        sizeof(lnkcap2));

        port->max_link_width = (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
        port->negotiated_link_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> 4;
        /* No definition for SLS field in linux/pci_regs.h */
        port->supported_link_speeds_vector = (lnkcap2 & 0xFE) >> 1;
        port->max_link_speed = lnkcap & PCI_EXP_LNKCAP_SLS;
        port->current_link_speed = lnksta & PCI_EXP_LNKSTA_CLS;
        /* TODO: Track down if we can get the rest of the info */
        port->ltssm_state = 0x7;
        port->first_lane_num = 0;
        port->link_state = 0;
        port->port_cxl_version_bitmask = 0x2;
        port->connected_device_cxl_version = 0x2;
    }

    pl_size = sizeof(*out) + sizeof(*out->ports) * in->num_ports;
    *len_out = pl_size;

    return CXL_MBOX_SUCCESS;
}
/* CXL r3.1 Section 8.2.9.1.2: Background Operation Status (Opcode 0002h) */
static CXLRetCode cmd_infostat_bg_op_sts(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    struct {
        uint8_t status;
        uint8_t rsvd;
        uint16_t opcode;
        uint16_t returncode;
        uint16_t vendor_ext_status;
    } QEMU_PACKED *bg_op_status;
    QEMU_BUILD_BUG_ON(sizeof(*bg_op_status) != 8);

    bg_op_status = (void *)payload_out;
    bg_op_status->status = cci->bg.complete_pct << 1;
    if (cci->bg.runtime > 0) {
        bg_op_status->status |= 1U << 0;
    }
    bg_op_status->opcode = cci->bg.opcode;
    bg_op_status->returncode = cci->bg.ret_code;
    *len_out = sizeof(*bg_op_status);

    return CXL_MBOX_SUCCESS;
}
#define CXL_FW_SLOTS 2
#define CXL_FW_SIZE  0x02000000 /* 32 MiB */

/* CXL r3.1 Section 8.2.9.3.1: Get FW Info (Opcode 0200h) */
static CXLRetCode cmd_firmware_update_get_info(const struct cxl_cmd *cmd,
                                               uint8_t *payload_in,
                                               size_t len,
                                               uint8_t *payload_out,
                                               size_t *len_out,
                                               CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    struct {
        uint8_t slots_supported;
        uint8_t slot_info;
        uint8_t caps;
        uint8_t rsvd[0xd];
        char fw_rev1[0x10];
        char fw_rev2[0x10];
        char fw_rev3[0x10];
        char fw_rev4[0x10];
    } QEMU_PACKED *fw_info;
    QEMU_BUILD_BUG_ON(sizeof(*fw_info) != 0x50);

    if (!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER) ||
        !QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER) ||
        !QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER)) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    fw_info = (void *)payload_out;

    fw_info->slots_supported = CXL_FW_SLOTS;
    fw_info->slot_info = (cci->fw.active_slot & 0x7) |
                         ((cci->fw.staged_slot & 0x7) << 3);
    fw_info->caps = BIT(0); /* online update supported */

    if (cci->fw.slot[0]) {
        pstrcpy(fw_info->fw_rev1, sizeof(fw_info->fw_rev1), "BWFW VERSION 0");
    }
    if (cci->fw.slot[1]) {
        pstrcpy(fw_info->fw_rev2, sizeof(fw_info->fw_rev2), "BWFW VERSION 1");
    }

    *len_out = sizeof(*fw_info);
    return CXL_MBOX_SUCCESS;
}
/* CXL r3.1 section 8.2.9.3.2: Transfer FW (Opcode 0201h) */
#define CXL_FW_XFER_ALIGNMENT   128

#define CXL_FW_XFER_ACTION_FULL     0x0
#define CXL_FW_XFER_ACTION_INIT     0x1
#define CXL_FW_XFER_ACTION_CONTINUE 0x2
#define CXL_FW_XFER_ACTION_END      0x3
#define CXL_FW_XFER_ACTION_ABORT    0x4
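/*
 * Note (illustrative summary of the state machine below): a one-shot
 * transfer uses ACTION_FULL on its own, while a partitioned transfer is
 * INIT, zero or more CONTINUEs, then END, with each part's offset given in
 * 128-byte (CXL_FW_XFER_ALIGNMENT) units. ABORT cancels an in-flight
 * partitioned transfer, and parts arriving more than 30 seconds apart cause
 * the transfer to be treated as invalid and dropped.
 */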
static CXLRetCode cmd_firmware_update_transfer(const struct cxl_cmd *cmd,
                                               uint8_t *payload_in,
                                               size_t len,
                                               uint8_t *payload_out,
                                               size_t *len_out,
                                               CXLCCI *cci)
{
    struct {
        uint8_t action;
        uint8_t slot;
        uint8_t rsvd1[2];
        uint32_t offset;
        uint8_t rsvd2[0x78];
        uint8_t data[];
    } QEMU_PACKED *fw_transfer = (void *)payload_in;
    size_t offset, length;

    if (len < sizeof(*fw_transfer)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    if (fw_transfer->action == CXL_FW_XFER_ACTION_ABORT) {
        /*
         * At this point there are no ongoing transfers running in the
         * background - everything is serialized before this call. Just
         * mark the state machine and disregard any other input.
         */
        cci->fw.transferring = false;
        return CXL_MBOX_SUCCESS;
    }

    offset = fw_transfer->offset * CXL_FW_XFER_ALIGNMENT;
    length = len - sizeof(*fw_transfer);
    if (offset + length > CXL_FW_SIZE) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (cci->fw.transferring) {
        if (fw_transfer->action == CXL_FW_XFER_ACTION_FULL ||
            fw_transfer->action == CXL_FW_XFER_ACTION_INIT) {
            return CXL_MBOX_FW_XFER_IN_PROGRESS;
        }
        /*
         * Abort a partitioned package transfer if over 30 secs pass
         * between parts. As opposed to the explicit ABORT action,
         * semantically treat this condition as an error - as if a part
         * action were passed without a previous INIT.
         */
        if (difftime(time(NULL), cci->fw.last_partxfer) > 30.0) {
            cci->fw.transferring = false;
            return CXL_MBOX_INVALID_INPUT;
        }
    } else if (fw_transfer->action == CXL_FW_XFER_ACTION_CONTINUE ||
               fw_transfer->action == CXL_FW_XFER_ACTION_END) {
        return CXL_MBOX_INVALID_INPUT;
    }

    /* allow back-to-back retransmission */
    if ((offset != cci->fw.prev_offset || length != cci->fw.prev_len) &&
        (fw_transfer->action == CXL_FW_XFER_ACTION_CONTINUE ||
         fw_transfer->action == CXL_FW_XFER_ACTION_END)) {
        /* verify no overlaps */
        if (offset < cci->fw.prev_offset + cci->fw.prev_len) {
            return CXL_MBOX_FW_XFER_OUT_OF_ORDER;
        }
    }

    switch (fw_transfer->action) {
    case CXL_FW_XFER_ACTION_FULL: /* ignores offset */
    case CXL_FW_XFER_ACTION_END:
        if (fw_transfer->slot == 0 ||
            fw_transfer->slot == cci->fw.active_slot ||
            fw_transfer->slot > CXL_FW_SLOTS) {
            return CXL_MBOX_FW_INVALID_SLOT;
        }

        /* mark the slot used upon bg completion */
        break;
    case CXL_FW_XFER_ACTION_INIT:
        if (offset != 0) {
            return CXL_MBOX_INVALID_INPUT;
        }

        cci->fw.transferring = true;
        cci->fw.prev_offset = offset;
        cci->fw.prev_len = length;
        break;
    case CXL_FW_XFER_ACTION_CONTINUE:
        cci->fw.prev_offset = offset;
        cci->fw.prev_len = length;
        break;
    default:
        return CXL_MBOX_INVALID_INPUT;
    }

    if (fw_transfer->action == CXL_FW_XFER_ACTION_FULL) {
        cci->bg.runtime = 10 * 1000UL;
    } else {
        cci->bg.runtime = 2 * 1000UL;
    }
    /* keep relevant context for bg completion */
    cci->fw.curr_action = fw_transfer->action;
    cci->fw.curr_slot = fw_transfer->slot;
    *len_out = 0;

    return CXL_MBOX_BG_STARTED;
}
static void __do_firmware_xfer(CXLCCI *cci)
{
    switch (cci->fw.curr_action) {
    case CXL_FW_XFER_ACTION_FULL:
    case CXL_FW_XFER_ACTION_END:
        cci->fw.slot[cci->fw.curr_slot - 1] = true;
        cci->fw.transferring = false;
        break;
    case CXL_FW_XFER_ACTION_INIT:
    case CXL_FW_XFER_ACTION_CONTINUE:
        time(&cci->fw.last_partxfer);
        break;
    default:
        break;
    }
}
/* CXL r3.1 section 8.2.9.3.3: Activate FW (Opcode 0202h) */
static CXLRetCode cmd_firmware_update_activate(const struct cxl_cmd *cmd,
                                               uint8_t *payload_in,
                                               size_t len,
                                               uint8_t *payload_out,
                                               size_t *len_out,
                                               CXLCCI *cci)
{
    struct {
        uint8_t action;
        uint8_t slot;
    } QEMU_PACKED *fw_activate = (void *)payload_in;
    QEMU_BUILD_BUG_ON(sizeof(*fw_activate) != 0x2);

    if (fw_activate->slot == 0 ||
        fw_activate->slot == cci->fw.active_slot ||
        fw_activate->slot > CXL_FW_SLOTS) {
        return CXL_MBOX_FW_INVALID_SLOT;
    }

    /* ensure that an actual fw package is there */
    if (!cci->fw.slot[fw_activate->slot - 1]) {
        return CXL_MBOX_FW_INVALID_SLOT;
    }

    switch (fw_activate->action) {
    case 0: /* online */
        cci->fw.active_slot = fw_activate->slot;
        break;
    case 1: /* reset */
        cci->fw.staged_slot = fw_activate->slot;
        break;
    default:
        return CXL_MBOX_INVALID_INPUT;
    }

    return CXL_MBOX_SUCCESS;
}
/* CXL r3.1 Section 8.2.9.4.1: Get Timestamp (Opcode 0300h) */
static CXLRetCode cmd_timestamp_get(const struct cxl_cmd *cmd,
                                    uint8_t *payload_in,
                                    size_t len_in,
                                    uint8_t *payload_out,
                                    size_t *len_out,
                                    CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
    uint64_t final_time = cxl_device_get_timestamp(cxl_dstate);

    stq_le_p(payload_out, final_time);
    *len_out = 8;

    return CXL_MBOX_SUCCESS;
}

/* CXL r3.1 Section 8.2.9.4.2: Set Timestamp (Opcode 0301h) */
static CXLRetCode cmd_timestamp_set(const struct cxl_cmd *cmd,
                                    uint8_t *payload_in,
                                    size_t len_in,
                                    uint8_t *payload_out,
                                    size_t *len_out,
                                    CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;

    cxl_dstate->timestamp.set = true;
    cxl_dstate->timestamp.last_set = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

    cxl_dstate->timestamp.host_set = le64_to_cpu(*(uint64_t *)payload_in);

    *len_out = 0;
    return CXL_MBOX_SUCCESS;
}
/* CXL r3.1 Section 8.2.9.5.2.1: Command Effects Log (CEL) */
static const QemuUUID cel_uuid = {
    .data = UUID(0x0da9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79,
                 0x96, 0xb1, 0x62, 0x3b, 0x3f, 0x17)
};

/* CXL r3.1 Section 8.2.9.5.1: Get Supported Logs (Opcode 0400h) */
static CXLRetCode cmd_logs_get_supported(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    struct {
        uint16_t entries;
        uint8_t rsvd[6];
        struct {
            QemuUUID uuid;
            uint32_t size;
        } log_entries[1];
    } QEMU_PACKED *supported_logs = (void *)payload_out;
    QEMU_BUILD_BUG_ON(sizeof(*supported_logs) != 0x1c);

    supported_logs->entries = 1;
    supported_logs->log_entries[0].uuid = cel_uuid;
    supported_logs->log_entries[0].size = 4 * cci->cel_size;

    *len_out = sizeof(*supported_logs);
    return CXL_MBOX_SUCCESS;
}
/* CXL r3.1 Section 8.2.9.5.2: Get Log (Opcode 0401h) */
static CXLRetCode cmd_logs_get_log(const struct cxl_cmd *cmd,
                                   uint8_t *payload_in,
                                   size_t len_in,
                                   uint8_t *payload_out,
                                   size_t *len_out,
                                   CXLCCI *cci)
{
    struct {
        QemuUUID uuid;
        uint32_t offset;
        uint32_t length;
    } QEMU_PACKED QEMU_ALIGNED(16) *get_log;

    get_log = (void *)payload_in;

    if (get_log->length > cci->payload_max) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (!qemu_uuid_is_equal(&get_log->uuid, &cel_uuid)) {
        return CXL_MBOX_INVALID_LOG;
    }

    /*
     * CXL r3.1 Section 8.2.9.5.2: Get Log (Opcode 0401h)
     *   The device shall return Invalid Input if the Offset or Length
     *   fields attempt to access beyond the size of the log as reported by
     *   Get Supported Logs.
     *
     * Only one entry is valid per opcode, but offset + length may still sum
     * to more than that if the inputs are not valid, and so would access
     * beyond the end of cci->cel_log.
     */
    if ((uint64_t)get_log->offset + get_log->length >= sizeof(cci->cel_log)) {
        return CXL_MBOX_INVALID_INPUT;
    }

    /* Store off everything to local variables so we can wipe out the payload */
    *len_out = get_log->length;

    memmove(payload_out, cci->cel_log + get_log->offset, get_log->length);

    return CXL_MBOX_SUCCESS;
}
/* CXL r3.1 section 8.2.9.6: Features */
/*
 * Get Supported Features output payload
 * CXL r3.1 section 8.2.9.6.1 Table 8-96
 */
typedef struct CXLSupportedFeatureHeader {
    uint16_t entries;
    uint16_t nsuppfeats_dev;
    uint32_t reserved;
} QEMU_PACKED CXLSupportedFeatureHeader;

/*
 * Get Supported Features Supported Feature Entry
 * CXL r3.1 section 8.2.9.6.1 Table 8-97
 */
typedef struct CXLSupportedFeatureEntry {
    QemuUUID uuid;
    uint16_t feat_index;
    uint16_t get_feat_size;
    uint16_t set_feat_size;
    uint32_t attr_flags;
    uint8_t get_feat_version;
    uint8_t set_feat_version;
    uint16_t set_feat_effects;
    uint8_t rsvd[18];
} QEMU_PACKED CXLSupportedFeatureEntry;

/* Supported Feature Entry : attribute flags */
#define CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE BIT(0)
#define CXL_FEAT_ENTRY_ATTR_FLAG_DEEPEST_RESET_PERSISTENCE_MASK GENMASK(3, 1)
#define CXL_FEAT_ENTRY_ATTR_FLAG_PERSIST_ACROSS_FIRMWARE_UPDATE BIT(4)
#define CXL_FEAT_ENTRY_ATTR_FLAG_SUPPORT_DEFAULT_SELECTION BIT(5)
#define CXL_FEAT_ENTRY_ATTR_FLAG_SUPPORT_SAVED_SELECTION BIT(6)

/* Supported Feature Entry : set feature effects */
#define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_COLD_RESET BIT(0)
#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE BIT(1)
#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_DATA_CHANGE BIT(2)
#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_POLICY_CHANGE BIT(3)
#define CXL_FEAT_ENTRY_SFE_IMMEDIATE_LOG_CHANGE BIT(4)
#define CXL_FEAT_ENTRY_SFE_SECURITY_STATE_CHANGE BIT(5)
#define CXL_FEAT_ENTRY_SFE_BACKGROUND_OPERATION BIT(6)
#define CXL_FEAT_ENTRY_SFE_SUPPORT_SECONDARY_MAILBOX BIT(7)
#define CXL_FEAT_ENTRY_SFE_SUPPORT_ABORT_BACKGROUND_OPERATION BIT(8)
#define CXL_FEAT_ENTRY_SFE_CEL_VALID BIT(9)
#define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_CONV_RESET BIT(10)
#define CXL_FEAT_ENTRY_SFE_CONFIG_CHANGE_CXL_RESET BIT(11)

enum CXL_SUPPORTED_FEATURES_LIST {
    CXL_FEATURE_PATROL_SCRUB = 0,
    CXL_FEATURE_ECS,
    CXL_FEATURE_MAX
};
/* Get Feature CXL r3.1 section 8.2.9.6.2 */
/*
 * Get Feature input payload
 * CXL r3.1 section 8.2.9.6.2 Table 8-99
 */
/* Get Feature : Payload in selection */
enum CXL_GET_FEATURE_SELECTION {
    CXL_GET_FEATURE_SEL_CURRENT_VALUE,
    CXL_GET_FEATURE_SEL_DEFAULT_VALUE,
    CXL_GET_FEATURE_SEL_SAVED_VALUE,
    CXL_GET_FEATURE_SEL_MAX
};

/* Set Feature CXL r3.1 section 8.2.9.6.3 */
/*
 * Set Feature input payload
 * CXL r3.1 section 8.2.9.6.3 Table 8-101
 */
typedef struct CXLSetFeatureInHeader {
    QemuUUID uuid;
    uint32_t flags;
    uint16_t offset;
    uint8_t version;
    uint8_t rsvd[9];
} QEMU_PACKED QEMU_ALIGNED(16) CXLSetFeatureInHeader;

/* Set Feature : Payload in flags */
#define CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MASK 0x7
enum CXL_SET_FEATURE_FLAG_DATA_TRANSFER {
    CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_INITIATE_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_CONTINUE_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_ABORT_DATA_TRANSFER,
    CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MAX
};
#define CXL_SET_FEAT_DATA_SAVED_ACROSS_RESET BIT(3)
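/*
 * Note (illustrative summary, see cmd_features_set_feature() below): like
 * Transfer FW, Set Feature data may arrive in one shot (FULL) or as an
 * INITIATE / CONTINUE / FINISH sequence, with ABORT cancelling it; the
 * in-flight transfer is tracked per device in CXLSetFeatureInfo, keyed by
 * the feature UUID.
 */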
/* CXL r3.1 section 8.2.9.9.11.1: Device Patrol Scrub Control Feature */
static const QemuUUID patrol_scrub_uuid = {
    .data = UUID(0x96dad7d6, 0xfde8, 0x482b, 0xa7, 0x33,
                 0x75, 0x77, 0x4e, 0x06, 0xdb, 0x8a)
};

typedef struct CXLMemPatrolScrubSetFeature {
    CXLSetFeatureInHeader hdr;
    CXLMemPatrolScrubWriteAttrs feat_data;
} QEMU_PACKED QEMU_ALIGNED(16) CXLMemPatrolScrubSetFeature;

/*
 * CXL r3.1 section 8.2.9.9.11.2:
 * DDR5 Error Check Scrub (ECS) Control Feature
 */
static const QemuUUID ecs_uuid = {
    .data = UUID(0xe5b13f22, 0x2328, 0x4a14, 0xb8, 0xba,
                 0xb9, 0x69, 0x1e, 0x89, 0x33, 0x86)
};

typedef struct CXLMemECSSetFeature {
    CXLSetFeatureInHeader hdr;
    CXLMemECSWriteAttrs feat_data[];
} QEMU_PACKED QEMU_ALIGNED(16) CXLMemECSSetFeature;
/* CXL r3.1 section 8.2.9.6.1: Get Supported Features (Opcode 0500h) */
static CXLRetCode cmd_features_get_supported(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    struct {
        uint32_t count;
        uint16_t start_index;
        uint16_t reserved;
    } QEMU_PACKED QEMU_ALIGNED(16) *get_feats_in = (void *)payload_in;
    struct {
        CXLSupportedFeatureHeader hdr;
        CXLSupportedFeatureEntry feat_entries[];
    } QEMU_PACKED QEMU_ALIGNED(16) *get_feats_out = (void *)payload_out;
    uint16_t index, req_entries;
    uint16_t entry;

    if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        return CXL_MBOX_UNSUPPORTED;
    }
    if (get_feats_in->count < sizeof(CXLSupportedFeatureHeader) ||
        get_feats_in->start_index >= CXL_FEATURE_MAX) {
        return CXL_MBOX_INVALID_INPUT;
    }

    req_entries = (get_feats_in->count -
                   sizeof(CXLSupportedFeatureHeader)) /
                  sizeof(CXLSupportedFeatureEntry);
    req_entries = MIN(req_entries,
                      (CXL_FEATURE_MAX - get_feats_in->start_index));

    for (entry = 0, index = get_feats_in->start_index;
         entry < req_entries; index++) {
        switch (index) {
        case CXL_FEATURE_PATROL_SCRUB:
            /* Fill supported feature entry for device patrol scrub control */
            get_feats_out->feat_entries[entry++] =
                (struct CXLSupportedFeatureEntry) {
                    .uuid = patrol_scrub_uuid,
                    .feat_index = index,
                    .get_feat_size = sizeof(CXLMemPatrolScrubReadAttrs),
                    .set_feat_size = sizeof(CXLMemPatrolScrubWriteAttrs),
                    .attr_flags = CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE,
                    .get_feat_version = CXL_MEMDEV_PS_GET_FEATURE_VERSION,
                    .set_feat_version = CXL_MEMDEV_PS_SET_FEATURE_VERSION,
                    .set_feat_effects = CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE |
                                        CXL_FEAT_ENTRY_SFE_CEL_VALID,
                };
            break;
        case CXL_FEATURE_ECS:
            /* Fill supported feature entry for device DDR5 ECS control */
            get_feats_out->feat_entries[entry++] =
                (struct CXLSupportedFeatureEntry) {
                    .uuid = ecs_uuid,
                    .feat_index = index,
                    .get_feat_size = sizeof(CXLMemECSReadAttrs),
                    .set_feat_size = sizeof(CXLMemECSWriteAttrs),
                    .attr_flags = CXL_FEAT_ENTRY_ATTR_FLAG_CHANGABLE,
                    .get_feat_version = CXL_ECS_GET_FEATURE_VERSION,
                    .set_feat_version = CXL_ECS_SET_FEATURE_VERSION,
                    .set_feat_effects = CXL_FEAT_ENTRY_SFE_IMMEDIATE_CONFIG_CHANGE |
                                        CXL_FEAT_ENTRY_SFE_CEL_VALID,
                };
            break;
        default:
            __builtin_unreachable();
        }
    }
    get_feats_out->hdr.nsuppfeats_dev = CXL_FEATURE_MAX;
    get_feats_out->hdr.entries = req_entries;
    *len_out = sizeof(CXLSupportedFeatureHeader) +
               req_entries * sizeof(CXLSupportedFeatureEntry);

    return CXL_MBOX_SUCCESS;
}
/* CXL r3.1 section 8.2.9.6.2: Get Feature (Opcode 0501h) */
static CXLRetCode cmd_features_get_feature(const struct cxl_cmd *cmd,
                                           uint8_t *payload_in,
                                           size_t len_in,
                                           uint8_t *payload_out,
                                           size_t *len_out,
                                           CXLCCI *cci)
{
    struct {
        QemuUUID uuid;
        uint16_t offset;
        uint16_t count;
        uint8_t selection;
    } QEMU_PACKED QEMU_ALIGNED(16) *get_feature;
    uint16_t bytes_to_copy = 0;
    CXLType3Dev *ct3d;
    CXLSetFeatureInfo *set_feat_info;

    if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        return CXL_MBOX_UNSUPPORTED;
    }

    ct3d = CXL_TYPE3(cci->d);
    get_feature = (void *)payload_in;

    set_feat_info = &ct3d->set_feat_info;
    if (qemu_uuid_is_equal(&get_feature->uuid, &set_feat_info->uuid)) {
        return CXL_MBOX_FEATURE_TRANSFER_IN_PROGRESS;
    }

    if (get_feature->selection != CXL_GET_FEATURE_SEL_CURRENT_VALUE) {
        return CXL_MBOX_UNSUPPORTED;
    }
    if (get_feature->offset + get_feature->count > cci->payload_max) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (qemu_uuid_is_equal(&get_feature->uuid, &patrol_scrub_uuid)) {
        if (get_feature->offset >= sizeof(CXLMemPatrolScrubReadAttrs)) {
            return CXL_MBOX_INVALID_INPUT;
        }
        bytes_to_copy = sizeof(CXLMemPatrolScrubReadAttrs) -
                        get_feature->offset;
        bytes_to_copy = MIN(bytes_to_copy, get_feature->count);
        memcpy(payload_out,
               (uint8_t *)&ct3d->patrol_scrub_attrs + get_feature->offset,
               bytes_to_copy);
    } else if (qemu_uuid_is_equal(&get_feature->uuid, &ecs_uuid)) {
        if (get_feature->offset >= sizeof(CXLMemECSReadAttrs)) {
            return CXL_MBOX_INVALID_INPUT;
        }
        bytes_to_copy = sizeof(CXLMemECSReadAttrs) - get_feature->offset;
        bytes_to_copy = MIN(bytes_to_copy, get_feature->count);
        memcpy(payload_out,
               (uint8_t *)&ct3d->ecs_attrs + get_feature->offset,
               bytes_to_copy);
    } else {
        return CXL_MBOX_UNSUPPORTED;
    }

    *len_out = bytes_to_copy;

    return CXL_MBOX_SUCCESS;
}
/* CXL r3.1 section 8.2.9.6.3: Set Feature (Opcode 0502h) */
static CXLRetCode cmd_features_set_feature(const struct cxl_cmd *cmd,
                                           uint8_t *payload_in,
                                           size_t len_in,
                                           uint8_t *payload_out,
                                           size_t *len_out,
                                           CXLCCI *cci)
{
    CXLSetFeatureInHeader *hdr = (void *)payload_in;
    CXLMemPatrolScrubWriteAttrs *ps_write_attrs;
    CXLMemPatrolScrubSetFeature *ps_set_feature;
    CXLMemECSWriteAttrs *ecs_write_attrs;
    CXLMemECSSetFeature *ecs_set_feature;
    CXLSetFeatureInfo *set_feat_info;
    uint16_t bytes_to_copy = 0;
    uint8_t data_transfer_flag;
    CXLType3Dev *ct3d;
    uint16_t count;

    if (len_in < sizeof(*hdr)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    if (!object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        return CXL_MBOX_UNSUPPORTED;
    }
    ct3d = CXL_TYPE3(cci->d);
    set_feat_info = &ct3d->set_feat_info;

    if (!qemu_uuid_is_null(&set_feat_info->uuid) &&
        !qemu_uuid_is_equal(&hdr->uuid, &set_feat_info->uuid)) {
        return CXL_MBOX_FEATURE_TRANSFER_IN_PROGRESS;
    }
    if (hdr->flags & CXL_SET_FEAT_DATA_SAVED_ACROSS_RESET) {
        set_feat_info->data_saved_across_reset = true;
    } else {
        set_feat_info->data_saved_across_reset = false;
    }

    data_transfer_flag =
        hdr->flags & CXL_SET_FEATURE_FLAG_DATA_TRANSFER_MASK;
    if (data_transfer_flag == CXL_SET_FEATURE_FLAG_INITIATE_DATA_TRANSFER) {
        set_feat_info->uuid = hdr->uuid;
        set_feat_info->data_size = 0;
    }
    set_feat_info->data_transfer_flag = data_transfer_flag;
    set_feat_info->data_offset = hdr->offset;
    bytes_to_copy = len_in - sizeof(CXLSetFeatureInHeader);

    if (bytes_to_copy == 0) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    if (qemu_uuid_is_equal(&hdr->uuid, &patrol_scrub_uuid)) {
        if (hdr->version != CXL_MEMDEV_PS_SET_FEATURE_VERSION) {
            return CXL_MBOX_UNSUPPORTED;
        }

        ps_set_feature = (void *)payload_in;
        ps_write_attrs = &ps_set_feature->feat_data;

        if ((uint32_t)hdr->offset + bytes_to_copy >
            sizeof(ct3d->patrol_scrub_wr_attrs)) {
            return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
        }
        memcpy((uint8_t *)&ct3d->patrol_scrub_wr_attrs + hdr->offset,
               ps_write_attrs,
               bytes_to_copy);
        set_feat_info->data_size += bytes_to_copy;

        if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER ||
            data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER) {
            ct3d->patrol_scrub_attrs.scrub_cycle &= ~0xFF;
            ct3d->patrol_scrub_attrs.scrub_cycle |=
                ct3d->patrol_scrub_wr_attrs.scrub_cycle_hr & 0xFF;
            ct3d->patrol_scrub_attrs.scrub_flags &= ~0x1;
            ct3d->patrol_scrub_attrs.scrub_flags |=
                ct3d->patrol_scrub_wr_attrs.scrub_flags & 0x1;
        }
    } else if (qemu_uuid_is_equal(&hdr->uuid,
                                  &ecs_uuid)) {
        if (hdr->version != CXL_ECS_SET_FEATURE_VERSION) {
            return CXL_MBOX_UNSUPPORTED;
        }

        ecs_set_feature = (void *)payload_in;
        ecs_write_attrs = ecs_set_feature->feat_data;

        if ((uint32_t)hdr->offset + bytes_to_copy >
            sizeof(ct3d->ecs_wr_attrs)) {
            return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
        }
        memcpy((uint8_t *)&ct3d->ecs_wr_attrs + hdr->offset,
               ecs_write_attrs,
               bytes_to_copy);
        set_feat_info->data_size += bytes_to_copy;

        if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER ||
            data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER) {
            ct3d->ecs_attrs.ecs_log_cap = ct3d->ecs_wr_attrs.ecs_log_cap;
            for (count = 0; count < CXL_ECS_NUM_MEDIA_FRUS; count++) {
                ct3d->ecs_attrs.fru_attrs[count].ecs_config =
                    ct3d->ecs_wr_attrs.fru_attrs[count].ecs_config & 0x1F;
            }
        }
    } else {
        return CXL_MBOX_UNSUPPORTED;
    }

    if (data_transfer_flag == CXL_SET_FEATURE_FLAG_FULL_DATA_TRANSFER ||
        data_transfer_flag == CXL_SET_FEATURE_FLAG_FINISH_DATA_TRANSFER ||
        data_transfer_flag == CXL_SET_FEATURE_FLAG_ABORT_DATA_TRANSFER) {
        memset(&set_feat_info->uuid, 0, sizeof(QemuUUID));
        if (qemu_uuid_is_equal(&hdr->uuid, &patrol_scrub_uuid)) {
            memset(&ct3d->patrol_scrub_wr_attrs, 0, set_feat_info->data_size);
        } else if (qemu_uuid_is_equal(&hdr->uuid, &ecs_uuid)) {
            memset(&ct3d->ecs_wr_attrs, 0, set_feat_info->data_size);
        }
        set_feat_info->data_transfer_flag = 0;
        set_feat_info->data_saved_across_reset = false;
        set_feat_info->data_offset = 0;
        set_feat_info->data_size = 0;
    }

    return CXL_MBOX_SUCCESS;
}
/* CXL r3.1 Section 8.2.9.9.1.1: Identify Memory Device (Opcode 4000h) */
static CXLRetCode cmd_identify_memory_device(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    struct {
        char fw_revision[0x10];
        uint64_t total_capacity;
        uint64_t volatile_capacity;
        uint64_t persistent_capacity;
        uint64_t partition_align;
        uint16_t info_event_log_size;
        uint16_t warning_event_log_size;
        uint16_t failure_event_log_size;
        uint16_t fatal_event_log_size;
        uint32_t lsa_size;
        uint8_t poison_list_max_mer[3];
        uint16_t inject_poison_limit;
        uint8_t poison_caps;
        uint8_t qos_telemetry_caps;
        uint16_t dc_event_log_size;
    } QEMU_PACKED *id;
    QEMU_BUILD_BUG_ON(sizeof(*id) != 0x45);
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;

    if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER))) {
        return CXL_MBOX_INTERNAL_ERROR;
    }
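    /*
     * All capacity fields in the Identify payload are expressed in
     * multiples of CXL_CAPACITY_MULTIPLIER, hence the alignment check
     * above and the divisions below.
     */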
    id = (void *)payload_out;

    snprintf(id->fw_revision, 0x10, "BWFW VERSION %02d", 0);

    stq_le_p(&id->total_capacity,
             cxl_dstate->static_mem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&id->persistent_capacity,
             cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&id->volatile_capacity,
             cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER);
    stl_le_p(&id->lsa_size, cvc->get_lsa_size(ct3d));
    /* 256 poison records */
    st24_le_p(id->poison_list_max_mer, 256);
    /* No limit - so limited by main poison record limit */
    stw_le_p(&id->inject_poison_limit, 0);
    stw_le_p(&id->dc_event_log_size, CXL_DC_EVENT_LOG_SIZE);

    *len_out = sizeof(*id);
    return CXL_MBOX_SUCCESS;
}
/* CXL r3.1 Section 8.2.9.9.2.1: Get Partition Info (Opcode 4100h) */
static CXLRetCode cmd_ccls_get_partition_info(const struct cxl_cmd *cmd,
                                              uint8_t *payload_in,
                                              size_t len_in,
                                              uint8_t *payload_out,
                                              size_t *len_out,
                                              CXLCCI *cci)
{
    CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
    struct {
        uint64_t active_vmem;
        uint64_t active_pmem;
        uint64_t next_vmem;
        uint64_t next_pmem;
    } QEMU_PACKED *part_info = (void *)payload_out;
    QEMU_BUILD_BUG_ON(sizeof(*part_info) != 0x20);
    CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate);

    if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER)) ||
        (!QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER))) {
        return CXL_MBOX_INTERNAL_ERROR;
    }

    stq_le_p(&part_info->active_vmem,
             cxl_dstate->vmem_size / CXL_CAPACITY_MULTIPLIER);
    /*
     * When both next_vmem and next_pmem are 0, there is no pending change
     * to partitioning.
     */
    stq_le_p(&part_info->next_vmem, 0);
    stq_le_p(&part_info->active_pmem,
             cxl_dstate->pmem_size / CXL_CAPACITY_MULTIPLIER);
    stq_le_p(&part_info->next_pmem, 0);

    *len_out = sizeof(*part_info);
    return CXL_MBOX_SUCCESS;
}
/* CXL r3.1 Section 8.2.9.9.2.3: Get LSA (Opcode 4102h) */
static CXLRetCode cmd_ccls_get_lsa(const struct cxl_cmd *cmd,
                                   uint8_t *payload_in,
                                   size_t len_in,
                                   uint8_t *payload_out,
                                   size_t *len_out,
                                   CXLCCI *cci)
{
    struct {
        uint32_t offset;
        uint32_t length;
    } QEMU_PACKED *get_lsa;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    uint64_t offset, length;

    get_lsa = (void *)payload_in;
    offset = get_lsa->offset;
    length = get_lsa->length;

    if (offset + length > cvc->get_lsa_size(ct3d)) {
        *len_out = 0;
        return CXL_MBOX_INVALID_INPUT;
    }

    *len_out = cvc->get_lsa(ct3d, payload_out, length, offset);
    return CXL_MBOX_SUCCESS;
}
/* CXL r3.1 Section 8.2.9.9.2.4: Set LSA (Opcode 4103h) */
static CXLRetCode cmd_ccls_set_lsa(const struct cxl_cmd *cmd,
                                   uint8_t *payload_in,
                                   size_t len_in,
                                   uint8_t *payload_out,
                                   size_t *len_out,
                                   CXLCCI *cci)
{
    struct set_lsa_pl {
        uint32_t offset;
        uint32_t rsvd;
        uint8_t data[];
    } QEMU_PACKED;
    struct set_lsa_pl *set_lsa_payload = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    const size_t hdr_len = offsetof(struct set_lsa_pl, data);

    *len_out = 0;
    if (len_in < hdr_len) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }
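    /*
     * len_in still includes the header bytes here, so the bound is checked
     * against the LSA size with hdr_len added to the right-hand side rather
     * than first subtracting it from len_in.
     */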
    if (set_lsa_payload->offset + len_in > cvc->get_lsa_size(ct3d) + hdr_len) {
        return CXL_MBOX_INVALID_INPUT;
    }
    len_in -= hdr_len;

    cvc->set_lsa(ct3d, set_lsa_payload->data, len_in, set_lsa_payload->offset);
    return CXL_MBOX_SUCCESS;
}
/* Perform the actual device zeroing */
static void __do_sanitization(CXLType3Dev *ct3d)
{
    MemoryRegion *mr;

    if (ct3d->hostvmem) {
        mr = host_memory_backend_get_memory(ct3d->hostvmem);
        if (mr) {
            void *hostmem = memory_region_get_ram_ptr(mr);

            memset(hostmem, 0, memory_region_size(mr));
        }
    }

    if (ct3d->hostpmem) {
        mr = host_memory_backend_get_memory(ct3d->hostpmem);
        if (mr) {
            void *hostmem = memory_region_get_ram_ptr(mr);

            memset(hostmem, 0, memory_region_size(mr));
        }
    }

    if (ct3d->lsa) {
        mr = host_memory_backend_get_memory(ct3d->lsa);
        if (mr) {
            void *lsa = memory_region_get_ram_ptr(mr);

            memset(lsa, 0, memory_region_size(mr));
        }
    }

    cxl_discard_all_event_records(&ct3d->cxl_dstate);
}
/*
 * CXL r3.1 Section 8.2.9.9.5.1: Sanitize (Opcode 4400h)
 *
 * Once the Sanitize command has started successfully, the device shall be
 * placed in the media disabled state. If the command fails or is interrupted
 * by a reset or power failure, it shall remain in the media disabled state
 * until a successful Sanitize command has been completed. During this state:
 *
 * 1. Memory writes to the device will have no effect, and all memory reads
 * will return random values (no user data returned, even for locations that
 * the failed Sanitize operation didn’t sanitize yet).
 *
 * 2. Mailbox commands shall still be processed in the disabled state, except
 * that commands that access Sanitized areas shall fail with the Media Disabled
 * error code.
 */
static CXLRetCode cmd_sanitize_overwrite(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    uint64_t total_mem; /* in MiB */
    int secs;

    total_mem = (ct3d->cxl_dstate.vmem_size + ct3d->cxl_dstate.pmem_size) >> 20;
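    /*
     * Estimated sanitize runtime scales with capacity, roughly doubling
     * for each doubling of device size and capped at 4 hours.
     */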
    if (total_mem <= 512) {
        secs = 4;
    } else if (total_mem <= 1024) {
        secs = 8;
    } else if (total_mem <= 2 * 1024) {
        secs = 15;
    } else if (total_mem <= 4 * 1024) {
        secs = 30;
    } else if (total_mem <= 8 * 1024) {
        secs = 60;
    } else if (total_mem <= 16 * 1024) {
        secs = 2 * 60;
    } else if (total_mem <= 32 * 1024) {
        secs = 4 * 60;
    } else if (total_mem <= 64 * 1024) {
        secs = 8 * 60;
    } else if (total_mem <= 128 * 1024) {
        secs = 15 * 60;
    } else if (total_mem <= 256 * 1024) {
        secs = 30 * 60;
    } else if (total_mem <= 512 * 1024) {
        secs = 60 * 60;
    } else if (total_mem <= 1024 * 1024) {
        secs = 120 * 60;
    } else {
        secs = 240 * 60; /* max 4 hrs */
    }

    /* Other background commands are rejected with BUSY while this runs */
    cci->bg.runtime = secs * 1000UL;
    *len_out = 0;

    cxl_dev_disable_media(&ct3d->cxl_dstate);

    /* sanitize when done */
    return CXL_MBOX_BG_STARTED;
}
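/* CXL r3.1: Get Security State (Opcode 4500h) */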
static CXLRetCode cmd_get_security_state(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    uint32_t *state = (uint32_t *)payload_out;

    *state = 0;
    *len_out = 4;
    return CXL_MBOX_SUCCESS;
}
/*
 * CXL r3.1 Section 8.2.9.9.4.1: Get Poison List (Opcode 4300h)
 *
 * This is very inefficient, but good enough for now!
 * Also the payload will always fit, so no need to handle the MORE flag and
 * make this stateful. We may want to allow longer poison lists to aid
 * testing that kernel functionality.
 */
static CXLRetCode cmd_media_get_poison_list(const struct cxl_cmd *cmd,
                                            uint8_t *payload_in,
                                            size_t len_in,
                                            uint8_t *payload_out,
                                            size_t *len_out,
                                            CXLCCI *cci)
{
    struct get_poison_list_pl {
        uint64_t pa;
        uint64_t length;
    } QEMU_PACKED;

    struct get_poison_list_out_pl {
        uint8_t flags;
        uint8_t rsvd1;
        uint64_t overflow_timestamp;
        uint16_t count;
        uint8_t rsvd2[0x14];
        struct {
            uint64_t addr;
            uint32_t length;
            uint32_t resv;
        } QEMU_PACKED records[];
    } QEMU_PACKED;

    struct get_poison_list_pl *in = (void *)payload_in;
    struct get_poison_list_out_pl *out = (void *)payload_out;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    uint16_t record_count = 0, i = 0;
    uint64_t query_start, query_length;
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLPoison *ent;
    uint16_t out_pl_len;

    query_start = ldq_le_p(&in->pa);
    /* 64 byte alignment required */
    if (query_start & 0x3f) {
        return CXL_MBOX_INVALID_INPUT;
    }
    query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE;

    QLIST_FOREACH(ent, poison_list, node) {
        /* Check for no overlap */
        if (!ranges_overlap(ent->start, ent->length,
                            query_start, query_length)) {
            continue;
        }
        record_count++;
    }
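    /*
     * The loop above only counted overlapping entries so the payload
     * length is known up front; the loop below emits the records.
     */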
    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
    assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);

    QLIST_FOREACH(ent, poison_list, node) {
        uint64_t start, stop;

        /* Check for no overlap */
        if (!ranges_overlap(ent->start, ent->length,
                            query_start, query_length)) {
            continue;
        }

        /* Deal with overlap */
        start = MAX(ROUND_DOWN(ent->start, 64ull), query_start);
        stop = MIN(ROUND_DOWN(ent->start, 64ull) + ent->length,
                   query_start + query_length);
        stq_le_p(&out->records[i].addr, start | (ent->type & 0x7));
        stl_le_p(&out->records[i].length, (stop - start) / CXL_CACHE_LINE_SIZE);
        i++;
    }
    if (ct3d->poison_list_overflowed) {
        out->flags = (1 << 1);
        stq_le_p(&out->overflow_timestamp, ct3d->poison_list_overflow_ts);
    }
    if (scan_media_running(cci)) {
        out->flags |= (1 << 2);
    }

    stw_le_p(&out->count, record_count);
    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}
/* CXL r3.1 Section 8.2.9.9.4.2: Inject Poison (Opcode 4301h) */
static CXLRetCode cmd_media_inject_poison(const struct cxl_cmd *cmd,
                                          uint8_t *payload_in,
                                          size_t len_in,
                                          uint8_t *payload_out,
                                          size_t *len_out,
                                          CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLPoison *ent;
    struct inject_poison_pl {
        uint64_t dpa;
    };
    struct inject_poison_pl *in = (void *)payload_in;
    uint64_t dpa = ldq_le_p(&in->dpa);
    CXLPoison *p;

    QLIST_FOREACH(ent, poison_list, node) {
        if (dpa >= ent->start &&
            dpa + CXL_CACHE_LINE_SIZE <= ent->start + ent->length) {
            return CXL_MBOX_SUCCESS;
        }
    }
    /*
     * Freeze the list if there is an on-going scan media operation.
     */
    if (scan_media_running(cci)) {
        /*
         * XXX: Spec is ambiguous - is this case considered
         * a successful return despite not adding to the list?
         */
        goto success;
    }

    if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
        return CXL_MBOX_INJECT_POISON_LIMIT;
    }
    p = g_new0(CXLPoison, 1);
    p->length = CXL_CACHE_LINE_SIZE;
    p->start = dpa;
    p->type = CXL_POISON_TYPE_INJECTED;

    /*
     * Possible todo: Merge with existing entry if next to it and if same type
     */
    QLIST_INSERT_HEAD(poison_list, p, node);
    ct3d->poison_list_cnt++;
success:
    *len_out = 0;

    return CXL_MBOX_SUCCESS;
}
/* CXL r3.1 Section 8.2.9.9.4.3: Clear Poison (Opcode 4302h) */
static CXLRetCode cmd_media_clear_poison(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    CXLPoisonList *poison_list = &ct3d->poison_list;
    CXLType3Class *cvc = CXL_TYPE3_GET_CLASS(ct3d);
    struct clear_poison_pl {
        uint64_t dpa;
        uint8_t data[64];
    };
    CXLPoison *ent;
    uint64_t dpa;

    struct clear_poison_pl *in = (void *)payload_in;

    dpa = ldq_le_p(&in->dpa);
    if (dpa + CXL_CACHE_LINE_SIZE > cxl_dstate->static_mem_size +
        ct3d->dc.total_capacity) {
        return CXL_MBOX_INVALID_PA;
    }

    /* Clearing a region with no poison is not an error so always do so */
    if (cvc->set_cacheline) {
        if (!cvc->set_cacheline(ct3d, dpa, in->data)) {
            return CXL_MBOX_INTERNAL_ERROR;
        }
    }

    /*
     * Freeze the list if there is an on-going scan media operation.
     */
    if (scan_media_running(cci)) {
        /*
         * XXX: Spec is ambiguous - is this case considered
         * a successful return despite not removing from the list?
         */
        goto success;
    }

    QLIST_FOREACH(ent, poison_list, node) {
        /*
         * Test for contained in entry. Simpler than general case
         * as clearing 64 bytes and entries 64 byte aligned
         */
        if ((dpa >= ent->start) && (dpa < ent->start + ent->length)) {
            break;
        }
    }
    if (!ent) {
        goto success;
    }

    QLIST_REMOVE(ent, node);
    ct3d->poison_list_cnt--;
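    /*
     * Clearing one cacheline out of a larger entry may leave poison on
     * either side, so the entry is replaced by up to two fragments: one
     * before the cleared line and one after it.
     */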
    if (dpa > ent->start) {
        CXLPoison *frag;
        /* Cannot overflow as replacing existing entry */

        frag = g_new0(CXLPoison, 1);

        frag->start = ent->start;
        frag->length = dpa - ent->start;
        frag->type = ent->type;

        QLIST_INSERT_HEAD(poison_list, frag, node);
        ct3d->poison_list_cnt++;
    }

    if (dpa + CXL_CACHE_LINE_SIZE < ent->start + ent->length) {
        CXLPoison *frag;

        if (ct3d->poison_list_cnt == CXL_POISON_LIST_LIMIT) {
            cxl_set_poison_list_overflowed(ct3d);
        } else {
            frag = g_new0(CXLPoison, 1);

            frag->start = dpa + CXL_CACHE_LINE_SIZE;
            frag->length = ent->start + ent->length - frag->start;
            frag->type = ent->type;
            QLIST_INSERT_HEAD(poison_list, frag, node);
            ct3d->poison_list_cnt++;
        }
    }
    /* Any fragments have been added, free original entry */
    g_free(ent);
success:
    *len_out = 0;

    return CXL_MBOX_SUCCESS;
}
/*
 * CXL r3.1 section 8.2.9.9.4.4: Get Scan Media Capabilities
 */
static CXLRetCode
cmd_media_get_scan_media_capabilities(const struct cxl_cmd *cmd,
                                      uint8_t *payload_in,
                                      size_t len_in,
                                      uint8_t *payload_out,
                                      size_t *len_out,
                                      CXLCCI *cci)
{
    struct get_scan_media_capabilities_pl {
        uint64_t pa;
        uint64_t length;
    } QEMU_PACKED;

    struct get_scan_media_capabilities_out_pl {
        uint32_t estimated_runtime_ms;
    };

    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    struct get_scan_media_capabilities_pl *in = (void *)payload_in;
    struct get_scan_media_capabilities_out_pl *out = (void *)payload_out;
    uint64_t query_start;
    uint64_t query_length;

    query_start = ldq_le_p(&in->pa);
    /* 64 byte alignment required */
    if (query_start & 0x3f) {
        return CXL_MBOX_INVALID_INPUT;
    }
    query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE;

    if (query_start + query_length > cxl_dstate->static_mem_size) {
        return CXL_MBOX_INVALID_PA;
    }

    /*
     * Just use 400 nanosecond access/read latency + 100 ns for
     * the cost of updating the poison list. For small enough
     * chunks return at least 1 ms.
     */
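    /* 500 ns per 64-byte cacheline == 0.0005 ms / 64 per byte scanned */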
    stl_le_p(&out->estimated_runtime_ms,
             MAX(1, query_length * (0.0005L / 64)));

    *len_out = sizeof(*out);
    return CXL_MBOX_SUCCESS;
}

static void __do_scan_media(CXLType3Dev *ct3d)
{
    CXLPoison *ent;
    unsigned int results_cnt = 0;

    QLIST_FOREACH(ent, &ct3d->scan_media_results, node) {
        results_cnt++;
    }

    /* only scan media may clear the overflow */
    if (ct3d->poison_list_overflowed &&
        ct3d->poison_list_cnt == results_cnt) {
        cxl_clear_poison_list_overflowed(ct3d);
    }
    /* scan media has run since last conventional reset */
    ct3d->scan_media_hasrun = true;
}
/*
 * CXL r3.1 section 8.2.9.9.4.5: Scan Media
 */
static CXLRetCode cmd_media_scan_media(const struct cxl_cmd *cmd,
                                       uint8_t *payload_in,
                                       size_t len_in,
                                       uint8_t *payload_out,
                                       size_t *len_out,
                                       CXLCCI *cci)
{
    struct scan_media_pl {
        uint64_t pa;
        uint64_t length;
        uint8_t flags;
    } QEMU_PACKED;

    struct scan_media_pl *in = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
    uint64_t query_start;
    uint64_t query_length;
    CXLPoison *ent, *next;

    query_start = ldq_le_p(&in->pa);
    /* 64 byte alignment required */
    if (query_start & 0x3f) {
        return CXL_MBOX_INVALID_INPUT;
    }
    query_length = ldq_le_p(&in->length) * CXL_CACHE_LINE_SIZE;

    if (query_start + query_length > cxl_dstate->static_mem_size) {
        return CXL_MBOX_INVALID_PA;
    }
    if (ct3d->dc.num_regions && query_start + query_length >=
            cxl_dstate->static_mem_size + ct3d->dc.total_capacity) {
        return CXL_MBOX_INVALID_PA;
    }

    if (in->flags == 0) { /* TODO */
        qemu_log_mask(LOG_UNIMP,
                      "Scan Media Event Log is unsupported\n");
    }

    /* any previous results are discarded upon a new Scan Media */
    QLIST_FOREACH_SAFE(ent, &ct3d->scan_media_results, node, next) {
        QLIST_REMOVE(ent, node);
        g_free(ent);
    }

    /* kill the poison list - it will be recreated */
    if (ct3d->poison_list_overflowed) {
        QLIST_FOREACH_SAFE(ent, &ct3d->poison_list, node, next) {
            QLIST_REMOVE(ent, node);
            g_free(ent);
            ct3d->poison_list_cnt--;
        }
    }

    /*
     * Scan the backup list and move corresponding entries
     * into the results list, updating the poison list
     * when possible.
     */
    QLIST_FOREACH_SAFE(ent, &ct3d->poison_list_bkp, node, next) {
        CXLPoison *res;

        if (ent->start >= query_start + query_length ||
            ent->start + ent->length <= query_start) {
            continue;
        }

        /*
         * If a Get Poison List cmd comes in while this
         * scan is being done, it will see the new complete
         * list, while setting the respective flag.
         */
        if (ct3d->poison_list_cnt < CXL_POISON_LIST_LIMIT) {
            CXLPoison *p = g_new0(CXLPoison, 1);

            p->start = ent->start;
            p->length = ent->length;
            p->type = ent->type;
            QLIST_INSERT_HEAD(&ct3d->poison_list, p, node);
            ct3d->poison_list_cnt++;
        }

        res = g_new0(CXLPoison, 1);
        res->start = ent->start;
        res->length = ent->length;
        res->type = ent->type;
        QLIST_INSERT_HEAD(&ct3d->scan_media_results, res, node);

        QLIST_REMOVE(ent, node);
        g_free(ent);
    }

    cci->bg.runtime = MAX(1, query_length * (0.0005L / 64));
    *len_out = 0;

    return CXL_MBOX_BG_STARTED;
}
/*
 * CXL r3.1 section 8.2.9.9.4.6: Get Scan Media Results
 */
static CXLRetCode cmd_media_get_scan_media_results(const struct cxl_cmd *cmd,
                                                   uint8_t *payload_in,
                                                   size_t len_in,
                                                   uint8_t *payload_out,
                                                   size_t *len_out,
                                                   CXLCCI *cci)
{
    struct get_scan_media_results_out_pl {
        uint64_t dpa_restart;
        uint64_t length;
        uint8_t flags;
        uint8_t rsvd1;
        uint16_t count;
        uint8_t rsvd2[0xc];
        struct {
            uint64_t addr;
            uint32_t length;
            uint32_t resv;
        } QEMU_PACKED records[];
    } QEMU_PACKED;

    struct get_scan_media_results_out_pl *out = (void *)payload_out;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLPoisonList *scan_media_results = &ct3d->scan_media_results;
    CXLPoison *ent, *next;
    uint16_t total_count = 0, record_count = 0, i = 0;
    uint16_t out_pl_len;

    if (!ct3d->scan_media_hasrun) {
        return CXL_MBOX_UNSUPPORTED;
    }

    /*
     * Calculate limits, all entries are within the same address range of the
     * last scan media call.
     */
    QLIST_FOREACH(ent, scan_media_results, node) {
        size_t rec_size = record_count * sizeof(out->records[0]);

        if (sizeof(*out) + rec_size < CXL_MAILBOX_MAX_PAYLOAD_SIZE) {
            record_count++;
        }
        total_count++;
    }
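    /*
     * Only as many records as fit in one payload are returned; any
     * remainder stays queued and is signalled via the More Media Error
     * Records flag below.
     */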
    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
    assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);
    memset(out, 0, out_pl_len);

    QLIST_FOREACH_SAFE(ent, scan_media_results, node, next) {
        uint64_t start, stop;

        if (i == record_count) {
            break;
        }

        start = ROUND_DOWN(ent->start, 64ull);
        stop = ROUND_DOWN(ent->start, 64ull) + ent->length;
        stq_le_p(&out->records[i].addr, start);
        stl_le_p(&out->records[i].length, (stop - start) / CXL_CACHE_LINE_SIZE);
        i++;

        /* consume the returning entry */
        QLIST_REMOVE(ent, node);
        g_free(ent);
    }

    stw_le_p(&out->count, record_count);
    if (total_count > record_count) {
        out->flags = (1 << 0); /* More Media Error Records */
    }

    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}
/*
 * CXL r3.1 section 8.2.9.9.9.1: Get Dynamic Capacity Configuration
 * (Opcode: 4800h)
 */
static CXLRetCode cmd_dcd_get_dyn_cap_config(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    struct {
        uint8_t region_cnt;
        uint8_t start_rid;
    } QEMU_PACKED *in = (void *)payload_in;
    struct {
        uint8_t num_regions;
        uint8_t regions_returned;
        uint8_t rsvd1[6];
        struct {
            uint64_t base;
            uint64_t decode_len;
            uint64_t region_len;
            uint64_t block_size;
            uint32_t dsmadhandle;
            uint8_t flags;
            uint8_t rsvd2[3];
        } QEMU_PACKED records[];
    } QEMU_PACKED *out = (void *)payload_out;
    struct {
        uint32_t num_extents_supported;
        uint32_t num_extents_available;
        uint32_t num_tags_supported;
        uint32_t num_tags_available;
    } QEMU_PACKED *extra_out;
    uint16_t record_count;
    uint16_t i;
    uint16_t out_pl_len;
    uint8_t start_rid;

    start_rid = in->start_rid;
    if (start_rid >= ct3d->dc.num_regions) {
        return CXL_MBOX_INVALID_INPUT;
    }

    record_count = MIN(ct3d->dc.num_regions - in->start_rid, in->region_cnt);

    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
    extra_out = (void *)(payload_out + out_pl_len);
    out_pl_len += sizeof(*extra_out);
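    /*
     * extra_out sits immediately after the variable-length records array,
     * so the payload layout is: fixed header, region records, then the
     * extent and tag counts.
     */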
    assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);

    out->num_regions = ct3d->dc.num_regions;
    out->regions_returned = record_count;
    for (i = 0; i < record_count; i++) {
        stq_le_p(&out->records[i].base,
                 ct3d->dc.regions[start_rid + i].base);
        stq_le_p(&out->records[i].decode_len,
                 ct3d->dc.regions[start_rid + i].decode_len /
                 CXL_CAPACITY_MULTIPLIER);
        stq_le_p(&out->records[i].region_len,
                 ct3d->dc.regions[start_rid + i].len);
        stq_le_p(&out->records[i].block_size,
                 ct3d->dc.regions[start_rid + i].block_size);
        stl_le_p(&out->records[i].dsmadhandle,
                 ct3d->dc.regions[start_rid + i].dsmadhandle);
        out->records[i].flags = ct3d->dc.regions[start_rid + i].flags;
    }
    /*
     * TODO: Assign values once extents and tags are introduced
     * to use.
     */
    stl_le_p(&extra_out->num_extents_supported, CXL_NUM_EXTENTS_SUPPORTED);
    stl_le_p(&extra_out->num_extents_available, CXL_NUM_EXTENTS_SUPPORTED -
             ct3d->dc.total_extent_count);
    stl_le_p(&extra_out->num_tags_supported, CXL_NUM_TAGS_SUPPORTED);
    stl_le_p(&extra_out->num_tags_available, CXL_NUM_TAGS_SUPPORTED);

    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}
/*
 * CXL r3.1 section 8.2.9.9.9.2:
 * Get Dynamic Capacity Extent List (Opcode 4801h)
 */
static CXLRetCode cmd_dcd_get_dyn_cap_ext_list(const struct cxl_cmd *cmd,
                                               uint8_t *payload_in,
                                               size_t len_in,
                                               uint8_t *payload_out,
                                               size_t *len_out,
                                               CXLCCI *cci)
{
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    struct {
        uint32_t extent_cnt;
        uint32_t start_extent_id;
    } QEMU_PACKED *in = (void *)payload_in;
    struct {
        uint32_t count;
        uint32_t total_extents;
        uint32_t generation_num;
        uint8_t rsvd[4];
        CXLDCExtentRaw records[];
    } QEMU_PACKED *out = (void *)payload_out;
    uint32_t start_extent_id = in->start_extent_id;
    CXLDCExtentList *extent_list = &ct3d->dc.extents;
    uint16_t record_count = 0, i = 0, record_done = 0;
    uint16_t out_pl_len, size;
    CXLDCExtent *ent;

    if (start_extent_id > ct3d->dc.total_extent_count) {
        return CXL_MBOX_INVALID_INPUT;
    }

    record_count = MIN(in->extent_cnt,
                       ct3d->dc.total_extent_count - start_extent_id);
    size = CXL_MAILBOX_MAX_PAYLOAD_SIZE - sizeof(*out);
    record_count = MIN(record_count, size / sizeof(out->records[0]));
    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);

    stl_le_p(&out->count, record_count);
    stl_le_p(&out->total_extents, ct3d->dc.total_extent_count);
    stl_le_p(&out->generation_num, ct3d->dc.ext_list_gen_seq);

    if (record_count > 0) {
        CXLDCExtentRaw *out_rec = &out->records[record_done];

        QTAILQ_FOREACH(ent, extent_list, node) {
            if (i++ < start_extent_id) {
                continue;
            }
            stq_le_p(&out_rec->start_dpa, ent->start_dpa);
            stq_le_p(&out_rec->len, ent->len);
            memcpy(&out_rec->tag, ent->tag, 0x10);
            stw_le_p(&out_rec->shared_seq, ent->shared_seq);
            record_done++;
            out_rec++;
            if (record_done == record_count) {
                break;
            }
        }
    }

    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}
/*
 * Check whether any bit between addr[nr, nr+size) is set,
 * return true if any bit is set, otherwise return false
 */
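/*
 * Example: with a bitmap in which only bits 4..7 are set,
 * test_any_bits_set(map, 0, 4) returns false while
 * test_any_bits_set(map, 6, 2) returns true.
 */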
bool test_any_bits_set(const unsigned long *addr, unsigned long nr,
                       unsigned long size)
{
    unsigned long res = find_next_bit(addr, size + nr, nr);

    return res < nr + size;
}
CXLDCRegion *cxl_find_dc_region(CXLType3Dev *ct3d, uint64_t dpa, uint64_t len)
{
    int i;
    CXLDCRegion *region = &ct3d->dc.regions[0];

    if (dpa < region->base ||
        dpa >= region->base + ct3d->dc.total_capacity) {
        return NULL;
    }

    /*
     * CXL r3.1 section 9.13.3: Dynamic Capacity Device (DCD)
     *
     * Regions are used in increasing-DPA order, with Region 0 being used for
     * the lowest DPA of Dynamic Capacity and Region 7 for the highest DPA.
     * So check from the last region to find where the dpa belongs. Extents
     * that cross multiple regions are not allowed.
     */
    for (i = ct3d->dc.num_regions - 1; i >= 0; i--) {
        region = &ct3d->dc.regions[i];
        if (dpa >= region->base) {
            if (dpa + len > region->base + region->len) {
                return NULL;
            }
            return region;
        }
    }

    return NULL;
}
void cxl_insert_extent_to_extent_list(CXLDCExtentList *list,
                                      uint64_t dpa,
                                      uint64_t len,
                                      uint8_t *tag,
                                      uint16_t shared_seq)
{
    CXLDCExtent *extent;

    extent = g_new0(CXLDCExtent, 1);
    extent->start_dpa = dpa;
    extent->len = len;
    if (tag) {
        memcpy(extent->tag, tag, 0x10);
    }
    extent->shared_seq = shared_seq;

    QTAILQ_INSERT_TAIL(list, extent, node);
}

void cxl_remove_extent_from_extent_list(CXLDCExtentList *list,
                                        CXLDCExtent *extent)
{
    QTAILQ_REMOVE(list, extent, node);
    g_free(extent);
}

/*
 * Add a new extent to the extent "group" if group exists;
 * otherwise, create a new group
 * Return value: the extent group where the extent is inserted.
 */
CXLDCExtentGroup *cxl_insert_extent_to_extent_group(CXLDCExtentGroup *group,
                                                    uint64_t dpa,
                                                    uint64_t len,
                                                    uint8_t *tag,
                                                    uint16_t shared_seq)
{
    if (!group) {
        group = g_new0(CXLDCExtentGroup, 1);
        QTAILQ_INIT(&group->list);
    }
    cxl_insert_extent_to_extent_list(&group->list, dpa, len,
                                     tag, shared_seq);
    return group;
}

void cxl_extent_group_list_insert_tail(CXLDCExtentGroupList *list,
                                       CXLDCExtentGroup *group)
{
    QTAILQ_INSERT_TAIL(list, group, node);
}

void cxl_extent_group_list_delete_front(CXLDCExtentGroupList *list)
{
    CXLDCExtent *ent, *ent_next;
    CXLDCExtentGroup *group = QTAILQ_FIRST(list);

    QTAILQ_REMOVE(list, group, node);
    QTAILQ_FOREACH_SAFE(ent, &group->list, node, ent_next) {
        cxl_remove_extent_from_extent_list(&group->list, ent);
    }
    g_free(group);
}
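/*
 * Extent groups queue the extents offered to the host as a unit;
 * cmd_dcd_add_dyn_cap_rsp() below retires the front group of the pending
 * list once the host has responded to that offer.
 */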
/*
 * CXL r3.1 Table 8-168: Add Dynamic Capacity Response Input Payload
 * CXL r3.1 Table 8-170: Release Dynamic Capacity Input Payload
 */
typedef struct CXLUpdateDCExtentListInPl {
    uint32_t num_entries_updated;
    uint8_t flags;
    uint8_t rsvd[3];
    /* CXL r3.1 Table 8-169: Updated Extent */
    struct {
        uint64_t start_dpa;
        uint64_t len;
        uint8_t rsvd[8];
    } QEMU_PACKED updated_entries[];
} QEMU_PACKED CXLUpdateDCExtentListInPl;
/*
 * Validate the extents in the extent list to be operated on:
 * 1. The extent should be in the range of a valid DC region;
 * 2. The extent should not cross multiple regions;
 * 3. The start DPA and the length of the extent should align with the block
 * size of the region;
 * 4. The address range of multiple extents in the list should not overlap.
 */
static CXLRetCode cxl_detect_malformed_extent_list(CXLType3Dev *ct3d,
        const CXLUpdateDCExtentListInPl *in)
{
    uint64_t min_block_size = UINT64_MAX;
    CXLDCRegion *region;
    CXLDCRegion *lastregion = &ct3d->dc.regions[ct3d->dc.num_regions - 1];
    g_autofree unsigned long *blk_bitmap = NULL;
    uint64_t dpa, len;
    uint32_t i;

    for (i = 0; i < ct3d->dc.num_regions; i++) {
        region = &ct3d->dc.regions[i];
        min_block_size = MIN(min_block_size, region->block_size);
    }

    blk_bitmap = bitmap_new((lastregion->base + lastregion->len -
                             ct3d->dc.regions[0].base) / min_block_size);
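    /*
     * One bit per min_block_size chunk across the whole DC address range:
     * marking each extent's blocks in the bitmap makes the overlap check
     * between entries of this request a simple bit test.
     */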
    for (i = 0; i < in->num_entries_updated; i++) {
        dpa = in->updated_entries[i].start_dpa;
        len = in->updated_entries[i].len;

        region = cxl_find_dc_region(ct3d, dpa, len);
        if (!region) {
            return CXL_MBOX_INVALID_PA;
        }

        dpa -= ct3d->dc.regions[0].base;
        if (dpa % region->block_size || len % region->block_size) {
            return CXL_MBOX_INVALID_EXTENT_LIST;
        }
        /* the dpa range already covered by some other extents in the list */
        if (test_any_bits_set(blk_bitmap, dpa / min_block_size,
                              len / min_block_size)) {
            return CXL_MBOX_INVALID_EXTENT_LIST;
        }
        bitmap_set(blk_bitmap, dpa / min_block_size, len / min_block_size);
    }

    return CXL_MBOX_SUCCESS;
}
static CXLRetCode cxl_dcd_add_dyn_cap_rsp_dry_run(CXLType3Dev *ct3d,
        const CXLUpdateDCExtentListInPl *in)
{
    uint32_t i;
    CXLDCExtent *ent;
    CXLDCExtentGroup *ext_group;
    uint64_t dpa, len;
    Range range1, range2;

    for (i = 0; i < in->num_entries_updated; i++) {
        dpa = in->updated_entries[i].start_dpa;
        len = in->updated_entries[i].len;

        range_init_nofail(&range1, dpa, len);

        /*
         * The host-accepted DPA range must be contained by the first extent
         * group in the pending list
         */
        ext_group = QTAILQ_FIRST(&ct3d->dc.extents_pending);
        if (!cxl_extents_contains_dpa_range(&ext_group->list, dpa, len)) {
            return CXL_MBOX_INVALID_PA;
        }

        /* to-be-added range should not overlap with range already accepted */
        QTAILQ_FOREACH(ent, &ct3d->dc.extents, node) {
            range_init_nofail(&range2, ent->start_dpa, ent->len);
            if (range_overlaps_range(&range1, &range2)) {
                return CXL_MBOX_INVALID_PA;
            }
        }
    }
    return CXL_MBOX_SUCCESS;
}
/*
 * CXL r3.1 section 8.2.9.9.9.3: Add Dynamic Capacity Response (Opcode 4802h)
 * An extent is added to the extent list and becomes usable only after the
 * response is processed successfully.
 */
static CXLRetCode cmd_dcd_add_dyn_cap_rsp(const struct cxl_cmd *cmd,
                                          uint8_t *payload_in,
                                          size_t len_in,
                                          uint8_t *payload_out,
                                          size_t *len_out,
                                          CXLCCI *cci)
{
    CXLUpdateDCExtentListInPl *in = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDCExtentList *extent_list = &ct3d->dc.extents;
    uint32_t i;
    uint64_t dpa, len;
    CXLRetCode ret;

    if (len_in < sizeof(*in)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    /* Zero updated entries: retire the pending offer without adding anything */
    if (in->num_entries_updated == 0) {
        cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending);
        return CXL_MBOX_SUCCESS;
    }

    if (len_in <
        sizeof(*in) + sizeof(*in->updated_entries) * in->num_entries_updated) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    /* Adding the extents would exceed the device's extent tracking ability */
    if (in->num_entries_updated + ct3d->dc.total_extent_count >
        CXL_NUM_EXTENTS_SUPPORTED) {
        return CXL_MBOX_RESOURCES_EXHAUSTED;
    }

    ret = cxl_detect_malformed_extent_list(ct3d, in);
    if (ret != CXL_MBOX_SUCCESS) {
        return ret;
    }

    ret = cxl_dcd_add_dyn_cap_rsp_dry_run(ct3d, in);
    if (ret != CXL_MBOX_SUCCESS) {
        return ret;
    }

    for (i = 0; i < in->num_entries_updated; i++) {
        dpa = in->updated_entries[i].start_dpa;
        len = in->updated_entries[i].len;

        cxl_insert_extent_to_extent_list(extent_list, dpa, len, NULL, 0);
        ct3d->dc.total_extent_count += 1;
        ct3_set_region_block_backed(ct3d, dpa, len);
    }
    /* Remove the first extent group in the pending list */
    cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending);

    return CXL_MBOX_SUCCESS;
}
/*
 * Copy extent list from src to dst
 * Return value: number of extents copied
 */
static uint32_t copy_extent_list(CXLDCExtentList *dst,
                                 const CXLDCExtentList *src)
{
    uint32_t cnt = 0;
    CXLDCExtent *ent;

    if (!dst || !src) {
        return 0;
    }

    QTAILQ_FOREACH(ent, src, node) {
        cxl_insert_extent_to_extent_list(dst, ent->start_dpa, ent->len,
                                         ent->tag, ent->shared_seq);
        cnt++;
    }

    return cnt;
}
static CXLRetCode cxl_dc_extent_release_dry_run(CXLType3Dev *ct3d,
        const CXLUpdateDCExtentListInPl *in, CXLDCExtentList *updated_list,
        uint32_t *updated_list_size)
{
    CXLDCExtent *ent, *ent_next;
    uint64_t dpa, len;
    uint32_t i;
    int cnt_delta = 0;
    CXLRetCode ret = CXL_MBOX_SUCCESS;

    QTAILQ_INIT(updated_list);
    copy_extent_list(updated_list, &ct3d->dc.extents);

    for (i = 0; i < in->num_entries_updated; i++) {
        Range range;

        dpa = in->updated_entries[i].start_dpa;
        len = in->updated_entries[i].len;

        /* Check if the DPA range is not fully backed with valid extents */
        if (!ct3_test_region_block_backed(ct3d, dpa, len)) {
            ret = CXL_MBOX_INVALID_PA;
            goto free_and_exit;
        }

        /* After this point, extent overflow is the only error that can occur */
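        /*
         * Carve the released range out of every extent it touches on the
         * working copy: len1 is any prefix of the extent kept before the
         * released range, len2 any suffix kept after it, and len_done the
         * portion actually released from this extent.
         */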
        while (len > 0) {
            QTAILQ_FOREACH(ent, updated_list, node) {
                range_init_nofail(&range, ent->start_dpa, ent->len);

                if (range_contains(&range, dpa)) {
                    uint64_t len1, len2 = 0, len_done = 0;
                    uint64_t ent_start_dpa = ent->start_dpa;
                    uint64_t ent_len = ent->len;

                    len1 = dpa - ent->start_dpa;
                    /* Found the extent or the subset of an existing extent */
                    if (range_contains(&range, dpa + len - 1)) {
                        len2 = ent_start_dpa + ent_len - dpa - len;
                    } else {
                        dpa = ent_start_dpa + ent_len;
                    }
                    len_done = ent_len - len1 - len2;

                    cxl_remove_extent_from_extent_list(updated_list, ent);
                    cnt_delta--;

                    if (len1) {
                        cxl_insert_extent_to_extent_list(updated_list,
                                                         ent_start_dpa,
                                                         len1, NULL, 0);
                        cnt_delta++;
                    }
                    if (len2) {
                        cxl_insert_extent_to_extent_list(updated_list,
                                                         dpa + len,
                                                         len2, NULL, 0);
                        cnt_delta++;
                    }

                    if (cnt_delta + ct3d->dc.total_extent_count >
                            CXL_NUM_EXTENTS_SUPPORTED) {
                        ret = CXL_MBOX_RESOURCES_EXHAUSTED;
                        goto free_and_exit;
                    }

                    len -= len_done;
                    break;
                }
            }
        }
    }
free_and_exit:
    if (ret != CXL_MBOX_SUCCESS) {
        QTAILQ_FOREACH_SAFE(ent, updated_list, node, ent_next) {
            cxl_remove_extent_from_extent_list(updated_list, ent);
        }
        *updated_list_size = 0;
    } else {
        *updated_list_size = ct3d->dc.total_extent_count + cnt_delta;
    }

    return ret;
}
/*
 * CXL r3.1 section 8.2.9.9.9.4: Release Dynamic Capacity (Opcode 4803h)
 */
static CXLRetCode cmd_dcd_release_dyn_cap(const struct cxl_cmd *cmd,
                                          uint8_t *payload_in,
                                          size_t len_in,
                                          uint8_t *payload_out,
                                          size_t *len_out,
                                          CXLCCI *cci)
{
    CXLUpdateDCExtentListInPl *in = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDCExtentList updated_list;
    CXLDCExtent *ent, *ent_next;
    uint32_t updated_list_size;
    CXLRetCode ret;

    if (len_in < sizeof(*in)) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    if (in->num_entries_updated == 0) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (len_in <
        sizeof(*in) + sizeof(*in->updated_entries) * in->num_entries_updated) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    ret = cxl_detect_malformed_extent_list(ct3d, in);
    if (ret != CXL_MBOX_SUCCESS) {
        return ret;
    }

    ret = cxl_dc_extent_release_dry_run(ct3d, in, &updated_list,
                                        &updated_list_size);
    if (ret != CXL_MBOX_SUCCESS) {
        return ret;
    }

    /*
     * If the dry run passes, updated_list already is the post-release
     * extent list: clear the extents in the accepted list, copy the
     * extents from updated_list into it, and update the extent count.
     */
    QTAILQ_FOREACH_SAFE(ent, &ct3d->dc.extents, node, ent_next) {
        ct3_clear_region_block_backed(ct3d, ent->start_dpa, ent->len);
        cxl_remove_extent_from_extent_list(&ct3d->dc.extents, ent);
    }
    copy_extent_list(&ct3d->dc.extents, &updated_list);
    QTAILQ_FOREACH_SAFE(ent, &updated_list, node, ent_next) {
        ct3_set_region_block_backed(ct3d, ent->start_dpa, ent->len);
        cxl_remove_extent_from_extent_list(&updated_list, ent);
    }
    ct3d->dc.total_extent_count = updated_list_size;

    return CXL_MBOX_SUCCESS;
}
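/*
 * Each table entry is { name, handler, expected input payload length,
 * effect flags }; an input length of ~0 marks a variable-length payload,
 * which the handler validates itself (see cxl_process_cci_message()).
 */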
static const struct cxl_cmd cxl_cmd_set[256][256] = {
    [EVENTS][GET_RECORDS] = { "EVENTS_GET_RECORDS",
        cmd_events_get_records, 1, 0 },
    [EVENTS][CLEAR_RECORDS] = { "EVENTS_CLEAR_RECORDS",
        cmd_events_clear_records, ~0, CXL_MBOX_IMMEDIATE_LOG_CHANGE },
    [EVENTS][GET_INTERRUPT_POLICY] = { "EVENTS_GET_INTERRUPT_POLICY",
        cmd_events_get_interrupt_policy, 0, 0 },
    [EVENTS][SET_INTERRUPT_POLICY] = { "EVENTS_SET_INTERRUPT_POLICY",
        cmd_events_set_interrupt_policy,
        ~0, CXL_MBOX_IMMEDIATE_CONFIG_CHANGE },
    [FIRMWARE_UPDATE][GET_INFO] = { "FIRMWARE_UPDATE_GET_INFO",
        cmd_firmware_update_get_info, 0, 0 },
    [FIRMWARE_UPDATE][TRANSFER] = { "FIRMWARE_UPDATE_TRANSFER",
        cmd_firmware_update_transfer, ~0, CXL_MBOX_BACKGROUND_OPERATION },
    [FIRMWARE_UPDATE][ACTIVATE] = { "FIRMWARE_UPDATE_ACTIVATE",
        cmd_firmware_update_activate, 2, CXL_MBOX_BACKGROUND_OPERATION },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set,
        8, CXL_MBOX_IMMEDIATE_POLICY_CHANGE },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported,
        0, 0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [FEATURES][GET_SUPPORTED] = { "FEATURES_GET_SUPPORTED",
        cmd_features_get_supported, 0x8, 0 },
    [FEATURES][GET_FEATURE] = { "FEATURES_GET_FEATURE",
        cmd_features_get_feature, 0x15, 0 },
    [FEATURES][SET_FEATURE] = { "FEATURES_SET_FEATURE",
        cmd_features_set_feature,
        ~0,
        (CXL_MBOX_IMMEDIATE_CONFIG_CHANGE |
         CXL_MBOX_IMMEDIATE_DATA_CHANGE |
         CXL_MBOX_IMMEDIATE_POLICY_CHANGE |
         CXL_MBOX_IMMEDIATE_LOG_CHANGE |
         CXL_MBOX_SECURITY_STATE_CHANGE) },
    [IDENTIFY][MEMORY_DEVICE] = { "IDENTIFY_MEMORY_DEVICE",
        cmd_identify_memory_device, 0, 0 },
    [CCLS][GET_PARTITION_INFO] = { "CCLS_GET_PARTITION_INFO",
        cmd_ccls_get_partition_info, 0, 0 },
    [CCLS][GET_LSA] = { "CCLS_GET_LSA", cmd_ccls_get_lsa, 8, 0 },
    [CCLS][SET_LSA] = { "CCLS_SET_LSA", cmd_ccls_set_lsa,
        ~0, CXL_MBOX_IMMEDIATE_CONFIG_CHANGE | CXL_MBOX_IMMEDIATE_DATA_CHANGE },
    [SANITIZE][OVERWRITE] = { "SANITIZE_OVERWRITE", cmd_sanitize_overwrite, 0,
        (CXL_MBOX_IMMEDIATE_DATA_CHANGE |
         CXL_MBOX_SECURITY_STATE_CHANGE |
         CXL_MBOX_BACKGROUND_OPERATION) },
    [PERSISTENT_MEM][GET_SECURITY_STATE] = { "GET_SECURITY_STATE",
        cmd_get_security_state, 0, 0 },
    [MEDIA_AND_POISON][GET_POISON_LIST] = { "MEDIA_AND_POISON_GET_POISON_LIST",
        cmd_media_get_poison_list, 16, 0 },
    [MEDIA_AND_POISON][INJECT_POISON] = { "MEDIA_AND_POISON_INJECT_POISON",
        cmd_media_inject_poison, 8, 0 },
    [MEDIA_AND_POISON][CLEAR_POISON] = { "MEDIA_AND_POISON_CLEAR_POISON",
        cmd_media_clear_poison, 72, 0 },
    [MEDIA_AND_POISON][GET_SCAN_MEDIA_CAPABILITIES] = {
        "MEDIA_AND_POISON_GET_SCAN_MEDIA_CAPABILITIES",
        cmd_media_get_scan_media_capabilities, 16, 0 },
    [MEDIA_AND_POISON][SCAN_MEDIA] = { "MEDIA_AND_POISON_SCAN_MEDIA",
        cmd_media_scan_media, 17, CXL_MBOX_BACKGROUND_OPERATION },
    [MEDIA_AND_POISON][GET_SCAN_MEDIA_RESULTS] = {
        "MEDIA_AND_POISON_GET_SCAN_MEDIA_RESULTS",
        cmd_media_get_scan_media_results, 0, 0 },
};
static const struct cxl_cmd cxl_cmd_set_dcd[256][256] = {
    [DCD_CONFIG][GET_DC_CONFIG] = { "DCD_GET_DC_CONFIG",
        cmd_dcd_get_dyn_cap_config, 2, 0 },
    [DCD_CONFIG][GET_DYN_CAP_EXT_LIST] = {
        "DCD_GET_DYNAMIC_CAPACITY_EXTENT_LIST", cmd_dcd_get_dyn_cap_ext_list,
        8, 0 },
    [DCD_CONFIG][ADD_DYN_CAP_RSP] = {
        "DCD_ADD_DYNAMIC_CAPACITY_RESPONSE", cmd_dcd_add_dyn_cap_rsp,
        ~0, CXL_MBOX_IMMEDIATE_DATA_CHANGE },
    [DCD_CONFIG][RELEASE_DYN_CAP] = {
        "DCD_RELEASE_DYNAMIC_CAPACITY", cmd_dcd_release_dyn_cap,
        ~0, CXL_MBOX_IMMEDIATE_DATA_CHANGE },
};

static const struct cxl_cmd cxl_cmd_set_sw[256][256] = {
    [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 },
    [INFOSTAT][BACKGROUND_OPERATION_STATUS] = { "BACKGROUND_OPERATION_STATUS",
        cmd_infostat_bg_op_sts, 0, 0 },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TIMESTAMP][SET] = { "TIMESTAMP_SET", cmd_timestamp_set, 8,
        CXL_MBOX_IMMEDIATE_POLICY_CHANGE },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
        0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [PHYSICAL_SWITCH][IDENTIFY_SWITCH_DEVICE] = { "IDENTIFY_SWITCH_DEVICE",
        cmd_identify_switch_device, 0, 0 },
    [PHYSICAL_SWITCH][GET_PHYSICAL_PORT_STATE] = { "SWITCH_PHYSICAL_PORT_STATS",
        cmd_get_physical_port_state, ~0, 0 },
    [TUNNEL][MANAGEMENT_COMMAND] = { "TUNNEL_MANAGEMENT_COMMAND",
        cmd_tunnel_management_cmd, ~0, 0 },
};
/*
 * While the command is executing in the background, the device should
 * update the percentage complete in the Background Command Status Register
 * at least once per second.
 */
#define CXL_MBOX_BG_UPDATE_FREQ 1000UL

int cxl_process_cci_message(CXLCCI *cci, uint8_t set, uint8_t cmd,
                            size_t len_in, uint8_t *pl_in, size_t *len_out,
                            uint8_t *pl_out, bool *bg_started)
{
    int ret;
    const struct cxl_cmd *cxl_cmd;
    opcode_handler h;
    CXLDeviceState *cxl_dstate;

    *len_out = 0;
    cxl_cmd = &cci->cxl_cmd_set[set][cmd];
    h = cxl_cmd->handler;
    if (!h) {
        qemu_log_mask(LOG_UNIMP, "Command %04xh not implemented\n",
                      set << 8 | cmd);
        return CXL_MBOX_UNSUPPORTED;
    }

    if (len_in != cxl_cmd->in && cxl_cmd->in != ~0) {
        return CXL_MBOX_INVALID_PAYLOAD_LENGTH;
    }

    /* Only one bg command at a time */
    if ((cxl_cmd->effect & CXL_MBOX_BACKGROUND_OPERATION) &&
        cci->bg.runtime > 0) {
        return CXL_MBOX_BUSY;
    }

    /* forbid any selected commands while the media is disabled */
    if (object_dynamic_cast(OBJECT(cci->d), TYPE_CXL_TYPE3)) {
        cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;

        if (cxl_dev_media_disabled(cxl_dstate)) {
            if (h == cmd_events_get_records ||
                h == cmd_ccls_get_partition_info ||
                h == cmd_ccls_set_lsa ||
                h == cmd_ccls_get_lsa ||
                h == cmd_logs_get_log ||
                h == cmd_media_get_poison_list ||
                h == cmd_media_inject_poison ||
                h == cmd_media_clear_poison ||
                h == cmd_sanitize_overwrite ||
                h == cmd_firmware_update_transfer ||
                h == cmd_firmware_update_activate) {
                return CXL_MBOX_MEDIA_DISABLED;
            }
        }
    }

    ret = (*h)(cxl_cmd, pl_in, len_in, pl_out, len_out, cci);
    if ((cxl_cmd->effect & CXL_MBOX_BACKGROUND_OPERATION) &&
        ret == CXL_MBOX_BG_STARTED) {
        *bg_started = true;
    } else {
        *bg_started = false;
    }

    /* Set bg and the return code */
    if (*bg_started) {
        uint64_t now;

        cci->bg.opcode = (set << 8) | cmd;

        cci->bg.complete_pct = 0;
        cci->bg.ret_code = 0;

        now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
        cci->bg.starttime = now;
        timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ);
    }

    return ret;
}
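/*
 * Timer callback for background commands: advances the completion
 * percentage once per update period and, on expiry of the estimated
 * runtime, performs the deferred work for the opcode that started it.
 */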
static void bg_timercb(void *opaque)
{
    CXLCCI *cci = opaque;
    uint64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    uint64_t total_time = cci->bg.starttime + cci->bg.runtime;

    assert(cci->bg.runtime > 0);

    if (now >= total_time) { /* we are done */
        uint16_t ret = CXL_MBOX_SUCCESS;

        cci->bg.complete_pct = 100;
        cci->bg.ret_code = ret;
        switch (cci->bg.opcode) {
        case 0x0201: /* fw transfer */
            __do_firmware_xfer(cci);
            break;
        case 0x4400: /* sanitize */
        {
            CXLType3Dev *ct3d = CXL_TYPE3(cci->d);

            __do_sanitization(ct3d);
            cxl_dev_enable_media(&ct3d->cxl_dstate);
        }
            break;
        case 0x4304: /* scan media */
        {
            CXLType3Dev *ct3d = CXL_TYPE3(cci->d);

            __do_scan_media(ct3d);
            break;
        }
        default:
            __builtin_unreachable();
            break;
        }
    } else {
        /* estimate only */
        cci->bg.complete_pct =
            100 * (now - cci->bg.starttime) / cci->bg.runtime;
        timer_mod(cci->bg.timer, now + CXL_MBOX_BG_UPDATE_FREQ);
    }

    if (cci->bg.complete_pct == 100) {
        /* TODO: generalize to switch CCI */
        CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
        CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
        PCIDevice *pdev = PCI_DEVICE(cci->d);

        cci->bg.starttime = 0;
        /* registers are updated, allow new bg-capable cmds */
        cci->bg.runtime = 0;

        if (msix_enabled(pdev)) {
            msix_notify(pdev, cxl_dstate->mbox_msi_n);
        } else if (msi_enabled(pdev)) {
            msi_notify(pdev, cxl_dstate->mbox_msi_n);
        }
    }
}
static void cxl_rebuild_cel(CXLCCI *cci)
{
    cci->cel_size = 0; /* Reset for a fresh build */

    for (int set = 0; set < 256; set++) {
        for (int cmd = 0; cmd < 256; cmd++) {
            if (cci->cxl_cmd_set[set][cmd].handler) {
                const struct cxl_cmd *c = &cci->cxl_cmd_set[set][cmd];
                struct cel_log *log =
                    &cci->cel_log[cci->cel_size];

                log->opcode = (set << 8) | cmd;
                log->effect = c->effect;
                cci->cel_size++;
            }
        }
    }
}
void cxl_init_cci(CXLCCI *cci, size_t payload_max)
{
    cci->payload_max = payload_max;
    cxl_rebuild_cel(cci);

    cci->bg.complete_pct = 0;
    cci->bg.starttime = 0;
    cci->bg.runtime = 0;
    cci->bg.timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                 bg_timercb, cci);

    memset(&cci->fw, 0, sizeof(cci->fw));
    cci->fw.active_slot = 1;
    cci->fw.slot[cci->fw.active_slot - 1] = true;
}

static void cxl_copy_cci_commands(CXLCCI *cci, const struct cxl_cmd (*cxl_cmds)[256])
{
    for (int set = 0; set < 256; set++) {
        for (int cmd = 0; cmd < 256; cmd++) {
            if (cxl_cmds[set][cmd].handler) {
                cci->cxl_cmd_set[set][cmd] = cxl_cmds[set][cmd];
            }
        }
    }
}

void cxl_add_cci_commands(CXLCCI *cci, const struct cxl_cmd (*cxl_cmd_set)[256],
                          size_t payload_max)
{
    cci->payload_max = MAX(payload_max, cci->payload_max);
    cxl_copy_cci_commands(cci, cxl_cmd_set);
    cxl_rebuild_cel(cci);
}
void cxl_initialize_mailbox_swcci(CXLCCI *cci, DeviceState *intf,
                                  DeviceState *d, size_t payload_max)
{
    cxl_copy_cci_commands(cci, cxl_cmd_set_sw);
    cci->d = d;
    cci->intf = intf;
    cxl_init_cci(cci, payload_max);
}

void cxl_initialize_mailbox_t3(CXLCCI *cci, DeviceState *d, size_t payload_max)
{
    CXLType3Dev *ct3d = CXL_TYPE3(d);

    cxl_copy_cci_commands(cci, cxl_cmd_set);
    if (ct3d->dc.num_regions) {
        cxl_copy_cci_commands(cci, cxl_cmd_set_dcd);
    }
    cci->d = d;

    /* No separation for PCI MB as protocol handled in PCI device */
    cci->intf = d;
    cxl_init_cci(cci, payload_max);
}

static const struct cxl_cmd cxl_cmd_set_t3_ld[256][256] = {
    [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
        0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
};

void cxl_initialize_t3_ld_cci(CXLCCI *cci, DeviceState *d, DeviceState *intf,
                              size_t payload_max)
{
    cxl_copy_cci_commands(cci, cxl_cmd_set_t3_ld);
    cci->d = d;
    cci->intf = intf;
    cxl_init_cci(cci, payload_max);
}

static const struct cxl_cmd cxl_cmd_set_t3_fm_owned_ld_mctp[256][256] = {
    [INFOSTAT][IS_IDENTIFY] = { "IDENTIFY", cmd_infostat_identify, 0, 0 },
    [LOGS][GET_SUPPORTED] = { "LOGS_GET_SUPPORTED", cmd_logs_get_supported, 0,
        0 },
    [LOGS][GET_LOG] = { "LOGS_GET_LOG", cmd_logs_get_log, 0x18, 0 },
    [TIMESTAMP][GET] = { "TIMESTAMP_GET", cmd_timestamp_get, 0, 0 },
    [TUNNEL][MANAGEMENT_COMMAND] = { "TUNNEL_MANAGEMENT_COMMAND",
        cmd_tunnel_management_cmd, ~0, 0 },
};

void cxl_initialize_t3_fm_owned_ld_mctpcci(CXLCCI *cci, DeviceState *d,
                                           DeviceState *intf,
                                           size_t payload_max)
{
    cxl_copy_cci_commands(cci, cxl_cmd_set_t3_fm_owned_ld_mctp);
    cci->d = d;
    cci->intf = intf;
    cxl_init_cci(cci, payload_max);
}