/*
 * NVDIMM ACPI Implementation
 *
 * Copyright(C) 2015 Intel Corporation.
 *
 * Author:
 *  Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *
 * NFIT is defined in ACPI 6.0: 5.2.25 NVDIMM Firmware Interface Table (NFIT)
 * and the DSM specification can be found at:
 *      http://pmem.io/documents/NVDIMM_DSM_Interface_Example.pdf
 *
 * Currently, it only supports PMEM Virtualization.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>
 */

#include "qemu/osdep.h"
#include "qemu/uuid.h"
#include "qapi/error.h"
#include "hw/acpi/acpi.h"
#include "hw/acpi/aml-build.h"
#include "hw/acpi/bios-linker-loader.h"
#include "hw/nvram/fw_cfg.h"
#include "hw/mem/nvdimm.h"
#include "qemu/nvdimm-utils.h"
#include "trace.h"

/*
 * define Byte Addressable Persistent Memory (PM) Region according to
 * ACPI 6.0: 5.2.25.1 System Physical Address Range Structure.
 */
static const uint8_t nvdimm_nfit_spa_uuid[] =
      UUID_LE(0x66f0d379, 0xb4f3, 0x4074, 0xac, 0x43, 0x0d, 0x33,
              0x18, 0xb7, 0x8c, 0xdb);

/*
 * define NFIT structures according to ACPI 6.0: 5.2.25 NVDIMM Firmware
 * Interface Table (NFIT).
 */

/*
 * System Physical Address Range Structure
 *
 * It describes the system physical address ranges occupied by NVDIMMs and
 * the types of the regions.
 */
struct NvdimmNfitSpa {
    uint16_t type;
    uint16_t length;
    uint16_t spa_index;
    uint16_t flags;
    uint32_t reserved;
    uint32_t proximity_domain;
    uint8_t type_guid[16];
    uint64_t spa_base;
    uint64_t spa_length;
    uint64_t mem_attr;
} QEMU_PACKED;
typedef struct NvdimmNfitSpa NvdimmNfitSpa;

/*
 * Memory Device to System Physical Address Range Mapping Structure
 *
 * It enables identifying each NVDIMM region and the corresponding SPA
 * describing the memory interleave
 */
struct NvdimmNfitMemDev {
    uint16_t type;
    uint16_t length;
    uint32_t nfit_handle;
    uint16_t phys_id;
    uint16_t region_id;
    uint16_t spa_index;
    uint16_t dcr_index;
    uint64_t region_len;
    uint64_t region_offset;
    uint64_t region_dpa;
    uint16_t interleave_index;
    uint16_t interleave_ways;
    uint16_t flags;
    uint16_t reserved;
} QEMU_PACKED;
typedef struct NvdimmNfitMemDev NvdimmNfitMemDev;
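
/*
 * "Memory Device State Flags" bit 3 of the NFIT memdev structure; set below
 * for NVDIMMs created with unarmed=on, signalling that the device is not
 * armed (e.g. its backend cannot accept persistent writes).
 */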
#define ACPI_NFIT_MEM_NOT_ARMED     (1 << 3)

/*
 * NVDIMM Control Region Structure
 *
 * It describes the NVDIMM and if applicable, Block Control Window.
 */
struct NvdimmNfitControlRegion {
    uint16_t type;
    uint16_t length;
    uint16_t dcr_index;
    uint16_t vendor_id;
    uint16_t device_id;
    uint16_t revision_id;
    uint16_t sub_vendor_id;
    uint16_t sub_device_id;
    uint16_t sub_revision_id;
    uint8_t reserved[6];
    uint32_t serial_number;
    uint16_t fic;
    uint16_t num_bcw;
    uint64_t bcw_size;
    uint64_t cmd_offset;
    uint64_t cmd_size;
    uint64_t status_offset;
    uint64_t status_size;
    uint16_t flags;
    uint8_t reserved2[6];
} QEMU_PACKED;
typedef struct NvdimmNfitControlRegion NvdimmNfitControlRegion;

/*
 * NVDIMM Platform Capabilities Structure
 *
 * Defined in section 5.2.25.9 of ACPI 6.2 Errata A, September 2017
 */
struct NvdimmNfitPlatformCaps {
    uint16_t type;
    uint16_t length;
    uint8_t highest_cap;
    uint8_t reserved[3];
    uint32_t capabilities;
    uint8_t reserved2[4];
} QEMU_PACKED;
typedef struct NvdimmNfitPlatformCaps NvdimmNfitPlatformCaps;

/*
 * Module serial number is a unique number for each device. We use the
 * slot id of NVDIMM device to generate this number so that each device
 * associates with a different number.
 *
 * 0x123456 is a magic number we arbitrarily chose.
 */
static uint32_t nvdimm_slot_to_sn(int slot)
{
    return 0x123456 + slot;
}

/*
 * handle is used to uniquely associate nfit_memdev structure with NVDIMM
 * ACPI device - nfit_memdev.nfit_handle matches with the value returned
 * by ACPI device _ADR method.
 *
 * We generate the handle with the slot id of NVDIMM device and reserve
 * 0 for NVDIMM root device.
 */
static uint32_t nvdimm_slot_to_handle(int slot)
{
    return slot + 1;
}

/*
 * index uniquely identifies the structure, 0 is reserved which indicates
 * that the structure is not valid or the associated structure is not
 * present.
 *
 * Each NVDIMM device needs two indexes, one for nfit_spa and another for
 * nfit_dcr, which are generated by the slot id of NVDIMM device.
 */
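/* For example, slot 0 yields spa_index 2 and dcr_index 3, slot 1 yields 4
 * and 5, and so on; index 0 is never produced. */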
static uint16_t nvdimm_slot_to_spa_index(int slot)
{
    return (slot + 1) << 1;
}

/* See the comments of nvdimm_slot_to_spa_index(). */
static uint32_t nvdimm_slot_to_dcr_index(int slot)
{
    return nvdimm_slot_to_spa_index(slot) + 1;
}

static NVDIMMDevice *nvdimm_get_device_by_handle(uint32_t handle)
{
    NVDIMMDevice *nvdimm = NULL;
    GSList *list, *device_list = nvdimm_get_device_list();

    for (list = device_list; list; list = list->next) {
        NVDIMMDevice *nvd = list->data;
        int slot = object_property_get_int(OBJECT(nvd), PC_DIMM_SLOT_PROP,
                                           NULL);

        if (nvdimm_slot_to_handle(slot) == handle) {
            nvdimm = nvd;
            break;
        }
    }

    g_slist_free(device_list);
    return nvdimm;
}

/* ACPI 6.0: 5.2.25.1 System Physical Address Range Structure */
static void
nvdimm_build_structure_spa(GArray *structures, DeviceState *dev)
{
    NvdimmNfitSpa *nfit_spa;
    uint64_t addr = object_property_get_uint(OBJECT(dev), PC_DIMM_ADDR_PROP,
                                             NULL);
    uint64_t size = object_property_get_uint(OBJECT(dev), PC_DIMM_SIZE_PROP,
                                             NULL);
    uint32_t node = object_property_get_uint(OBJECT(dev), PC_DIMM_NODE_PROP,
                                             NULL);
    int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
                                       NULL);

    nfit_spa = acpi_data_push(structures, sizeof(*nfit_spa));

    nfit_spa->type = cpu_to_le16(0 /* System Physical Address Range
                                      Structure */);
    nfit_spa->length = cpu_to_le16(sizeof(*nfit_spa));
    nfit_spa->spa_index = cpu_to_le16(nvdimm_slot_to_spa_index(slot));

    /*
     * Control region is strict as all the device info, such as SN, index,
     * is associated with slot id.
     */
    nfit_spa->flags = cpu_to_le16(1 /* Control region is strictly for
                                       management during hot add/online
                                       operation */ |
                                  2 /* Data in Proximity Domain field is
                                       valid */);

    /* NUMA node. */
    nfit_spa->proximity_domain = cpu_to_le32(node);
    /* the region is reported as PMEM. */
    memcpy(nfit_spa->type_guid, nvdimm_nfit_spa_uuid,
           sizeof(nvdimm_nfit_spa_uuid));

    nfit_spa->spa_base = cpu_to_le64(addr);
    nfit_spa->spa_length = cpu_to_le64(size);

    /* The region is PMEM and can be cached as write-back. */
    nfit_spa->mem_attr = cpu_to_le64(0x8ULL /* EFI_MEMORY_WB */ |
                                     0x8000ULL /* EFI_MEMORY_NV */);
}

/*
 * ACPI 6.0: 5.2.25.2 Memory Device to System Physical Address Range Mapping
 * Structure
 */
static void
nvdimm_build_structure_memdev(GArray *structures, DeviceState *dev)
{
    NvdimmNfitMemDev *nfit_memdev;
    NVDIMMDevice *nvdimm = NVDIMM(OBJECT(dev));
    uint64_t size = object_property_get_uint(OBJECT(dev), PC_DIMM_SIZE_PROP,
                                             NULL);
    int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
                                       NULL);
    uint32_t handle = nvdimm_slot_to_handle(slot);

    nfit_memdev = acpi_data_push(structures, sizeof(*nfit_memdev));

    nfit_memdev->type = cpu_to_le16(1 /* Memory Device to System Address
                                         Range Map Structure */);
    nfit_memdev->length = cpu_to_le16(sizeof(*nfit_memdev));
    nfit_memdev->nfit_handle = cpu_to_le32(handle);

    /*
     * associate memory device with System Physical Address Range
     * Structure.
     */
    nfit_memdev->spa_index = cpu_to_le16(nvdimm_slot_to_spa_index(slot));
    /* associate memory device with Control Region Structure. */
    nfit_memdev->dcr_index = cpu_to_le16(nvdimm_slot_to_dcr_index(slot));

    /* The memory region on the device. */
    nfit_memdev->region_len = cpu_to_le64(size);
    /* The device address starts from 0. */
    nfit_memdev->region_dpa = cpu_to_le64(0);

    /* Only one interleave for PMEM. */
    nfit_memdev->interleave_ways = cpu_to_le16(1);

    if (nvdimm->unarmed) {
        nfit_memdev->flags |= cpu_to_le16(ACPI_NFIT_MEM_NOT_ARMED);
    }
}

/*
 * ACPI 6.0: 5.2.25.5 NVDIMM Control Region Structure.
 */
static void nvdimm_build_structure_dcr(GArray *structures, DeviceState *dev)
{
    NvdimmNfitControlRegion *nfit_dcr;
    int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
                                       NULL);
    uint32_t sn = nvdimm_slot_to_sn(slot);

    nfit_dcr = acpi_data_push(structures, sizeof(*nfit_dcr));

    nfit_dcr->type = cpu_to_le16(4 /* NVDIMM Control Region Structure */);
    nfit_dcr->length = cpu_to_le16(sizeof(*nfit_dcr));
    nfit_dcr->dcr_index = cpu_to_le16(nvdimm_slot_to_dcr_index(slot));

    /* vendor: Intel. */
    nfit_dcr->vendor_id = cpu_to_le16(0x8086);
    nfit_dcr->device_id = cpu_to_le16(1);

    /* The _DSM method follows Intel's DSM specification. */
    nfit_dcr->revision_id = cpu_to_le16(1 /* Current Revision supported
                                             in ACPI 6.0 is 1. */);
    nfit_dcr->serial_number = cpu_to_le32(sn);
    nfit_dcr->fic = cpu_to_le16(0x301 /* Format Interface Code:
                                         Byte addressable, no energy backed.
                                         See ACPI 6.2, sect 5.2.25.6 and
                                         JEDEC Annex L Release 3. */);
}

/*
 * ACPI 6.2 Errata A: 5.2.25.9 NVDIMM Platform Capabilities Structure
 */
static void
nvdimm_build_structure_caps(GArray *structures, uint32_t capabilities)
{
    NvdimmNfitPlatformCaps *nfit_caps;

    nfit_caps = acpi_data_push(structures, sizeof(*nfit_caps));

    nfit_caps->type = cpu_to_le16(7 /* NVDIMM Platform Capabilities */);
    nfit_caps->length = cpu_to_le16(sizeof(*nfit_caps));
    nfit_caps->highest_cap = 31 - clz32(capabilities);
    nfit_caps->capabilities = cpu_to_le32(capabilities);
}

static GArray *nvdimm_build_device_structure(NVDIMMState *state)
{
    GSList *device_list, *list = nvdimm_get_device_list();
    GArray *structures = g_array_new(false, true /* clear */, 1);

    for (device_list = list; device_list; device_list = device_list->next) {
        DeviceState *dev = device_list->data;

        /* build System Physical Address Range Structure. */
        nvdimm_build_structure_spa(structures, dev);

        /*
         * build Memory Device to System Physical Address Range Mapping
         * Structure.
         */
        nvdimm_build_structure_memdev(structures, dev);

        /* build NVDIMM Control Region Structure. */
        nvdimm_build_structure_dcr(structures, dev);
    }
    g_slist_free(list);

    if (state->persistence) {
        nvdimm_build_structure_caps(structures, state->persistence);
    }

    return structures;
}

static void nvdimm_init_fit_buffer(NvdimmFitBuffer *fit_buf)
{
    fit_buf->fit = g_array_new(false, true /* clear */, 1);
}

static void nvdimm_build_fit_buffer(NVDIMMState *state)
{
    NvdimmFitBuffer *fit_buf = &state->fit_buf;

    g_array_free(fit_buf->fit, true);
    fit_buf->fit = nvdimm_build_device_structure(state);
    fit_buf->dirty = true;
}

void nvdimm_plug(NVDIMMState *state)
{
    nvdimm_build_fit_buffer(state);
}

/*
 * NVDIMM Firmware Interface Table
 * @signature: "NFIT"
 *
 * It provides information that allows OSPM to enumerate the NVDIMMs present
 * in the platform and associate them with the system physical address ranges
 * created by the NVDIMMs.
 *
 * It is defined in ACPI 6.0: 5.2.25 NVDIMM Firmware Interface Table (NFIT)
 */
static void nvdimm_build_nfit(NVDIMMState *state, GArray *table_offsets,
                              GArray *table_data, BIOSLinker *linker,
                              const char *oem_id, const char *oem_table_id)
{
    NvdimmFitBuffer *fit_buf = &state->fit_buf;
    AcpiTable table = { .sig = "NFIT", .rev = 1,
                        .oem_id = oem_id, .oem_table_id = oem_table_id };

    acpi_add_table(table_offsets, table_data);

    acpi_table_begin(&table, table_data);
    /* Reserved */
    build_append_int_noprefix(table_data, 0, 4);
    /* NVDIMM device structures. */
    g_array_append_vals(table_data, fit_buf->fit->data, fit_buf->fit->len);
    acpi_table_end(linker, &table);
}

#define NVDIMM_DSM_MEMORY_SIZE 4096
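
/*
 * The DSM buffer is a single page shared between the guest ACPI code and
 * QEMU: the input structure below and the output structures further down
 * overlay the same 4 KiB of guest memory, so the input must be read before
 * any result is written back.
 */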
struct NvdimmDsmIn {
    uint32_t handle;
    uint32_t revision;
    uint32_t function;
    /* the remaining size in the page is used by arg3. */
    union {
        uint8_t arg3[4084];
    };
} QEMU_PACKED;
typedef struct NvdimmDsmIn NvdimmDsmIn;
QEMU_BUILD_BUG_ON(sizeof(NvdimmDsmIn) != NVDIMM_DSM_MEMORY_SIZE);

struct NvdimmDsmOut {
    /* the size of buffer filled by QEMU. */
    uint32_t len;
    uint8_t data[4092];
} QEMU_PACKED;
typedef struct NvdimmDsmOut NvdimmDsmOut;
QEMU_BUILD_BUG_ON(sizeof(NvdimmDsmOut) != NVDIMM_DSM_MEMORY_SIZE);

struct NvdimmDsmFunc0Out {
    /* the size of buffer filled by QEMU. */
    uint32_t len;
    uint32_t supported_func;
} QEMU_PACKED;
typedef struct NvdimmDsmFunc0Out NvdimmDsmFunc0Out;

struct NvdimmDsmFuncNoPayloadOut {
    /* the size of buffer filled by QEMU. */
    uint32_t len;
    uint32_t func_ret_status;
} QEMU_PACKED;
typedef struct NvdimmDsmFuncNoPayloadOut NvdimmDsmFuncNoPayloadOut;

struct NvdimmFuncGetLabelSizeOut {
    /* the size of buffer filled by QEMU. */
    uint32_t len;
    uint32_t func_ret_status; /* return status code. */
    uint32_t label_size; /* the size of label data area. */
    /*
     * Maximum size of the namespace label data length supported by
     * the platform in Get/Set Namespace Label Data functions.
     */
    uint32_t max_xfer;
} QEMU_PACKED;
typedef struct NvdimmFuncGetLabelSizeOut NvdimmFuncGetLabelSizeOut;
QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncGetLabelSizeOut) > NVDIMM_DSM_MEMORY_SIZE);

struct NvdimmFuncGetLabelDataIn {
    uint32_t offset; /* the offset in the namespace label data area. */
    uint32_t length; /* the size of data to be read via the function. */
} QEMU_PACKED;
typedef struct NvdimmFuncGetLabelDataIn NvdimmFuncGetLabelDataIn;
QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncGetLabelDataIn) +
                  offsetof(NvdimmDsmIn, arg3) > NVDIMM_DSM_MEMORY_SIZE);

struct NvdimmFuncGetLabelDataOut {
    /* the size of buffer filled by QEMU. */
    uint32_t len;
    uint32_t func_ret_status; /* return status code. */
    uint8_t out_buf[]; /* the data got via Get Namespace Label function. */
} QEMU_PACKED;
typedef struct NvdimmFuncGetLabelDataOut NvdimmFuncGetLabelDataOut;
QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncGetLabelDataOut) > NVDIMM_DSM_MEMORY_SIZE);

struct NvdimmFuncSetLabelDataIn {
    uint32_t offset; /* the offset in the namespace label data area. */
    uint32_t length; /* the size of data to be written via the function. */
    uint8_t in_buf[]; /* the data written to label data area. */
} QEMU_PACKED;
typedef struct NvdimmFuncSetLabelDataIn NvdimmFuncSetLabelDataIn;
QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncSetLabelDataIn) +
                  offsetof(NvdimmDsmIn, arg3) > NVDIMM_DSM_MEMORY_SIZE);

struct NvdimmFuncReadFITIn {
    uint32_t offset; /* the offset into FIT buffer. */
} QEMU_PACKED;
typedef struct NvdimmFuncReadFITIn NvdimmFuncReadFITIn;
QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncReadFITIn) +
                  offsetof(NvdimmDsmIn, arg3) > NVDIMM_DSM_MEMORY_SIZE);

struct NvdimmFuncReadFITOut {
    /* the size of buffer filled by QEMU. */
    uint32_t len;
    uint32_t func_ret_status; /* return status code. */
    uint8_t fit[]; /* the FIT data. */
} QEMU_PACKED;
typedef struct NvdimmFuncReadFITOut NvdimmFuncReadFITOut;
QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncReadFITOut) > NVDIMM_DSM_MEMORY_SIZE);

static void
nvdimm_dsm_function0(uint32_t supported_func, hwaddr dsm_mem_addr)
{
    NvdimmDsmFunc0Out func0 = {
        .len = cpu_to_le32(sizeof(func0)),
        .supported_func = cpu_to_le32(supported_func),
    };
    cpu_physical_memory_write(dsm_mem_addr, &func0, sizeof(func0));
}

static void
nvdimm_dsm_no_payload(uint32_t func_ret_status, hwaddr dsm_mem_addr)
{
    NvdimmDsmFuncNoPayloadOut out = {
        .len = cpu_to_le32(sizeof(out)),
        .func_ret_status = cpu_to_le32(func_ret_status),
    };
    cpu_physical_memory_write(dsm_mem_addr, &out, sizeof(out));
}

#define NVDIMM_DSM_RET_STATUS_SUCCESS        0     /* Success */
#define NVDIMM_DSM_RET_STATUS_UNSUPPORT      1     /* Not Supported */
#define NVDIMM_DSM_RET_STATUS_NOMEMDEV       2     /* Non-Existing Memory Device */
#define NVDIMM_DSM_RET_STATUS_INVALID        3     /* Invalid Input Parameters */
#define NVDIMM_DSM_RET_STATUS_FIT_CHANGED    0x100 /* FIT Changed */
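
/*
 * Handle reserved for QEMU's own root-device functions (currently Read FIT);
 * it is distinct from handle 0, which the standard NVDIMM root device uses.
 */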
#define NVDIMM_QEMU_RSVD_HANDLE_ROOT 0x10000

/* Read FIT data, defined in docs/specs/acpi_nvdimm.txt. */
static void nvdimm_dsm_func_read_fit(NVDIMMState *state, NvdimmDsmIn *in,
                                     hwaddr dsm_mem_addr)
{
    NvdimmFitBuffer *fit_buf = &state->fit_buf;
    NvdimmFuncReadFITIn *read_fit;
    NvdimmFuncReadFITOut *read_fit_out;
    GArray *fit;
    uint32_t read_len = 0, func_ret_status;
    int size;

    read_fit = (NvdimmFuncReadFITIn *)in->arg3;
    read_fit->offset = le32_to_cpu(read_fit->offset);

    fit = fit_buf->fit;

    trace_acpi_nvdimm_read_fit(read_fit->offset, fit->len,
                               fit_buf->dirty ? "Yes" : "No");

    if (read_fit->offset > fit->len) {
        func_ret_status = NVDIMM_DSM_RET_STATUS_INVALID;
        goto exit;
    }

    /* This is the first read of the FIT. */
    if (!read_fit->offset) {
        fit_buf->dirty = false;
    } else if (fit_buf->dirty) { /* FIT has been changed during RFIT. */
        func_ret_status = NVDIMM_DSM_RET_STATUS_FIT_CHANGED;
        goto exit;
    }

    func_ret_status = NVDIMM_DSM_RET_STATUS_SUCCESS;
    read_len = MIN(fit->len - read_fit->offset,
                   NVDIMM_DSM_MEMORY_SIZE - sizeof(NvdimmFuncReadFITOut));

exit:
    size = sizeof(NvdimmFuncReadFITOut) + read_len;
    read_fit_out = g_malloc(size);

    read_fit_out->len = cpu_to_le32(size);
    read_fit_out->func_ret_status = cpu_to_le32(func_ret_status);
    memcpy(read_fit_out->fit, fit->data + read_fit->offset, read_len);

    cpu_physical_memory_write(dsm_mem_addr, read_fit_out, size);

    g_free(read_fit_out);
}

static void
nvdimm_dsm_handle_reserved_root_method(NVDIMMState *state,
                                       NvdimmDsmIn *in, hwaddr dsm_mem_addr)
{
    switch (in->function) {
    case 0x0:
        nvdimm_dsm_function0(0x1 | 1 << 1 /* Read FIT */, dsm_mem_addr);
        return;
    case 0x1 /* Read FIT */:
        nvdimm_dsm_func_read_fit(state, in, dsm_mem_addr);
        return;
    }

    nvdimm_dsm_no_payload(NVDIMM_DSM_RET_STATUS_UNSUPPORT, dsm_mem_addr);
}

static void nvdimm_dsm_root(NvdimmDsmIn *in, hwaddr dsm_mem_addr)
{
    /*
     * function 0 is called by OSPM to inquire which functions are
     * supported.
     */
    if (!in->function) {
        nvdimm_dsm_function0(0 /* No function supported other than
                                  function 0 */, dsm_mem_addr);
        return;
    }

    /* No function except function 0 is supported yet. */
    nvdimm_dsm_no_payload(NVDIMM_DSM_RET_STATUS_UNSUPPORT, dsm_mem_addr);
}

/*
 * the max transfer size is the size that can be transferred by both the
 * 'Get Namespace Label Data' function and the 'Set Namespace Label Data'
 * function, i.e. the smaller of the two limits.
 */
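/* With the 4 KiB DSM page this works out to MIN(4096 - 8, 4096 - 12 - 8),
 * i.e. 4076 bytes per transfer. */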
static uint32_t nvdimm_get_max_xfer_label_size(void)
{
    uint32_t max_get_size, max_set_size, dsm_memory_size;

    dsm_memory_size = NVDIMM_DSM_MEMORY_SIZE;

    /*
     * the max data ACPI can read at one time, which is transferred in the
     * response of the 'Get Namespace Label Data' function.
     */
    max_get_size = dsm_memory_size - sizeof(NvdimmFuncGetLabelDataOut);

    /*
     * the max data ACPI can write at one time, which is transferred by the
     * 'Set Namespace Label Data' function.
     */
    max_set_size = dsm_memory_size - offsetof(NvdimmDsmIn, arg3) -
                   sizeof(NvdimmFuncSetLabelDataIn);

    return MIN(max_get_size, max_set_size);
}

/*
 * DSM Spec Rev1 4.4 Get Namespace Label Size (Function Index 4).
 *
 * It gets the size of Namespace Label data area and the max data size
 * that Get/Set Namespace Label Data functions can transfer.
 */
static void nvdimm_dsm_label_size(NVDIMMDevice *nvdimm, hwaddr dsm_mem_addr)
{
    NvdimmFuncGetLabelSizeOut label_size_out = {
        .len = cpu_to_le32(sizeof(label_size_out)),
    };
    uint32_t label_size, mxfer;

    label_size = nvdimm->label_size;
    mxfer = nvdimm_get_max_xfer_label_size();

    trace_acpi_nvdimm_label_info(label_size, mxfer);

    label_size_out.func_ret_status = cpu_to_le32(NVDIMM_DSM_RET_STATUS_SUCCESS);
    label_size_out.label_size = cpu_to_le32(label_size);
    label_size_out.max_xfer = cpu_to_le32(mxfer);

    cpu_physical_memory_write(dsm_mem_addr, &label_size_out,
                              sizeof(label_size_out));
}

static uint32_t nvdimm_rw_label_data_check(NVDIMMDevice *nvdimm,
                                           uint32_t offset, uint32_t length,
                                           bool is_write)
{
    uint32_t ret = NVDIMM_DSM_RET_STATUS_INVALID;

    if (offset + length < offset) {
        trace_acpi_nvdimm_label_overflow(offset, length);
        return ret;
    }

    if (nvdimm->label_size < offset + length) {
        trace_acpi_nvdimm_label_oversize(offset + length, nvdimm->label_size);
        return ret;
    }

    if (length > nvdimm_get_max_xfer_label_size()) {
        trace_acpi_nvdimm_label_xfer_exceed(length,
                                            nvdimm_get_max_xfer_label_size());
        return ret;
    }

    if (is_write && nvdimm->readonly) {
        return NVDIMM_DSM_RET_STATUS_UNSUPPORT;
    }

    return NVDIMM_DSM_RET_STATUS_SUCCESS;
}

/*
 * DSM Spec Rev1 4.5 Get Namespace Label Data (Function Index 5).
 */
static void nvdimm_dsm_get_label_data(NVDIMMDevice *nvdimm, NvdimmDsmIn *in,
                                      hwaddr dsm_mem_addr)
{
    NVDIMMClass *nvc = NVDIMM_GET_CLASS(nvdimm);
    NvdimmFuncGetLabelDataIn *get_label_data;
    NvdimmFuncGetLabelDataOut *get_label_data_out;
    uint32_t status;
    int size;

    get_label_data = (NvdimmFuncGetLabelDataIn *)in->arg3;
    get_label_data->offset = le32_to_cpu(get_label_data->offset);
    get_label_data->length = le32_to_cpu(get_label_data->length);

    trace_acpi_nvdimm_read_label(get_label_data->offset,
                                 get_label_data->length);

    status = nvdimm_rw_label_data_check(nvdimm, get_label_data->offset,
                                        get_label_data->length, false);
    if (status != NVDIMM_DSM_RET_STATUS_SUCCESS) {
        nvdimm_dsm_no_payload(status, dsm_mem_addr);
        return;
    }

    size = sizeof(*get_label_data_out) + get_label_data->length;
    assert(size <= NVDIMM_DSM_MEMORY_SIZE);
    get_label_data_out = g_malloc(size);

    get_label_data_out->len = cpu_to_le32(size);
    get_label_data_out->func_ret_status =
        cpu_to_le32(NVDIMM_DSM_RET_STATUS_SUCCESS);
    nvc->read_label_data(nvdimm, get_label_data_out->out_buf,
                         get_label_data->length, get_label_data->offset);

    cpu_physical_memory_write(dsm_mem_addr, get_label_data_out, size);
    g_free(get_label_data_out);
}

/*
 * DSM Spec Rev1 4.6 Set Namespace Label Data (Function Index 6).
 */
static void nvdimm_dsm_set_label_data(NVDIMMDevice *nvdimm, NvdimmDsmIn *in,
                                      hwaddr dsm_mem_addr)
{
    NVDIMMClass *nvc = NVDIMM_GET_CLASS(nvdimm);
    NvdimmFuncSetLabelDataIn *set_label_data;
    uint32_t status;

    set_label_data = (NvdimmFuncSetLabelDataIn *)in->arg3;

    set_label_data->offset = le32_to_cpu(set_label_data->offset);
    set_label_data->length = le32_to_cpu(set_label_data->length);

    trace_acpi_nvdimm_write_label(set_label_data->offset,
                                  set_label_data->length);

    status = nvdimm_rw_label_data_check(nvdimm, set_label_data->offset,
                                        set_label_data->length, true);
    if (status != NVDIMM_DSM_RET_STATUS_SUCCESS) {
        nvdimm_dsm_no_payload(status, dsm_mem_addr);
        return;
    }

    assert(offsetof(NvdimmDsmIn, arg3) + sizeof(*set_label_data) +
           set_label_data->length <= NVDIMM_DSM_MEMORY_SIZE);

    nvc->write_label_data(nvdimm, set_label_data->in_buf,
                          set_label_data->length, set_label_data->offset);
    nvdimm_dsm_no_payload(NVDIMM_DSM_RET_STATUS_SUCCESS, dsm_mem_addr);
}

static void nvdimm_dsm_device(NvdimmDsmIn *in, hwaddr dsm_mem_addr)
{
    NVDIMMDevice *nvdimm = nvdimm_get_device_by_handle(in->handle);

    /* See the comments in nvdimm_dsm_root(). */
    if (!in->function) {
        uint32_t supported_func = 0;

        if (nvdimm && nvdimm->label_size) {
            supported_func |= 0x1 /* Bit 0 indicates whether there is
                                     support for any functions other
                                     than function 0. */ |
                              1 << 4 /* Get Namespace Label Size */ |
                              1 << 5 /* Get Namespace Label Data */ |
                              1 << 6 /* Set Namespace Label Data */;
        }
        nvdimm_dsm_function0(supported_func, dsm_mem_addr);
        return;
    }

    if (!nvdimm) {
        nvdimm_dsm_no_payload(NVDIMM_DSM_RET_STATUS_NOMEMDEV,
                              dsm_mem_addr);
        return;
    }

    /* Encode DSM function according to DSM Spec Rev1. */
    switch (in->function) {
    case 4 /* Get Namespace Label Size */:
        if (nvdimm->label_size) {
            nvdimm_dsm_label_size(nvdimm, dsm_mem_addr);
            return;
        }
        break;
    case 5 /* Get Namespace Label Data */:
        if (nvdimm->label_size) {
            nvdimm_dsm_get_label_data(nvdimm, in, dsm_mem_addr);
            return;
        }
        break;
    case 0x6 /* Set Namespace Label Data */:
        if (nvdimm->label_size) {
            nvdimm_dsm_set_label_data(nvdimm, in, dsm_mem_addr);
            return;
        }
        break;
    }

    nvdimm_dsm_no_payload(NVDIMM_DSM_RET_STATUS_UNSUPPORT, dsm_mem_addr);
}

static uint64_t
nvdimm_dsm_read(void *opaque, hwaddr addr, unsigned size)
{
    trace_acpi_nvdimm_read_io_port();
    return 0;
}

static void
nvdimm_dsm_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
{
    NVDIMMState *state = opaque;
    NvdimmDsmIn *in;
    hwaddr dsm_mem_addr = val;

    trace_acpi_nvdimm_dsm_mem_addr(dsm_mem_addr);

    /*
     * The DSM memory is mapped to guest address space so an evil guest
     * can change its content while we are doing DSM emulation. Avoid
     * this by copying DSM memory to QEMU local memory.
     */
    in = g_new(NvdimmDsmIn, 1);
    cpu_physical_memory_read(dsm_mem_addr, in, sizeof(*in));

    in->revision = le32_to_cpu(in->revision);
    in->function = le32_to_cpu(in->function);
    in->handle = le32_to_cpu(in->handle);

    trace_acpi_nvdimm_dsm_info(in->revision, in->handle, in->function);

    if (in->revision != 0x1 /* Currently we only support DSM Spec Rev1. */) {
        trace_acpi_nvdimm_invalid_revision(in->revision);
        nvdimm_dsm_no_payload(NVDIMM_DSM_RET_STATUS_UNSUPPORT, dsm_mem_addr);
        goto exit;
    }

    if (in->handle == NVDIMM_QEMU_RSVD_HANDLE_ROOT) {
        nvdimm_dsm_handle_reserved_root_method(state, in, dsm_mem_addr);
        goto exit;
    }

    /* Handle 0 is reserved for NVDIMM Root Device. */
    if (!in->handle) {
        nvdimm_dsm_root(in, dsm_mem_addr);
        goto exit;
    }

    nvdimm_dsm_device(in, dsm_mem_addr);

exit:
    g_free(in);
}
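
/*
 * The region below is the DSM "doorbell": the generated AML writes the guest
 * physical address of the DSM page to it, which traps into nvdimm_dsm_write()
 * above; reads simply return 0.
 */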
static const MemoryRegionOps nvdimm_dsm_ops = {
    .read = nvdimm_dsm_read,
    .write = nvdimm_dsm_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

void nvdimm_acpi_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev)
{
    if (dev->hotplugged) {
        acpi_send_event(DEVICE(hotplug_dev), ACPI_NVDIMM_HOTPLUG_STATUS);
    }
}

void nvdimm_init_acpi_state(NVDIMMState *state, MemoryRegion *io,
                            struct AcpiGenericAddress dsm_io,
                            FWCfgState *fw_cfg, Object *owner)
{
    state->dsm_io = dsm_io;
    memory_region_init_io(&state->io_mr, owner, &nvdimm_dsm_ops, state,
                          "nvdimm-acpi-io", dsm_io.bit_width >> 3);
    memory_region_add_subregion(io, dsm_io.address, &state->io_mr);

    state->dsm_mem = g_array_new(false, true /* clear */, 1);
    acpi_data_push(state->dsm_mem, sizeof(NvdimmDsmIn));
    fw_cfg_add_file(fw_cfg, NVDIMM_DSM_MEM_FILE, state->dsm_mem->data,
                    state->dsm_mem->len);

    nvdimm_init_fit_buffer(&state->fit_buf);
}

#define NVDIMM_COMMON_DSM       "NCAL"
#define NVDIMM_ACPI_MEM_ADDR    "MEMA"

#define NVDIMM_DSM_MEMORY       "NRAM"
#define NVDIMM_DSM_IOPORT       "NPIO"

#define NVDIMM_DSM_NOTIFY       "NTFI"
#define NVDIMM_DSM_HANDLE       "HDLE"
#define NVDIMM_DSM_REVISION     "REVS"
#define NVDIMM_DSM_FUNCTION     "FUNC"
#define NVDIMM_DSM_ARG3         "FARG"

#define NVDIMM_DSM_OUT_BUF_SIZE "RLEN"
#define NVDIMM_DSM_OUT_BUF      "ODAT"

#define NVDIMM_DSM_RFIT_STATUS  "RSTA"

#define NVDIMM_QEMU_RSVD_UUID   "648B9CF2-CDA1-4312-8AD9-49C4AF32BD62"
#define NVDIMM_DEVICE_DSM_UUID  "4309AC30-0D11-11E4-9191-0800200C9A66"
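
/*
 * The common NCAL method below validates Arg0 against one of three UUIDs
 * depending on the handle it was called with: the ACPI-defined root device
 * UUID for handle 0, NVDIMM_QEMU_RSVD_UUID for the QEMU-reserved root handle,
 * and NVDIMM_DEVICE_DSM_UUID for per-NVDIMM handles.
 */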
static void nvdimm_build_common_dsm(Aml *dev,
                                    NVDIMMState *nvdimm_state)
{
    Aml *method, *ifctx, *function, *handle, *uuid, *dsm_mem, *elsectx2;
    Aml *elsectx, *unsupport, *unpatched, *expected_uuid, *uuid_invalid;
    Aml *pckg, *pckg_index, *pckg_buf, *field, *dsm_out_buf, *dsm_out_buf_size;
    Aml *whilectx, *offset;
    uint8_t byte_list[1];
    AmlRegionSpace rs;

    method = aml_method(NVDIMM_COMMON_DSM, 5, AML_SERIALIZED);
    uuid = aml_arg(0);
    function = aml_arg(2);
    handle = aml_arg(4);
    dsm_mem = aml_local(6);
    dsm_out_buf = aml_local(7);

    aml_append(method, aml_store(aml_name(NVDIMM_ACPI_MEM_ADDR), dsm_mem));

    if (nvdimm_state->dsm_io.space_id == AML_AS_SYSTEM_IO) {
        rs = AML_SYSTEM_IO;
    } else {
        rs = AML_SYSTEM_MEMORY;
    }

    /* map DSM memory and IO into ACPI namespace. */
    aml_append(method, aml_operation_region(NVDIMM_DSM_IOPORT, rs,
               aml_int(nvdimm_state->dsm_io.address),
               nvdimm_state->dsm_io.bit_width >> 3));
    aml_append(method, aml_operation_region(NVDIMM_DSM_MEMORY,
               AML_SYSTEM_MEMORY, dsm_mem, sizeof(NvdimmDsmIn)));

    /*
     * DSM notifier:
     * NVDIMM_DSM_NOTIFY: write the address of DSM memory and notify QEMU to
     *                    emulate the access.
     *
     * It is an IO port (or MMIO register), so accessing it causes a VM-exit
     * and control is transferred to QEMU.
     */
    field = aml_field(NVDIMM_DSM_IOPORT, AML_DWORD_ACC, AML_NOLOCK,
                      AML_PRESERVE);
    aml_append(field, aml_named_field(NVDIMM_DSM_NOTIFY,
               nvdimm_state->dsm_io.bit_width));
    aml_append(method, field);

    /*
     * DSM input:
     * NVDIMM_DSM_HANDLE: store device's handle, it's zero if the _DSM call
     *                    happens on NVDIMM Root Device.
     * NVDIMM_DSM_REVISION: store the Arg1 of _DSM call.
     * NVDIMM_DSM_FUNCTION: store the Arg2 of _DSM call.
     * NVDIMM_DSM_ARG3: store the Arg3 of _DSM call which is a Package
     *                  containing function-specific arguments.
     *
     * These fields are backed by guest RAM on the host, so accessing them
     * never causes a VM-exit.
     */
    field = aml_field(NVDIMM_DSM_MEMORY, AML_DWORD_ACC, AML_NOLOCK,
                      AML_PRESERVE);
    aml_append(field, aml_named_field(NVDIMM_DSM_HANDLE,
               sizeof(typeof_field(NvdimmDsmIn, handle)) * BITS_PER_BYTE));
    aml_append(field, aml_named_field(NVDIMM_DSM_REVISION,
               sizeof(typeof_field(NvdimmDsmIn, revision)) * BITS_PER_BYTE));
    aml_append(field, aml_named_field(NVDIMM_DSM_FUNCTION,
               sizeof(typeof_field(NvdimmDsmIn, function)) * BITS_PER_BYTE));
    aml_append(field, aml_named_field(NVDIMM_DSM_ARG3,
               (sizeof(NvdimmDsmIn) - offsetof(NvdimmDsmIn, arg3)) * BITS_PER_BYTE));
    aml_append(method, field);

    /*
     * DSM output:
     * NVDIMM_DSM_OUT_BUF_SIZE: the size of the buffer filled by QEMU.
     * NVDIMM_DSM_OUT_BUF: the buffer QEMU uses to store the result.
     *
     * Since the page is reused for both input and output, the input data
     * will be lost once the new result is stored into ODAT, so all input
     * data must be fetched before the result is written.
     */
    field = aml_field(NVDIMM_DSM_MEMORY, AML_DWORD_ACC, AML_NOLOCK,
                      AML_PRESERVE);
    aml_append(field, aml_named_field(NVDIMM_DSM_OUT_BUF_SIZE,
               sizeof(typeof_field(NvdimmDsmOut, len)) * BITS_PER_BYTE));
    aml_append(field, aml_named_field(NVDIMM_DSM_OUT_BUF,
               (sizeof(NvdimmDsmOut) - offsetof(NvdimmDsmOut, data)) * BITS_PER_BYTE));
    aml_append(method, field);

    /*
     * do not support any method if DSM memory address has not been
     * patched.
     */
    unpatched = aml_equal(dsm_mem, aml_int(0x0));

    expected_uuid = aml_local(0);

    ifctx = aml_if(aml_equal(handle, aml_int(0x0)));
    aml_append(ifctx, aml_store(
        aml_touuid("2F10E7A4-9E91-11E4-89D3-123B93F75CBA")
        /* UUID for NVDIMM Root Device */, expected_uuid));
    aml_append(method, ifctx);
    elsectx = aml_else();
    ifctx = aml_if(aml_equal(handle, aml_int(NVDIMM_QEMU_RSVD_HANDLE_ROOT)));
    aml_append(ifctx, aml_store(aml_touuid(NVDIMM_QEMU_RSVD_UUID
                                /* UUID for QEMU internal use */), expected_uuid));
    aml_append(elsectx, ifctx);
    elsectx2 = aml_else();
    aml_append(elsectx2, aml_store(aml_touuid(NVDIMM_DEVICE_DSM_UUID)
                                   /* UUID for NVDIMM Devices */, expected_uuid));
    aml_append(elsectx, elsectx2);
    aml_append(method, elsectx);

    uuid_invalid = aml_lnot(aml_equal(uuid, expected_uuid));

    unsupport = aml_if(aml_lor(unpatched, uuid_invalid));

    /*
     * function 0 is called by OSPM to inquire which functions are
     * supported.
     */
    ifctx = aml_if(aml_equal(function, aml_int(0)));
    byte_list[0] = 0 /* No function supported */;
    aml_append(ifctx, aml_return(aml_buffer(1, byte_list)));
    aml_append(unsupport, ifctx);

    /* No function is supported yet. */
    byte_list[0] = NVDIMM_DSM_RET_STATUS_UNSUPPORT;
    aml_append(unsupport, aml_return(aml_buffer(1, byte_list)));
    aml_append(method, unsupport);

    /*
     * HDLE indicates which device issued the DSM call: 0 is reserved for
     * the root device, otherwise it is the handle of an NVDIMM device.
     * See the comments in nvdimm_slot_to_handle().
     */
    aml_append(method, aml_store(handle, aml_name(NVDIMM_DSM_HANDLE)));
    aml_append(method, aml_store(aml_arg(1), aml_name(NVDIMM_DSM_REVISION)));
    aml_append(method, aml_store(function, aml_name(NVDIMM_DSM_FUNCTION)));

    /*
     * The fourth parameter (Arg3) of _DSM is a package which contains
     * a buffer, the layout of the buffer is specified by UUID (Arg0),
     * Revision ID (Arg1) and Function Index (Arg2) which are documented
     * in the DSM Spec.
     */
    pckg = aml_arg(3);
    ifctx = aml_if(aml_land(aml_equal(aml_object_type(pckg),
                   aml_int(4 /* Package */)) /* It is a Package? */,
                   aml_equal(aml_sizeof(pckg), aml_int(1)) /* 1 element? */));

    pckg_index = aml_local(2);
    pckg_buf = aml_local(3);
    aml_append(ifctx, aml_store(aml_index(pckg, aml_int(0)), pckg_index));
    aml_append(ifctx, aml_store(aml_derefof(pckg_index), pckg_buf));
    aml_append(ifctx, aml_store(pckg_buf, aml_name(NVDIMM_DSM_ARG3)));
    aml_append(method, ifctx);

    /*
     * tell QEMU about the real address of DSM memory, then QEMU gets
     * control and fills the result into DSM memory.
     */
    aml_append(method, aml_store(dsm_mem, aml_name(NVDIMM_DSM_NOTIFY)));

    dsm_out_buf_size = aml_local(1);
    /* RLEN is not included in the payload returned to guest. */
    aml_append(method, aml_subtract(aml_name(NVDIMM_DSM_OUT_BUF_SIZE),
                                    aml_int(4), dsm_out_buf_size));

    /*
     * As per ACPI spec 6.3, Table 19-419 Object Conversion Rules, if
     * the Buffer Field is <= the size of an Integer (in bits), it will
     * be treated as an integer. Moreover, the integer size depends on
     * the DSDT table revision number: if the revision number is < 2,
     * the integer size is 32 bits, otherwise it is 64 bits.
     * Because of this, CreateField() cannot be used if RLEN < Integer Size.
     *
     * Also note that the ACPI ASL operator SizeOf() does not support
     * Integer and there isn't any other way to figure out the Integer
     * size. Hence we assume 8 bytes as the Integer size and if RLEN < 8
     * bytes, build dsm_out_buf byte by byte.
     */
    ifctx = aml_if(aml_lless(dsm_out_buf_size, aml_int(8)));
    offset = aml_local(2);
    aml_append(ifctx, aml_store(aml_int(0), offset));
    aml_append(ifctx, aml_name_decl("TBUF", aml_buffer(1, NULL)));
    aml_append(ifctx, aml_store(aml_buffer(0, NULL), dsm_out_buf));

    whilectx = aml_while(aml_lless(offset, dsm_out_buf_size));
    /* Copy 1 byte at offset from ODAT to temporary buffer (TBUF). */
    aml_append(whilectx, aml_store(aml_derefof(aml_index(
                                   aml_name(NVDIMM_DSM_OUT_BUF), offset)),
                                   aml_index(aml_name("TBUF"), aml_int(0))));
    aml_append(whilectx, aml_concatenate(dsm_out_buf, aml_name("TBUF"),
                                         dsm_out_buf));
    aml_append(whilectx, aml_increment(offset));
    aml_append(ifctx, whilectx);

    aml_append(ifctx, aml_return(dsm_out_buf));
    aml_append(method, ifctx);

    /* If RLEN >= Integer size, just use CreateField() operator */
    aml_append(method, aml_store(aml_shiftleft(dsm_out_buf_size, aml_int(3)),
                                 dsm_out_buf_size));
    aml_append(method, aml_create_field(aml_name(NVDIMM_DSM_OUT_BUF),
                                        aml_int(0), dsm_out_buf_size, "OBUF"));
    aml_append(method, aml_return(aml_name("OBUF")));

    aml_append(dev, method);
}
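
/*
 * Emits the per-handle _DSM wrapper, which simply forwards its arguments to
 * the common NCAL method. The generated AML corresponds roughly to this ASL
 * (the handle value shown is illustrative):
 *
 *     Method (_DSM, 4, NotSerialized) {
 *         Return (NCAL (Arg0, Arg1, Arg2, Arg3, 0x1))
 *     }
 */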
static void nvdimm_build_device_dsm(Aml *dev, uint32_t handle)
{
    Aml *method;

    method = aml_method("_DSM", 4, AML_NOTSERIALIZED);
    aml_append(method, aml_return(aml_call5(NVDIMM_COMMON_DSM, aml_arg(0),
                                  aml_arg(1), aml_arg(2), aml_arg(3),
                                  aml_int(handle))));
    aml_append(dev, method);
}
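
/*
 * Emits the RFIT helper and the _FIT method on the root device. _FIT calls
 * RFIT in a loop with an increasing offset, concatenating the returned
 * chunks until an empty buffer comes back, and restarts from offset 0 if
 * RFIT reports that the FIT changed mid-read.
 */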
static void nvdimm_build_fit(Aml *dev)
{
    Aml *method, *pkg, *buf, *buf_size, *offset, *call_result;
    Aml *whilectx, *ifcond, *ifctx, *elsectx, *fit;

    buf = aml_local(0);
    buf_size = aml_local(1);
    fit = aml_local(2);

    aml_append(dev, aml_name_decl(NVDIMM_DSM_RFIT_STATUS, aml_int(0)));

    /* build helper function, RFIT. */
    method = aml_method("RFIT", 1, AML_SERIALIZED);
    aml_append(method, aml_name_decl("OFST", aml_int(0)));

    /* prepare input package. */
    pkg = aml_package(1);
    aml_append(method, aml_store(aml_arg(0), aml_name("OFST")));
    aml_append(pkg, aml_name("OFST"));

    /* call Read_FIT function. */
    call_result = aml_call5(NVDIMM_COMMON_DSM,
                            aml_touuid(NVDIMM_QEMU_RSVD_UUID),
                            aml_int(1) /* Revision 1 */,
                            aml_int(0x1) /* Read FIT */,
                            pkg, aml_int(NVDIMM_QEMU_RSVD_HANDLE_ROOT));
    aml_append(method, aml_store(call_result, buf));

    /* handle _DSM result. */
    aml_append(method, aml_create_dword_field(buf,
               aml_int(0) /* offset at byte 0 */, "STAU"));

    aml_append(method, aml_store(aml_name("STAU"),
                                 aml_name(NVDIMM_DSM_RFIT_STATUS)));

    /* if something went wrong during _DSM. */
    ifcond = aml_equal(aml_int(NVDIMM_DSM_RET_STATUS_SUCCESS),
                       aml_name("STAU"));
    ifctx = aml_if(aml_lnot(ifcond));
    aml_append(ifctx, aml_return(aml_buffer(0, NULL)));
    aml_append(method, ifctx);

    aml_append(method, aml_store(aml_sizeof(buf), buf_size));
    aml_append(method, aml_subtract(buf_size,
                                    aml_int(4) /* the size of "STAU" */,
                                    buf_size));

    /* if we have reached the end of the FIT. */
    ifctx = aml_if(aml_equal(buf_size, aml_int(0)));
    aml_append(ifctx, aml_return(aml_buffer(0, NULL)));
    aml_append(method, ifctx);

    aml_append(method, aml_create_field(buf,
                            aml_int(4 * BITS_PER_BYTE), /* offset at byte 4. */
                            aml_shiftleft(buf_size, aml_int(3)), "BUFF"));
    aml_append(method, aml_return(aml_name("BUFF")));
    aml_append(dev, method);

    /* build _FIT. */
    method = aml_method("_FIT", 0, AML_SERIALIZED);
    offset = aml_local(3);

    aml_append(method, aml_store(aml_buffer(0, NULL), fit));
    aml_append(method, aml_store(aml_int(0), offset));

    whilectx = aml_while(aml_int(1));
    aml_append(whilectx, aml_store(aml_call1("RFIT", offset), buf));
    aml_append(whilectx, aml_store(aml_sizeof(buf), buf_size));

    /*
     * if the FIT buffer was changed during RFIT, read from the beginning
     * again.
     */
    ifctx = aml_if(aml_equal(aml_name(NVDIMM_DSM_RFIT_STATUS),
                             aml_int(NVDIMM_DSM_RET_STATUS_FIT_CHANGED)));
    aml_append(ifctx, aml_store(aml_buffer(0, NULL), fit));
    aml_append(ifctx, aml_store(aml_int(0), offset));
    aml_append(whilectx, ifctx);

    elsectx = aml_else();

    /* finish the FIT read if no data was read out. */
    ifctx = aml_if(aml_equal(buf_size, aml_int(0)));
    aml_append(ifctx, aml_return(fit));
    aml_append(elsectx, ifctx);

    /* update the offset. */
    aml_append(elsectx, aml_add(offset, buf_size, offset));
    /* append the data we read out to the fit buffer. */
    aml_append(elsectx, aml_concatenate(fit, buf, fit));
    aml_append(whilectx, elsectx);
    aml_append(method, whilectx);
    aml_append(dev, method);
}
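
/*
 * Emits one "NVxx" ACPI namespace device per possible DIMM slot, each with
 * an _ADR returning its NFIT handle, label methods _LSI/_LSR/_LSW built on
 * top of NCAL, and a per-device _DSM wrapper.
 */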
static void nvdimm_build_nvdimm_devices(Aml *root_dev, uint32_t ram_slots)
{
    uint32_t slot;
    Aml *method, *pkg, *field, *com_call;

    for (slot = 0; slot < ram_slots; slot++) {
        uint32_t handle = nvdimm_slot_to_handle(slot);
        Aml *nvdimm_dev;

        nvdimm_dev = aml_device("NV%02X", slot);

        /*
         * ACPI 6.0: 9.20 NVDIMM Devices:
         *
         * _ADR object that is used to supply OSPM with unique address
         * of the NVDIMM device. This is done by returning the NFIT Device
         * handle that is used to identify the associated entries in ACPI
         * table NFIT or _FIT.
         */
        aml_append(nvdimm_dev, aml_name_decl("_ADR", aml_int(handle)));

        /*
         * ACPI v6.4: Section 6.5.10 NVDIMM Label Methods
         */
        /* _LSI */
        method = aml_method("_LSI", 0, AML_SERIALIZED);
        com_call = aml_call5(NVDIMM_COMMON_DSM,
                             aml_touuid(NVDIMM_DEVICE_DSM_UUID),
                             aml_int(1), aml_int(4), aml_int(0),
                             aml_int(handle));
        aml_append(method, aml_store(com_call, aml_local(0)));

        aml_append(method, aml_create_dword_field(aml_local(0),
                                                  aml_int(0), "STTS"));
        aml_append(method, aml_create_dword_field(aml_local(0), aml_int(4),
                                                  "SLSA"));
        aml_append(method, aml_create_dword_field(aml_local(0), aml_int(8),
                                                  "MAXT"));

        pkg = aml_package(3);
        aml_append(pkg, aml_name("STTS"));
        aml_append(pkg, aml_name("SLSA"));
        aml_append(pkg, aml_name("MAXT"));
        aml_append(method, aml_store(pkg, aml_local(1)));
        aml_append(method, aml_return(aml_local(1)));

        aml_append(nvdimm_dev, method);

        /* _LSR */
        method = aml_method("_LSR", 2, AML_SERIALIZED);
        aml_append(method, aml_name_decl("INPT", aml_buffer(8, NULL)));

        aml_append(method, aml_create_dword_field(aml_name("INPT"),
                                                  aml_int(0), "OFST"));
        aml_append(method, aml_create_dword_field(aml_name("INPT"),
                                                  aml_int(4), "LEN"));
        aml_append(method, aml_store(aml_arg(0), aml_name("OFST")));
        aml_append(method, aml_store(aml_arg(1), aml_name("LEN")));

        pkg = aml_package(1);
        aml_append(pkg, aml_name("INPT"));
        aml_append(method, aml_store(pkg, aml_local(0)));

        com_call = aml_call5(NVDIMM_COMMON_DSM,
                             aml_touuid(NVDIMM_DEVICE_DSM_UUID),
                             aml_int(1), aml_int(5), aml_local(0),
                             aml_int(handle));
        aml_append(method, aml_store(com_call, aml_local(3)));
        field = aml_create_dword_field(aml_local(3), aml_int(0), "STTS");
        aml_append(method, field);
        field = aml_create_field(aml_local(3), aml_int(32),
                                 aml_shiftleft(aml_name("LEN"), aml_int(3)),
                                 "LDAT");
        aml_append(method, field);
        aml_append(method, aml_name_decl("LSA", aml_buffer(0, NULL)));
        aml_append(method, aml_to_buffer(aml_name("LDAT"), aml_name("LSA")));

        pkg = aml_package(2);
        aml_append(pkg, aml_name("STTS"));
        aml_append(pkg, aml_name("LSA"));

        aml_append(method, aml_store(pkg, aml_local(1)));
        aml_append(method, aml_return(aml_local(1)));

        aml_append(nvdimm_dev, method);

        /* _LSW */
        method = aml_method("_LSW", 3, AML_SERIALIZED);
        aml_append(method, aml_store(aml_arg(2), aml_local(2)));
        aml_append(method, aml_name_decl("INPT", aml_buffer(8, NULL)));
        field = aml_create_dword_field(aml_name("INPT"),
                                       aml_int(0), "OFST");
        aml_append(method, field);
        field = aml_create_dword_field(aml_name("INPT"),
                                       aml_int(4), "TLEN");
        aml_append(method, field);
        aml_append(method, aml_store(aml_arg(0), aml_name("OFST")));
        aml_append(method, aml_store(aml_arg(1), aml_name("TLEN")));

        aml_append(method, aml_concatenate(aml_name("INPT"), aml_local(2),
                                           aml_name("INPT")));
        pkg = aml_package(1);
        aml_append(pkg, aml_name("INPT"));
        aml_append(method, aml_store(pkg, aml_local(0)));
        com_call = aml_call5(NVDIMM_COMMON_DSM,
                             aml_touuid(NVDIMM_DEVICE_DSM_UUID),
                             aml_int(1), aml_int(6), aml_local(0),
                             aml_int(handle));
        aml_append(method, aml_store(com_call, aml_local(3)));
        field = aml_create_dword_field(aml_local(3), aml_int(0), "STTS");
        aml_append(method, field);
        aml_append(method, aml_return(aml_name("STTS")));

        aml_append(nvdimm_dev, method);

        nvdimm_build_device_dsm(nvdimm_dev, handle);
        aml_append(root_dev, nvdimm_dev);
    }
}

static void nvdimm_build_ssdt(GArray *table_offsets, GArray *table_data,
                              BIOSLinker *linker,
                              NVDIMMState *nvdimm_state,
                              uint32_t ram_slots, const char *oem_id)
{
    int mem_addr_offset;
    Aml *ssdt, *sb_scope, *dev;
    AcpiTable table = { .sig = "SSDT", .rev = 1,
                        .oem_id = oem_id, .oem_table_id = "NVDIMM" };

    acpi_add_table(table_offsets, table_data);

    acpi_table_begin(&table, table_data);
    ssdt = init_aml_allocator();
    sb_scope = aml_scope("\\_SB");

    dev = aml_device("NVDR");

    /*
     * ACPI 6.0: 9.20 NVDIMM Devices:
     *
     * The ACPI Name Space device uses _HID of ACPI0012 to identify the root
     * NVDIMM interface device. Platform firmware is required to contain one
     * such device in _SB scope if NVDIMMs support is exposed by platform to
     * OSPM.
     * For each NVDIMM present or intended to be supported by platform,
     * platform firmware also exposes an ACPI Namespace Device under the
     * root device.
     */
    aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0012")));

    nvdimm_build_common_dsm(dev, nvdimm_state);

    /* 0 is reserved for root device. */
    nvdimm_build_device_dsm(dev, 0);
    nvdimm_build_fit(dev);

    nvdimm_build_nvdimm_devices(dev, ram_slots);

    aml_append(sb_scope, dev);
    aml_append(ssdt, sb_scope);

    /* copy AML table into ACPI tables blob and patch header there */
    g_array_append_vals(table_data, ssdt->buf->data, ssdt->buf->len);
    mem_addr_offset = build_append_named_dword(table_data,
                                               NVDIMM_ACPI_MEM_ADDR);

    bios_linker_loader_alloc(linker,
                             NVDIMM_DSM_MEM_FILE, nvdimm_state->dsm_mem,
                             sizeof(NvdimmDsmIn), false /* high memory */);
    bios_linker_loader_add_pointer(linker,
        ACPI_BUILD_TABLE_FILE, mem_addr_offset, sizeof(uint32_t),
        NVDIMM_DSM_MEM_FILE, 0);
    free_aml_allocator();
    /*
     * must be executed last so that the pointer-patching command above is
     * executed by the guest before the checksum recalculation scheduled by
     * acpi_table_end().
     */
    acpi_table_end(linker, &table);
}

void nvdimm_build_srat(GArray *table_data)
{
    GSList *device_list, *list = nvdimm_get_device_list();

    for (device_list = list; device_list; device_list = device_list->next) {
        DeviceState *dev = device_list->data;
        Object *obj = OBJECT(dev);
        uint64_t addr, size;
        int node;

        node = object_property_get_int(obj, PC_DIMM_NODE_PROP, &error_abort);
        addr = object_property_get_uint(obj, PC_DIMM_ADDR_PROP, &error_abort);
        size = object_property_get_uint(obj, PC_DIMM_SIZE_PROP, &error_abort);

        build_srat_memory(table_data, addr, size, node,
                          MEM_AFFINITY_ENABLED | MEM_AFFINITY_NON_VOLATILE);
    }
    g_slist_free(list);
}

void nvdimm_build_acpi(GArray *table_offsets, GArray *table_data,
                       BIOSLinker *linker, NVDIMMState *state,
                       uint32_t ram_slots, const char *oem_id,
                       const char *oem_table_id)
{
    GSList *device_list;

    /* no nvdimm device can be plugged. */
    if (!ram_slots) {
        return;
    }

    nvdimm_build_ssdt(table_offsets, table_data, linker, state,
                      ram_slots, oem_id);

    device_list = nvdimm_get_device_list();
    /* no NVDIMM device is plugged. */
    if (!device_list) {
        return;
    }

    nvdimm_build_nfit(state, table_offsets, table_data, linker,
                      oem_id, oem_table_id);
    g_slist_free(device_list);
}