/*
 * NVDIMM ACPI Implementation
 *
 * Copyright(C) 2015 Intel Corporation.
 *
 * Author:
 *  Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *
 * NFIT is defined in ACPI 6.0: 5.2.25 NVDIMM Firmware Interface Table (NFIT)
 * and the DSM specification can be found at:
 *       http://pmem.io/documents/NVDIMM_DSM_Interface_Example.pdf
 *
 * Currently, it only supports PMEM Virtualization.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>
 */

#include "qemu/osdep.h"
#include "hw/acpi/acpi.h"
#include "hw/acpi/aml-build.h"
#include "hw/acpi/bios-linker-loader.h"
#include "hw/nvram/fw_cfg.h"
#include "hw/mem/nvdimm.h"

static int nvdimm_device_list(Object *obj, void *opaque)
{
    GSList **list = opaque;

    if (object_dynamic_cast(obj, TYPE_NVDIMM)) {
        *list = g_slist_append(*list, DEVICE(obj));
    }

    object_child_foreach(obj, nvdimm_device_list, opaque);
    return 0;
}

/*
 * Inquire NVDIMM devices and link them into the list which is
 * returned to the caller.
 *
 * Note: it is the caller's responsibility to free the list to avoid
 * a memory leak.
 */
static GSList *nvdimm_get_device_list(void)
{
    GSList *list = NULL;

    object_child_foreach(qdev_get_machine(), nvdimm_device_list, &list);
    return list;
}

#define NVDIMM_UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)             \
   { (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
     (b) & 0xff, ((b) >> 8) & 0xff, (c) & 0xff, ((c) >> 8) & 0xff,          \
     (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }

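/*
 * NVDIMM_UUID_LE() lays a GUID out in the byte order NFIT expects: the
 * first three fields are stored little-endian and the final eight bytes
 * are copied as-is.
 */
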
/*
 * define Byte Addressable Persistent Memory (PM) Region according to
 * ACPI 6.0: 5.2.25.1 System Physical Address Range Structure.
 */
static const uint8_t nvdimm_nfit_spa_uuid[] =
      NVDIMM_UUID_LE(0x66f0d379, 0xb4f3, 0x4074, 0xac, 0x43, 0x0d, 0x33,
                     0x18, 0xb7, 0x8c, 0xdb);

/*
 * NVDIMM Firmware Interface Table
 * @signature: "NFIT"
 *
 * It provides information that allows OSPM to enumerate the NVDIMMs present
 * in the platform and associate system physical address ranges created by
 * the NVDIMMs.
 *
 * It is defined in ACPI 6.0: 5.2.25 NVDIMM Firmware Interface Table (NFIT)
 */
struct NvdimmNfitHeader {
    ACPI_TABLE_HEADER_DEF
    uint32_t reserved;
} QEMU_PACKED;
typedef struct NvdimmNfitHeader NvdimmNfitHeader;

/*
 * define NFIT structures according to ACPI 6.0: 5.2.25 NVDIMM Firmware
 * Interface Table (NFIT).
 */

/*
 * System Physical Address Range Structure
 *
 * It describes the system physical address ranges occupied by NVDIMMs and
 * the types of the regions.
 */
struct NvdimmNfitSpa {
    uint16_t type;
    uint16_t length;
    uint16_t spa_index;
    uint16_t flags;
    uint32_t reserved;
    uint32_t proximity_domain;
    uint8_t type_guid[16];
    uint64_t spa_base;
    uint64_t spa_length;
    uint64_t mem_attr;
} QEMU_PACKED;
typedef struct NvdimmNfitSpa NvdimmNfitSpa;

/*
 * Memory Device to System Physical Address Range Mapping Structure
 *
 * It enables identifying each NVDIMM region and the corresponding SPA
 * describing the memory interleave.
 */
struct NvdimmNfitMemDev {
    uint16_t type;
    uint16_t length;
    uint32_t nfit_handle;
    uint16_t phys_id;
    uint16_t region_id;
    uint16_t spa_index;
    uint16_t dcr_index;
    uint64_t region_len;
    uint64_t region_offset;
    uint64_t region_dpa;
    uint16_t interleave_index;
    uint16_t interleave_ways;
    uint16_t flags;
    uint16_t reserved;
} QEMU_PACKED;
typedef struct NvdimmNfitMemDev NvdimmNfitMemDev;

#define ACPI_NFIT_MEM_NOT_ARMED     (1 << 3)

/*
 * NVDIMM Control Region Structure
 *
 * It describes the NVDIMM and, if applicable, the Block Control Window.
 */
struct NvdimmNfitControlRegion {
    uint16_t type;
    uint16_t length;
    uint16_t dcr_index;
    uint16_t vendor_id;
    uint16_t device_id;
    uint16_t revision_id;
    uint16_t sub_vendor_id;
    uint16_t sub_device_id;
    uint16_t sub_revision_id;
    uint8_t reserved[6];
    uint32_t serial_number;
    uint16_t fic;
    uint16_t num_bcw;
    uint64_t bcw_size;
    uint64_t cmd_offset;
    uint64_t cmd_size;
    uint64_t status_offset;
    uint64_t status_size;
    uint16_t flags;
    uint8_t reserved2[6];
} QEMU_PACKED;
typedef struct NvdimmNfitControlRegion NvdimmNfitControlRegion;

/*
 * NVDIMM Platform Capabilities Structure
 *
 * Defined in section 5.2.25.9 of ACPI 6.2 Errata A, September 2017
 */
struct NvdimmNfitPlatformCaps {
    uint16_t type;
    uint16_t length;
    uint8_t highest_cap;
    uint8_t reserved[3];
    uint32_t capabilities;
    uint8_t reserved2[4];
} QEMU_PACKED;
typedef struct NvdimmNfitPlatformCaps NvdimmNfitPlatformCaps;

/*
 * The module serial number is a unique number for each device. We use the
 * slot id of the NVDIMM device to generate this number so that each device
 * associates with a different number.
 *
 * 0x123456 is a magic number we arbitrarily chose.
 */
static uint32_t nvdimm_slot_to_sn(int slot)
{
    return 0x123456 + slot;
}

/*
 * The handle is used to uniquely associate the nfit_memdev structure with
 * an NVDIMM ACPI device - nfit_memdev.nfit_handle matches the value
 * returned by the ACPI device's _ADR method.
 *
 * We generate the handle with the slot id of the NVDIMM device and reserve
 * 0 for the NVDIMM root device.
 */
static uint32_t nvdimm_slot_to_handle(int slot)
{
    return slot + 1;
}

/*
 * An index uniquely identifies a structure; 0 is reserved and indicates
 * that the structure is not valid or the associated structure is not
 * present.
 *
 * Each NVDIMM device needs two indexes, one for nfit_spa and another for
 * nfit_dcr, which are generated from the slot id of the NVDIMM device.
 */
static uint16_t nvdimm_slot_to_spa_index(int slot)
{
    return (slot + 1) << 1;
}

/* See the comments of nvdimm_slot_to_spa_index(). */
static uint32_t nvdimm_slot_to_dcr_index(int slot)
{
    return nvdimm_slot_to_spa_index(slot) + 1;
}

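/*
 * Example of the per-slot numbering produced by the helpers above:
 * slot 0 -> handle 1, spa_index 2, dcr_index 3;
 * slot 1 -> handle 2, spa_index 4, dcr_index 5.
 */
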
static NVDIMMDevice *nvdimm_get_device_by_handle(uint32_t handle)
{
    NVDIMMDevice *nvdimm = NULL;
    GSList *list, *device_list = nvdimm_get_device_list();

    for (list = device_list; list; list = list->next) {
        NVDIMMDevice *nvd = list->data;
        int slot = object_property_get_int(OBJECT(nvd), PC_DIMM_SLOT_PROP,
                                           NULL);

        if (nvdimm_slot_to_handle(slot) == handle) {
            nvdimm = nvd;
            break;
        }
    }

    g_slist_free(device_list);
    return nvdimm;
}

/* ACPI 6.0: 5.2.25.1 System Physical Address Range Structure */
static void
nvdimm_build_structure_spa(GArray *structures, DeviceState *dev)
{
    NvdimmNfitSpa *nfit_spa;
    uint64_t addr = object_property_get_uint(OBJECT(dev), PC_DIMM_ADDR_PROP,
                                             NULL);
    uint64_t size = object_property_get_uint(OBJECT(dev), PC_DIMM_SIZE_PROP,
                                             NULL);
    uint32_t node = object_property_get_uint(OBJECT(dev), PC_DIMM_NODE_PROP,
                                             NULL);
    int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
                                       NULL);

    nfit_spa = acpi_data_push(structures, sizeof(*nfit_spa));

    nfit_spa->type = cpu_to_le16(0 /* System Physical Address Range
                                      Structure */);
    nfit_spa->length = cpu_to_le16(sizeof(*nfit_spa));
    nfit_spa->spa_index = cpu_to_le16(nvdimm_slot_to_spa_index(slot));

    /*
     * The control region is strict because all the device info, such as
     * SN and index, is associated with the slot id.
     */
    nfit_spa->flags = cpu_to_le16(1 /* Control region is strictly for
                                       management during hot add/online
                                       operation */ |
                                  2 /* Data in Proximity Domain field is
                                       valid */);

    /* NUMA node. */
    nfit_spa->proximity_domain = cpu_to_le32(node);
    /* the region reported as PMEM. */
    memcpy(nfit_spa->type_guid, nvdimm_nfit_spa_uuid,
           sizeof(nvdimm_nfit_spa_uuid));

    nfit_spa->spa_base = cpu_to_le64(addr);
    nfit_spa->spa_length = cpu_to_le64(size);

    /* It is the PMEM and can be cached as writeback. */
    nfit_spa->mem_attr = cpu_to_le64(0x8ULL /* EFI_MEMORY_WB */ |
                                     0x8000ULL /* EFI_MEMORY_NV */);
}

/*
 * ACPI 6.0: 5.2.25.2 Memory Device to System Physical Address Range Mapping
 * Structure
 */
static void
nvdimm_build_structure_memdev(GArray *structures, DeviceState *dev)
{
    NvdimmNfitMemDev *nfit_memdev;
    NVDIMMDevice *nvdimm = NVDIMM(OBJECT(dev));
    uint64_t size = object_property_get_uint(OBJECT(dev), PC_DIMM_SIZE_PROP,
                                             NULL);
    int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
                                       NULL);
    uint32_t handle = nvdimm_slot_to_handle(slot);

    nfit_memdev = acpi_data_push(structures, sizeof(*nfit_memdev));

    nfit_memdev->type = cpu_to_le16(1 /* Memory Device to System Address
                                         Range Map Structure */);
    nfit_memdev->length = cpu_to_le16(sizeof(*nfit_memdev));
    nfit_memdev->nfit_handle = cpu_to_le32(handle);

    /*
     * associate memory device with System Physical Address Range
     * Structure.
     */
    nfit_memdev->spa_index = cpu_to_le16(nvdimm_slot_to_spa_index(slot));
    /* associate memory device with Control Region Structure. */
    nfit_memdev->dcr_index = cpu_to_le16(nvdimm_slot_to_dcr_index(slot));

    /* The memory region on the device. */
    nfit_memdev->region_len = cpu_to_le64(size);
    /* The device address starts from 0. */
    nfit_memdev->region_dpa = cpu_to_le64(0);

    /* Only one interleave for PMEM. */
    nfit_memdev->interleave_ways = cpu_to_le16(1);

    if (nvdimm->unarmed) {
        nfit_memdev->flags |= cpu_to_le16(ACPI_NFIT_MEM_NOT_ARMED);
    }
}

/*
 * ACPI 6.0: 5.2.25.5 NVDIMM Control Region Structure.
 */
static void nvdimm_build_structure_dcr(GArray *structures, DeviceState *dev)
{
    NvdimmNfitControlRegion *nfit_dcr;
    int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
                                       NULL);
    uint32_t sn = nvdimm_slot_to_sn(slot);

    nfit_dcr = acpi_data_push(structures, sizeof(*nfit_dcr));

    nfit_dcr->type = cpu_to_le16(4 /* NVDIMM Control Region Structure */);
    nfit_dcr->length = cpu_to_le16(sizeof(*nfit_dcr));
    nfit_dcr->dcr_index = cpu_to_le16(nvdimm_slot_to_dcr_index(slot));

    /* vendor: Intel. */
    nfit_dcr->vendor_id = cpu_to_le16(0x8086);
    nfit_dcr->device_id = cpu_to_le16(1);

    /* The _DSM method is following Intel's DSM specification. */
    nfit_dcr->revision_id = cpu_to_le16(1 /* Current Revision supported
                                             in ACPI 6.0 is 1. */);
    nfit_dcr->serial_number = cpu_to_le32(sn);
    nfit_dcr->fic = cpu_to_le16(0x301 /* Format Interface Code:
                                         Byte addressable, no energy backed.
                                         See ACPI 6.2, sect 5.2.25.6 and
                                         JEDEC Annex L Release 3. */);
}

/*
 * ACPI 6.2 Errata A: 5.2.25.9 NVDIMM Platform Capabilities Structure
 */
static void
nvdimm_build_structure_caps(GArray *structures, uint32_t capabilities)
{
    NvdimmNfitPlatformCaps *nfit_caps;

    nfit_caps = acpi_data_push(structures, sizeof(*nfit_caps));

    nfit_caps->type = cpu_to_le16(7 /* NVDIMM Platform Capabilities */);
    nfit_caps->length = cpu_to_le16(sizeof(*nfit_caps));
    nfit_caps->highest_cap = 31 - clz32(capabilities);
    nfit_caps->capabilities = cpu_to_le32(capabilities);
}

static GArray *nvdimm_build_device_structure(NVDIMMState *state)
{
    GSList *device_list = nvdimm_get_device_list();
    GArray *structures = g_array_new(false, true /* clear */, 1);

    for (; device_list; device_list = device_list->next) {
        DeviceState *dev = device_list->data;

        /* build System Physical Address Range Structure. */
        nvdimm_build_structure_spa(structures, dev);

        /*
         * build Memory Device to System Physical Address Range Mapping
         * Structure.
         */
        nvdimm_build_structure_memdev(structures, dev);

        /* build NVDIMM Control Region Structure. */
        nvdimm_build_structure_dcr(structures, dev);
    }
    g_slist_free(device_list);

    if (state->persistence) {
        nvdimm_build_structure_caps(structures, state->persistence);
    }

    return structures;
}

static void nvdimm_init_fit_buffer(NvdimmFitBuffer *fit_buf)
{
    fit_buf->fit = g_array_new(false, true /* clear */, 1);
}

static void nvdimm_build_fit_buffer(NVDIMMState *state)
{
    NvdimmFitBuffer *fit_buf = &state->fit_buf;

    g_array_free(fit_buf->fit, true);
    fit_buf->fit = nvdimm_build_device_structure(state);
    fit_buf->dirty = true;
}

void nvdimm_plug(NVDIMMState *state)
{
    nvdimm_build_fit_buffer(state);
}

static void nvdimm_build_nfit(NVDIMMState *state, GArray *table_offsets,
                              GArray *table_data, BIOSLinker *linker)
{
    NvdimmFitBuffer *fit_buf = &state->fit_buf;
    unsigned int header;

    acpi_add_table(table_offsets, table_data);

    /* NFIT header. */
    header = table_data->len;
    acpi_data_push(table_data, sizeof(NvdimmNfitHeader));
    /* NVDIMM device structures. */
    g_array_append_vals(table_data, fit_buf->fit->data, fit_buf->fit->len);

    build_header(linker, table_data,
                 (void *)(table_data->data + header), "NFIT",
                 sizeof(NvdimmNfitHeader) + fit_buf->fit->len, 1, NULL, NULL);
}

#define NVDIMM_DSM_MEMORY_SIZE      4096

struct NvdimmDsmIn {
    uint32_t handle;
    uint32_t revision;
    uint32_t function;
    /* the remaining size in the page is used by arg3. */
    union {
        uint8_t arg3[4084];
    };
} QEMU_PACKED;
typedef struct NvdimmDsmIn NvdimmDsmIn;
QEMU_BUILD_BUG_ON(sizeof(NvdimmDsmIn) != NVDIMM_DSM_MEMORY_SIZE);

struct NvdimmDsmOut {
    /* the size of buffer filled by QEMU. */
    uint32_t len;
    uint8_t data[4092];
} QEMU_PACKED;
typedef struct NvdimmDsmOut NvdimmDsmOut;
QEMU_BUILD_BUG_ON(sizeof(NvdimmDsmOut) != NVDIMM_DSM_MEMORY_SIZE);

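/*
 * NvdimmDsmIn and NvdimmDsmOut are two views of the same 4 KiB DSM page:
 * the guest fills it with the input arguments and QEMU overwrites it with
 * the result, which is why both are pinned to NVDIMM_DSM_MEMORY_SIZE.
 */
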
struct NvdimmDsmFunc0Out {
    /* the size of buffer filled by QEMU. */
    uint32_t len;
    uint32_t supported_func;
} QEMU_PACKED;
typedef struct NvdimmDsmFunc0Out NvdimmDsmFunc0Out;

struct NvdimmDsmFuncNoPayloadOut {
    /* the size of buffer filled by QEMU. */
    uint32_t len;
    uint32_t func_ret_status;
} QEMU_PACKED;
typedef struct NvdimmDsmFuncNoPayloadOut NvdimmDsmFuncNoPayloadOut;

struct NvdimmFuncGetLabelSizeOut {
    /* the size of buffer filled by QEMU. */
    uint32_t len;
    uint32_t func_ret_status; /* return status code. */
    uint32_t label_size;      /* the size of label data area. */
    /*
     * Maximum size of the namespace label data length supported by
     * the platform in Get/Set Namespace Label Data functions.
     */
    uint32_t max_xfer;
} QEMU_PACKED;
typedef struct NvdimmFuncGetLabelSizeOut NvdimmFuncGetLabelSizeOut;
QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncGetLabelSizeOut) > NVDIMM_DSM_MEMORY_SIZE);

struct NvdimmFuncGetLabelDataIn {
    uint32_t offset; /* the offset in the namespace label data area. */
    uint32_t length; /* the size of data to be read via the function. */
} QEMU_PACKED;
typedef struct NvdimmFuncGetLabelDataIn NvdimmFuncGetLabelDataIn;
QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncGetLabelDataIn) +
                  offsetof(NvdimmDsmIn, arg3) > NVDIMM_DSM_MEMORY_SIZE);

struct NvdimmFuncGetLabelDataOut {
    /* the size of buffer filled by QEMU. */
    uint32_t len;
    uint32_t func_ret_status; /* return status code. */
    uint8_t out_buf[0];       /* the data read via the Get Namespace Label
                                 Data function. */
} QEMU_PACKED;
typedef struct NvdimmFuncGetLabelDataOut NvdimmFuncGetLabelDataOut;
QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncGetLabelDataOut) > NVDIMM_DSM_MEMORY_SIZE);

struct NvdimmFuncSetLabelDataIn {
    uint32_t offset;   /* the offset in the namespace label data area. */
    uint32_t length;   /* the size of data to be written via the function. */
    uint8_t in_buf[0]; /* the data written to the label data area. */
} QEMU_PACKED;
typedef struct NvdimmFuncSetLabelDataIn NvdimmFuncSetLabelDataIn;
QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncSetLabelDataIn) +
                  offsetof(NvdimmDsmIn, arg3) > NVDIMM_DSM_MEMORY_SIZE);

struct NvdimmFuncReadFITIn {
    uint32_t offset; /* the offset into the FIT buffer. */
} QEMU_PACKED;
typedef struct NvdimmFuncReadFITIn NvdimmFuncReadFITIn;
QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncReadFITIn) +
                  offsetof(NvdimmDsmIn, arg3) > NVDIMM_DSM_MEMORY_SIZE);

struct NvdimmFuncReadFITOut {
    /* the size of buffer filled by QEMU. */
    uint32_t len;
    uint32_t func_ret_status; /* return status code. */
    uint8_t fit[0];           /* the FIT data. */
} QEMU_PACKED;
typedef struct NvdimmFuncReadFITOut NvdimmFuncReadFITOut;
QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncReadFITOut) > NVDIMM_DSM_MEMORY_SIZE);

static void
nvdimm_dsm_function0(uint32_t supported_func, hwaddr dsm_mem_addr)
{
    NvdimmDsmFunc0Out func0 = {
        .len = cpu_to_le32(sizeof(func0)),
        .supported_func = cpu_to_le32(supported_func),
    };
    cpu_physical_memory_write(dsm_mem_addr, &func0, sizeof(func0));
}

static void
nvdimm_dsm_no_payload(uint32_t func_ret_status, hwaddr dsm_mem_addr)
{
    NvdimmDsmFuncNoPayloadOut out = {
        .len = cpu_to_le32(sizeof(out)),
        .func_ret_status = cpu_to_le32(func_ret_status),
    };
    cpu_physical_memory_write(dsm_mem_addr, &out, sizeof(out));
}

#define NVDIMM_DSM_RET_STATUS_SUCCESS        0     /* Success */
#define NVDIMM_DSM_RET_STATUS_UNSUPPORT      1     /* Not Supported */
#define NVDIMM_DSM_RET_STATUS_NOMEMDEV       2     /* Non-Existing Memory Device */
#define NVDIMM_DSM_RET_STATUS_INVALID        3     /* Invalid Input Parameters */
#define NVDIMM_DSM_RET_STATUS_FIT_CHANGED    0x100 /* FIT Changed */

#define NVDIMM_QEMU_RSVD_HANDLE_ROOT         0x10000

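/*
 * NVDIMM_QEMU_RSVD_HANDLE_ROOT is chosen so that it does not collide with
 * the handles nvdimm_slot_to_handle() derives from slot ids; it is reserved
 * for QEMU's private root-device functions such as Read FIT.
 */
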
/* Read FIT data, defined in docs/specs/acpi_nvdimm.txt. */
static void nvdimm_dsm_func_read_fit(NVDIMMState *state, NvdimmDsmIn *in,
                                     hwaddr dsm_mem_addr)
{
    NvdimmFitBuffer *fit_buf = &state->fit_buf;
    NvdimmFuncReadFITIn *read_fit;
    NvdimmFuncReadFITOut *read_fit_out;
    GArray *fit;
    uint32_t read_len = 0, func_ret_status;
    int size;

    read_fit = (NvdimmFuncReadFITIn *)in->arg3;
    read_fit->offset = le32_to_cpu(read_fit->offset);

    fit = fit_buf->fit;

    nvdimm_debug("Read FIT: offset %#x FIT size %#x Dirty %s.\n",
                 read_fit->offset, fit->len, fit_buf->dirty ? "Yes" : "No");

    if (read_fit->offset > fit->len) {
        func_ret_status = NVDIMM_DSM_RET_STATUS_INVALID;
        goto exit;
    }

    /* This is the first time the FIT is being read. */
    if (!read_fit->offset) {
        fit_buf->dirty = false;
    } else if (fit_buf->dirty) { /* FIT has been changed during RFIT. */
        func_ret_status = NVDIMM_DSM_RET_STATUS_FIT_CHANGED;
        goto exit;
    }

    func_ret_status = NVDIMM_DSM_RET_STATUS_SUCCESS;
    read_len = MIN(fit->len - read_fit->offset,
                   NVDIMM_DSM_MEMORY_SIZE - sizeof(NvdimmFuncReadFITOut));

exit:
    size = sizeof(NvdimmFuncReadFITOut) + read_len;
    read_fit_out = g_malloc(size);

    read_fit_out->len = cpu_to_le32(size);
    read_fit_out->func_ret_status = cpu_to_le32(func_ret_status);
    memcpy(read_fit_out->fit, fit->data + read_fit->offset, read_len);

    cpu_physical_memory_write(dsm_mem_addr, read_fit_out, size);

    g_free(read_fit_out);
}

static void
nvdimm_dsm_handle_reserved_root_method(NVDIMMState *state,
                                       NvdimmDsmIn *in, hwaddr dsm_mem_addr)
{
    switch (in->function) {
    case 0x0:
        nvdimm_dsm_function0(0x1 | 1 << 1 /* Read FIT */, dsm_mem_addr);
        return;
    case 0x1 /* Read FIT */:
        nvdimm_dsm_func_read_fit(state, in, dsm_mem_addr);
        return;
    }

    nvdimm_dsm_no_payload(NVDIMM_DSM_RET_STATUS_UNSUPPORT, dsm_mem_addr);
}

static void nvdimm_dsm_root(NvdimmDsmIn *in, hwaddr dsm_mem_addr)
{
    /*
     * function 0 is called by OSPM to inquire which functions are
     * supported.
     */
    if (!in->function) {
        nvdimm_dsm_function0(0 /* No function supported other than
                                  function 0 */, dsm_mem_addr);
        return;
    }

    /* No function except function 0 is supported yet. */
    nvdimm_dsm_no_payload(NVDIMM_DSM_RET_STATUS_UNSUPPORT, dsm_mem_addr);
}

/*
 * the max transfer size is the max size that can be transferred by both a
 * 'Get Namespace Label Data' function and a 'Set Namespace Label Data'
 * function.
 */
static uint32_t nvdimm_get_max_xfer_label_size(void)
{
    uint32_t max_get_size, max_set_size, dsm_memory_size;

    dsm_memory_size = NVDIMM_DSM_MEMORY_SIZE;

    /*
     * the max data ACPI can read in one call, which is transferred by
     * the response of the 'Get Namespace Label Data' function.
     */
    max_get_size = dsm_memory_size - sizeof(NvdimmFuncGetLabelDataOut);

    /*
     * the max data ACPI can write in one call, which is transferred by
     * the 'Set Namespace Label Data' function.
     */
    max_set_size = dsm_memory_size - offsetof(NvdimmDsmIn, arg3) -
                   sizeof(NvdimmFuncSetLabelDataIn);

    return MIN(max_get_size, max_set_size);
}

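/*
 * With the 4 KiB DSM page this works out to
 * MIN(4096 - 8, 4096 - 12 - 8) = 4076 bytes per transfer.
 */
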
/*
 * DSM Spec Rev1 4.4 Get Namespace Label Size (Function Index 4).
 *
 * It gets the size of the Namespace Label data area and the max data size
 * that the Get/Set Namespace Label Data functions can transfer.
 */
static void nvdimm_dsm_label_size(NVDIMMDevice *nvdimm, hwaddr dsm_mem_addr)
{
    NvdimmFuncGetLabelSizeOut label_size_out = {
        .len = cpu_to_le32(sizeof(label_size_out)),
    };
    uint32_t label_size, mxfer;

    label_size = nvdimm->label_size;
    mxfer = nvdimm_get_max_xfer_label_size();

    nvdimm_debug("label_size %#x, max_xfer %#x.\n", label_size, mxfer);

    label_size_out.func_ret_status = cpu_to_le32(NVDIMM_DSM_RET_STATUS_SUCCESS);
    label_size_out.label_size = cpu_to_le32(label_size);
    label_size_out.max_xfer = cpu_to_le32(mxfer);

    cpu_physical_memory_write(dsm_mem_addr, &label_size_out,
                              sizeof(label_size_out));
}

static uint32_t nvdimm_rw_label_data_check(NVDIMMDevice *nvdimm,
                                           uint32_t offset, uint32_t length)
{
    uint32_t ret = NVDIMM_DSM_RET_STATUS_INVALID;

    if (offset + length < offset) {
        nvdimm_debug("offset %#x + length %#x overflows.\n", offset,
                     length);
        return ret;
    }

    if (nvdimm->label_size < offset + length) {
        nvdimm_debug("position %#x is beyond label data (len = %" PRIx64 ").\n",
                     offset + length, nvdimm->label_size);
        return ret;
    }

    if (length > nvdimm_get_max_xfer_label_size()) {
        nvdimm_debug("length (%#x) is larger than max_xfer (%#x).\n",
                     length, nvdimm_get_max_xfer_label_size());
        return ret;
    }

    return NVDIMM_DSM_RET_STATUS_SUCCESS;
}

/*
 * DSM Spec Rev1 4.5 Get Namespace Label Data (Function Index 5).
 */
static void nvdimm_dsm_get_label_data(NVDIMMDevice *nvdimm, NvdimmDsmIn *in,
                                      hwaddr dsm_mem_addr)
{
    NVDIMMClass *nvc = NVDIMM_GET_CLASS(nvdimm);
    NvdimmFuncGetLabelDataIn *get_label_data;
    NvdimmFuncGetLabelDataOut *get_label_data_out;
    uint32_t status;
    int size;

    get_label_data = (NvdimmFuncGetLabelDataIn *)in->arg3;
    get_label_data->offset = le32_to_cpu(get_label_data->offset);
    get_label_data->length = le32_to_cpu(get_label_data->length);

    nvdimm_debug("Read Label Data: offset %#x length %#x.\n",
                 get_label_data->offset, get_label_data->length);

    status = nvdimm_rw_label_data_check(nvdimm, get_label_data->offset,
                                        get_label_data->length);
    if (status != NVDIMM_DSM_RET_STATUS_SUCCESS) {
        nvdimm_dsm_no_payload(status, dsm_mem_addr);
        return;
    }

    size = sizeof(*get_label_data_out) + get_label_data->length;
    assert(size <= NVDIMM_DSM_MEMORY_SIZE);
    get_label_data_out = g_malloc(size);

    get_label_data_out->len = cpu_to_le32(size);
    get_label_data_out->func_ret_status =
                            cpu_to_le32(NVDIMM_DSM_RET_STATUS_SUCCESS);
    nvc->read_label_data(nvdimm, get_label_data_out->out_buf,
                         get_label_data->length, get_label_data->offset);

    cpu_physical_memory_write(dsm_mem_addr, get_label_data_out, size);
    g_free(get_label_data_out);
}

/*
 * DSM Spec Rev1 4.6 Set Namespace Label Data (Function Index 6).
 */
static void nvdimm_dsm_set_label_data(NVDIMMDevice *nvdimm, NvdimmDsmIn *in,
                                      hwaddr dsm_mem_addr)
{
    NVDIMMClass *nvc = NVDIMM_GET_CLASS(nvdimm);
    NvdimmFuncSetLabelDataIn *set_label_data;
    uint32_t status;

    set_label_data = (NvdimmFuncSetLabelDataIn *)in->arg3;

    set_label_data->offset = le32_to_cpu(set_label_data->offset);
    set_label_data->length = le32_to_cpu(set_label_data->length);

    nvdimm_debug("Write Label Data: offset %#x length %#x.\n",
                 set_label_data->offset, set_label_data->length);

    status = nvdimm_rw_label_data_check(nvdimm, set_label_data->offset,
                                        set_label_data->length);
    if (status != NVDIMM_DSM_RET_STATUS_SUCCESS) {
        nvdimm_dsm_no_payload(status, dsm_mem_addr);
        return;
    }

    assert(offsetof(NvdimmDsmIn, arg3) + sizeof(*set_label_data) +
           set_label_data->length <= NVDIMM_DSM_MEMORY_SIZE);

    nvc->write_label_data(nvdimm, set_label_data->in_buf,
                          set_label_data->length, set_label_data->offset);
    nvdimm_dsm_no_payload(NVDIMM_DSM_RET_STATUS_SUCCESS, dsm_mem_addr);
}

static void nvdimm_dsm_device(NvdimmDsmIn *in, hwaddr dsm_mem_addr)
{
    NVDIMMDevice *nvdimm = nvdimm_get_device_by_handle(in->handle);

    /* See the comments in nvdimm_dsm_root(). */
    if (!in->function) {
        uint32_t supported_func = 0;

        if (nvdimm && nvdimm->label_size) {
            supported_func |= 0x1 /* Bit 0 indicates whether there is
                                     support for any functions other
                                     than function 0. */ |
                              1 << 4 /* Get Namespace Label Size */ |
                              1 << 5 /* Get Namespace Label Data */ |
                              1 << 6 /* Set Namespace Label Data */;
        }
        nvdimm_dsm_function0(supported_func, dsm_mem_addr);
        return;
    }

    if (!nvdimm) {
        nvdimm_dsm_no_payload(NVDIMM_DSM_RET_STATUS_NOMEMDEV,
                              dsm_mem_addr);
        return;
    }

    /* Encode DSM function according to DSM Spec Rev1. */
    switch (in->function) {
    case 4 /* Get Namespace Label Size */:
        if (nvdimm->label_size) {
            nvdimm_dsm_label_size(nvdimm, dsm_mem_addr);
            return;
        }
        break;
    case 5 /* Get Namespace Label Data */:
        if (nvdimm->label_size) {
            nvdimm_dsm_get_label_data(nvdimm, in, dsm_mem_addr);
            return;
        }
        break;
    case 0x6 /* Set Namespace Label Data */:
        if (nvdimm->label_size) {
            nvdimm_dsm_set_label_data(nvdimm, in, dsm_mem_addr);
            return;
        }
        break;
    }

    nvdimm_dsm_no_payload(NVDIMM_DSM_RET_STATUS_UNSUPPORT, dsm_mem_addr);
}

static uint64_t
nvdimm_dsm_read(void *opaque, hwaddr addr, unsigned size)
{
    nvdimm_debug("BUG: we never read the _DSM IO port.\n");
    return 0;
}

static void
nvdimm_dsm_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
{
    NVDIMMState *state = opaque;
    NvdimmDsmIn *in;
    hwaddr dsm_mem_addr = val;

    nvdimm_debug("dsm memory address %#" HWADDR_PRIx ".\n", dsm_mem_addr);

    /*
     * The DSM memory is mapped into guest address space, so an evil guest
     * can change its content while we are doing DSM emulation. Avoid
     * this by copying DSM memory to QEMU local memory.
     */
    in = g_new(NvdimmDsmIn, 1);
    cpu_physical_memory_read(dsm_mem_addr, in, sizeof(*in));

    in->revision = le32_to_cpu(in->revision);
    in->function = le32_to_cpu(in->function);
    in->handle = le32_to_cpu(in->handle);

    nvdimm_debug("Revision %#x Handle %#x Function %#x.\n", in->revision,
                 in->handle, in->function);

    if (in->revision != 0x1 /* Currently we only support DSM Spec Rev1. */) {
        nvdimm_debug("Revision %#x is not supported, expect %#x.\n",
                     in->revision, 0x1);
        nvdimm_dsm_no_payload(NVDIMM_DSM_RET_STATUS_UNSUPPORT, dsm_mem_addr);
        goto exit;
    }

    if (in->handle == NVDIMM_QEMU_RSVD_HANDLE_ROOT) {
        nvdimm_dsm_handle_reserved_root_method(state, in, dsm_mem_addr);
        goto exit;
    }

    /* Handle 0 is reserved for the NVDIMM Root Device. */
    if (!in->handle) {
        nvdimm_dsm_root(in, dsm_mem_addr);
        goto exit;
    }

    nvdimm_dsm_device(in, dsm_mem_addr);

exit:
    g_free(in);
}

static const MemoryRegionOps nvdimm_dsm_ops = {
    .read = nvdimm_dsm_read,
    .write = nvdimm_dsm_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

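/*
 * The guest triggers DSM emulation by writing the guest physical address of
 * the DSM page to this 4-byte IO port (the NVDIMM_DSM_NOTIFY field built
 * below); reads of the port are never expected.
 */
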
void nvdimm_acpi_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev)
{
    if (dev->hotplugged) {
        acpi_send_event(DEVICE(hotplug_dev), ACPI_NVDIMM_HOTPLUG_STATUS);
    }
}

void nvdimm_init_acpi_state(NVDIMMState *state, MemoryRegion *io,
                            FWCfgState *fw_cfg, Object *owner)
{
    memory_region_init_io(&state->io_mr, owner, &nvdimm_dsm_ops, state,
                          "nvdimm-acpi-io", NVDIMM_ACPI_IO_LEN);
    memory_region_add_subregion(io, NVDIMM_ACPI_IO_BASE, &state->io_mr);

    state->dsm_mem = g_array_new(false, true /* clear */, 1);
    acpi_data_push(state->dsm_mem, sizeof(NvdimmDsmIn));
    fw_cfg_add_file(fw_cfg, NVDIMM_DSM_MEM_FILE, state->dsm_mem->data,
                    state->dsm_mem->len);

    nvdimm_init_fit_buffer(&state->fit_buf);
}

#define NVDIMM_COMMON_DSM       "NCAL"
#define NVDIMM_ACPI_MEM_ADDR    "MEMA"

#define NVDIMM_DSM_MEMORY       "NRAM"
#define NVDIMM_DSM_IOPORT       "NPIO"

#define NVDIMM_DSM_NOTIFY       "NTFI"
#define NVDIMM_DSM_HANDLE       "HDLE"
#define NVDIMM_DSM_REVISION     "REVS"
#define NVDIMM_DSM_FUNCTION     "FUNC"
#define NVDIMM_DSM_ARG3         "FARG"

#define NVDIMM_DSM_OUT_BUF_SIZE "RLEN"
#define NVDIMM_DSM_OUT_BUF      "ODAT"

#define NVDIMM_DSM_RFIT_STATUS  "RSTA"

#define NVDIMM_QEMU_RSVD_UUID   "648B9CF2-CDA1-4312-8AD9-49C4AF32BD62"

static void nvdimm_build_common_dsm(Aml *dev)
{
    Aml *method, *ifctx, *function, *handle, *uuid, *dsm_mem, *elsectx2;
    Aml *elsectx, *unsupport, *unpatched, *expected_uuid, *uuid_invalid;
    Aml *pckg, *pckg_index, *pckg_buf, *field, *dsm_out_buf, *dsm_out_buf_size;
    uint8_t byte_list[1];

    method = aml_method(NVDIMM_COMMON_DSM, 5, AML_SERIALIZED);
    uuid = aml_arg(0);
    function = aml_arg(2);
    handle = aml_arg(4);
    dsm_mem = aml_local(6);
    dsm_out_buf = aml_local(7);

    aml_append(method, aml_store(aml_name(NVDIMM_ACPI_MEM_ADDR), dsm_mem));

    /* map DSM memory and IO into ACPI namespace. */
    aml_append(method, aml_operation_region(NVDIMM_DSM_IOPORT, AML_SYSTEM_IO,
               aml_int(NVDIMM_ACPI_IO_BASE), NVDIMM_ACPI_IO_LEN));
    aml_append(method, aml_operation_region(NVDIMM_DSM_MEMORY,
               AML_SYSTEM_MEMORY, dsm_mem, sizeof(NvdimmDsmIn)));

    /*
     * DSM notifier:
     * NVDIMM_DSM_NOTIFY: write the address of DSM memory and notify QEMU to
     *                    emulate the access.
     *
     * It is an IO port, so accessing it causes a VM-exit and control is
     * transferred to QEMU.
     */
    field = aml_field(NVDIMM_DSM_IOPORT, AML_DWORD_ACC, AML_NOLOCK,
                      AML_PRESERVE);
    aml_append(field, aml_named_field(NVDIMM_DSM_NOTIFY,
               NVDIMM_ACPI_IO_LEN * BITS_PER_BYTE));
    aml_append(method, field);

    /*
     * DSM input:
     * NVDIMM_DSM_HANDLE: store the device's handle; it is zero if the _DSM
     *                    call happens on the NVDIMM Root Device.
     * NVDIMM_DSM_REVISION: store the Arg1 of the _DSM call.
     * NVDIMM_DSM_FUNCTION: store the Arg2 of the _DSM call.
     * NVDIMM_DSM_ARG3: store the Arg3 of the _DSM call which is a Package
     *                  containing function-specific arguments.
     *
     * They are RAM mappings on the host, so these accesses never cause a
     * VM-exit.
     */
    field = aml_field(NVDIMM_DSM_MEMORY, AML_DWORD_ACC, AML_NOLOCK,
                      AML_PRESERVE);
    aml_append(field, aml_named_field(NVDIMM_DSM_HANDLE,
               sizeof(typeof_field(NvdimmDsmIn, handle)) * BITS_PER_BYTE));
    aml_append(field, aml_named_field(NVDIMM_DSM_REVISION,
               sizeof(typeof_field(NvdimmDsmIn, revision)) * BITS_PER_BYTE));
    aml_append(field, aml_named_field(NVDIMM_DSM_FUNCTION,
               sizeof(typeof_field(NvdimmDsmIn, function)) * BITS_PER_BYTE));
    aml_append(field, aml_named_field(NVDIMM_DSM_ARG3,
               (sizeof(NvdimmDsmIn) - offsetof(NvdimmDsmIn, arg3)) * BITS_PER_BYTE));
    aml_append(method, field);

    /*
     * DSM output:
     * NVDIMM_DSM_OUT_BUF_SIZE: the size of the buffer filled by QEMU.
     * NVDIMM_DSM_OUT_BUF: the buffer QEMU uses to store the result.
     *
     * Since the page is reused by both input and output, the input data
     * will be lost after storing a new result into ODAT, so we should fetch
     * all the input data before writing the result.
     */
    field = aml_field(NVDIMM_DSM_MEMORY, AML_DWORD_ACC, AML_NOLOCK,
                      AML_PRESERVE);
    aml_append(field, aml_named_field(NVDIMM_DSM_OUT_BUF_SIZE,
               sizeof(typeof_field(NvdimmDsmOut, len)) * BITS_PER_BYTE));
    aml_append(field, aml_named_field(NVDIMM_DSM_OUT_BUF,
               (sizeof(NvdimmDsmOut) - offsetof(NvdimmDsmOut, data)) * BITS_PER_BYTE));
    aml_append(method, field);

    /*
     * do not support any method if the DSM memory address has not been
     * patched.
     */
    unpatched = aml_equal(dsm_mem, aml_int(0x0));

    expected_uuid = aml_local(0);

    ifctx = aml_if(aml_equal(handle, aml_int(0x0)));
    aml_append(ifctx, aml_store(
               aml_touuid("2F10E7A4-9E91-11E4-89D3-123B93F75CBA")
               /* UUID for NVDIMM Root Device */, expected_uuid));
    aml_append(method, ifctx);

    elsectx = aml_else();
    ifctx = aml_if(aml_equal(handle, aml_int(NVDIMM_QEMU_RSVD_HANDLE_ROOT)));
    aml_append(ifctx, aml_store(aml_touuid(NVDIMM_QEMU_RSVD_UUID
               /* UUID for QEMU internal use */), expected_uuid));
    aml_append(elsectx, ifctx);

    elsectx2 = aml_else();
    aml_append(elsectx2, aml_store(
               aml_touuid("4309AC30-0D11-11E4-9191-0800200C9A66")
               /* UUID for NVDIMM Devices */, expected_uuid));
    aml_append(elsectx, elsectx2);
    aml_append(method, elsectx);

    uuid_invalid = aml_lnot(aml_equal(uuid, expected_uuid));

    unsupport = aml_if(aml_or(unpatched, uuid_invalid, NULL));

    /*
     * function 0 is called by OSPM to inquire what functions are supported.
     */
    ifctx = aml_if(aml_equal(function, aml_int(0)));
    byte_list[0] = 0 /* No function supported */;
    aml_append(ifctx, aml_return(aml_buffer(1, byte_list)));
    aml_append(unsupport, ifctx);

    /* No function is supported yet. */
    byte_list[0] = NVDIMM_DSM_RET_STATUS_UNSUPPORT;
    aml_append(unsupport, aml_return(aml_buffer(1, byte_list)));
    aml_append(method, unsupport);

    /*
     * HDLE indicates which device the DSM function is issued from:
     * 0 is reserved for the root device and the handle identifies NVDIMM
     * devices. See the comments in nvdimm_slot_to_handle().
     */
    aml_append(method, aml_store(handle, aml_name(NVDIMM_DSM_HANDLE)));
    aml_append(method, aml_store(aml_arg(1), aml_name(NVDIMM_DSM_REVISION)));
    aml_append(method, aml_store(function, aml_name(NVDIMM_DSM_FUNCTION)));

    /*
     * The fourth parameter (Arg3) of _DSM is a package which contains
     * a buffer; the layout of the buffer is specified by UUID (Arg0),
     * Revision ID (Arg1) and Function Index (Arg2) which are documented
     * in the DSM Spec.
     */
    pckg = aml_arg(3);
    ifctx = aml_if(aml_and(aml_equal(aml_object_type(pckg),
                   aml_int(4 /* Package */)) /* It is a Package? */,
                   aml_equal(aml_sizeof(pckg), aml_int(1)) /* 1 element? */,
                   NULL));

    pckg_index = aml_local(2);
    pckg_buf = aml_local(3);
    aml_append(ifctx, aml_store(aml_index(pckg, aml_int(0)), pckg_index));
    aml_append(ifctx, aml_store(aml_derefof(pckg_index), pckg_buf));
    aml_append(ifctx, aml_store(pckg_buf, aml_name(NVDIMM_DSM_ARG3)));
    aml_append(method, ifctx);

    /*
     * tell QEMU about the real address of DSM memory, then QEMU
     * gets control and fills the result in DSM memory.
     */
    aml_append(method, aml_store(dsm_mem, aml_name(NVDIMM_DSM_NOTIFY)));

    dsm_out_buf_size = aml_local(1);
    /* RLEN is not included in the payload returned to the guest. */
    aml_append(method, aml_subtract(aml_name(NVDIMM_DSM_OUT_BUF_SIZE),
               aml_int(4), dsm_out_buf_size));
    aml_append(method, aml_store(aml_shiftleft(dsm_out_buf_size, aml_int(3)),
               dsm_out_buf_size));
    aml_append(method, aml_create_field(aml_name(NVDIMM_DSM_OUT_BUF),
               aml_int(0), dsm_out_buf_size, "OBUF"));
    aml_append(method, aml_concatenate(aml_buffer(0, NULL), aml_name("OBUF"),
               dsm_out_buf));
    aml_append(method, aml_return(dsm_out_buf));

    aml_append(dev, method);
}

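/*
 * Calling convention of NCAL (see nvdimm_build_device_dsm() below):
 * Arg0 = UUID, Arg1 = Revision ID, Arg2 = Function Index, Arg3 = argument
 * Package, Arg4 = the NFIT handle patched in by QEMU (0 for the root
 * device, NVDIMM_QEMU_RSVD_HANDLE_ROOT for QEMU's private functions).
 */
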
static void nvdimm_build_device_dsm(Aml *dev, uint32_t handle)
{
    Aml *method;

    method = aml_method("_DSM", 4, AML_NOTSERIALIZED);
    aml_append(method, aml_return(aml_call5(NVDIMM_COMMON_DSM, aml_arg(0),
                                  aml_arg(1), aml_arg(2), aml_arg(3),
                                  aml_int(handle))));
    aml_append(dev, method);
}

static void nvdimm_build_fit(Aml *dev)
{
    Aml *method, *pkg, *buf, *buf_size, *offset, *call_result;
    Aml *whilectx, *ifcond, *ifctx, *elsectx, *fit;

    buf = aml_local(0);
    buf_size = aml_local(1);
    fit = aml_local(2);

    aml_append(dev, aml_name_decl(NVDIMM_DSM_RFIT_STATUS, aml_int(0)));

    /* build the helper function, RFIT. */
    method = aml_method("RFIT", 1, AML_SERIALIZED);
    aml_append(method, aml_name_decl("OFST", aml_int(0)));

    /* prepare the input package. */
    pkg = aml_package(1);
    aml_append(method, aml_store(aml_arg(0), aml_name("OFST")));
    aml_append(pkg, aml_name("OFST"));

    /* call the Read FIT function. */
    call_result = aml_call5(NVDIMM_COMMON_DSM,
                            aml_touuid(NVDIMM_QEMU_RSVD_UUID),
                            aml_int(1) /* Revision 1 */,
                            aml_int(0x1) /* Read FIT */,
                            pkg, aml_int(NVDIMM_QEMU_RSVD_HANDLE_ROOT));
    aml_append(method, aml_store(call_result, buf));

    /* handle the _DSM result. */
    aml_append(method, aml_create_dword_field(buf,
               aml_int(0) /* offset at byte 0 */, "STAU"));

    aml_append(method, aml_store(aml_name("STAU"),
               aml_name(NVDIMM_DSM_RFIT_STATUS)));

    /* if something went wrong during _DSM. */
    ifcond = aml_equal(aml_int(NVDIMM_DSM_RET_STATUS_SUCCESS),
                       aml_name("STAU"));
    ifctx = aml_if(aml_lnot(ifcond));
    aml_append(ifctx, aml_return(aml_buffer(0, NULL)));
    aml_append(method, ifctx);

    aml_append(method, aml_store(aml_sizeof(buf), buf_size));
    aml_append(method, aml_subtract(buf_size,
                                    aml_int(4) /* the size of "STAU" */,
                                    buf_size));

    /* if we have reached the end of the FIT. */
    ifctx = aml_if(aml_equal(buf_size, aml_int(0)));
    aml_append(ifctx, aml_return(aml_buffer(0, NULL)));
    aml_append(method, ifctx);

    aml_append(method, aml_create_field(buf,
                            aml_int(4 * BITS_PER_BYTE), /* offset at byte 4. */
                            aml_shiftleft(buf_size, aml_int(3)), "BUFF"));
    aml_append(method, aml_return(aml_name("BUFF")));
    aml_append(dev, method);

    /* build _FIT. */
    method = aml_method("_FIT", 0, AML_SERIALIZED);
    offset = aml_local(3);

    aml_append(method, aml_store(aml_buffer(0, NULL), fit));
    aml_append(method, aml_store(aml_int(0), offset));

    whilectx = aml_while(aml_int(1));
    aml_append(whilectx, aml_store(aml_call1("RFIT", offset), buf));
    aml_append(whilectx, aml_store(aml_sizeof(buf), buf_size));

    /*
     * if the fit buffer was changed during RFIT, read from the beginning
     * again.
     */
    ifctx = aml_if(aml_equal(aml_name(NVDIMM_DSM_RFIT_STATUS),
                             aml_int(NVDIMM_DSM_RET_STATUS_FIT_CHANGED)));
    aml_append(ifctx, aml_store(aml_buffer(0, NULL), fit));
    aml_append(ifctx, aml_store(aml_int(0), offset));
    aml_append(whilectx, ifctx);

    elsectx = aml_else();

    /* finish the fit read if no data is read out. */
    ifctx = aml_if(aml_equal(buf_size, aml_int(0)));
    aml_append(ifctx, aml_return(fit));
    aml_append(elsectx, ifctx);

    /* update the offset. */
    aml_append(elsectx, aml_add(offset, buf_size, offset));
    /* append the data we read out to the fit buffer. */
    aml_append(elsectx, aml_concatenate(fit, buf, fit));
    aml_append(whilectx, elsectx);
    aml_append(method, whilectx);

    aml_append(dev, method);
}

static void nvdimm_build_nvdimm_devices(Aml *root_dev, uint32_t ram_slots)
{
    uint32_t slot;

    for (slot = 0; slot < ram_slots; slot++) {
        uint32_t handle = nvdimm_slot_to_handle(slot);
        Aml *nvdimm_dev;

        nvdimm_dev = aml_device("NV%02X", slot);

        /*
         * ACPI 6.0: 9.20 NVDIMM Devices:
         *
         * _ADR object that is used to supply OSPM with the unique address
         * of the NVDIMM device. This is done by returning the NFIT Device
         * handle that is used to identify the associated entries in the
         * NFIT table or _FIT.
         */
        aml_append(nvdimm_dev, aml_name_decl("_ADR", aml_int(handle)));

        nvdimm_build_device_dsm(nvdimm_dev, handle);
        aml_append(root_dev, nvdimm_dev);
    }
}

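/*
 * e.g. with ram_slots == 2 this emits devices NV00 and NV01 whose _ADR
 * values are the NFIT handles 1 and 2.
 */
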
static void nvdimm_build_ssdt(GArray *table_offsets, GArray *table_data,
                              BIOSLinker *linker, GArray *dsm_dma_area,
                              uint32_t ram_slots)
{
    Aml *ssdt, *sb_scope, *dev;
    int mem_addr_offset, nvdimm_ssdt;

    acpi_add_table(table_offsets, table_data);

    ssdt = init_aml_allocator();
    acpi_data_push(ssdt->buf, sizeof(AcpiTableHeader));

    sb_scope = aml_scope("\\_SB");

    dev = aml_device("NVDR");

    /*
     * ACPI 6.0: 9.20 NVDIMM Devices:
     *
     * The ACPI Name Space device uses _HID of ACPI0012 to identify the root
     * NVDIMM interface device. Platform firmware is required to contain one
     * such device in _SB scope if NVDIMM support is exposed by the platform
     * to OSPM.
     * For each NVDIMM present or intended to be supported by the platform,
     * platform firmware also exposes an ACPI Namespace Device under the
     * root device.
     */
    aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0012")));

    nvdimm_build_common_dsm(dev);

    /* 0 is reserved for the root device. */
    nvdimm_build_device_dsm(dev, 0);
    nvdimm_build_fit(dev);

    nvdimm_build_nvdimm_devices(dev, ram_slots);

    aml_append(sb_scope, dev);
    aml_append(ssdt, sb_scope);

    nvdimm_ssdt = table_data->len;

    /* copy AML table into ACPI tables blob and patch header there */
    g_array_append_vals(table_data, ssdt->buf->data, ssdt->buf->len);
    mem_addr_offset = build_append_named_dword(table_data,
                                               NVDIMM_ACPI_MEM_ADDR);

    bios_linker_loader_alloc(linker,
                             NVDIMM_DSM_MEM_FILE, dsm_dma_area,
                             sizeof(NvdimmDsmIn), false /* high memory */);
    bios_linker_loader_add_pointer(linker,
        ACPI_BUILD_TABLE_FILE, mem_addr_offset, sizeof(uint32_t),
        NVDIMM_DSM_MEM_FILE, 0);
    build_header(linker, table_data,
        (void *)(table_data->data + nvdimm_ssdt),
        "SSDT", table_data->len - nvdimm_ssdt, 1, NULL, "NVDIMM");
    free_aml_allocator();
}

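/*
 * MEMA is a 32-bit value patched by the BIOS linker/loader with the guest
 * address where NVDIMM_DSM_MEM_FILE is allocated; NCAL treats a still-zero
 * MEMA as "unpatched" and refuses to run any function.
 */
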
void nvdimm_build_acpi(GArray *table_offsets, GArray *table_data,
                       BIOSLinker *linker, NVDIMMState *state,
                       uint32_t ram_slots)
{
    GSList *device_list;

    /* no nvdimm device can be plugged. */
    if (!ram_slots) {
        return;
    }

    nvdimm_build_ssdt(table_offsets, table_data, linker, state->dsm_mem,
                      ram_slots);

    device_list = nvdimm_get_device_list();
    /* no NVDIMM device is plugged. */
    if (!device_list) {
        return;
    }

    nvdimm_build_nfit(state, table_offsets, table_data, linker);
    g_slist_free(device_list);
}