2
0

nvdimm.c 47 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382
  1. /*
  2. * NVDIMM ACPI Implementation
  3. *
  4. * Copyright(C) 2015 Intel Corporation.
  5. *
  6. * Author:
  7. * Xiao Guangrong <guangrong.xiao@linux.intel.com>
  8. *
  9. * NFIT is defined in ACPI 6.0: 5.2.25 NVDIMM Firmware Interface Table (NFIT)
  10. * and the DSM specification can be found at:
  11. * http://pmem.io/documents/NVDIMM_DSM_Interface_Example.pdf
  12. *
  13. * Currently, it only supports PMEM Virtualization.
  14. *
  15. * This library is free software; you can redistribute it and/or
  16. * modify it under the terms of the GNU Lesser General Public
  17. * License as published by the Free Software Foundation; either
  18. * version 2 of the License, or (at your option) any later version.
  19. *
  20. * This library is distributed in the hope that it will be useful,
  21. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  22. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  23. * Lesser General Public License for more details.
  24. *
  25. * You should have received a copy of the GNU Lesser General Public
  26. * License along with this library; if not, see <http://www.gnu.org/licenses/>
  27. */
  28. #include "qemu/osdep.h"
  29. #include "qemu/uuid.h"
  30. #include "qapi/error.h"
  31. #include "hw/acpi/acpi.h"
  32. #include "hw/acpi/aml-build.h"
  33. #include "hw/acpi/bios-linker-loader.h"
  34. #include "hw/nvram/fw_cfg.h"
  35. #include "hw/mem/nvdimm.h"
  36. #include "qemu/nvdimm-utils.h"
/*
 * GUID marking a Byte Addressable Persistent Memory (PM) Region, per
 * ACPI 6.0: 5.2.25.1 System Physical Address Range Structure.  Copied
 * into NvdimmNfitSpa.type_guid for every PMEM SPA range we emit.
 */
static const uint8_t nvdimm_nfit_spa_uuid[] =
      UUID_LE(0x66f0d379, 0xb4f3, 0x4074, 0xac, 0x43, 0x0d, 0x33,
              0x18, 0xb7, 0x8c, 0xdb);
/*
 * NVDIMM Firmware Interface Table
 * @signature: "NFIT"
 *
 * It provides information that allows OSPM to enumerate NVDIMM present in
 * the platform and associate system physical address ranges created by the
 * NVDIMMs.
 *
 * It is defined in ACPI 6.0: 5.2.25 NVDIMM Firmware Interface Table (NFIT)
 */
struct NvdimmNfitHeader {
    ACPI_TABLE_HEADER_DEF       /* standard ACPI table header fields */
    uint32_t reserved;
} QEMU_PACKED;
typedef struct NvdimmNfitHeader NvdimmNfitHeader;
/*
 * define NFIT structures according to ACPI 6.0: 5.2.25 NVDIMM Firmware
 * Interface Table (NFIT).
 */

/*
 * System Physical Address Range Structure
 *
 * It describes the system physical address ranges occupied by NVDIMMs and
 * the types of the regions.
 */
struct NvdimmNfitSpa {
    uint16_t type;              /* structure type: 0 (SPA Range) */
    uint16_t length;            /* byte length of this structure */
    uint16_t spa_index;         /* see nvdimm_slot_to_spa_index() */
    uint16_t flags;
    uint32_t reserved;
    uint32_t proximity_domain;  /* NUMA node of the range */
    uint8_t type_guid[16];      /* e.g. nvdimm_nfit_spa_uuid for PMEM */
    uint64_t spa_base;          /* guest physical base address */
    uint64_t spa_length;        /* range length in bytes */
    uint64_t mem_attr;          /* EFI memory attributes of the range */
} QEMU_PACKED;
typedef struct NvdimmNfitSpa NvdimmNfitSpa;
/*
 * Memory Device to System Physical Address Range Mapping Structure
 *
 * It enables identifying each NVDIMM region and the corresponding SPA
 * describing the memory interleave
 */
struct NvdimmNfitMemDev {
    uint16_t type;              /* structure type: 1 (MemDev mapping) */
    uint16_t length;            /* byte length of this structure */
    uint32_t nfit_handle;       /* matches the device's _ADR value */
    uint16_t phys_id;
    uint16_t region_id;
    uint16_t spa_index;         /* links to NvdimmNfitSpa.spa_index */
    uint16_t dcr_index;         /* links to the Control Region Structure */
    uint64_t region_len;
    uint64_t region_offset;
    uint64_t region_dpa;        /* device physical address of the region */
    uint16_t interleave_index;
    uint16_t interleave_ways;
    uint16_t flags;
    uint16_t reserved;
} QEMU_PACKED;
typedef struct NvdimmNfitMemDev NvdimmNfitMemDev;

/* flags bit 3: set for devices configured as unarmed (see memdev builder). */
#define ACPI_NFIT_MEM_NOT_ARMED (1 << 3)
/*
 * NVDIMM Control Region Structure
 *
 * It describes the NVDIMM and if applicable, Block Control Window.
 */
struct NvdimmNfitControlRegion {
    uint16_t type;              /* structure type: 4 (Control Region) */
    uint16_t length;            /* byte length of this structure */
    uint16_t dcr_index;         /* see nvdimm_slot_to_dcr_index() */
    uint16_t vendor_id;
    uint16_t device_id;
    uint16_t revision_id;
    uint16_t sub_vendor_id;
    uint16_t sub_device_id;
    uint16_t sub_revision_id;
    uint8_t reserved[6];
    uint32_t serial_number;
    uint16_t fic;               /* Format Interface Code */
    /* Block Control Window fields per ACPI spec; unused for plain PMEM. */
    uint16_t num_bcw;
    uint64_t bcw_size;
    uint64_t cmd_offset;
    uint64_t cmd_size;
    uint64_t status_offset;
    uint64_t status_size;
    uint16_t flags;
    uint8_t reserved2[6];
} QEMU_PACKED;
typedef struct NvdimmNfitControlRegion NvdimmNfitControlRegion;
/*
 * NVDIMM Platform Capabilities Structure
 *
 * Defined in section 5.2.25.9 of ACPI 6.2 Errata A, September 2017
 */
struct NvdimmNfitPlatformCaps {
    uint16_t type;              /* structure type: 7 (Platform Capabilities) */
    uint16_t length;            /* byte length of this structure */
    uint8_t highest_cap;        /* index of the highest valid capability bit */
    uint8_t reserved[3];
    uint32_t capabilities;      /* capability bitmap */
    uint8_t reserved2[4];
} QEMU_PACKED;
typedef struct NvdimmNfitPlatformCaps NvdimmNfitPlatformCaps;
  148. /*
  149. * Module serial number is a unique number for each device. We use the
  150. * slot id of NVDIMM device to generate this number so that each device
  151. * associates with a different number.
  152. *
  153. * 0x123456 is a magic number we arbitrarily chose.
  154. */
  155. static uint32_t nvdimm_slot_to_sn(int slot)
  156. {
  157. return 0x123456 + slot;
  158. }
  159. /*
  160. * handle is used to uniquely associate nfit_memdev structure with NVDIMM
  161. * ACPI device - nfit_memdev.nfit_handle matches with the value returned
  162. * by ACPI device _ADR method.
  163. *
  164. * We generate the handle with the slot id of NVDIMM device and reserve
  165. * 0 for NVDIMM root device.
  166. */
  167. static uint32_t nvdimm_slot_to_handle(int slot)
  168. {
  169. return slot + 1;
  170. }
  171. /*
  172. * index uniquely identifies the structure, 0 is reserved which indicates
  173. * that the structure is not valid or the associated structure is not
  174. * present.
  175. *
  176. * Each NVDIMM device needs two indexes, one for nfit_spa and another for
  177. * nfit_dc which are generated by the slot id of NVDIMM device.
  178. */
  179. static uint16_t nvdimm_slot_to_spa_index(int slot)
  180. {
  181. return (slot + 1) << 1;
  182. }
/*
 * Control-region index for a slot: the odd index right after the slot's
 * SPA index.  See the comments of nvdimm_slot_to_spa_index().
 */
static uint32_t nvdimm_slot_to_dcr_index(int slot)
{
    return nvdimm_slot_to_spa_index(slot) + 1;
}
  188. static NVDIMMDevice *nvdimm_get_device_by_handle(uint32_t handle)
  189. {
  190. NVDIMMDevice *nvdimm = NULL;
  191. GSList *list, *device_list = nvdimm_get_device_list();
  192. for (list = device_list; list; list = list->next) {
  193. NVDIMMDevice *nvd = list->data;
  194. int slot = object_property_get_int(OBJECT(nvd), PC_DIMM_SLOT_PROP,
  195. NULL);
  196. if (nvdimm_slot_to_handle(slot) == handle) {
  197. nvdimm = nvd;
  198. break;
  199. }
  200. }
  201. g_slist_free(device_list);
  202. return nvdimm;
  203. }
/*
 * ACPI 6.0: 5.2.25.1 System Physical Address Range Structure
 *
 * Appends one SPA structure for @dev to @structures, describing the
 * guest physical range the NVDIMM occupies, its NUMA node, and the
 * PMEM type GUID.
 */
static void
nvdimm_build_structure_spa(GArray *structures, DeviceState *dev)
{
    NvdimmNfitSpa *nfit_spa;
    uint64_t addr = object_property_get_uint(OBJECT(dev), PC_DIMM_ADDR_PROP,
                                             NULL);
    uint64_t size = object_property_get_uint(OBJECT(dev), PC_DIMM_SIZE_PROP,
                                             NULL);
    uint32_t node = object_property_get_uint(OBJECT(dev), PC_DIMM_NODE_PROP,
                                             NULL);
    int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
                                       NULL);

    nfit_spa = acpi_data_push(structures, sizeof(*nfit_spa));

    nfit_spa->type = cpu_to_le16(0 /* System Physical Address Range
                                      Structure */);
    nfit_spa->length = cpu_to_le16(sizeof(*nfit_spa));
    nfit_spa->spa_index = cpu_to_le16(nvdimm_slot_to_spa_index(slot));

    /*
     * Control region is strict as all the device info, such as SN, index,
     * is associated with slot id.
     */
    nfit_spa->flags = cpu_to_le16(1 /* Control region is strictly for
                                       management during hot add/online
                                       operation */ |
                                  2 /* Data in Proximity Domain field is
                                       valid */);

    /* NUMA node. */
    nfit_spa->proximity_domain = cpu_to_le32(node);
    /* the region reported as PMEM. */
    memcpy(nfit_spa->type_guid, nvdimm_nfit_spa_uuid,
           sizeof(nvdimm_nfit_spa_uuid));

    nfit_spa->spa_base = cpu_to_le64(addr);
    nfit_spa->spa_length = cpu_to_le64(size);

    /* It is the PMEM and can be cached as writeback. */
    nfit_spa->mem_attr = cpu_to_le64(0x8ULL /* EFI_MEMORY_WB */ |
                                     0x8000ULL /* EFI_MEMORY_NV */);
}
/*
 * ACPI 6.0: 5.2.25.2 Memory Device to System Physical Address Range Mapping
 * Structure
 *
 * Appends one memdev structure for @dev, tying its NFIT handle to the
 * SPA and Control Region structures generated for the same slot.
 */
static void
nvdimm_build_structure_memdev(GArray *structures, DeviceState *dev)
{
    NvdimmNfitMemDev *nfit_memdev;
    NVDIMMDevice *nvdimm = NVDIMM(OBJECT(dev));
    uint64_t size = object_property_get_uint(OBJECT(dev), PC_DIMM_SIZE_PROP,
                                             NULL);
    int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
                                       NULL);
    uint32_t handle = nvdimm_slot_to_handle(slot);

    nfit_memdev = acpi_data_push(structures, sizeof(*nfit_memdev));

    nfit_memdev->type = cpu_to_le16(1 /* Memory Device to System Address
                                         Range Map Structure*/);
    nfit_memdev->length = cpu_to_le16(sizeof(*nfit_memdev));
    nfit_memdev->nfit_handle = cpu_to_le32(handle);

    /*
     * associate memory device with System Physical Address Range
     * Structure.
     */
    nfit_memdev->spa_index = cpu_to_le16(nvdimm_slot_to_spa_index(slot));
    /* associate memory device with Control Region Structure. */
    nfit_memdev->dcr_index = cpu_to_le16(nvdimm_slot_to_dcr_index(slot));

    /* The memory region on the device. */
    nfit_memdev->region_len = cpu_to_le64(size);
    /* The device address starts from 0. */
    nfit_memdev->region_dpa = cpu_to_le64(0);

    /* Only one interleave for PMEM. */
    nfit_memdev->interleave_ways = cpu_to_le16(1);

    /* 'unarmed' devices are exposed to the guest as not writeable. */
    if (nvdimm->unarmed) {
        nfit_memdev->flags |= cpu_to_le16(ACPI_NFIT_MEM_NOT_ARMED);
    }
}
/*
 * ACPI 6.0: 5.2.25.5 NVDIMM Control Region Structure.
 *
 * Appends one control-region structure for @dev carrying the synthetic
 * vendor/device identity and the slot-derived serial number.
 */
static void nvdimm_build_structure_dcr(GArray *structures, DeviceState *dev)
{
    NvdimmNfitControlRegion *nfit_dcr;
    int slot = object_property_get_int(OBJECT(dev), PC_DIMM_SLOT_PROP,
                                       NULL);
    uint32_t sn = nvdimm_slot_to_sn(slot);

    nfit_dcr = acpi_data_push(structures, sizeof(*nfit_dcr));

    nfit_dcr->type = cpu_to_le16(4 /* NVDIMM Control Region Structure */);
    nfit_dcr->length = cpu_to_le16(sizeof(*nfit_dcr));
    nfit_dcr->dcr_index = cpu_to_le16(nvdimm_slot_to_dcr_index(slot));

    /* vendor: Intel. */
    nfit_dcr->vendor_id = cpu_to_le16(0x8086);
    nfit_dcr->device_id = cpu_to_le16(1);

    /* The _DSM method is following Intel's DSM specification. */
    nfit_dcr->revision_id = cpu_to_le16(1 /* Current Revision supported
                                             in ACPI 6.0 is 1. */);
    nfit_dcr->serial_number = cpu_to_le32(sn);
    nfit_dcr->fic = cpu_to_le16(0x301 /* Format Interface Code:
                                         Byte addressable, no energy backed.
                                         See ACPI 6.2, sect 5.2.25.6 and
                                         JEDEC Annex L Release 3. */);
}
/*
 * ACPI 6.2 Errata A: 5.2.25.9 NVDIMM Platform Capabilities Structure
 *
 * Appends the platform capabilities bitmap.  The only caller,
 * nvdimm_build_device_structure(), invokes this with a non-zero
 * @capabilities, so clz32() below is well-defined.
 */
static void
nvdimm_build_structure_caps(GArray *structures, uint32_t capabilities)
{
    NvdimmNfitPlatformCaps *nfit_caps;

    nfit_caps = acpi_data_push(structures, sizeof(*nfit_caps));

    nfit_caps->type = cpu_to_le16(7 /* NVDIMM Platform Capabilities */);
    nfit_caps->length = cpu_to_le16(sizeof(*nfit_caps));
    /* index of the most significant set capability bit. */
    nfit_caps->highest_cap = 31 - clz32(capabilities);
    nfit_caps->capabilities = cpu_to_le32(capabilities);
}
  316. static GArray *nvdimm_build_device_structure(NVDIMMState *state)
  317. {
  318. GSList *device_list = nvdimm_get_device_list();
  319. GArray *structures = g_array_new(false, true /* clear */, 1);
  320. for (; device_list; device_list = device_list->next) {
  321. DeviceState *dev = device_list->data;
  322. /* build System Physical Address Range Structure. */
  323. nvdimm_build_structure_spa(structures, dev);
  324. /*
  325. * build Memory Device to System Physical Address Range Mapping
  326. * Structure.
  327. */
  328. nvdimm_build_structure_memdev(structures, dev);
  329. /* build NVDIMM Control Region Structure. */
  330. nvdimm_build_structure_dcr(structures, dev);
  331. }
  332. g_slist_free(device_list);
  333. if (state->persistence) {
  334. nvdimm_build_structure_caps(structures, state->persistence);
  335. }
  336. return structures;
  337. }
/* Allocate the (initially empty) FIT byte buffer. */
static void nvdimm_init_fit_buffer(NvdimmFitBuffer *fit_buf)
{
    fit_buf->fit = g_array_new(false, true /* clear */, 1);
}
  342. static void nvdimm_build_fit_buffer(NVDIMMState *state)
  343. {
  344. NvdimmFitBuffer *fit_buf = &state->fit_buf;
  345. g_array_free(fit_buf->fit, true);
  346. fit_buf->fit = nvdimm_build_device_structure(state);
  347. fit_buf->dirty = true;
  348. }
/* Refresh the FIT buffer after an NVDIMM device is plugged. */
void nvdimm_plug(NVDIMMState *state)
{
    nvdimm_build_fit_buffer(state);
}
/*
 * Emit the NFIT ACPI table into @table_data: a standard NFIT header
 * followed by the pre-built device structures cached in state->fit_buf.
 */
static void nvdimm_build_nfit(NVDIMMState *state, GArray *table_offsets,
                              GArray *table_data, BIOSLinker *linker)
{
    NvdimmFitBuffer *fit_buf = &state->fit_buf;
    unsigned int header;

    acpi_add_table(table_offsets, table_data);

    /* NFIT header. */
    header = table_data->len;
    acpi_data_push(table_data, sizeof(NvdimmNfitHeader));
    /* NVDIMM device structures. */
    g_array_append_vals(table_data, fit_buf->fit->data, fit_buf->fit->len);

    build_header(linker, table_data,
                 (void *)(table_data->data + header), "NFIT",
                 sizeof(NvdimmNfitHeader) + fit_buf->fit->len, 1, NULL, NULL);
}
/* Size of the DSM buffer shared with the guest: one 4KiB page. */
#define NVDIMM_DSM_MEMORY_SIZE 4096

/* _DSM input page as written by the guest. */
struct NvdimmDsmIn {
    uint32_t handle;    /* NFIT device handle addressed by the call. */
    uint32_t revision;  /* DSM revision requested by the guest. */
    uint32_t function;  /* DSM function index. */
    /* the remaining size in the page is used by arg3. */
    union {
        uint8_t arg3[4084];
    };
} QEMU_PACKED;
typedef struct NvdimmDsmIn NvdimmDsmIn;
QEMU_BUILD_BUG_ON(sizeof(NvdimmDsmIn) != NVDIMM_DSM_MEMORY_SIZE);

/* Generic _DSM output page layout. */
struct NvdimmDsmOut {
    /* the size of buffer filled by QEMU. */
    uint32_t len;
    uint8_t data[4092];
} QEMU_PACKED;
typedef struct NvdimmDsmOut NvdimmDsmOut;
QEMU_BUILD_BUG_ON(sizeof(NvdimmDsmOut) != NVDIMM_DSM_MEMORY_SIZE);

/* Reply to function 0: bitmap of supported function indexes. */
struct NvdimmDsmFunc0Out {
    /* the size of buffer filled by QEMU. */
    uint32_t len;
    uint32_t supported_func;
} QEMU_PACKED;
typedef struct NvdimmDsmFunc0Out NvdimmDsmFunc0Out;

/* Status-only reply used by functions with no result payload. */
struct NvdimmDsmFuncNoPayloadOut {
    /* the size of buffer filled by QEMU. */
    uint32_t len;
    uint32_t func_ret_status;
} QEMU_PACKED;
typedef struct NvdimmDsmFuncNoPayloadOut NvdimmDsmFuncNoPayloadOut;

/* Reply to 'Get Namespace Label Size' (function 4). */
struct NvdimmFuncGetLabelSizeOut {
    /* the size of buffer filled by QEMU. */
    uint32_t len;
    uint32_t func_ret_status; /* return status code. */
    uint32_t label_size; /* the size of label data area. */
    /*
     * Maximum size of the namespace label data length supported by
     * the platform in Get/Set Namespace Label Data functions.
     */
    uint32_t max_xfer;
} QEMU_PACKED;
typedef struct NvdimmFuncGetLabelSizeOut NvdimmFuncGetLabelSizeOut;
QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncGetLabelSizeOut) > NVDIMM_DSM_MEMORY_SIZE);

/* Request payload (arg3) of 'Get Namespace Label Data' (function 5). */
struct NvdimmFuncGetLabelDataIn {
    uint32_t offset; /* the offset in the namespace label data area. */
    uint32_t length; /* the size of data is to be read via the function. */
} QEMU_PACKED;
typedef struct NvdimmFuncGetLabelDataIn NvdimmFuncGetLabelDataIn;
QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncGetLabelDataIn) +
                  offsetof(NvdimmDsmIn, arg3) > NVDIMM_DSM_MEMORY_SIZE);

/* Reply to 'Get Namespace Label Data' (function 5). */
struct NvdimmFuncGetLabelDataOut {
    /* the size of buffer filled by QEMU. */
    uint32_t len;
    uint32_t func_ret_status; /* return status code. */
    uint8_t out_buf[]; /* the data got via Get Namespace Label function. */
} QEMU_PACKED;
typedef struct NvdimmFuncGetLabelDataOut NvdimmFuncGetLabelDataOut;
QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncGetLabelDataOut) > NVDIMM_DSM_MEMORY_SIZE);

/* Request payload (arg3) of 'Set Namespace Label Data' (function 6). */
struct NvdimmFuncSetLabelDataIn {
    uint32_t offset; /* the offset in the namespace label data area. */
    uint32_t length; /* the size of data is to be written via the function. */
    uint8_t in_buf[]; /* the data written to label data area. */
} QEMU_PACKED;
typedef struct NvdimmFuncSetLabelDataIn NvdimmFuncSetLabelDataIn;
QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncSetLabelDataIn) +
                  offsetof(NvdimmDsmIn, arg3) > NVDIMM_DSM_MEMORY_SIZE);

/* Request payload (arg3) of the QEMU-private 'Read FIT' root function. */
struct NvdimmFuncReadFITIn {
    uint32_t offset; /* the offset into FIT buffer. */
} QEMU_PACKED;
typedef struct NvdimmFuncReadFITIn NvdimmFuncReadFITIn;
QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncReadFITIn) +
                  offsetof(NvdimmDsmIn, arg3) > NVDIMM_DSM_MEMORY_SIZE);

/* Reply to the 'Read FIT' root function. */
struct NvdimmFuncReadFITOut {
    /* the size of buffer filled by QEMU. */
    uint32_t len;
    uint32_t func_ret_status; /* return status code. */
    uint8_t fit[]; /* the FIT data. */
} QEMU_PACKED;
typedef struct NvdimmFuncReadFITOut NvdimmFuncReadFITOut;
QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncReadFITOut) > NVDIMM_DSM_MEMORY_SIZE);
  449. static void
  450. nvdimm_dsm_function0(uint32_t supported_func, hwaddr dsm_mem_addr)
  451. {
  452. NvdimmDsmFunc0Out func0 = {
  453. .len = cpu_to_le32(sizeof(func0)),
  454. .supported_func = cpu_to_le32(supported_func),
  455. };
  456. cpu_physical_memory_write(dsm_mem_addr, &func0, sizeof(func0));
  457. }
  458. static void
  459. nvdimm_dsm_no_payload(uint32_t func_ret_status, hwaddr dsm_mem_addr)
  460. {
  461. NvdimmDsmFuncNoPayloadOut out = {
  462. .len = cpu_to_le32(sizeof(out)),
  463. .func_ret_status = cpu_to_le32(func_ret_status),
  464. };
  465. cpu_physical_memory_write(dsm_mem_addr, &out, sizeof(out));
  466. }
/* _DSM function return status codes. */
#define NVDIMM_DSM_RET_STATUS_SUCCESS 0 /* Success */
#define NVDIMM_DSM_RET_STATUS_UNSUPPORT 1 /* Not Supported */
#define NVDIMM_DSM_RET_STATUS_NOMEMDEV 2 /* Non-Existing Memory Device */
#define NVDIMM_DSM_RET_STATUS_INVALID 3 /* Invalid Input Parameters */
#define NVDIMM_DSM_RET_STATUS_FIT_CHANGED 0x100 /* FIT Changed */

/* QEMU-reserved handle for root-device methods (Read FIT). */
#define NVDIMM_QEMU_RSVD_HANDLE_ROOT 0x10000
  473. /* Read FIT data, defined in docs/specs/acpi_nvdimm.txt. */
  474. static void nvdimm_dsm_func_read_fit(NVDIMMState *state, NvdimmDsmIn *in,
  475. hwaddr dsm_mem_addr)
  476. {
  477. NvdimmFitBuffer *fit_buf = &state->fit_buf;
  478. NvdimmFuncReadFITIn *read_fit;
  479. NvdimmFuncReadFITOut *read_fit_out;
  480. GArray *fit;
  481. uint32_t read_len = 0, func_ret_status;
  482. int size;
  483. read_fit = (NvdimmFuncReadFITIn *)in->arg3;
  484. read_fit->offset = le32_to_cpu(read_fit->offset);
  485. fit = fit_buf->fit;
  486. nvdimm_debug("Read FIT: offset %#x FIT size %#x Dirty %s.\n",
  487. read_fit->offset, fit->len, fit_buf->dirty ? "Yes" : "No");
  488. if (read_fit->offset > fit->len) {
  489. func_ret_status = NVDIMM_DSM_RET_STATUS_INVALID;
  490. goto exit;
  491. }
  492. /* It is the first time to read FIT. */
  493. if (!read_fit->offset) {
  494. fit_buf->dirty = false;
  495. } else if (fit_buf->dirty) { /* FIT has been changed during RFIT. */
  496. func_ret_status = NVDIMM_DSM_RET_STATUS_FIT_CHANGED;
  497. goto exit;
  498. }
  499. func_ret_status = NVDIMM_DSM_RET_STATUS_SUCCESS;
  500. read_len = MIN(fit->len - read_fit->offset,
  501. NVDIMM_DSM_MEMORY_SIZE - sizeof(NvdimmFuncReadFITOut));
  502. exit:
  503. size = sizeof(NvdimmFuncReadFITOut) + read_len;
  504. read_fit_out = g_malloc(size);
  505. read_fit_out->len = cpu_to_le32(size);
  506. read_fit_out->func_ret_status = cpu_to_le32(func_ret_status);
  507. memcpy(read_fit_out->fit, fit->data + read_fit->offset, read_len);
  508. cpu_physical_memory_write(dsm_mem_addr, read_fit_out, size);
  509. g_free(read_fit_out);
  510. }
  511. static void
  512. nvdimm_dsm_handle_reserved_root_method(NVDIMMState *state,
  513. NvdimmDsmIn *in, hwaddr dsm_mem_addr)
  514. {
  515. switch (in->function) {
  516. case 0x0:
  517. nvdimm_dsm_function0(0x1 | 1 << 1 /* Read FIT */, dsm_mem_addr);
  518. return;
  519. case 0x1 /* Read FIT */:
  520. nvdimm_dsm_func_read_fit(state, in, dsm_mem_addr);
  521. return;
  522. }
  523. nvdimm_dsm_no_payload(NVDIMM_DSM_RET_STATUS_UNSUPPORT, dsm_mem_addr);
  524. }
  525. static void nvdimm_dsm_root(NvdimmDsmIn *in, hwaddr dsm_mem_addr)
  526. {
  527. /*
  528. * function 0 is called to inquire which functions are supported by
  529. * OSPM
  530. */
  531. if (!in->function) {
  532. nvdimm_dsm_function0(0 /* No function supported other than
  533. function 0 */, dsm_mem_addr);
  534. return;
  535. }
  536. /* No function except function 0 is supported yet. */
  537. nvdimm_dsm_no_payload(NVDIMM_DSM_RET_STATUS_UNSUPPORT, dsm_mem_addr);
  538. }
  539. /*
  540. * the max transfer size is the max size transferred by both a
  541. * 'Get Namespace Label Data' function and a 'Set Namespace Label Data'
  542. * function.
  543. */
  544. static uint32_t nvdimm_get_max_xfer_label_size(void)
  545. {
  546. uint32_t max_get_size, max_set_size, dsm_memory_size;
  547. dsm_memory_size = NVDIMM_DSM_MEMORY_SIZE;
  548. /*
  549. * the max data ACPI can read one time which is transferred by
  550. * the response of 'Get Namespace Label Data' function.
  551. */
  552. max_get_size = dsm_memory_size - sizeof(NvdimmFuncGetLabelDataOut);
  553. /*
  554. * the max data ACPI can write one time which is transferred by
  555. * 'Set Namespace Label Data' function.
  556. */
  557. max_set_size = dsm_memory_size - offsetof(NvdimmDsmIn, arg3) -
  558. sizeof(NvdimmFuncSetLabelDataIn);
  559. return MIN(max_get_size, max_set_size);
  560. }
  561. /*
  562. * DSM Spec Rev1 4.4 Get Namespace Label Size (Function Index 4).
  563. *
  564. * It gets the size of Namespace Label data area and the max data size
  565. * that Get/Set Namespace Label Data functions can transfer.
  566. */
  567. static void nvdimm_dsm_label_size(NVDIMMDevice *nvdimm, hwaddr dsm_mem_addr)
  568. {
  569. NvdimmFuncGetLabelSizeOut label_size_out = {
  570. .len = cpu_to_le32(sizeof(label_size_out)),
  571. };
  572. uint32_t label_size, mxfer;
  573. label_size = nvdimm->label_size;
  574. mxfer = nvdimm_get_max_xfer_label_size();
  575. nvdimm_debug("label_size %#x, max_xfer %#x.\n", label_size, mxfer);
  576. label_size_out.func_ret_status = cpu_to_le32(NVDIMM_DSM_RET_STATUS_SUCCESS);
  577. label_size_out.label_size = cpu_to_le32(label_size);
  578. label_size_out.max_xfer = cpu_to_le32(mxfer);
  579. cpu_physical_memory_write(dsm_mem_addr, &label_size_out,
  580. sizeof(label_size_out));
  581. }
  582. static uint32_t nvdimm_rw_label_data_check(NVDIMMDevice *nvdimm,
  583. uint32_t offset, uint32_t length)
  584. {
  585. uint32_t ret = NVDIMM_DSM_RET_STATUS_INVALID;
  586. if (offset + length < offset) {
  587. nvdimm_debug("offset %#x + length %#x is overflow.\n", offset,
  588. length);
  589. return ret;
  590. }
  591. if (nvdimm->label_size < offset + length) {
  592. nvdimm_debug("position %#x is beyond label data (len = %" PRIx64 ").\n",
  593. offset + length, nvdimm->label_size);
  594. return ret;
  595. }
  596. if (length > nvdimm_get_max_xfer_label_size()) {
  597. nvdimm_debug("length (%#x) is larger than max_xfer (%#x).\n",
  598. length, nvdimm_get_max_xfer_label_size());
  599. return ret;
  600. }
  601. return NVDIMM_DSM_RET_STATUS_SUCCESS;
  602. }
/*
 * DSM Spec Rev1 4.5 Get Namespace Label Data (Function Index 5).
 *
 * Reads @length bytes at @offset from the device's label area and
 * returns them (plus a status header) in the DSM buffer at
 * @dsm_mem_addr; on validation failure replies with status only.
 */
static void nvdimm_dsm_get_label_data(NVDIMMDevice *nvdimm, NvdimmDsmIn *in,
                                      hwaddr dsm_mem_addr)
{
    NVDIMMClass *nvc = NVDIMM_GET_CLASS(nvdimm);
    NvdimmFuncGetLabelDataIn *get_label_data;
    NvdimmFuncGetLabelDataOut *get_label_data_out;
    uint32_t status;
    int size;

    get_label_data = (NvdimmFuncGetLabelDataIn *)in->arg3;

    /* guest fields arrive little-endian; convert in place. */
    get_label_data->offset = le32_to_cpu(get_label_data->offset);
    get_label_data->length = le32_to_cpu(get_label_data->length);

    nvdimm_debug("Read Label Data: offset %#x length %#x.\n",
                 get_label_data->offset, get_label_data->length);

    /* validate the window before touching the label area. */
    status = nvdimm_rw_label_data_check(nvdimm, get_label_data->offset,
                                        get_label_data->length);
    if (status != NVDIMM_DSM_RET_STATUS_SUCCESS) {
        nvdimm_dsm_no_payload(status, dsm_mem_addr);
        return;
    }

    size = sizeof(*get_label_data_out) + get_label_data->length;
    /* the check above caps length, so the reply fits in the DSM page. */
    assert(size <= NVDIMM_DSM_MEMORY_SIZE);
    get_label_data_out = g_malloc(size);

    get_label_data_out->len = cpu_to_le32(size);
    get_label_data_out->func_ret_status =
        cpu_to_le32(NVDIMM_DSM_RET_STATUS_SUCCESS);
    nvc->read_label_data(nvdimm, get_label_data_out->out_buf,
                         get_label_data->length, get_label_data->offset);

    cpu_physical_memory_write(dsm_mem_addr, get_label_data_out, size);
    g_free(get_label_data_out);
}
/*
 * DSM Spec Rev1 4.6 Set Namespace Label Data (Function Index 6).
 *
 * Writes the guest-supplied buffer into the device's label area at the
 * requested offset and replies with a status-only payload.
 */
static void nvdimm_dsm_set_label_data(NVDIMMDevice *nvdimm, NvdimmDsmIn *in,
                                      hwaddr dsm_mem_addr)
{
    NVDIMMClass *nvc = NVDIMM_GET_CLASS(nvdimm);
    NvdimmFuncSetLabelDataIn *set_label_data;
    uint32_t status;

    set_label_data = (NvdimmFuncSetLabelDataIn *)in->arg3;

    /* guest fields arrive little-endian; convert in place. */
    set_label_data->offset = le32_to_cpu(set_label_data->offset);
    set_label_data->length = le32_to_cpu(set_label_data->length);

    nvdimm_debug("Write Label Data: offset %#x length %#x.\n",
                 set_label_data->offset, set_label_data->length);

    /* validate the window before touching the label area. */
    status = nvdimm_rw_label_data_check(nvdimm, set_label_data->offset,
                                        set_label_data->length);
    if (status != NVDIMM_DSM_RET_STATUS_SUCCESS) {
        nvdimm_dsm_no_payload(status, dsm_mem_addr);
        return;
    }

    /* the check above caps length, so the request fits in the DSM page. */
    assert(offsetof(NvdimmDsmIn, arg3) + sizeof(*set_label_data) +
           set_label_data->length <= NVDIMM_DSM_MEMORY_SIZE);

    nvc->write_label_data(nvdimm, set_label_data->in_buf,
                          set_label_data->length, set_label_data->offset);
    nvdimm_dsm_no_payload(NVDIMM_DSM_RET_STATUS_SUCCESS, dsm_mem_addr);
}
/*
 * Dispatch a _DSM call addressed to an individual NVDIMM device
 * (in->handle identifies it).  Label functions (4/5/6) are only
 * honored — and only advertised via function 0 — when the device has a
 * label area configured.
 */
static void nvdimm_dsm_device(NvdimmDsmIn *in, hwaddr dsm_mem_addr)
{
    NVDIMMDevice *nvdimm = nvdimm_get_device_by_handle(in->handle);

    /* See the comments in nvdimm_dsm_root(). */
    if (!in->function) {
        uint32_t supported_func = 0;

        if (nvdimm && nvdimm->label_size) {
            supported_func |= 0x1 /* Bit 0 indicates whether there is
                                     support for any functions other
                                     than function 0. */ |
                              1 << 4 /* Get Namespace Label Size */ |
                              1 << 5 /* Get Namespace Label Data */ |
                              1 << 6 /* Set Namespace Label Data */;
        }
        nvdimm_dsm_function0(supported_func, dsm_mem_addr);
        return;
    }

    /* unknown handle: no such memory device. */
    if (!nvdimm) {
        nvdimm_dsm_no_payload(NVDIMM_DSM_RET_STATUS_NOMEMDEV,
                              dsm_mem_addr);
        return;
    }

    /* Encode DSM function according to DSM Spec Rev1. */
    switch (in->function) {
    case 4 /* Get Namespace Label Size */:
        if (nvdimm->label_size) {
            nvdimm_dsm_label_size(nvdimm, dsm_mem_addr);
            return;
        }
        break;
    case 5 /* Get Namespace Label Data */:
        if (nvdimm->label_size) {
            nvdimm_dsm_get_label_data(nvdimm, in, dsm_mem_addr);
            return;
        }
        break;
    case 0x6 /* Set Namespace Label Data */:
        if (nvdimm->label_size) {
            nvdimm_dsm_set_label_data(nvdimm, in, dsm_mem_addr);
            return;
        }
        break;
    }

    nvdimm_dsm_no_payload(NVDIMM_DSM_RET_STATUS_UNSUPPORT, dsm_mem_addr);
}
  707. static uint64_t
  708. nvdimm_dsm_read(void *opaque, hwaddr addr, unsigned size)
  709. {
  710. nvdimm_debug("BUG: we never read _DSM IO Port.\n");
  711. return 0;
  712. }
  713. static void
  714. nvdimm_dsm_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
  715. {
  716. NVDIMMState *state = opaque;
  717. NvdimmDsmIn *in;
  718. hwaddr dsm_mem_addr = val;
  719. nvdimm_debug("dsm memory address %#" HWADDR_PRIx ".\n", dsm_mem_addr);
  720. /*
  721. * The DSM memory is mapped to guest address space so an evil guest
  722. * can change its content while we are doing DSM emulation. Avoid
  723. * this by copying DSM memory to QEMU local memory.
  724. */
  725. in = g_new(NvdimmDsmIn, 1);
  726. cpu_physical_memory_read(dsm_mem_addr, in, sizeof(*in));
  727. in->revision = le32_to_cpu(in->revision);
  728. in->function = le32_to_cpu(in->function);
  729. in->handle = le32_to_cpu(in->handle);
  730. nvdimm_debug("Revision %#x Handler %#x Function %#x.\n", in->revision,
  731. in->handle, in->function);
  732. if (in->revision != 0x1 /* Currently we only support DSM Spec Rev1. */) {
  733. nvdimm_debug("Revision %#x is not supported, expect %#x.\n",
  734. in->revision, 0x1);
  735. nvdimm_dsm_no_payload(NVDIMM_DSM_RET_STATUS_UNSUPPORT, dsm_mem_addr);
  736. goto exit;
  737. }
  738. if (in->handle == NVDIMM_QEMU_RSVD_HANDLE_ROOT) {
  739. nvdimm_dsm_handle_reserved_root_method(state, in, dsm_mem_addr);
  740. goto exit;
  741. }
  742. /* Handle 0 is reserved for NVDIMM Root Device. */
  743. if (!in->handle) {
  744. nvdimm_dsm_root(in, dsm_mem_addr);
  745. goto exit;
  746. }
  747. nvdimm_dsm_device(in, dsm_mem_addr);
  748. exit:
  749. g_free(in);
  750. }
/*
 * Region ops for the DSM notification register: the guest performs 4-byte
 * accesses only; writes trigger the DSM emulation in nvdimm_dsm_write(),
 * reads never happen in a correct guest (see nvdimm_dsm_read()).
 */
static const MemoryRegionOps nvdimm_dsm_ops = {
    .read = nvdimm_dsm_read,
    .write = nvdimm_dsm_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
  760. void nvdimm_acpi_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev)
  761. {
  762. if (dev->hotplugged) {
  763. acpi_send_event(DEVICE(hotplug_dev), ACPI_NVDIMM_HOTPLUG_STATUS);
  764. }
  765. }
/*
 * Initialize the QEMU-side DSM machinery:
 *  - an I/O region at dsm_io.address backed by nvdimm_dsm_ops, whose write
 *    handler performs the actual _DSM emulation;
 *  - the DSM memory page, zero-filled (g_array_new with clear=true) and
 *    exposed to firmware through fw_cfg as NVDIMM_DSM_MEM_FILE so the BIOS
 *    linker can allocate and patch it;
 *  - the FIT buffer used by the Read FIT reserved method.
 */
void nvdimm_init_acpi_state(NVDIMMState *state, MemoryRegion *io,
                            struct AcpiGenericAddress dsm_io,
                            FWCfgState *fw_cfg, Object *owner)
{
    state->dsm_io = dsm_io;
    /* Region size in bytes: the register width is given in bits. */
    memory_region_init_io(&state->io_mr, owner, &nvdimm_dsm_ops, state,
                          "nvdimm-acpi-io", dsm_io.bit_width >> 3);
    memory_region_add_subregion(io, dsm_io.address, &state->io_mr);

    state->dsm_mem = g_array_new(false, true /* clear */, 1);
    acpi_data_push(state->dsm_mem, sizeof(NvdimmDsmIn));
    fw_cfg_add_file(fw_cfg, NVDIMM_DSM_MEM_FILE, state->dsm_mem->data,
                    state->dsm_mem->len);

    nvdimm_init_fit_buffer(&state->fit_buf);
}
/*
 * ACPI namespace object names shared between the AML generated below and
 * the QEMU-side emulation.
 */
#define NVDIMM_COMMON_DSM "NCAL"       /* common method backing every _DSM */
#define NVDIMM_ACPI_MEM_ADDR "MEMA"    /* DSM memory address, linker-patched */

#define NVDIMM_DSM_MEMORY "NRAM"       /* operation region over DSM memory */
#define NVDIMM_DSM_IOPORT "NPIO"       /* operation region over notify I/O */

#define NVDIMM_DSM_NOTIFY "NTFI"       /* write DSM address here to trap to QEMU */
#define NVDIMM_DSM_HANDLE "HDLE"       /* device handle, 0 = root device */
#define NVDIMM_DSM_REVISION "REVS"     /* _DSM Arg1: revision id */
#define NVDIMM_DSM_FUNCTION "FUNC"     /* _DSM Arg2: function index */
#define NVDIMM_DSM_ARG3 "FARG"         /* _DSM Arg3: function-specific buffer */

#define NVDIMM_DSM_OUT_BUF_SIZE "RLEN" /* length filled in by QEMU */
#define NVDIMM_DSM_OUT_BUF "ODAT"      /* result payload written by QEMU */

#define NVDIMM_DSM_RFIT_STATUS "RSTA"  /* status of the last RFIT call */

/* UUID accepted on the QEMU-reserved handle (e.g. Read FIT). */
#define NVDIMM_QEMU_RSVD_UUID "648B9CF2-CDA1-4312-8AD9-49C4AF32BD62"
/*
 * Build NCAL, the serialized AML method that backs every _DSM in this SSDT.
 *
 * Arguments (as called via aml_call5 from the per-device _DSM stubs):
 *   Arg0 - UUID, Arg1 - revision, Arg2 - function index,
 *   Arg3 - package holding the function-specific input buffer,
 *   Arg4 - device handle (0 for the root device).
 *
 * The method copies the inputs into the shared DSM memory page, writes the
 * page's address to the notify I/O register (VM-exit → QEMU emulates the
 * call), then returns the payload QEMU left in the output buffer.
 */
static void nvdimm_build_common_dsm(Aml *dev,
                                    NVDIMMState *nvdimm_state)
{
    Aml *method, *ifctx, *function, *handle, *uuid, *dsm_mem, *elsectx2;
    Aml *elsectx, *unsupport, *unpatched, *expected_uuid, *uuid_invalid;
    Aml *pckg, *pckg_index, *pckg_buf, *field, *dsm_out_buf, *dsm_out_buf_size;
    Aml *whilectx, *offset;
    uint8_t byte_list[1];
    AmlRegionSpace rs;

    method = aml_method(NVDIMM_COMMON_DSM, 5, AML_SERIALIZED);
    uuid = aml_arg(0);
    function = aml_arg(2);
    handle = aml_arg(4);
    dsm_mem = aml_local(6);
    dsm_out_buf = aml_local(7);

    aml_append(method, aml_store(aml_name(NVDIMM_ACPI_MEM_ADDR), dsm_mem));

    /* The notify register may live in I/O or memory space. */
    if (nvdimm_state->dsm_io.space_id == AML_AS_SYSTEM_IO) {
        rs = AML_SYSTEM_IO;
    } else {
        rs = AML_SYSTEM_MEMORY;
    }

    /* map DSM memory and IO into ACPI namespace. */
    aml_append(method, aml_operation_region(NVDIMM_DSM_IOPORT, rs,
               aml_int(nvdimm_state->dsm_io.address),
               nvdimm_state->dsm_io.bit_width >> 3));
    aml_append(method, aml_operation_region(NVDIMM_DSM_MEMORY,
               AML_SYSTEM_MEMORY, dsm_mem, sizeof(NvdimmDsmIn)));

    /*
     * DSM notifier:
     * NVDIMM_DSM_NOTIFY: write the address of DSM memory and notify QEMU to
     *                    emulate the access.
     *
     * It is the IO port so that accessing them will cause VM-exit, the
     * control will be transferred to QEMU.
     */
    field = aml_field(NVDIMM_DSM_IOPORT, AML_DWORD_ACC, AML_NOLOCK,
                      AML_PRESERVE);
    aml_append(field, aml_named_field(NVDIMM_DSM_NOTIFY,
               nvdimm_state->dsm_io.bit_width));
    aml_append(method, field);

    /*
     * DSM input:
     * NVDIMM_DSM_HANDLE: store device's handle, it's zero if the _DSM call
     *                    happens on NVDIMM Root Device.
     * NVDIMM_DSM_REVISION: store the Arg1 of _DSM call.
     * NVDIMM_DSM_FUNCTION: store the Arg2 of _DSM call.
     * NVDIMM_DSM_ARG3: store the Arg3 of _DSM call which is a Package
     *                  containing function-specific arguments.
     *
     * They are RAM mapping on host so that these accesses never cause
     * VM-EXIT.
     */
    field = aml_field(NVDIMM_DSM_MEMORY, AML_DWORD_ACC, AML_NOLOCK,
                      AML_PRESERVE);
    aml_append(field, aml_named_field(NVDIMM_DSM_HANDLE,
               sizeof(typeof_field(NvdimmDsmIn, handle)) * BITS_PER_BYTE));
    aml_append(field, aml_named_field(NVDIMM_DSM_REVISION,
               sizeof(typeof_field(NvdimmDsmIn, revision)) * BITS_PER_BYTE));
    aml_append(field, aml_named_field(NVDIMM_DSM_FUNCTION,
               sizeof(typeof_field(NvdimmDsmIn, function)) * BITS_PER_BYTE));
    aml_append(field, aml_named_field(NVDIMM_DSM_ARG3,
         (sizeof(NvdimmDsmIn) - offsetof(NvdimmDsmIn, arg3)) * BITS_PER_BYTE));
    aml_append(method, field);

    /*
     * DSM output:
     * NVDIMM_DSM_OUT_BUF_SIZE: the size of the buffer filled by QEMU.
     * NVDIMM_DSM_OUT_BUF: the buffer QEMU uses to store the result.
     *
     * Since the page is reused by both input and out, the input data
     * will be lost after storing new result into ODAT so we should fetch
     * all the input data before writing the result.
     */
    field = aml_field(NVDIMM_DSM_MEMORY, AML_DWORD_ACC, AML_NOLOCK,
                      AML_PRESERVE);
    aml_append(field, aml_named_field(NVDIMM_DSM_OUT_BUF_SIZE,
               sizeof(typeof_field(NvdimmDsmOut, len)) * BITS_PER_BYTE));
    aml_append(field, aml_named_field(NVDIMM_DSM_OUT_BUF,
       (sizeof(NvdimmDsmOut) - offsetof(NvdimmDsmOut, data)) * BITS_PER_BYTE));
    aml_append(method, field);

    /*
     * do not support any method if DSM memory address has not been
     * patched.
     */
    unpatched = aml_equal(dsm_mem, aml_int(0x0));

    /*
     * Pick the UUID this handle must carry: root device, QEMU-reserved
     * handle and ordinary NVDIMM devices each use a different one.
     */
    expected_uuid = aml_local(0);

    ifctx = aml_if(aml_equal(handle, aml_int(0x0)));
    aml_append(ifctx, aml_store(
               aml_touuid("2F10E7A4-9E91-11E4-89D3-123B93F75CBA")
               /* UUID for NVDIMM Root Device */, expected_uuid));
    aml_append(method, ifctx);

    elsectx = aml_else();
    ifctx = aml_if(aml_equal(handle, aml_int(NVDIMM_QEMU_RSVD_HANDLE_ROOT)));
    aml_append(ifctx, aml_store(aml_touuid(NVDIMM_QEMU_RSVD_UUID
               /* UUID for QEMU internal use */), expected_uuid));
    aml_append(elsectx, ifctx);

    elsectx2 = aml_else();
    aml_append(elsectx2, aml_store(
               aml_touuid("4309AC30-0D11-11E4-9191-0800200C9A66")
               /* UUID for NVDIMM Devices */, expected_uuid));
    aml_append(elsectx, elsectx2);
    aml_append(method, elsectx);

    uuid_invalid = aml_lnot(aml_equal(uuid, expected_uuid));

    unsupport = aml_if(aml_or(unpatched, uuid_invalid, NULL));

    /*
     * function 0 is called to inquire what functions are supported by
     * OSPM
     */
    ifctx = aml_if(aml_equal(function, aml_int(0)));
    byte_list[0] = 0 /* No function Supported */;
    aml_append(ifctx, aml_return(aml_buffer(1, byte_list)));
    aml_append(unsupport, ifctx);

    /* Any other function on an unpatched/invalid call: return error. */
    byte_list[0] = NVDIMM_DSM_RET_STATUS_UNSUPPORT;
    aml_append(unsupport, aml_return(aml_buffer(1, byte_list)));
    aml_append(method, unsupport);

    /*
     * The HDLE indicates the DSM function is issued from which device,
     * it reserves 0 for root device and is the handle for NVDIMM devices.
     * See the comments in nvdimm_slot_to_handle().
     */
    aml_append(method, aml_store(handle, aml_name(NVDIMM_DSM_HANDLE)));
    aml_append(method, aml_store(aml_arg(1), aml_name(NVDIMM_DSM_REVISION)));
    aml_append(method, aml_store(function, aml_name(NVDIMM_DSM_FUNCTION)));

    /*
     * The fourth parameter (Arg3) of _DSM is a package which contains
     * a buffer, the layout of the buffer is specified by UUID (Arg0),
     * Revision ID (Arg1) and Function Index (Arg2) which are documented
     * in the DSM Spec.
     */
    pckg = aml_arg(3);
    ifctx = aml_if(aml_and(aml_equal(aml_object_type(pckg),
                   aml_int(4 /* Package */)) /* It is a Package? */,
                   aml_equal(aml_sizeof(pckg), aml_int(1)) /* 1 element? */,
                   NULL));

    pckg_index = aml_local(2);
    pckg_buf = aml_local(3);
    aml_append(ifctx, aml_store(aml_index(pckg, aml_int(0)), pckg_index));
    aml_append(ifctx, aml_store(aml_derefof(pckg_index), pckg_buf));
    aml_append(ifctx, aml_store(pckg_buf, aml_name(NVDIMM_DSM_ARG3)));
    aml_append(method, ifctx);

    /*
     * tell QEMU about the real address of DSM memory, then QEMU
     * gets the control and fills the result in DSM memory.
     */
    aml_append(method, aml_store(dsm_mem, aml_name(NVDIMM_DSM_NOTIFY)));

    dsm_out_buf_size = aml_local(1);
    /* RLEN is not included in the payload returned to guest. */
    aml_append(method, aml_subtract(aml_name(NVDIMM_DSM_OUT_BUF_SIZE),
               aml_int(4), dsm_out_buf_size));

    /*
     * As per ACPI spec 6.3, Table 19-419 Object Conversion Rules, if
     * the Buffer Field <= to the size of an Integer (in bits), it will
     * be treated as an integer. Moreover, the integer size depends on
     * DSDT tables revision number. If revision number is < 2, integer
     * size is 32 bits, otherwise it is 64 bits.
     * Because of this CreateField() cannot be used if RLEN < Integer Size.
     *
     * Also please note that ACPI ASL operator SizeOf() doesn't support
     * Integer and there isn't any other way to figure out the Integer
     * size. Hence we assume 8 byte as Integer size and if RLEN < 8 bytes,
     * build dsm_out_buf byte by byte.
     */
    ifctx = aml_if(aml_lless(dsm_out_buf_size, aml_int(8)));
    offset = aml_local(2);
    aml_append(ifctx, aml_store(aml_int(0), offset));
    aml_append(ifctx, aml_name_decl("TBUF", aml_buffer(1, NULL)));
    aml_append(ifctx, aml_store(aml_buffer(0, NULL), dsm_out_buf));
    whilectx = aml_while(aml_lless(offset, dsm_out_buf_size));
    /* Copy 1 byte at offset from ODAT to temporary buffer(TBUF). */
    aml_append(whilectx, aml_store(aml_derefof(aml_index(
                                   aml_name(NVDIMM_DSM_OUT_BUF), offset)),
                                   aml_index(aml_name("TBUF"), aml_int(0))));
    aml_append(whilectx, aml_concatenate(dsm_out_buf, aml_name("TBUF"),
                                         dsm_out_buf));
    aml_append(whilectx, aml_increment(offset));
    aml_append(ifctx, whilectx);
    aml_append(ifctx, aml_return(dsm_out_buf));
    aml_append(method, ifctx);

    /* If RLEN >= Integer size, just use CreateField() operator */
    aml_append(method, aml_store(aml_shiftleft(dsm_out_buf_size, aml_int(3)),
                                 dsm_out_buf_size));
    aml_append(method, aml_create_field(aml_name(NVDIMM_DSM_OUT_BUF),
                                        aml_int(0), dsm_out_buf_size, "OBUF"));
    aml_append(method, aml_return(aml_name("OBUF")));

    aml_append(dev, method);
}
  979. static void nvdimm_build_device_dsm(Aml *dev, uint32_t handle)
  980. {
  981. Aml *method;
  982. method = aml_method("_DSM", 4, AML_NOTSERIALIZED);
  983. aml_append(method, aml_return(aml_call5(NVDIMM_COMMON_DSM, aml_arg(0),
  984. aml_arg(1), aml_arg(2), aml_arg(3),
  985. aml_int(handle))));
  986. aml_append(dev, method);
  987. }
/*
 * Build the RFIT helper and the _FIT method on the root NVDIMM device.
 *
 * RFIT(offset) issues the QEMU-reserved "Read FIT" call (function 0x1 on
 * handle NVDIMM_QEMU_RSVD_HANDLE_ROOT via NCAL), records the returned
 * status in RSTA and yields the payload past the 4-byte status field —
 * or an empty buffer on error / end of data.
 *
 * _FIT repeatedly calls RFIT, concatenating the chunks, until an empty
 * buffer signals completion; it restarts from offset 0 whenever QEMU
 * reports NVDIMM_DSM_RET_STATUS_FIT_CHANGED (hotplug raced with the read).
 */
static void nvdimm_build_fit(Aml *dev)
{
    Aml *method, *pkg, *buf, *buf_size, *offset, *call_result;
    Aml *whilectx, *ifcond, *ifctx, *elsectx, *fit;

    buf = aml_local(0);
    buf_size = aml_local(1);
    fit = aml_local(2);

    aml_append(dev, aml_name_decl(NVDIMM_DSM_RFIT_STATUS, aml_int(0)));

    /* build helper function, RFIT. */
    method = aml_method("RFIT", 1, AML_SERIALIZED);
    aml_append(method, aml_name_decl("OFST", aml_int(0)));

    /* prepare input package: a single element holding the read offset. */
    pkg = aml_package(1);
    aml_append(method, aml_store(aml_arg(0), aml_name("OFST")));
    aml_append(pkg, aml_name("OFST"));

    /* call Read_FIT function. */
    call_result = aml_call5(NVDIMM_COMMON_DSM,
                            aml_touuid(NVDIMM_QEMU_RSVD_UUID),
                            aml_int(1) /* Revision 1 */,
                            aml_int(0x1) /* Read FIT */,
                            pkg, aml_int(NVDIMM_QEMU_RSVD_HANDLE_ROOT));
    aml_append(method, aml_store(call_result, buf));

    /* handle _DSM result: first dword of the payload is the status. */
    aml_append(method, aml_create_dword_field(buf,
               aml_int(0) /* offset at byte 0 */, "STAU"));

    aml_append(method, aml_store(aml_name("STAU"),
                                 aml_name(NVDIMM_DSM_RFIT_STATUS)));

    /* if something is wrong during _DSM, return an empty buffer. */
    ifcond = aml_equal(aml_int(NVDIMM_DSM_RET_STATUS_SUCCESS),
                       aml_name("STAU"));
    ifctx = aml_if(aml_lnot(ifcond));
    aml_append(ifctx, aml_return(aml_buffer(0, NULL)));
    aml_append(method, ifctx);

    aml_append(method, aml_store(aml_sizeof(buf), buf_size));
    aml_append(method, aml_subtract(buf_size,
                                    aml_int(4) /* the size of "STAU" */,
                                    buf_size));

    /* if we read the end of fit. */
    ifctx = aml_if(aml_equal(buf_size, aml_int(0)));
    aml_append(ifctx, aml_return(aml_buffer(0, NULL)));
    aml_append(method, ifctx);

    /* Return everything past the status dword (buf_size is in bytes). */
    aml_append(method, aml_create_field(buf,
                          aml_int(4 * BITS_PER_BYTE), /* offset at byte 4.*/
                          aml_shiftleft(buf_size, aml_int(3)), "BUFF"));
    aml_append(method, aml_return(aml_name("BUFF")));
    aml_append(dev, method);

    /* build _FIT. */
    method = aml_method("_FIT", 0, AML_SERIALIZED);
    offset = aml_local(3);

    aml_append(method, aml_store(aml_buffer(0, NULL), fit));
    aml_append(method, aml_store(aml_int(0), offset));

    whilectx = aml_while(aml_int(1));
    aml_append(whilectx, aml_store(aml_call1("RFIT", offset), buf));
    aml_append(whilectx, aml_store(aml_sizeof(buf), buf_size));

    /*
     * if fit buffer was changed during RFIT, read from the beginning
     * again.
     */
    ifctx = aml_if(aml_equal(aml_name(NVDIMM_DSM_RFIT_STATUS),
                             aml_int(NVDIMM_DSM_RET_STATUS_FIT_CHANGED)));
    aml_append(ifctx, aml_store(aml_buffer(0, NULL), fit));
    aml_append(ifctx, aml_store(aml_int(0), offset));
    aml_append(whilectx, ifctx);

    elsectx = aml_else();

    /* finish fit read if no data is read out. */
    ifctx = aml_if(aml_equal(buf_size, aml_int(0)));
    aml_append(ifctx, aml_return(fit));
    aml_append(elsectx, ifctx);

    /* update the offset. */
    aml_append(elsectx, aml_add(offset, buf_size, offset));
    /* append the data we read out to the fit buffer. */
    aml_append(elsectx, aml_concatenate(fit, buf, fit));
    aml_append(whilectx, elsectx);

    aml_append(method, whilectx);
    aml_append(dev, method);
}
  1064. static void nvdimm_build_nvdimm_devices(Aml *root_dev, uint32_t ram_slots)
  1065. {
  1066. uint32_t slot;
  1067. for (slot = 0; slot < ram_slots; slot++) {
  1068. uint32_t handle = nvdimm_slot_to_handle(slot);
  1069. Aml *nvdimm_dev;
  1070. nvdimm_dev = aml_device("NV%02X", slot);
  1071. /*
  1072. * ACPI 6.0: 9.20 NVDIMM Devices:
  1073. *
  1074. * _ADR object that is used to supply OSPM with unique address
  1075. * of the NVDIMM device. This is done by returning the NFIT Device
  1076. * handle that is used to identify the associated entries in ACPI
  1077. * table NFIT or _FIT.
  1078. */
  1079. aml_append(nvdimm_dev, aml_name_decl("_ADR", aml_int(handle)));
  1080. nvdimm_build_device_dsm(nvdimm_dev, handle);
  1081. aml_append(root_dev, nvdimm_dev);
  1082. }
  1083. }
/*
 * Build the NVDIMM SSDT: \_SB.NVDR (the ACPI0012 root device) containing
 * the common NCAL method, the root _DSM, _FIT, and one child device per
 * memory slot. Also appends the MEMA dword that the BIOS linker patches
 * with the allocated address of NVDIMM_DSM_MEM_FILE (the DSM memory page
 * NCAL reads at runtime).
 */
static void nvdimm_build_ssdt(GArray *table_offsets, GArray *table_data,
                              BIOSLinker *linker,
                              NVDIMMState *nvdimm_state,
                              uint32_t ram_slots)
{
    Aml *ssdt, *sb_scope, *dev;
    int mem_addr_offset, nvdimm_ssdt;

    acpi_add_table(table_offsets, table_data);

    ssdt = init_aml_allocator();
    /* Reserve room for the table header, filled in by build_header(). */
    acpi_data_push(ssdt->buf, sizeof(AcpiTableHeader));

    sb_scope = aml_scope("\\_SB");

    dev = aml_device("NVDR");

    /*
     * ACPI 6.0: 9.20 NVDIMM Devices:
     *
     * The ACPI Name Space device uses _HID of ACPI0012 to identify the root
     * NVDIMM interface device. Platform firmware is required to contain one
     * such device in _SB scope if NVDIMMs support is exposed by platform to
     * OSPM.
     * For each NVDIMM present or intended to be supported by platform,
     * platform firmware also exposes an ACPI Namespace Device under the
     * root device.
     */
    aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0012")));

    nvdimm_build_common_dsm(dev, nvdimm_state);

    /* 0 is reserved for root device. */
    nvdimm_build_device_dsm(dev, 0);
    nvdimm_build_fit(dev);

    nvdimm_build_nvdimm_devices(dev, ram_slots);

    aml_append(sb_scope, dev);
    aml_append(ssdt, sb_scope);

    /* Remember where this SSDT starts inside the shared tables blob. */
    nvdimm_ssdt = table_data->len;

    /* copy AML table into ACPI tables blob and patch header there */
    g_array_append_vals(table_data, ssdt->buf->data, ssdt->buf->len);
    mem_addr_offset = build_append_named_dword(table_data,
                                               NVDIMM_ACPI_MEM_ADDR);

    /* Have the guest firmware allocate the DSM page and patch MEMA. */
    bios_linker_loader_alloc(linker,
                             NVDIMM_DSM_MEM_FILE, nvdimm_state->dsm_mem,
                             sizeof(NvdimmDsmIn), false /* high memory */);
    bios_linker_loader_add_pointer(linker,
        ACPI_BUILD_TABLE_FILE, mem_addr_offset, sizeof(uint32_t),
        NVDIMM_DSM_MEM_FILE, 0);
    build_header(linker, table_data,
        (void *)(table_data->data + nvdimm_ssdt),
        "SSDT", table_data->len - nvdimm_ssdt, 1, NULL, "NVDIMM");
    free_aml_allocator();
}
  1131. void nvdimm_build_srat(GArray *table_data)
  1132. {
  1133. GSList *device_list = nvdimm_get_device_list();
  1134. for (; device_list; device_list = device_list->next) {
  1135. AcpiSratMemoryAffinity *numamem = NULL;
  1136. DeviceState *dev = device_list->data;
  1137. Object *obj = OBJECT(dev);
  1138. uint64_t addr, size;
  1139. int node;
  1140. node = object_property_get_int(obj, PC_DIMM_NODE_PROP, &error_abort);
  1141. addr = object_property_get_uint(obj, PC_DIMM_ADDR_PROP, &error_abort);
  1142. size = object_property_get_uint(obj, PC_DIMM_SIZE_PROP, &error_abort);
  1143. numamem = acpi_data_push(table_data, sizeof *numamem);
  1144. build_srat_memory(numamem, addr, size, node,
  1145. MEM_AFFINITY_ENABLED | MEM_AFFINITY_NON_VOLATILE);
  1146. }
  1147. g_slist_free(device_list);
  1148. }
  1149. void nvdimm_build_acpi(GArray *table_offsets, GArray *table_data,
  1150. BIOSLinker *linker, NVDIMMState *state,
  1151. uint32_t ram_slots)
  1152. {
  1153. GSList *device_list;
  1154. /* no nvdimm device can be plugged. */
  1155. if (!ram_slots) {
  1156. return;
  1157. }
  1158. nvdimm_build_ssdt(table_offsets, table_data, linker, state,
  1159. ram_slots);
  1160. device_list = nvdimm_get_device_list();
  1161. /* no NVDIMM device is plugged. */
  1162. if (!device_list) {
  1163. return;
  1164. }
  1165. nvdimm_build_nfit(state, table_offsets, table_data, linker);
  1166. g_slist_free(device_list);
  1167. }