/* numa.c */
  1. /*
  2. * NUMA parameter parsing routines
  3. *
  4. * Copyright (c) 2014 Fujitsu Ltd.
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining a copy
  7. * of this software and associated documentation files (the "Software"), to deal
  8. * in the Software without restriction, including without limitation the rights
  9. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  10. * copies of the Software, and to permit persons to whom the Software is
  11. * furnished to do so, subject to the following conditions:
  12. *
  13. * The above copyright notice and this permission notice shall be included in
  14. * all copies or substantial portions of the Software.
  15. *
  16. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  19. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  20. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  21. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  22. * THE SOFTWARE.
  23. */
  24. #include "qemu/osdep.h"
  25. #include "sysemu/hostmem.h"
  26. #include "sysemu/numa.h"
  27. #include "sysemu/sysemu.h"
  28. #include "exec/cpu-common.h"
  29. #include "exec/ramlist.h"
  30. #include "qemu/bitmap.h"
  31. #include "qemu/error-report.h"
  32. #include "qapi/error.h"
  33. #include "qapi/opts-visitor.h"
  34. #include "qapi/qapi-visit-machine.h"
  35. #include "sysemu/qtest.h"
  36. #include "hw/core/cpu.h"
  37. #include "hw/mem/pc-dimm.h"
  38. #include "migration/vmstate.h"
  39. #include "hw/boards.h"
  40. #include "hw/mem/memory-device.h"
  41. #include "qemu/option.h"
  42. #include "qemu/config-file.h"
  43. #include "qemu/cutils.h"
/*
 * Option list for "-numa ...". The first bare value is treated as the
 * "type" key (node/dist/cpu); individual keys are not described here
 * because the OptsVisitor validates them against the QAPI schema.
 */
QemuOptsList qemu_numa_opts = {
    .name = "numa",
    .implied_opt_name = "type",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_numa_opts.head),
    .desc = { { 0 } } /* validated with OptsVisitor */
};
/* Set once any node was configured with memdev=; used to reject mixing
 * memdev= and legacy mem= across nodes (see parse_numa_node). */
static int have_memdevs;
/* Set once any node was configured with legacy mem=. */
static int have_mem;
static int max_numa_nodeid; /* Highest specified NUMA node ID, plus one.
                             * For all nodes, nodeid < max_numa_nodeid
                             */
  55. static void parse_numa_node(MachineState *ms, NumaNodeOptions *node,
  56. Error **errp)
  57. {
  58. Error *err = NULL;
  59. uint16_t nodenr;
  60. uint16List *cpus = NULL;
  61. MachineClass *mc = MACHINE_GET_CLASS(ms);
  62. unsigned int max_cpus = ms->smp.max_cpus;
  63. NodeInfo *numa_info = ms->numa_state->nodes;
  64. if (node->has_nodeid) {
  65. nodenr = node->nodeid;
  66. } else {
  67. nodenr = ms->numa_state->num_nodes;
  68. }
  69. if (nodenr >= MAX_NODES) {
  70. error_setg(errp, "Max number of NUMA nodes reached: %"
  71. PRIu16 "", nodenr);
  72. return;
  73. }
  74. if (numa_info[nodenr].present) {
  75. error_setg(errp, "Duplicate NUMA nodeid: %" PRIu16, nodenr);
  76. return;
  77. }
  78. if (!mc->cpu_index_to_instance_props || !mc->get_default_cpu_node_id) {
  79. error_setg(errp, "NUMA is not supported by this machine-type");
  80. return;
  81. }
  82. for (cpus = node->cpus; cpus; cpus = cpus->next) {
  83. CpuInstanceProperties props;
  84. if (cpus->value >= max_cpus) {
  85. error_setg(errp,
  86. "CPU index (%" PRIu16 ")"
  87. " should be smaller than maxcpus (%d)",
  88. cpus->value, max_cpus);
  89. return;
  90. }
  91. props = mc->cpu_index_to_instance_props(ms, cpus->value);
  92. props.node_id = nodenr;
  93. props.has_node_id = true;
  94. machine_set_cpu_numa_node(ms, &props, &err);
  95. if (err) {
  96. error_propagate(errp, err);
  97. return;
  98. }
  99. }
  100. have_memdevs = have_memdevs ? : node->has_memdev;
  101. have_mem = have_mem ? : node->has_mem;
  102. if ((node->has_mem && have_memdevs) || (node->has_memdev && have_mem)) {
  103. error_setg(errp, "numa configuration should use either mem= or memdev=,"
  104. "mixing both is not allowed");
  105. return;
  106. }
  107. if (node->has_mem) {
  108. numa_info[nodenr].node_mem = node->mem;
  109. if (!qtest_enabled()) {
  110. warn_report("Parameter -numa node,mem is deprecated,"
  111. " use -numa node,memdev instead");
  112. }
  113. }
  114. if (node->has_memdev) {
  115. Object *o;
  116. o = object_resolve_path_type(node->memdev, TYPE_MEMORY_BACKEND, NULL);
  117. if (!o) {
  118. error_setg(errp, "memdev=%s is ambiguous", node->memdev);
  119. return;
  120. }
  121. object_ref(o);
  122. numa_info[nodenr].node_mem = object_property_get_uint(o, "size", NULL);
  123. numa_info[nodenr].node_memdev = MEMORY_BACKEND(o);
  124. }
  125. numa_info[nodenr].present = true;
  126. max_numa_nodeid = MAX(max_numa_nodeid, nodenr + 1);
  127. ms->numa_state->num_nodes++;
  128. }
  129. static
  130. void parse_numa_distance(MachineState *ms, NumaDistOptions *dist, Error **errp)
  131. {
  132. uint16_t src = dist->src;
  133. uint16_t dst = dist->dst;
  134. uint8_t val = dist->val;
  135. NodeInfo *numa_info = ms->numa_state->nodes;
  136. if (src >= MAX_NODES || dst >= MAX_NODES) {
  137. error_setg(errp, "Parameter '%s' expects an integer between 0 and %d",
  138. src >= MAX_NODES ? "src" : "dst", MAX_NODES - 1);
  139. return;
  140. }
  141. if (!numa_info[src].present || !numa_info[dst].present) {
  142. error_setg(errp, "Source/Destination NUMA node is missing. "
  143. "Please use '-numa node' option to declare it first.");
  144. return;
  145. }
  146. if (val < NUMA_DISTANCE_MIN) {
  147. error_setg(errp, "NUMA distance (%" PRIu8 ") is invalid, "
  148. "it shouldn't be less than %d.",
  149. val, NUMA_DISTANCE_MIN);
  150. return;
  151. }
  152. if (src == dst && val != NUMA_DISTANCE_MIN) {
  153. error_setg(errp, "Local distance of node %d should be %d.",
  154. src, NUMA_DISTANCE_MIN);
  155. return;
  156. }
  157. numa_info[src].distance[dst] = val;
  158. ms->numa_state->have_numa_distance = true;
  159. }
  160. void set_numa_options(MachineState *ms, NumaOptions *object, Error **errp)
  161. {
  162. Error *err = NULL;
  163. MachineClass *mc = MACHINE_GET_CLASS(ms);
  164. if (!mc->numa_mem_supported) {
  165. error_setg(errp, "NUMA is not supported by this machine-type");
  166. goto end;
  167. }
  168. switch (object->type) {
  169. case NUMA_OPTIONS_TYPE_NODE:
  170. parse_numa_node(ms, &object->u.node, &err);
  171. if (err) {
  172. goto end;
  173. }
  174. break;
  175. case NUMA_OPTIONS_TYPE_DIST:
  176. parse_numa_distance(ms, &object->u.dist, &err);
  177. if (err) {
  178. goto end;
  179. }
  180. break;
  181. case NUMA_OPTIONS_TYPE_CPU:
  182. if (!object->u.cpu.has_node_id) {
  183. error_setg(&err, "Missing mandatory node-id property");
  184. goto end;
  185. }
  186. if (!ms->numa_state->nodes[object->u.cpu.node_id].present) {
  187. error_setg(&err, "Invalid node-id=%" PRId64 ", NUMA node must be "
  188. "defined with -numa node,nodeid=ID before it's used with "
  189. "-numa cpu,node-id=ID", object->u.cpu.node_id);
  190. goto end;
  191. }
  192. machine_set_cpu_numa_node(ms, qapi_NumaCpuOptions_base(&object->u.cpu),
  193. &err);
  194. break;
  195. default:
  196. abort();
  197. }
  198. end:
  199. error_propagate(errp, err);
  200. }
  201. static int parse_numa(void *opaque, QemuOpts *opts, Error **errp)
  202. {
  203. NumaOptions *object = NULL;
  204. MachineState *ms = MACHINE(opaque);
  205. Error *err = NULL;
  206. Visitor *v = opts_visitor_new(opts);
  207. visit_type_NumaOptions(v, NULL, &object, &err);
  208. visit_free(v);
  209. if (err) {
  210. goto end;
  211. }
  212. /* Fix up legacy suffix-less format */
  213. if ((object->type == NUMA_OPTIONS_TYPE_NODE) && object->u.node.has_mem) {
  214. const char *mem_str = qemu_opt_get(opts, "mem");
  215. qemu_strtosz_MiB(mem_str, NULL, &object->u.node.mem);
  216. }
  217. set_numa_options(ms, object, &err);
  218. end:
  219. qapi_free_NumaOptions(object);
  220. if (err) {
  221. error_propagate(errp, err);
  222. return -1;
  223. }
  224. return 0;
  225. }
  226. /* If all node pair distances are symmetric, then only distances
  227. * in one direction are enough. If there is even one asymmetric
  228. * pair, though, then all distances must be provided. The
  229. * distance from a node to itself is always NUMA_DISTANCE_MIN,
  230. * so providing it is never necessary.
  231. */
  232. static void validate_numa_distance(MachineState *ms)
  233. {
  234. int src, dst;
  235. bool is_asymmetrical = false;
  236. int nb_numa_nodes = ms->numa_state->num_nodes;
  237. NodeInfo *numa_info = ms->numa_state->nodes;
  238. for (src = 0; src < nb_numa_nodes; src++) {
  239. for (dst = src; dst < nb_numa_nodes; dst++) {
  240. if (numa_info[src].distance[dst] == 0 &&
  241. numa_info[dst].distance[src] == 0) {
  242. if (src != dst) {
  243. error_report("The distance between node %d and %d is "
  244. "missing, at least one distance value "
  245. "between each nodes should be provided.",
  246. src, dst);
  247. exit(EXIT_FAILURE);
  248. }
  249. }
  250. if (numa_info[src].distance[dst] != 0 &&
  251. numa_info[dst].distance[src] != 0 &&
  252. numa_info[src].distance[dst] !=
  253. numa_info[dst].distance[src]) {
  254. is_asymmetrical = true;
  255. }
  256. }
  257. }
  258. if (is_asymmetrical) {
  259. for (src = 0; src < nb_numa_nodes; src++) {
  260. for (dst = 0; dst < nb_numa_nodes; dst++) {
  261. if (src != dst && numa_info[src].distance[dst] == 0) {
  262. error_report("At least one asymmetrical pair of "
  263. "distances is given, please provide distances "
  264. "for both directions of all node pairs.");
  265. exit(EXIT_FAILURE);
  266. }
  267. }
  268. }
  269. }
  270. }
  271. static void complete_init_numa_distance(MachineState *ms)
  272. {
  273. int src, dst;
  274. NodeInfo *numa_info = ms->numa_state->nodes;
  275. /* Fixup NUMA distance by symmetric policy because if it is an
  276. * asymmetric distance table, it should be a complete table and
  277. * there would not be any missing distance except local node, which
  278. * is verified by validate_numa_distance above.
  279. */
  280. for (src = 0; src < ms->numa_state->num_nodes; src++) {
  281. for (dst = 0; dst < ms->numa_state->num_nodes; dst++) {
  282. if (numa_info[src].distance[dst] == 0) {
  283. if (src == dst) {
  284. numa_info[src].distance[dst] = NUMA_DISTANCE_MIN;
  285. } else {
  286. numa_info[src].distance[dst] = numa_info[dst].distance[src];
  287. }
  288. }
  289. }
  290. }
  291. }
  292. void numa_legacy_auto_assign_ram(MachineClass *mc, NodeInfo *nodes,
  293. int nb_nodes, ram_addr_t size)
  294. {
  295. int i;
  296. uint64_t usedmem = 0;
  297. /* Align each node according to the alignment
  298. * requirements of the machine class
  299. */
  300. for (i = 0; i < nb_nodes - 1; i++) {
  301. nodes[i].node_mem = (size / nb_nodes) &
  302. ~((1 << mc->numa_mem_align_shift) - 1);
  303. usedmem += nodes[i].node_mem;
  304. }
  305. nodes[i].node_mem = size - usedmem;
  306. }
  307. void numa_default_auto_assign_ram(MachineClass *mc, NodeInfo *nodes,
  308. int nb_nodes, ram_addr_t size)
  309. {
  310. int i;
  311. uint64_t usedmem = 0, node_mem;
  312. uint64_t granularity = size / nb_nodes;
  313. uint64_t propagate = 0;
  314. for (i = 0; i < nb_nodes - 1; i++) {
  315. node_mem = (granularity + propagate) &
  316. ~((1 << mc->numa_mem_align_shift) - 1);
  317. propagate = granularity + propagate - node_mem;
  318. nodes[i].node_mem = node_mem;
  319. usedmem += node_mem;
  320. }
  321. nodes[i].node_mem = size - usedmem;
  322. }
/*
 * Finish NUMA setup after all "-numa" options were parsed: auto-create
 * an implicit node when required, verify node IDs are dense, check the
 * per-node memory sums to the total RAM size, auto-assign RAM when no
 * node specified any, and validate/complete the distance table.
 * Exits the process on fatal configuration errors.
 */
void numa_complete_configuration(MachineState *ms)
{
    int i;
    MachineClass *mc = MACHINE_GET_CLASS(ms);
    NodeInfo *numa_info = ms->numa_state->nodes;

    /*
     * If memory hotplug is enabled (slots > 0) but without '-numa'
     * options explicitly on CLI, guestes will break.
     *
     *   Windows: won't enable memory hotplug without SRAT table at all
     *
     *   Linux: if QEMU is started with initial memory all below 4Gb
     *   and no SRAT table present, guest kernel will use nommu DMA ops,
     *   which breaks 32bit hw drivers when memory is hotplugged and
     *   guest tries to use it with that drivers.
     *
     * Enable NUMA implicitly by adding a new NUMA node automatically.
     *
     * Or if MachineClass::auto_enable_numa is true and no NUMA nodes,
     * assume there is just one node with whole RAM.
     */
    if (ms->numa_state->num_nodes == 0 &&
        ((ms->ram_slots > 0 &&
        mc->auto_enable_numa_with_memhp) ||
         mc->auto_enable_numa)) {
            NumaNodeOptions node = { };
            parse_numa_node(ms, &node, &error_abort);
            numa_info[0].node_mem = ram_size;
    }

    assert(max_numa_nodeid <= MAX_NODES);

    /* No support for sparse NUMA node IDs yet: */
    for (i = max_numa_nodeid - 1; i >= 0; i--) {
        /* Report large node IDs first, to make mistakes easier to spot */
        if (!numa_info[i].present) {
            error_report("numa: Node ID missing: %d", i);
            exit(1);
        }
    }

    /* This must be always true if all nodes are present: */
    assert(ms->numa_state->num_nodes == max_numa_nodeid);

    if (ms->numa_state->num_nodes > 0) {
        uint64_t numa_total;

        /* Defensive clamp; parse_numa_node already rejects IDs >= MAX_NODES */
        if (ms->numa_state->num_nodes > MAX_NODES) {
            ms->numa_state->num_nodes = MAX_NODES;
        }

        /* If no memory size is given for any node, assume the default case
         * and distribute the available memory equally across all nodes
         */
        for (i = 0; i < ms->numa_state->num_nodes; i++) {
            if (numa_info[i].node_mem != 0) {
                break;
            }
        }
        if (i == ms->numa_state->num_nodes) {
            assert(mc->numa_auto_assign_ram);
            mc->numa_auto_assign_ram(mc, numa_info,
                                     ms->numa_state->num_nodes, ram_size);
            if (!qtest_enabled()) {
                warn_report("Default splitting of RAM between nodes is deprecated,"
                            " Use '-numa node,memdev' to explictly define RAM"
                            " allocation per node");
            }
        }

        /* Per-node sizes must add up exactly to the machine RAM size */
        numa_total = 0;
        for (i = 0; i < ms->numa_state->num_nodes; i++) {
            numa_total += numa_info[i].node_mem;
        }
        if (numa_total != ram_size) {
            error_report("total memory for NUMA nodes (0x%" PRIx64 ")"
                         " should equal RAM size (0x" RAM_ADDR_FMT ")",
                         numa_total, ram_size);
            exit(1);
        }

        /* QEMU needs at least all unique node pair distances to build
         * the whole NUMA distance table. QEMU treats the distance table
         * as symmetric by default, i.e. distance A->B == distance B->A.
         * Thus, QEMU is able to complete the distance table
         * initialization even though only distance A->B is provided and
         * distance B->A is not. QEMU knows the distance of a node to
         * itself is always 10, so A->A distances may be omitted. When
         * the distances of two nodes of a pair differ, i.e. distance
         * A->B != distance B->A, then that means the distance table is
         * asymmetric. In this case, the distances for both directions
         * of all node pairs are required.
         */
        if (ms->numa_state->have_numa_distance) {
            /* Validate enough NUMA distance information was provided. */
            validate_numa_distance(ms);

            /* Validation succeeded, now fill in any missing distances. */
            complete_init_numa_distance(ms);
        }
    }
}
/*
 * Run parse_numa() over every "-numa" option group; any parse error is
 * fatal (error_fatal terminates the process).
 */
void parse_numa_opts(MachineState *ms)
{
    qemu_opts_foreach(qemu_find_opts("numa"), parse_numa, ms, &error_fatal);
}
  420. void numa_cpu_pre_plug(const CPUArchId *slot, DeviceState *dev, Error **errp)
  421. {
  422. int node_id = object_property_get_int(OBJECT(dev), "node-id", &error_abort);
  423. if (node_id == CPU_UNSET_NUMA_NODE_ID) {
  424. /* due to bug in libvirt, it doesn't pass node-id from props on
  425. * device_add as expected, so we have to fix it up here */
  426. if (slot->props.has_node_id) {
  427. object_property_set_int(OBJECT(dev), slot->props.node_id,
  428. "node-id", errp);
  429. }
  430. } else if (node_id != slot->props.node_id) {
  431. error_setg(errp, "invalid node-id, must be %"PRId64,
  432. slot->props.node_id);
  433. }
  434. }
/*
 * Allocate the machine's RAM as a single region (no per-node split).
 * With -mem-path on Linux the RAM is file-backed; if that allocation
 * fails and preallocation was not requested, fall back to anonymous RAM
 * (legacy, deprecated behaviour). The region is registered for
 * migration via vmstate_register_ram_global().
 */
static void allocate_system_memory_nonnuma(MemoryRegion *mr, Object *owner,
                                           const char *name,
                                           uint64_t ram_size)
{
    if (mem_path) {
#ifdef __linux__
        Error *err = NULL;
        memory_region_init_ram_from_file(mr, owner, name, ram_size, 0, 0,
                                         mem_path, &err);
        if (err) {
            error_report_err(err);
            /* -mem-prealloc implies the file backing must work; give up */
            if (mem_prealloc) {
                exit(1);
            }
            warn_report("falling back to regular RAM allocation");
            error_printf("This is deprecated. Make sure that -mem-path "
                         " specified path has sufficient resources to allocate"
                         " -m specified RAM amount\n");
            /* Legacy behavior: if allocation failed, fall back to
             * regular RAM allocation.
             */
            mem_path = NULL;
            memory_region_init_ram_nomigrate(mr, owner, name, ram_size, &error_fatal);
        }
#else
        fprintf(stderr, "-mem-path not supported on this host\n");
        exit(1);
#endif
    } else {
        memory_region_init_ram_nomigrate(mr, owner, name, ram_size, &error_fatal);
    }
    vmstate_register_ram_global(mr);
}
  468. void memory_region_allocate_system_memory(MemoryRegion *mr, Object *owner,
  469. const char *name,
  470. uint64_t ram_size)
  471. {
  472. uint64_t addr = 0;
  473. int i;
  474. MachineState *ms = MACHINE(qdev_get_machine());
  475. if (ms->numa_state == NULL ||
  476. ms->numa_state->num_nodes == 0 || !have_memdevs) {
  477. allocate_system_memory_nonnuma(mr, owner, name, ram_size);
  478. return;
  479. }
  480. memory_region_init(mr, owner, name, ram_size);
  481. for (i = 0; i < ms->numa_state->num_nodes; i++) {
  482. uint64_t size = ms->numa_state->nodes[i].node_mem;
  483. HostMemoryBackend *backend = ms->numa_state->nodes[i].node_memdev;
  484. if (!backend) {
  485. continue;
  486. }
  487. MemoryRegion *seg = host_memory_backend_get_memory(backend);
  488. if (memory_region_is_mapped(seg)) {
  489. char *path = object_get_canonical_path_component(OBJECT(backend));
  490. error_report("memory backend %s is used multiple times. Each "
  491. "-numa option must use a different memdev value.",
  492. path);
  493. g_free(path);
  494. exit(1);
  495. }
  496. host_memory_backend_set_mapped(backend, true);
  497. memory_region_add_subregion(mr, addr, seg);
  498. vmstate_register_ram_global(seg);
  499. addr += size;
  500. }
  501. }
  502. static void numa_stat_memory_devices(NumaNodeMem node_mem[])
  503. {
  504. MemoryDeviceInfoList *info_list = qmp_memory_device_list();
  505. MemoryDeviceInfoList *info;
  506. PCDIMMDeviceInfo *pcdimm_info;
  507. VirtioPMEMDeviceInfo *vpi;
  508. for (info = info_list; info; info = info->next) {
  509. MemoryDeviceInfo *value = info->value;
  510. if (value) {
  511. switch (value->type) {
  512. case MEMORY_DEVICE_INFO_KIND_DIMM:
  513. case MEMORY_DEVICE_INFO_KIND_NVDIMM:
  514. pcdimm_info = value->type == MEMORY_DEVICE_INFO_KIND_DIMM ?
  515. value->u.dimm.data : value->u.nvdimm.data;
  516. node_mem[pcdimm_info->node].node_mem += pcdimm_info->size;
  517. node_mem[pcdimm_info->node].node_plugged_mem +=
  518. pcdimm_info->size;
  519. break;
  520. case MEMORY_DEVICE_INFO_KIND_VIRTIO_PMEM:
  521. vpi = value->u.virtio_pmem.data;
  522. /* TODO: once we support numa, assign to right node */
  523. node_mem[0].node_mem += vpi->size;
  524. node_mem[0].node_plugged_mem += vpi->size;
  525. break;
  526. default:
  527. g_assert_not_reached();
  528. }
  529. }
  530. }
  531. qapi_free_MemoryDeviceInfoList(info_list);
  532. }
  533. void query_numa_node_mem(NumaNodeMem node_mem[], MachineState *ms)
  534. {
  535. int i;
  536. if (ms->numa_state == NULL || ms->numa_state->num_nodes <= 0) {
  537. return;
  538. }
  539. numa_stat_memory_devices(node_mem);
  540. for (i = 0; i < ms->numa_state->num_nodes; i++) {
  541. node_mem[i].node_mem += ms->numa_state->nodes[i].node_mem;
  542. }
  543. }
/* Register @n to be called back when RAM blocks are added or removed. */
void ram_block_notifier_add(RAMBlockNotifier *n)
{
    QLIST_INSERT_HEAD(&ram_list.ramblock_notifiers, n, next);
}
/* Unregister @n; it must previously have been added with
 * ram_block_notifier_add(). */
void ram_block_notifier_remove(RAMBlockNotifier *n)
{
    QLIST_REMOVE(n, next);
}
  552. void ram_block_notify_add(void *host, size_t size)
  553. {
  554. RAMBlockNotifier *notifier;
  555. QLIST_FOREACH(notifier, &ram_list.ramblock_notifiers, next) {
  556. notifier->ram_block_added(notifier, host, size);
  557. }
  558. }
  559. void ram_block_notify_remove(void *host, size_t size)
  560. {
  561. RAMBlockNotifier *notifier;
  562. QLIST_FOREACH(notifier, &ram_list.ramblock_notifiers, next) {
  563. notifier->ram_block_removed(notifier, host, size);
  564. }
  565. }