/*
 * QEMU RISC-V NUMA Helper
 *
 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/boards.h"
#include "hw/qdev-properties.h"
#include "hw/riscv/numa.h"
#include "system/device_tree.h"
  26. static bool numa_enabled(const MachineState *ms)
  27. {
  28. return (ms->numa_state && ms->numa_state->num_nodes) ? true : false;
  29. }
  30. int riscv_socket_count(const MachineState *ms)
  31. {
  32. return (numa_enabled(ms)) ? ms->numa_state->num_nodes : 1;
  33. }
  34. int riscv_socket_first_hartid(const MachineState *ms, int socket_id)
  35. {
  36. int i, first_hartid = ms->smp.cpus;
  37. if (!numa_enabled(ms)) {
  38. return (!socket_id) ? 0 : -1;
  39. }
  40. for (i = 0; i < ms->smp.cpus; i++) {
  41. if (ms->possible_cpus->cpus[i].props.node_id != socket_id) {
  42. continue;
  43. }
  44. if (i < first_hartid) {
  45. first_hartid = i;
  46. }
  47. }
  48. return (first_hartid < ms->smp.cpus) ? first_hartid : -1;
  49. }
  50. int riscv_socket_last_hartid(const MachineState *ms, int socket_id)
  51. {
  52. int i, last_hartid = -1;
  53. if (!numa_enabled(ms)) {
  54. return (!socket_id) ? ms->smp.cpus - 1 : -1;
  55. }
  56. for (i = 0; i < ms->smp.cpus; i++) {
  57. if (ms->possible_cpus->cpus[i].props.node_id != socket_id) {
  58. continue;
  59. }
  60. if (i > last_hartid) {
  61. last_hartid = i;
  62. }
  63. }
  64. return (last_hartid < ms->smp.cpus) ? last_hartid : -1;
  65. }
  66. int riscv_socket_hart_count(const MachineState *ms, int socket_id)
  67. {
  68. int first_hartid, last_hartid;
  69. if (!numa_enabled(ms)) {
  70. return (!socket_id) ? ms->smp.cpus : -1;
  71. }
  72. first_hartid = riscv_socket_first_hartid(ms, socket_id);
  73. if (first_hartid < 0) {
  74. return -1;
  75. }
  76. last_hartid = riscv_socket_last_hartid(ms, socket_id);
  77. if (last_hartid < 0) {
  78. return -1;
  79. }
  80. if (first_hartid > last_hartid) {
  81. return -1;
  82. }
  83. return last_hartid - first_hartid + 1;
  84. }
  85. bool riscv_socket_check_hartids(const MachineState *ms, int socket_id)
  86. {
  87. int i, first_hartid, last_hartid;
  88. if (!numa_enabled(ms)) {
  89. return (!socket_id) ? true : false;
  90. }
  91. first_hartid = riscv_socket_first_hartid(ms, socket_id);
  92. if (first_hartid < 0) {
  93. return false;
  94. }
  95. last_hartid = riscv_socket_last_hartid(ms, socket_id);
  96. if (last_hartid < 0) {
  97. return false;
  98. }
  99. for (i = first_hartid; i <= last_hartid; i++) {
  100. if (ms->possible_cpus->cpus[i].props.node_id != socket_id) {
  101. return false;
  102. }
  103. }
  104. return true;
  105. }
  106. uint64_t riscv_socket_mem_offset(const MachineState *ms, int socket_id)
  107. {
  108. int i;
  109. uint64_t mem_offset = 0;
  110. if (!numa_enabled(ms)) {
  111. return 0;
  112. }
  113. for (i = 0; i < ms->numa_state->num_nodes; i++) {
  114. if (i == socket_id) {
  115. break;
  116. }
  117. mem_offset += ms->numa_state->nodes[i].node_mem;
  118. }
  119. return (i == socket_id) ? mem_offset : 0;
  120. }
  121. uint64_t riscv_socket_mem_size(const MachineState *ms, int socket_id)
  122. {
  123. if (!numa_enabled(ms)) {
  124. return (!socket_id) ? ms->ram_size : 0;
  125. }
  126. return (socket_id < ms->numa_state->num_nodes) ?
  127. ms->numa_state->nodes[socket_id].node_mem : 0;
  128. }
  129. void riscv_socket_fdt_write_id(const MachineState *ms, const char *node_name,
  130. int socket_id)
  131. {
  132. if (numa_enabled(ms)) {
  133. qemu_fdt_setprop_cell(ms->fdt, node_name, "numa-node-id", socket_id);
  134. }
  135. }
  136. void riscv_socket_fdt_write_distance_matrix(const MachineState *ms)
  137. {
  138. int i, j, idx;
  139. g_autofree uint32_t *dist_matrix = NULL;
  140. uint32_t dist_matrix_size;
  141. if (numa_enabled(ms) && ms->numa_state->have_numa_distance) {
  142. dist_matrix_size = riscv_socket_count(ms) * riscv_socket_count(ms);
  143. dist_matrix_size *= (3 * sizeof(uint32_t));
  144. dist_matrix = g_malloc0(dist_matrix_size);
  145. for (i = 0; i < riscv_socket_count(ms); i++) {
  146. for (j = 0; j < riscv_socket_count(ms); j++) {
  147. idx = (i * riscv_socket_count(ms) + j) * 3;
  148. dist_matrix[idx + 0] = cpu_to_be32(i);
  149. dist_matrix[idx + 1] = cpu_to_be32(j);
  150. dist_matrix[idx + 2] =
  151. cpu_to_be32(ms->numa_state->nodes[i].distance[j]);
  152. }
  153. }
  154. qemu_fdt_add_subnode(ms->fdt, "/distance-map");
  155. qemu_fdt_setprop_string(ms->fdt, "/distance-map", "compatible",
  156. "numa-distance-map-v1");
  157. qemu_fdt_setprop(ms->fdt, "/distance-map", "distance-matrix",
  158. dist_matrix, dist_matrix_size);
  159. }
  160. }
  161. CpuInstanceProperties
  162. riscv_numa_cpu_index_to_props(MachineState *ms, unsigned cpu_index)
  163. {
  164. MachineClass *mc = MACHINE_GET_CLASS(ms);
  165. const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms);
  166. assert(cpu_index < possible_cpus->len);
  167. return possible_cpus->cpus[cpu_index].props;
  168. }
  169. int64_t riscv_numa_get_default_cpu_node_id(const MachineState *ms, int idx)
  170. {
  171. int64_t nidx = 0;
  172. if (ms->numa_state->num_nodes > ms->smp.cpus) {
  173. error_report("Number of NUMA nodes (%d)"
  174. " cannot exceed the number of available CPUs (%u).",
  175. ms->numa_state->num_nodes, ms->smp.cpus);
  176. exit(EXIT_FAILURE);
  177. }
  178. if (ms->numa_state->num_nodes) {
  179. nidx = idx / (ms->smp.cpus / ms->numa_state->num_nodes);
  180. if (ms->numa_state->num_nodes <= nidx) {
  181. nidx = ms->numa_state->num_nodes - 1;
  182. }
  183. }
  184. return nidx;
  185. }
  186. const CPUArchIdList *riscv_numa_possible_cpu_arch_ids(MachineState *ms)
  187. {
  188. int n;
  189. unsigned int max_cpus = ms->smp.max_cpus;
  190. if (ms->possible_cpus) {
  191. assert(ms->possible_cpus->len == max_cpus);
  192. return ms->possible_cpus;
  193. }
  194. ms->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
  195. sizeof(CPUArchId) * max_cpus);
  196. ms->possible_cpus->len = max_cpus;
  197. for (n = 0; n < ms->possible_cpus->len; n++) {
  198. ms->possible_cpus->cpus[n].type = ms->cpu_type;
  199. ms->possible_cpus->cpus[n].arch_id = n;
  200. ms->possible_cpus->cpus[n].props.has_core_id = true;
  201. ms->possible_cpus->cpus[n].props.core_id = n;
  202. }
  203. return ms->possible_cpus;
  204. }