numa.c

/*
 * QEMU RISC-V NUMA Helper
 *
 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/boards.h"
#include "hw/qdev-properties.h"
#include "hw/riscv/numa.h"
#include "sysemu/device_tree.h"
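
/* Return true if the machine has NUMA state with at least one node. */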
static bool numa_enabled(const MachineState *ms)
{
    return (ms->numa_state && ms->numa_state->num_nodes) ? true : false;
}
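
/* One socket per NUMA node, or a single socket when NUMA is disabled. */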
int riscv_socket_count(const MachineState *ms)
{
    return (numa_enabled(ms)) ? ms->numa_state->num_nodes : 1;
}
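
/*
 * Lowest hart ID belonging to the given socket, or -1 if no hart is
 * assigned to it.
 */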
int riscv_socket_first_hartid(const MachineState *ms, int socket_id)
{
    int i, first_hartid = ms->smp.cpus;

    if (!numa_enabled(ms)) {
        return (!socket_id) ? 0 : -1;
    }

    for (i = 0; i < ms->smp.cpus; i++) {
        if (ms->possible_cpus->cpus[i].props.node_id != socket_id) {
            continue;
        }
        if (i < first_hartid) {
            first_hartid = i;
        }
    }

    return (first_hartid < ms->smp.cpus) ? first_hartid : -1;
}
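
/*
 * Highest hart ID belonging to the given socket, or -1 if no hart is
 * assigned to it.
 */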
int riscv_socket_last_hartid(const MachineState *ms, int socket_id)
{
    int i, last_hartid = -1;

    if (!numa_enabled(ms)) {
        return (!socket_id) ? ms->smp.cpus - 1 : -1;
    }

    for (i = 0; i < ms->smp.cpus; i++) {
        if (ms->possible_cpus->cpus[i].props.node_id != socket_id) {
            continue;
        }
        if (i > last_hartid) {
            last_hartid = i;
        }
    }

    return (last_hartid < ms->smp.cpus) ? last_hartid : -1;
}
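
/*
 * Number of harts in the given socket, derived from its first and last
 * hart IDs; returns -1 if the socket has no harts.
 */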
int riscv_socket_hart_count(const MachineState *ms, int socket_id)
{
    int first_hartid, last_hartid;

    if (!numa_enabled(ms)) {
        return (!socket_id) ? ms->smp.cpus : -1;
    }

    first_hartid = riscv_socket_first_hartid(ms, socket_id);
    if (first_hartid < 0) {
        return -1;
    }

    last_hartid = riscv_socket_last_hartid(ms, socket_id);
    if (last_hartid < 0) {
        return -1;
    }

    if (first_hartid > last_hartid) {
        return -1;
    }

    return last_hartid - first_hartid + 1;
}
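
/*
 * Check that every hart between the socket's first and last hart IDs is
 * actually assigned to this socket, i.e. the socket's harts are contiguous.
 */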
bool riscv_socket_check_hartids(const MachineState *ms, int socket_id)
{
    int i, first_hartid, last_hartid;

    if (!numa_enabled(ms)) {
        return (!socket_id) ? true : false;
    }

    first_hartid = riscv_socket_first_hartid(ms, socket_id);
    if (first_hartid < 0) {
        return false;
    }

    last_hartid = riscv_socket_last_hartid(ms, socket_id);
    if (last_hartid < 0) {
        return false;
    }

    for (i = first_hartid; i <= last_hartid; i++) {
        if (ms->possible_cpus->cpus[i].props.node_id != socket_id) {
            return false;
        }
    }

    return true;
}
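
/*
 * Offset of the socket's memory within machine RAM: the sum of the memory
 * sizes of all lower-numbered NUMA nodes (0 when NUMA is disabled).
 */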
uint64_t riscv_socket_mem_offset(const MachineState *ms, int socket_id)
{
    int i;
    uint64_t mem_offset = 0;

    if (!numa_enabled(ms)) {
        return 0;
    }

    for (i = 0; i < ms->numa_state->num_nodes; i++) {
        if (i == socket_id) {
            break;
        }
        mem_offset += ms->numa_state->nodes[i].node_mem;
    }

    return (i == socket_id) ? mem_offset : 0;
}
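
/*
 * Memory size of the given socket: all of machine RAM when NUMA is
 * disabled, otherwise the memory configured for that NUMA node.
 */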
uint64_t riscv_socket_mem_size(const MachineState *ms, int socket_id)
{
    if (!numa_enabled(ms)) {
        return (!socket_id) ? ms->ram_size : 0;
    }

    return (socket_id < ms->numa_state->num_nodes) ?
           ms->numa_state->nodes[socket_id].node_mem : 0;
}
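
/* Add a "numa-node-id" property to the named FDT node when NUMA is enabled. */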
void riscv_socket_fdt_write_id(const MachineState *ms, void *fdt,
                               const char *node_name, int socket_id)
{
    if (numa_enabled(ms)) {
        qemu_fdt_setprop_cell(fdt, node_name, "numa-node-id", socket_id);
    }
}
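
/*
 * Emit the "/distance-map" FDT node (compatible "numa-distance-map-v1"),
 * encoding inter-socket distances as big-endian (from, to, distance) triples.
 */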
void riscv_socket_fdt_write_distance_matrix(const MachineState *ms, void *fdt)
{
    int i, j, idx;
    uint32_t *dist_matrix, dist_matrix_size;

    if (numa_enabled(ms) && ms->numa_state->have_numa_distance) {
        dist_matrix_size = riscv_socket_count(ms) * riscv_socket_count(ms);
        dist_matrix_size *= (3 * sizeof(uint32_t));
        dist_matrix = g_malloc0(dist_matrix_size);

        for (i = 0; i < riscv_socket_count(ms); i++) {
            for (j = 0; j < riscv_socket_count(ms); j++) {
                idx = (i * riscv_socket_count(ms) + j) * 3;
                dist_matrix[idx + 0] = cpu_to_be32(i);
                dist_matrix[idx + 1] = cpu_to_be32(j);
                dist_matrix[idx + 2] =
                    cpu_to_be32(ms->numa_state->nodes[i].distance[j]);
            }
        }

        qemu_fdt_add_subnode(fdt, "/distance-map");
        qemu_fdt_setprop_string(fdt, "/distance-map", "compatible",
                                "numa-distance-map-v1");
        qemu_fdt_setprop(fdt, "/distance-map", "distance-matrix",
                         dist_matrix, dist_matrix_size);
        g_free(dist_matrix);
    }
}
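
/* Map a CPU index to its instance properties from the possible-CPU list. */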
CpuInstanceProperties
riscv_numa_cpu_index_to_props(MachineState *ms, unsigned cpu_index)
{
    MachineClass *mc = MACHINE_GET_CLASS(ms);
    const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms);

    assert(cpu_index < possible_cpus->len);
    return possible_cpus->cpus[cpu_index].props;
}
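
/*
 * Default NUMA node for the CPU at 'idx' when no explicit mapping is given:
 * CPUs are distributed evenly across nodes, with any overflow clamped to the
 * last node.
 */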
int64_t riscv_numa_get_default_cpu_node_id(const MachineState *ms, int idx)
{
    int64_t nidx = 0;

    if (ms->numa_state->num_nodes) {
        nidx = idx / (ms->smp.cpus / ms->numa_state->num_nodes);
        if (ms->numa_state->num_nodes <= nidx) {
            nidx = ms->numa_state->num_nodes - 1;
        }
    }

    return nidx;
}
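
/*
 * Build (and cache) the list of possible CPUs; each hart's arch_id and
 * core_id are set to its index.
 */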
const CPUArchIdList *riscv_numa_possible_cpu_arch_ids(MachineState *ms)
{
    int n;
    unsigned int max_cpus = ms->smp.max_cpus;

    if (ms->possible_cpus) {
        assert(ms->possible_cpus->len == max_cpus);
        return ms->possible_cpus;
    }

    ms->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
                                  sizeof(CPUArchId) * max_cpus);
    ms->possible_cpus->len = max_cpus;
    for (n = 0; n < ms->possible_cpus->len; n++) {
        ms->possible_cpus->cpus[n].type = ms->cpu_type;
        ms->possible_cpus->cpus[n].arch_id = n;
        ms->possible_cpus->cpus[n].props.has_core_id = true;
        ms->possible_cpus->cpus[n].props.core_id = n;
    }

    return ms->possible_cpus;
}