/*
 * cacheinfo.c - helpers to query the host about its caches
 *
 * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 */
  8. #include "qemu/osdep.h"
  9. #include "qemu/host-utils.h"
  10. #include "qemu/atomic.h"
/* Host L1 cache line sizes (bytes) and their log2, published for the rest
 * of QEMU; filled in before main() by the init_cache_info() constructor. */
int qemu_icache_linesize = 0;
int qemu_icache_linesize_log;
int qemu_dcache_linesize = 0;
int qemu_dcache_linesize_log;
/*
 * Operating system specific detection mechanisms.
 */
#if defined(_WIN32)

/*
 * Probe the L1 cache line sizes via GetLogicalProcessorInformation().
 * On any failure, *isize / *dsize are left untouched so that the later
 * arch/fallback stages can fill them in.
 */
static void sys_cache_info(int *isize, int *dsize)
{
    SYSTEM_LOGICAL_PROCESSOR_INFORMATION *buf;
    DWORD size = 0;
    BOOL success;
    size_t i, n;

    /* Check for the required buffer size first.  Note that if the zero
       size we use for the probe results in success, then there is no
       data available; fail in that case.  */
    success = GetLogicalProcessorInformation(0, &size);
    if (success || GetLastError() != ERROR_INSUFFICIENT_BUFFER) {
        return;
    }

    /* Round the reported byte count down to a whole number of entries,
       then allocate and fetch the descriptor array.  */
    n = size / sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION);
    size = n * sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION);
    buf = g_new0(SYSTEM_LOGICAL_PROCESSOR_INFORMATION, n);
    if (!GetLogicalProcessorInformation(buf, &size)) {
        goto fail;
    }

    /* Scan for level-1 cache descriptors; a unified cache sets both. */
    for (i = 0; i < n; i++) {
        if (buf[i].Relationship == RelationCache
            && buf[i].Cache.Level == 1) {
            switch (buf[i].Cache.Type) {
            case CacheUnified:
                *isize = *dsize = buf[i].Cache.LineSize;
                break;
            case CacheInstruction:
                *isize = buf[i].Cache.LineSize;
                break;
            case CacheData:
                *dsize = buf[i].Cache.LineSize;
                break;
            default:
                break;
            }
        }
    }
 fail:
    g_free(buf);
}
  59. #elif defined(__APPLE__)
  60. # include <sys/sysctl.h>
  61. static void sys_cache_info(int *isize, int *dsize)
  62. {
  63. /* There's only a single sysctl for both I/D cache line sizes. */
  64. long size;
  65. size_t len = sizeof(size);
  66. if (!sysctlbyname("hw.cachelinesize", &size, &len, NULL, 0)) {
  67. *isize = *dsize = size;
  68. }
  69. }
  70. #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
  71. # include <sys/sysctl.h>
  72. static void sys_cache_info(int *isize, int *dsize)
  73. {
  74. /* There's only a single sysctl for both I/D cache line sizes. */
  75. int size;
  76. size_t len = sizeof(size);
  77. if (!sysctlbyname("machdep.cacheline_size", &size, &len, NULL, 0)) {
  78. *isize = *dsize = size;
  79. }
  80. }
  81. #else
  82. /* POSIX */
  83. static void sys_cache_info(int *isize, int *dsize)
  84. {
  85. # ifdef _SC_LEVEL1_ICACHE_LINESIZE
  86. *isize = sysconf(_SC_LEVEL1_ICACHE_LINESIZE);
  87. # endif
  88. # ifdef _SC_LEVEL1_DCACHE_LINESIZE
  89. *dsize = sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
  90. # endif
  91. }
  92. #endif /* sys_cache_info */
/*
 * Architecture (+ OS) specific detection mechanisms.
 */
#if defined(__aarch64__)

/*
 * Fill in any line size the OS probe missed by reading CTR_EL0,
 * which is accessible from EL0.
 */
static void arch_cache_info(int *isize, int *dsize)
{
    if (*isize == 0 || *dsize == 0) {
        uint64_t ctr;

        /* The real cache geometry is in CCSIDR_EL1/CLIDR_EL1/CSSELR_EL1,
           but (at least under Linux) these are marked protected by the
           kernel.  However, CTR_EL0 contains the minimum linesize in the
           entire hierarchy, and is used by userspace cache flushing.  */
        asm volatile("mrs\t%0, ctr_el0" : "=r"(ctr));
        if (*isize == 0) {
            /* Bits [3:0] (IminLine): log2 of the line size in 4-byte words. */
            *isize = 4 << (ctr & 0xf);
        }
        if (*dsize == 0) {
            /* Bits [19:16] (DminLine): same encoding as IminLine. */
            *dsize = 4 << ((ctr >> 16) & 0xf);
        }
    }
}
  114. #elif defined(_ARCH_PPC) && defined(__linux__)
  115. # include "elf.h"
  116. static void arch_cache_info(int *isize, int *dsize)
  117. {
  118. if (*isize == 0) {
  119. *isize = qemu_getauxval(AT_ICACHEBSIZE);
  120. }
  121. if (*dsize == 0) {
  122. *dsize = qemu_getauxval(AT_DCACHEBSIZE);
  123. }
  124. }
#else
/* No architecture-specific probe available; rely on sys/fallback stages. */
static void arch_cache_info(int *isize, int *dsize) { }
#endif /* arch_cache_info */
  128. /*
  129. * ... and if all else fails ...
  130. */
  131. static void fallback_cache_info(int *isize, int *dsize)
  132. {
  133. /* If we can only find one of the two, assume they're the same. */
  134. if (*isize) {
  135. if (*dsize) {
  136. /* Success! */
  137. } else {
  138. *dsize = *isize;
  139. }
  140. } else if (*dsize) {
  141. *isize = *dsize;
  142. } else {
  143. #if defined(_ARCH_PPC)
  144. /* For PPC, we're going to use the icache size computed for
  145. flush_icache_range. Which means that we must use the
  146. architecture minimum. */
  147. *isize = *dsize = 16;
  148. #else
  149. /* Otherwise, 64 bytes is not uncommon. */
  150. *isize = *dsize = 64;
  151. #endif
  152. }
  153. }
/*
 * Runs before main(): probe the host cache line sizes in three stages
 * (OS query, architecture-specific refinement, hard-coded fallback)
 * and publish them in the qemu_*_linesize globals.
 */
static void __attribute__((constructor)) init_cache_info(void)
{
    int isize = 0, dsize = 0;

    sys_cache_info(&isize, &dsize);
    arch_cache_info(&isize, &dsize);
    fallback_cache_info(&isize, &dsize);

    /* Line sizes must be nonzero powers of two for the log below. */
    assert((isize & (isize - 1)) == 0);
    assert((dsize & (dsize - 1)) == 0);

    qemu_icache_linesize = isize;
    qemu_icache_linesize_log = ctz32(isize);
    qemu_dcache_linesize = dsize;
    qemu_dcache_linesize_log = ctz32(dsize);

    /* Also set up the atomic64 helpers from qemu/atomic.h here, so both
       are ready before any other code runs. */
    atomic64_init();
}