/*
 * Copyright (C) 2011       Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"

#include <sys/resource.h>

#include "hw/xen/xen_backend.h"
#include "sysemu/blockdev.h"
#include "qemu/bitmap.h"

#include <xen/hvm/params.h>

#include "sysemu/xen-mapcache.h"
#include "trace-root.h"

//#define MAPCACHE_DEBUG

#ifdef MAPCACHE_DEBUG
#  define DPRINTF(fmt, ...) do { \
    fprintf(stderr, "xen_mapcache: " fmt, ## __VA_ARGS__); \
} while (0)
#else
#  define DPRINTF(fmt, ...) do { } while (0)
#endif

#if HOST_LONG_BITS == 32
#  define MCACHE_BUCKET_SHIFT 16
#  define MCACHE_MAX_SIZE     (1UL<<31) /* 2GB Cap */
#else
#  define MCACHE_BUCKET_SHIFT 20
#  define MCACHE_MAX_SIZE     (1UL<<35) /* 32GB Cap */
#endif
#define MCACHE_BUCKET_SIZE (1UL << MCACHE_BUCKET_SHIFT)
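/* i.e. buckets of 64KB on 32-bit hosts and of 1MB on 64-bit hosts. */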

/* This is the size of the virtual address space reserved for QEMU that will
 * not be used by the mapcache.
 * From empirical tests I observed that QEMU uses 75MB more than
 * max_mcache_size.
 */
#define NON_MCACHE_MEMORY_SIZE (80 * 1024 * 1024)
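
/*
 * The cache is a hash table of MapCacheEntry buckets, indexed by
 * (paddr >> MCACHE_BUCKET_SHIFT) % nr_buckets, with hash collisions
 * chained through ->next.  valid_mapping holds one bit per XC_PAGE_SIZE
 * page of the bucket, clear where the foreign mapping failed.  Locked
 * mappings are additionally recorded in a reverse-lookup list of
 * MapCacheRev, keyed by the virtual address that was handed out.
 */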
typedef struct MapCacheEntry {
    hwaddr paddr_index;
    uint8_t *vaddr_base;
    unsigned long *valid_mapping;
    uint8_t lock;
    hwaddr size;
    struct MapCacheEntry *next;
} MapCacheEntry;

typedef struct MapCacheRev {
    uint8_t *vaddr_req;
    hwaddr paddr_index;
    hwaddr size;
    QTAILQ_ENTRY(MapCacheRev) next;
} MapCacheRev;

typedef struct MapCache {
    MapCacheEntry *entry;
    unsigned long nr_buckets;
    QTAILQ_HEAD(map_cache_head, MapCacheRev) locked_entries;

    /* For most cases (>99.9%), the page address is the same. */
    MapCacheEntry *last_entry;
    unsigned long max_mcache_size;
    unsigned int mcache_bucket_shift;

    phys_offset_to_gaddr_t phys_offset_to_gaddr;
    QemuMutex lock;
    void *opaque;
} MapCache;

static MapCache *mapcache;

static inline void mapcache_lock(void)
{
    qemu_mutex_lock(&mapcache->lock);
}

static inline void mapcache_unlock(void)
{
    qemu_mutex_unlock(&mapcache->lock);
}
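
/*
 * Returns 1 iff every bit in [nr, nr + size) of @addr is set, i.e. iff
 * the whole requested page range mapped successfully.
 */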
static inline int test_bits(int nr, int size, const unsigned long *addr)
{
    unsigned long res = find_next_zero_bit(addr, size + nr, nr);
    if (res >= nr + size) {
        return 1;
    } else {
        return 0;
    }
}

void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque)
{
    unsigned long size;
    struct rlimit rlimit_as;

    mapcache = g_malloc0(sizeof (MapCache));

    mapcache->phys_offset_to_gaddr = f;
    mapcache->opaque = opaque;
    qemu_mutex_init(&mapcache->lock);

    QTAILQ_INIT(&mapcache->locked_entries);
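
    /*
     * Size the cache against RLIMIT_AS: running as root we can lift the
     * limit entirely and use the full MCACHE_MAX_SIZE, otherwise we raise
     * the soft limit to the hard limit and shrink the cache so that the
     * rest of QEMU (about NON_MCACHE_MEMORY_SIZE) still fits beneath it.
     */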
    if (geteuid() == 0) {
        rlimit_as.rlim_cur = RLIM_INFINITY;
        rlimit_as.rlim_max = RLIM_INFINITY;
        mapcache->max_mcache_size = MCACHE_MAX_SIZE;
    } else {
        getrlimit(RLIMIT_AS, &rlimit_as);
        rlimit_as.rlim_cur = rlimit_as.rlim_max;

        if (rlimit_as.rlim_max != RLIM_INFINITY) {
            fprintf(stderr, "Warning: QEMU's maximum size of virtual"
                    " memory is not infinity.\n");
        }
        if (rlimit_as.rlim_max < MCACHE_MAX_SIZE + NON_MCACHE_MEMORY_SIZE) {
            mapcache->max_mcache_size = rlimit_as.rlim_max -
                NON_MCACHE_MEMORY_SIZE;
        } else {
            mapcache->max_mcache_size = MCACHE_MAX_SIZE;
        }
    }

    setrlimit(RLIMIT_AS, &rlimit_as);
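
    /*
     * nr_buckets is max_mcache_size in MCACHE_BUCKET_SIZE units, rounded
     * up; the bucket array allocation is then rounded up to a whole
     * number of Xen pages.
     */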
    mapcache->nr_buckets =
        (((mapcache->max_mcache_size >> XC_PAGE_SHIFT) +
          (1UL << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) - 1) >>
         (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT));

    size = mapcache->nr_buckets * sizeof (MapCacheEntry);
    size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1);
    DPRINTF("%s, nr_buckets = %lx size %lu\n", __func__,
            mapcache->nr_buckets, size);
    mapcache->entry = g_malloc0(size);
}
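
/*
 * (Re)populate a bucket: tear down any previous foreign mapping, then map
 * the guest frames backing address_index's bucket with
 * xenforeignmemory_map().  Frames that fail to map (err[i] != 0) are left
 * as holes in entry->valid_mapping instead of failing the whole bucket.
 */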
static void xen_remap_bucket(MapCacheEntry *entry,
                             hwaddr size,
                             hwaddr address_index)
{
    uint8_t *vaddr_base;
    xen_pfn_t *pfns;
    int *err;
    unsigned int i;
    hwaddr nb_pfn = size >> XC_PAGE_SHIFT;

    trace_xen_remap_bucket(address_index);

    pfns = g_malloc0(nb_pfn * sizeof (xen_pfn_t));
    err = g_malloc0(nb_pfn * sizeof (int));

    if (entry->vaddr_base != NULL) {
        ram_block_notify_remove(entry->vaddr_base, entry->size);
        if (munmap(entry->vaddr_base, entry->size) != 0) {
            perror("unmap fails");
            exit(-1);
        }
    }
    g_free(entry->valid_mapping);
    entry->valid_mapping = NULL;

    for (i = 0; i < nb_pfn; i++) {
        pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT-XC_PAGE_SHIFT)) + i;
    }

    vaddr_base = xenforeignmemory_map(xen_fmem, xen_domid, PROT_READ|PROT_WRITE,
                                      nb_pfn, pfns, err);
    if (vaddr_base == NULL) {
        perror("xenforeignmemory_map");
        exit(-1);
    }

    entry->vaddr_base = vaddr_base;
    entry->paddr_index = address_index;
    entry->size = size;
    entry->valid_mapping = (unsigned long *) g_malloc0(sizeof(unsigned long) *
            BITS_TO_LONGS(size >> XC_PAGE_SHIFT));

    ram_block_notify_add(entry->vaddr_base, entry->size);

    bitmap_zero(entry->valid_mapping, nb_pfn);
    for (i = 0; i < nb_pfn; i++) {
        if (!err[i]) {
            bitmap_set(entry->valid_mapping, i, 1);
        }
    }

    g_free(pfns);
    g_free(err);
}
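
/*
 * Look up (and if necessary create) the mapping for phys_addr.  The hash
 * bucket is selected by the high bits of the address; the collision chain
 * is then walked for an entry matching in index, size and page validity.
 */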
static uint8_t *xen_map_cache_unlocked(hwaddr phys_addr, hwaddr size,
                                       uint8_t lock)
{
    MapCacheEntry *entry, *pentry = NULL;
    hwaddr address_index;
    hwaddr address_offset;
    hwaddr cache_size = size;
    hwaddr test_bit_size;
    bool translated = false;

tryagain:
    address_index  = phys_addr >> MCACHE_BUCKET_SHIFT;
    address_offset = phys_addr & (MCACHE_BUCKET_SIZE - 1);

    trace_xen_map_cache(phys_addr);

    /* test_bit_size is always a multiple of XC_PAGE_SIZE */
    if (size) {
        test_bit_size = size + (phys_addr & (XC_PAGE_SIZE - 1));

        if (test_bit_size % XC_PAGE_SIZE) {
            test_bit_size += XC_PAGE_SIZE - (test_bit_size % XC_PAGE_SIZE);
        }
    } else {
        test_bit_size = XC_PAGE_SIZE;
    }
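
    /*
     * Fast path: an unlocked, within-one-page request that hits the same
     * bucket as the previous translation and whose page mapped cleanly.
     */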
    if (mapcache->last_entry != NULL &&
        mapcache->last_entry->paddr_index == address_index &&
        !lock && !size &&
        test_bits(address_offset >> XC_PAGE_SHIFT,
                  test_bit_size >> XC_PAGE_SHIFT,
                  mapcache->last_entry->valid_mapping)) {
        trace_xen_map_cache_return(mapcache->last_entry->vaddr_base + address_offset);
        return mapcache->last_entry->vaddr_base + address_offset;
    }

    /* size is always a multiple of MCACHE_BUCKET_SIZE */
    if (size) {
        cache_size = size + address_offset;
        if (cache_size % MCACHE_BUCKET_SIZE) {
            cache_size += MCACHE_BUCKET_SIZE - (cache_size % MCACHE_BUCKET_SIZE);
        }
    } else {
        cache_size = MCACHE_BUCKET_SIZE;
    }
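
    /*
     * Walk the collision chain: locked entries that do not match cannot
     * be evicted, so skip past them; stop at the first match or at the
     * first reusable (unlocked or unmapped) entry, remapping it if it
     * does not already cover the request.
     */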
    entry = &mapcache->entry[address_index % mapcache->nr_buckets];

    while (entry && entry->lock && entry->vaddr_base &&
            (entry->paddr_index != address_index || entry->size != cache_size ||
             !test_bits(address_offset >> XC_PAGE_SHIFT,
                 test_bit_size >> XC_PAGE_SHIFT,
                 entry->valid_mapping))) {
        pentry = entry;
        entry = entry->next;
    }
    if (!entry) {
        entry = g_malloc0(sizeof (MapCacheEntry));
        pentry->next = entry;
        xen_remap_bucket(entry, cache_size, address_index);
    } else if (!entry->lock) {
        if (!entry->vaddr_base || entry->paddr_index != address_index ||
                entry->size != cache_size ||
                !test_bits(address_offset >> XC_PAGE_SHIFT,
                    test_bit_size >> XC_PAGE_SHIFT,
                    entry->valid_mapping)) {
            xen_remap_bucket(entry, cache_size, address_index);
        }
    }
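
    /*
     * If any page of the request is still invalid, retry once with the
     * address put through the phys_offset_to_gaddr() callback (when one
     * was registered), otherwise give up and return NULL.
     */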
    if (!test_bits(address_offset >> XC_PAGE_SHIFT,
                   test_bit_size >> XC_PAGE_SHIFT,
                   entry->valid_mapping)) {
        mapcache->last_entry = NULL;
        if (!translated && mapcache->phys_offset_to_gaddr) {
            phys_addr = mapcache->phys_offset_to_gaddr(phys_addr, size, mapcache->opaque);
            translated = true;
            goto tryagain;
        }
        trace_xen_map_cache_return(NULL);
        return NULL;
    }

    mapcache->last_entry = entry;
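
    /*
     * For a locked mapping, record a reverse entry so that
     * xen_ram_addr_from_mapcache() and invalidation can later find this
     * bucket from the virtual address we hand back.
     */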
    if (lock) {
        MapCacheRev *reventry = g_malloc0(sizeof(MapCacheRev));
        entry->lock++;
        reventry->vaddr_req = mapcache->last_entry->vaddr_base + address_offset;
        reventry->paddr_index = mapcache->last_entry->paddr_index;
        reventry->size = entry->size;
        QTAILQ_INSERT_HEAD(&mapcache->locked_entries, reventry, next);
    }

    trace_xen_map_cache_return(mapcache->last_entry->vaddr_base + address_offset);
    return mapcache->last_entry->vaddr_base + address_offset;
}

uint8_t *xen_map_cache(hwaddr phys_addr, hwaddr size,
                       uint8_t lock)
{
    uint8_t *p;

    mapcache_lock();
    p = xen_map_cache_unlocked(phys_addr, size, lock);
    mapcache_unlock();
    return p;
}
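
/*
 * Translate a pointer previously returned by a locked xen_map_cache()
 * call back into a guest ram_addr_t, via the reverse-lookup list.
 */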
ram_addr_t xen_ram_addr_from_mapcache(void *ptr)
{
    MapCacheEntry *entry = NULL;
    MapCacheRev *reventry;
    hwaddr paddr_index;
    hwaddr size;
    ram_addr_t raddr;
    int found = 0;

    mapcache_lock();
    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        if (reventry->vaddr_req == ptr) {
            paddr_index = reventry->paddr_index;
            size = reventry->size;
            found = 1;
            break;
        }
    }
    if (!found) {
        fprintf(stderr, "%s, could not find %p\n", __func__, ptr);
        QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
            DPRINTF("   "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index,
                    reventry->vaddr_req);
        }
        abort();
        return 0;
    }

    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
    while (entry && (entry->paddr_index != paddr_index || entry->size != size)) {
        entry = entry->next;
    }
    if (!entry) {
        DPRINTF("Trying to find address %p that is not in the mapcache!\n", ptr);
        raddr = 0;
    } else {
        raddr = (reventry->paddr_index << MCACHE_BUCKET_SHIFT) +
             ((unsigned long) ptr - (unsigned long) entry->vaddr_base);
    }
    mapcache_unlock();
    return raddr;
}
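
/*
 * Drop one lock reference on the mapping that backs @buffer.  Once the
 * last reference goes away, chained entries are unmapped and freed; the
 * head entry of each bucket (pentry == NULL) lives in the bucket array
 * itself and is therefore never freed here.
 */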
static void xen_invalidate_map_cache_entry_unlocked(uint8_t *buffer)
{
    MapCacheEntry *entry = NULL, *pentry = NULL;
    MapCacheRev *reventry;
    hwaddr paddr_index;
    hwaddr size;
    int found = 0;

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        if (reventry->vaddr_req == buffer) {
            paddr_index = reventry->paddr_index;
            size = reventry->size;
            found = 1;
            break;
        }
    }
    if (!found) {
        DPRINTF("%s, could not find %p\n", __func__, buffer);
        QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
            DPRINTF("   "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index, reventry->vaddr_req);
        }
        return;
    }
    QTAILQ_REMOVE(&mapcache->locked_entries, reventry, next);
    g_free(reventry);

    if (mapcache->last_entry != NULL &&
        mapcache->last_entry->paddr_index == paddr_index) {
        mapcache->last_entry = NULL;
    }

    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
    while (entry && (entry->paddr_index != paddr_index || entry->size != size)) {
        pentry = entry;
        entry = entry->next;
    }
    if (!entry) {
        DPRINTF("Trying to unmap address %p that is not in the mapcache!\n", buffer);
        return;
    }
    entry->lock--;
    if (entry->lock > 0 || pentry == NULL) {
        return;
    }

    pentry->next = entry->next;
    ram_block_notify_remove(entry->vaddr_base, entry->size);
    if (munmap(entry->vaddr_base, entry->size) != 0) {
        perror("unmap fails");
        exit(-1);
    }
    g_free(entry->valid_mapping);
    g_free(entry);
}

void xen_invalidate_map_cache_entry(uint8_t *buffer)
{
    mapcache_lock();
    xen_invalidate_map_cache_entry_unlocked(buffer);
    mapcache_unlock();
}
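
/*
 * Tear down every unlocked bucket in the cache.  Locked buckets (still
 * referenced by callers) are left in place; pending AIO is drained first
 * so no block-layer I/O is in flight while mappings disappear.
 */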
void xen_invalidate_map_cache(void)
{
    unsigned long i;
    MapCacheRev *reventry;

    /* Flush pending AIO before destroying the mapcache */
    bdrv_drain_all();

    mapcache_lock();

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        DPRINTF("There should be no locked mappings at this time, "
                "but "TARGET_FMT_plx" -> %p is present\n",
                reventry->paddr_index, reventry->vaddr_req);
    }

    for (i = 0; i < mapcache->nr_buckets; i++) {
        MapCacheEntry *entry = &mapcache->entry[i];

        if (entry->vaddr_base == NULL) {
            continue;
        }
        if (entry->lock > 0) {
            continue;
        }

        if (munmap(entry->vaddr_base, entry->size) != 0) {
            perror("unmap fails");
            exit(-1);
        }

        entry->paddr_index = 0;
        entry->vaddr_base = NULL;
        entry->size = 0;
        g_free(entry->valid_mapping);
        entry->valid_mapping = NULL;
    }

    mapcache->last_entry = NULL;

    mapcache_unlock();
}