memory_mapping.c

/*
 * QEMU memory mapping
 *
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * Authors:
 *     Wen Congyang <wency@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/range.h"
#include "qapi/error.h"
#include "system/memory_mapping.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "hw/core/cpu.h"

//#define DEBUG_GUEST_PHYS_REGION_ADD

/* Insert @mapping into @list, keeping the list sorted by phys_addr. */
static void memory_mapping_list_add_mapping_sorted(MemoryMappingList *list,
                                                   MemoryMapping *mapping)
{
    MemoryMapping *p;

    QTAILQ_FOREACH(p, &list->head, next) {
        if (p->phys_addr >= mapping->phys_addr) {
            QTAILQ_INSERT_BEFORE(p, mapping, next);
            return;
        }
    }
    QTAILQ_INSERT_TAIL(&list->head, mapping, next);
}

static void create_new_memory_mapping(MemoryMappingList *list,
                                      hwaddr phys_addr,
                                      hwaddr virt_addr,
                                      ram_addr_t length)
{
    MemoryMapping *memory_mapping;

    memory_mapping = g_new(MemoryMapping, 1);
    memory_mapping->phys_addr = phys_addr;
    memory_mapping->virt_addr = virt_addr;
    memory_mapping->length = length;
    list->last_mapping = memory_mapping;
    list->num++;
    memory_mapping_list_add_mapping_sorted(list, memory_mapping);
}

/* Does the new range start exactly where @map ends, in both address spaces? */
static inline bool mapping_contiguous(MemoryMapping *map,
                                      hwaddr phys_addr,
                                      hwaddr virt_addr)
{
    return phys_addr == map->phys_addr + map->length &&
           virt_addr == map->virt_addr + map->length;
}

/*
 * Do [map->phys_addr, map->phys_addr + map->length) and
 * [phys_addr, phys_addr + length) intersect?
 */
static inline bool mapping_have_same_region(MemoryMapping *map,
                                            hwaddr phys_addr,
                                            ram_addr_t length)
{
    return !(phys_addr + length < map->phys_addr ||
             phys_addr >= map->phys_addr + map->length);
}

/*
 * [map->phys_addr, map->phys_addr + map->length) and
 * [phys_addr, phys_addr + length) intersect. Do the virtual addresses in
 * the intersection differ, i.e. do the two mappings conflict?
 */
static inline bool mapping_conflict(MemoryMapping *map,
                                    hwaddr phys_addr,
                                    hwaddr virt_addr)
{
    return virt_addr - map->virt_addr != phys_addr - map->phys_addr;
}

/*
 * [map->virt_addr, map->virt_addr + map->length) and
 * [virt_addr, virt_addr + length) intersect, and the physical addresses in
 * the intersection are the same: extend @map to cover the union of the two
 * ranges.
 */
static inline void mapping_merge(MemoryMapping *map,
                                 hwaddr virt_addr,
                                 ram_addr_t length)
{
    if (virt_addr < map->virt_addr) {
        map->length += map->virt_addr - virt_addr;
        map->virt_addr = virt_addr;
    }

    if ((virt_addr + length) >
        (map->virt_addr + map->length)) {
        map->length = virt_addr + length - map->virt_addr;
    }
}
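
/*
 * Illustrative sketch (not part of upstream QEMU) of how the helpers above
 * combine: an existing mapping covers phys [0x2000, 0x4000) at virt
 * [0x8000, 0xa000); a new range (phys 0x3000, virt 0x9000, len 0x2000)
 * overlaps it with a matching virt-phys offset, so mapping_merge() grows
 * the mapping to length 0x3000. The MEMORY_MAPPING_EXAMPLES guard is
 * hypothetical.
 */
#ifdef MEMORY_MAPPING_EXAMPLES
static void mapping_merge_example(void)
{
    MemoryMapping map = {
        .phys_addr = 0x2000, .virt_addr = 0x8000, .length = 0x2000,
    };

    g_assert(mapping_have_same_region(&map, 0x3000, 0x2000));
    g_assert(!mapping_conflict(&map, 0x3000, 0x9000));
    mapping_merge(&map, 0x9000, 0x2000);   /* union of the two virt ranges */
    g_assert(map.length == 0x3000);        /* now virt [0x8000, 0xb000) */
}
#endif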

void memory_mapping_list_add_merge_sorted(MemoryMappingList *list,
                                          hwaddr phys_addr,
                                          hwaddr virt_addr,
                                          ram_addr_t length)
{
    MemoryMapping *memory_mapping, *last_mapping;

    if (QTAILQ_EMPTY(&list->head)) {
        create_new_memory_mapping(list, phys_addr, virt_addr, length);
        return;
    }

    last_mapping = list->last_mapping;
    if (last_mapping) {
        if (mapping_contiguous(last_mapping, phys_addr, virt_addr)) {
            last_mapping->length += length;
            return;
        }
    }

    QTAILQ_FOREACH(memory_mapping, &list->head, next) {
        if (mapping_contiguous(memory_mapping, phys_addr, virt_addr)) {
            memory_mapping->length += length;
            list->last_mapping = memory_mapping;
            return;
        }

        if (phys_addr + length < memory_mapping->phys_addr) {
            /* create a new region before memory_mapping */
            break;
        }

        if (mapping_have_same_region(memory_mapping, phys_addr, length)) {
            if (mapping_conflict(memory_mapping, phys_addr, virt_addr)) {
                continue;
            }

            /* merge this region into memory_mapping */
            mapping_merge(memory_mapping, virt_addr, length);
            list->last_mapping = memory_mapping;
            return;
        }
    }

    /* this region cannot be merged into any existing memory mapping */
    create_new_memory_mapping(list, phys_addr, virt_addr, length);
}
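
/*
 * Usage sketch (illustrative, not part of upstream QEMU): ranges that are
 * contiguous in both address spaces coalesce into one mapping, while
 * disjoint ranges stay separate and sorted by phys_addr. The
 * MEMORY_MAPPING_EXAMPLES guard is hypothetical.
 */
#ifdef MEMORY_MAPPING_EXAMPLES
static void add_merge_sorted_example(void)
{
    MemoryMappingList list;

    memory_mapping_list_init(&list);
    memory_mapping_list_add_merge_sorted(&list, 0x1000, 0x1000, 0x1000);
    /* contiguous in both spaces: extends the first mapping, num stays 1 */
    memory_mapping_list_add_merge_sorted(&list, 0x2000, 0x2000, 0x1000);
    /* disjoint: inserted as a second mapping in sorted position */
    memory_mapping_list_add_merge_sorted(&list, 0x8000, 0x8000, 0x1000);
    g_assert(list.num == 2);
    memory_mapping_list_free(&list);
}
#endif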

void memory_mapping_list_free(MemoryMappingList *list)
{
    MemoryMapping *p, *q;

    QTAILQ_FOREACH_SAFE(p, &list->head, next, q) {
        QTAILQ_REMOVE(&list->head, p, next);
        g_free(p);
    }

    list->num = 0;
    list->last_mapping = NULL;
}

void memory_mapping_list_init(MemoryMappingList *list)
{
    list->num = 0;
    list->last_mapping = NULL;
    QTAILQ_INIT(&list->head);
}

void guest_phys_blocks_free(GuestPhysBlockList *list)
{
    GuestPhysBlock *p, *q;

    QTAILQ_FOREACH_SAFE(p, &list->head, next, q) {
        QTAILQ_REMOVE(&list->head, p, next);
        memory_region_unref(p->mr);
        g_free(p);
    }
    list->num = 0;
}

void guest_phys_blocks_init(GuestPhysBlockList *list)
{
    list->num = 0;
    QTAILQ_INIT(&list->head);
}

typedef struct GuestPhysListener {
    GuestPhysBlockList *list;
    MemoryListener listener;
} GuestPhysListener;

static void guest_phys_block_add_section(GuestPhysListener *g,
                                         MemoryRegionSection *section)
{
    const hwaddr target_start = section->offset_within_address_space;
    const hwaddr target_end = target_start + int128_get64(section->size);
    uint8_t *host_addr = memory_region_get_ram_ptr(section->mr) +
                         section->offset_within_region;
    GuestPhysBlock *predecessor = NULL;

    /* find continuity in guest physical address space */
    if (!QTAILQ_EMPTY(&g->list->head)) {
        hwaddr predecessor_size;

        predecessor = QTAILQ_LAST(&g->list->head);
        predecessor_size = predecessor->target_end - predecessor->target_start;

        /* the memory API guarantees monotonically increasing traversal */
        g_assert(predecessor->target_end <= target_start);

        /* we want continuity in both guest-physical and host-virtual memory */
        if (predecessor->target_end < target_start ||
            predecessor->host_addr + predecessor_size != host_addr ||
            predecessor->mr != section->mr) {
            predecessor = NULL;
        }
    }

    if (predecessor == NULL) {
        /* isolated mapping, allocate it and add it to the list */
        GuestPhysBlock *block = g_malloc0(sizeof *block);

        block->target_start = target_start;
        block->target_end = target_end;
        block->host_addr = host_addr;
        block->mr = section->mr;
        memory_region_ref(section->mr);

        QTAILQ_INSERT_TAIL(&g->list->head, block, next);
        ++g->list->num;
    } else {
        /* expand predecessor until @target_end; predecessor's start doesn't
         * change
         */
        predecessor->target_end = target_end;
    }

#ifdef DEBUG_GUEST_PHYS_REGION_ADD
    fprintf(stderr, "%s: target_start=" HWADDR_FMT_plx " target_end="
            HWADDR_FMT_plx ": %s (count: %u)\n", __func__, target_start,
            target_end, predecessor ? "joined" : "added", g->list->num);
#endif
}

static int guest_phys_ram_populate_cb(MemoryRegionSection *section,
                                      void *opaque)
{
    GuestPhysListener *g = opaque;

    guest_phys_block_add_section(g, section);
    return 0;
}

static void guest_phys_blocks_region_add(MemoryListener *listener,
                                         MemoryRegionSection *section)
{
    GuestPhysListener *g = container_of(listener, GuestPhysListener, listener);

    /* we only care about RAM */
    if (!memory_region_is_ram(section->mr) ||
        memory_region_is_ram_device(section->mr) ||
        memory_region_is_nonvolatile(section->mr)) {
        return;
    }

    /* for special sparse regions, only add populated parts */
    if (memory_region_has_ram_discard_manager(section->mr)) {
        RamDiscardManager *rdm;

        rdm = memory_region_get_ram_discard_manager(section->mr);
        ram_discard_manager_replay_populated(rdm, section,
                                             guest_phys_ram_populate_cb, g);
        return;
    }

    guest_phys_block_add_section(g, section);
}

/*
 * Registering the listener replays region_add for every section already
 * present in the address space; unregistering right away turns this into a
 * one-shot walk of the current memory map.
 */
void guest_phys_blocks_append(GuestPhysBlockList *list)
{
    GuestPhysListener g = { 0 };

    g.list = list;
    g.listener.region_add = &guest_phys_blocks_region_add;
    memory_listener_register(&g.listener, &address_space_memory);
    memory_listener_unregister(&g.listener);
}
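
/*
 * Usage sketch (illustrative, not part of upstream QEMU): snapshot the
 * guest's populated RAM layout, walk it, then release it; this mirrors how
 * the dump code consumes the list. The MEMORY_MAPPING_EXAMPLES guard is
 * hypothetical.
 */
#ifdef MEMORY_MAPPING_EXAMPLES
static uint64_t guest_phys_blocks_example(void)
{
    GuestPhysBlockList list;
    GuestPhysBlock *block;
    uint64_t ram_bytes = 0;

    guest_phys_blocks_init(&list);
    guest_phys_blocks_append(&list);       /* one-shot listener walk */
    QTAILQ_FOREACH(block, &list.head, next) {
        /* each block is contiguous in guest-physical and host-virtual space */
        ram_bytes += block->target_end - block->target_start;
    }
    guest_phys_blocks_free(&list);         /* drops the MemoryRegion refs */
    return ram_bytes;
}
#endif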

static CPUState *find_paging_enabled_cpu(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu_paging_enabled(cpu)) {
            return cpu;
        }
    }

    return NULL;
}

bool qemu_get_guest_memory_mapping(MemoryMappingList *list,
                                   const GuestPhysBlockList *guest_phys_blocks,
                                   Error **errp)
{
    ERRP_GUARD();
    CPUState *cpu, *first_paging_enabled_cpu;
    GuestPhysBlock *block;
    ram_addr_t offset, length;

    first_paging_enabled_cpu = find_paging_enabled_cpu();
    if (first_paging_enabled_cpu) {
        for (cpu = first_paging_enabled_cpu; cpu != NULL;
             cpu = CPU_NEXT(cpu)) {
            if (!cpu_get_memory_mapping(cpu, list, errp)) {
                return false;
            }
        }
        return true;
    }

    /*
     * If the guest doesn't use paging, the virtual address is equal to the
     * physical address.
     */
    QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) {
        offset = block->target_start;
        length = block->target_end - block->target_start;
        create_new_memory_mapping(list, offset, offset, length);
    }

    return true;
}
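
/*
 * Usage sketch (illustrative, not part of upstream QEMU): combine the two
 * list types to build the mapping list for a dump. When no vCPU has paging
 * enabled, the function above falls back to identity mappings derived from
 * @guest_phys_blocks. The MEMORY_MAPPING_EXAMPLES guard is hypothetical.
 */
#ifdef MEMORY_MAPPING_EXAMPLES
static bool guest_memory_mapping_example(Error **errp)
{
    GuestPhysBlockList blocks;
    MemoryMappingList mappings;
    bool ok;

    guest_phys_blocks_init(&blocks);
    guest_phys_blocks_append(&blocks);
    memory_mapping_list_init(&mappings);
    ok = qemu_get_guest_memory_mapping(&mappings, &blocks, errp);
    memory_mapping_list_free(&mappings);
    guest_phys_blocks_free(&blocks);
    return ok;
}
#endif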

void qemu_get_guest_simple_memory_mapping(MemoryMappingList *list,
                                   const GuestPhysBlockList *guest_phys_blocks)
{
    GuestPhysBlock *block;

    QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) {
        create_new_memory_mapping(list, block->target_start, 0,
                                  block->target_end - block->target_start);
    }
}

void memory_mapping_filter(MemoryMappingList *list, int64_t begin,
                           int64_t length)
{
    MemoryMapping *cur, *next;

    QTAILQ_FOREACH_SAFE(cur, &list->head, next, next) {
        if (!ranges_overlap(cur->phys_addr, cur->length, begin, length)) {
            QTAILQ_REMOVE(&list->head, cur, next);
            g_free(cur);
            list->num--;
            continue;
        }

        if (cur->phys_addr < begin) {
            cur->length -= begin - cur->phys_addr;
            if (cur->virt_addr) {
                cur->virt_addr += begin - cur->phys_addr;
            }
            cur->phys_addr = begin;
        }

        if (cur->phys_addr + cur->length > begin + length) {
            cur->length -= cur->phys_addr + cur->length - begin - length;
        }
    }
}
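
/*
 * Worked example (illustrative, not part of upstream QEMU): filtering a
 * mapping phys [0x1000, 0x5000) / virt [0x7000, 0xb000) against the window
 * [0x2000, 0x3000) clips it to phys [0x2000, 0x3000) and shifts virt_addr
 * by the same amount. The MEMORY_MAPPING_EXAMPLES guard is hypothetical.
 */
#ifdef MEMORY_MAPPING_EXAMPLES
static void memory_mapping_filter_example(void)
{
    MemoryMappingList list;
    MemoryMapping *map;

    memory_mapping_list_init(&list);
    memory_mapping_list_add_merge_sorted(&list, 0x1000, 0x7000, 0x4000);
    memory_mapping_filter(&list, 0x2000, 0x1000);

    map = QTAILQ_FIRST(&list.head);
    g_assert(map->phys_addr == 0x2000);    /* clipped at the front */
    g_assert(map->virt_addr == 0x8000);    /* shifted by the same 0x1000 */
    g_assert(map->length == 0x1000);       /* clipped at the back */
    memory_mapping_list_free(&list);
}
#endif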