memory_mapping.c 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356
  1. /*
  2. * QEMU memory mapping
  3. *
  4. * Copyright Fujitsu, Corp. 2011, 2012
  5. *
  6. * Authors:
  7. * Wen Congyang <wency@cn.fujitsu.com>
  8. *
  9. * This work is licensed under the terms of the GNU GPL, version 2 or later.
  10. * See the COPYING file in the top-level directory.
  11. *
  12. */
  13. #include "qemu/osdep.h"
  14. #include "qapi/error.h"
  15. #include "qemu-common.h"
  16. #include "cpu.h"
  17. #include "sysemu/memory_mapping.h"
  18. #include "exec/memory.h"
  19. #include "exec/address-spaces.h"
  20. //#define DEBUG_GUEST_PHYS_REGION_ADD
  21. static void memory_mapping_list_add_mapping_sorted(MemoryMappingList *list,
  22. MemoryMapping *mapping)
  23. {
  24. MemoryMapping *p;
  25. QTAILQ_FOREACH(p, &list->head, next) {
  26. if (p->phys_addr >= mapping->phys_addr) {
  27. QTAILQ_INSERT_BEFORE(p, mapping, next);
  28. return;
  29. }
  30. }
  31. QTAILQ_INSERT_TAIL(&list->head, mapping, next);
  32. }
  33. static void create_new_memory_mapping(MemoryMappingList *list,
  34. hwaddr phys_addr,
  35. hwaddr virt_addr,
  36. ram_addr_t length)
  37. {
  38. MemoryMapping *memory_mapping;
  39. memory_mapping = g_malloc(sizeof(MemoryMapping));
  40. memory_mapping->phys_addr = phys_addr;
  41. memory_mapping->virt_addr = virt_addr;
  42. memory_mapping->length = length;
  43. list->last_mapping = memory_mapping;
  44. list->num++;
  45. memory_mapping_list_add_mapping_sorted(list, memory_mapping);
  46. }
  47. static inline bool mapping_contiguous(MemoryMapping *map,
  48. hwaddr phys_addr,
  49. hwaddr virt_addr)
  50. {
  51. return phys_addr == map->phys_addr + map->length &&
  52. virt_addr == map->virt_addr + map->length;
  53. }
  54. /*
  55. * [map->phys_addr, map->phys_addr + map->length) and
  56. * [phys_addr, phys_addr + length) have intersection?
  57. */
  58. static inline bool mapping_have_same_region(MemoryMapping *map,
  59. hwaddr phys_addr,
  60. ram_addr_t length)
  61. {
  62. return !(phys_addr + length < map->phys_addr ||
  63. phys_addr >= map->phys_addr + map->length);
  64. }
  65. /*
  66. * [map->phys_addr, map->phys_addr + map->length) and
  67. * [phys_addr, phys_addr + length) have intersection. The virtual address in the
  68. * intersection are the same?
  69. */
  70. static inline bool mapping_conflict(MemoryMapping *map,
  71. hwaddr phys_addr,
  72. hwaddr virt_addr)
  73. {
  74. return virt_addr - map->virt_addr != phys_addr - map->phys_addr;
  75. }
  76. /*
  77. * [map->virt_addr, map->virt_addr + map->length) and
  78. * [virt_addr, virt_addr + length) have intersection. And the physical address
  79. * in the intersection are the same.
  80. */
  81. static inline void mapping_merge(MemoryMapping *map,
  82. hwaddr virt_addr,
  83. ram_addr_t length)
  84. {
  85. if (virt_addr < map->virt_addr) {
  86. map->length += map->virt_addr - virt_addr;
  87. map->virt_addr = virt_addr;
  88. }
  89. if ((virt_addr + length) >
  90. (map->virt_addr + map->length)) {
  91. map->length = virt_addr + length - map->virt_addr;
  92. }
  93. }
  94. void memory_mapping_list_add_merge_sorted(MemoryMappingList *list,
  95. hwaddr phys_addr,
  96. hwaddr virt_addr,
  97. ram_addr_t length)
  98. {
  99. MemoryMapping *memory_mapping, *last_mapping;
  100. if (QTAILQ_EMPTY(&list->head)) {
  101. create_new_memory_mapping(list, phys_addr, virt_addr, length);
  102. return;
  103. }
  104. last_mapping = list->last_mapping;
  105. if (last_mapping) {
  106. if (mapping_contiguous(last_mapping, phys_addr, virt_addr)) {
  107. last_mapping->length += length;
  108. return;
  109. }
  110. }
  111. QTAILQ_FOREACH(memory_mapping, &list->head, next) {
  112. if (mapping_contiguous(memory_mapping, phys_addr, virt_addr)) {
  113. memory_mapping->length += length;
  114. list->last_mapping = memory_mapping;
  115. return;
  116. }
  117. if (phys_addr + length < memory_mapping->phys_addr) {
  118. /* create a new region before memory_mapping */
  119. break;
  120. }
  121. if (mapping_have_same_region(memory_mapping, phys_addr, length)) {
  122. if (mapping_conflict(memory_mapping, phys_addr, virt_addr)) {
  123. continue;
  124. }
  125. /* merge this region into memory_mapping */
  126. mapping_merge(memory_mapping, virt_addr, length);
  127. list->last_mapping = memory_mapping;
  128. return;
  129. }
  130. }
  131. /* this region can not be merged into any existed memory mapping. */
  132. create_new_memory_mapping(list, phys_addr, virt_addr, length);
  133. }
  134. void memory_mapping_list_free(MemoryMappingList *list)
  135. {
  136. MemoryMapping *p, *q;
  137. QTAILQ_FOREACH_SAFE(p, &list->head, next, q) {
  138. QTAILQ_REMOVE(&list->head, p, next);
  139. g_free(p);
  140. }
  141. list->num = 0;
  142. list->last_mapping = NULL;
  143. }
  144. void memory_mapping_list_init(MemoryMappingList *list)
  145. {
  146. list->num = 0;
  147. list->last_mapping = NULL;
  148. QTAILQ_INIT(&list->head);
  149. }
  150. void guest_phys_blocks_free(GuestPhysBlockList *list)
  151. {
  152. GuestPhysBlock *p, *q;
  153. QTAILQ_FOREACH_SAFE(p, &list->head, next, q) {
  154. QTAILQ_REMOVE(&list->head, p, next);
  155. memory_region_unref(p->mr);
  156. g_free(p);
  157. }
  158. list->num = 0;
  159. }
  160. void guest_phys_blocks_init(GuestPhysBlockList *list)
  161. {
  162. list->num = 0;
  163. QTAILQ_INIT(&list->head);
  164. }
/*
 * Glue between a MemoryListener callback and the GuestPhysBlockList it
 * populates; recovered from the listener pointer via container_of() in
 * guest_phys_blocks_region_add().
 */
typedef struct GuestPhysListener {
    GuestPhysBlockList *list;   /* output: blocks discovered so far */
    MemoryListener listener;    /* registered with the memory API */
} GuestPhysListener;
  169. static void guest_phys_blocks_region_add(MemoryListener *listener,
  170. MemoryRegionSection *section)
  171. {
  172. GuestPhysListener *g;
  173. uint64_t section_size;
  174. hwaddr target_start, target_end;
  175. uint8_t *host_addr;
  176. GuestPhysBlock *predecessor;
  177. /* we only care about RAM */
  178. if (!memory_region_is_ram(section->mr) ||
  179. memory_region_is_ram_device(section->mr)) {
  180. return;
  181. }
  182. g = container_of(listener, GuestPhysListener, listener);
  183. section_size = int128_get64(section->size);
  184. target_start = section->offset_within_address_space;
  185. target_end = target_start + section_size;
  186. host_addr = memory_region_get_ram_ptr(section->mr) +
  187. section->offset_within_region;
  188. predecessor = NULL;
  189. /* find continuity in guest physical address space */
  190. if (!QTAILQ_EMPTY(&g->list->head)) {
  191. hwaddr predecessor_size;
  192. predecessor = QTAILQ_LAST(&g->list->head, GuestPhysBlockHead);
  193. predecessor_size = predecessor->target_end - predecessor->target_start;
  194. /* the memory API guarantees monotonically increasing traversal */
  195. g_assert(predecessor->target_end <= target_start);
  196. /* we want continuity in both guest-physical and host-virtual memory */
  197. if (predecessor->target_end < target_start ||
  198. predecessor->host_addr + predecessor_size != host_addr) {
  199. predecessor = NULL;
  200. }
  201. }
  202. if (predecessor == NULL) {
  203. /* isolated mapping, allocate it and add it to the list */
  204. GuestPhysBlock *block = g_malloc0(sizeof *block);
  205. block->target_start = target_start;
  206. block->target_end = target_end;
  207. block->host_addr = host_addr;
  208. block->mr = section->mr;
  209. memory_region_ref(section->mr);
  210. QTAILQ_INSERT_TAIL(&g->list->head, block, next);
  211. ++g->list->num;
  212. } else {
  213. /* expand predecessor until @target_end; predecessor's start doesn't
  214. * change
  215. */
  216. predecessor->target_end = target_end;
  217. }
  218. #ifdef DEBUG_GUEST_PHYS_REGION_ADD
  219. fprintf(stderr, "%s: target_start=" TARGET_FMT_plx " target_end="
  220. TARGET_FMT_plx ": %s (count: %u)\n", __FUNCTION__, target_start,
  221. target_end, predecessor ? "joined" : "added", g->list->num);
  222. #endif
  223. }
  224. void guest_phys_blocks_append(GuestPhysBlockList *list)
  225. {
  226. GuestPhysListener g = { 0 };
  227. g.list = list;
  228. g.listener.region_add = &guest_phys_blocks_region_add;
  229. memory_listener_register(&g.listener, &address_space_memory);
  230. memory_listener_unregister(&g.listener);
  231. }
  232. static CPUState *find_paging_enabled_cpu(CPUState *start_cpu)
  233. {
  234. CPUState *cpu;
  235. CPU_FOREACH(cpu) {
  236. if (cpu_paging_enabled(cpu)) {
  237. return cpu;
  238. }
  239. }
  240. return NULL;
  241. }
  242. void qemu_get_guest_memory_mapping(MemoryMappingList *list,
  243. const GuestPhysBlockList *guest_phys_blocks,
  244. Error **errp)
  245. {
  246. CPUState *cpu, *first_paging_enabled_cpu;
  247. GuestPhysBlock *block;
  248. ram_addr_t offset, length;
  249. first_paging_enabled_cpu = find_paging_enabled_cpu(first_cpu);
  250. if (first_paging_enabled_cpu) {
  251. for (cpu = first_paging_enabled_cpu; cpu != NULL;
  252. cpu = CPU_NEXT(cpu)) {
  253. Error *err = NULL;
  254. cpu_get_memory_mapping(cpu, list, &err);
  255. if (err) {
  256. error_propagate(errp, err);
  257. return;
  258. }
  259. }
  260. return;
  261. }
  262. /*
  263. * If the guest doesn't use paging, the virtual address is equal to physical
  264. * address.
  265. */
  266. QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) {
  267. offset = block->target_start;
  268. length = block->target_end - block->target_start;
  269. create_new_memory_mapping(list, offset, offset, length);
  270. }
  271. }
  272. void qemu_get_guest_simple_memory_mapping(MemoryMappingList *list,
  273. const GuestPhysBlockList *guest_phys_blocks)
  274. {
  275. GuestPhysBlock *block;
  276. QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) {
  277. create_new_memory_mapping(list, block->target_start, 0,
  278. block->target_end - block->target_start);
  279. }
  280. }
  281. void memory_mapping_filter(MemoryMappingList *list, int64_t begin,
  282. int64_t length)
  283. {
  284. MemoryMapping *cur, *next;
  285. QTAILQ_FOREACH_SAFE(cur, &list->head, next, next) {
  286. if (cur->phys_addr >= begin + length ||
  287. cur->phys_addr + cur->length <= begin) {
  288. QTAILQ_REMOVE(&list->head, cur, next);
  289. list->num--;
  290. continue;
  291. }
  292. if (cur->phys_addr < begin) {
  293. cur->length -= begin - cur->phys_addr;
  294. if (cur->virt_addr) {
  295. cur->virt_addr += begin - cur->phys_addr;
  296. }
  297. cur->phys_addr = begin;
  298. }
  299. if (cur->phys_addr + cur->length > begin + length) {
  300. cur->length -= cur->phys_addr + cur->length - begin - length;
  301. }
  302. }
  303. }