
memory_mapping.c

/*
 * QEMU memory mapping
 *
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * Authors:
 *     Wen Congyang <wency@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"

#include "sysemu/memory_mapping.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "hw/core/cpu.h"

//#define DEBUG_GUEST_PHYS_REGION_ADD

static void memory_mapping_list_add_mapping_sorted(MemoryMappingList *list,
                                                   MemoryMapping *mapping)
{
    MemoryMapping *p;

    QTAILQ_FOREACH(p, &list->head, next) {
        if (p->phys_addr >= mapping->phys_addr) {
            QTAILQ_INSERT_BEFORE(p, mapping, next);
            return;
        }
    }
    QTAILQ_INSERT_TAIL(&list->head, mapping, next);
}

static void create_new_memory_mapping(MemoryMappingList *list,
                                      hwaddr phys_addr,
                                      hwaddr virt_addr,
                                      ram_addr_t length)
{
    MemoryMapping *memory_mapping;

    memory_mapping = g_new(MemoryMapping, 1);
    memory_mapping->phys_addr = phys_addr;
    memory_mapping->virt_addr = virt_addr;
    memory_mapping->length = length;
    list->last_mapping = memory_mapping;
    list->num++;
    memory_mapping_list_add_mapping_sorted(list, memory_mapping);
}

static inline bool mapping_contiguous(MemoryMapping *map,
                                      hwaddr phys_addr,
                                      hwaddr virt_addr)
{
    return phys_addr == map->phys_addr + map->length &&
           virt_addr == map->virt_addr + map->length;
}

/*
 * Do [map->phys_addr, map->phys_addr + map->length) and
 * [phys_addr, phys_addr + length) intersect?
 */
static inline bool mapping_have_same_region(MemoryMapping *map,
                                            hwaddr phys_addr,
                                            ram_addr_t length)
{
    return !(phys_addr + length < map->phys_addr ||
             phys_addr >= map->phys_addr + map->length);
}

/*
 * [map->phys_addr, map->phys_addr + map->length) and
 * [phys_addr, phys_addr + length) intersect. Do the virtual addresses in
 * the intersection disagree (i.e. do the two mappings conflict)?
 */
static inline bool mapping_conflict(MemoryMapping *map,
                                    hwaddr phys_addr,
                                    hwaddr virt_addr)
{
    return virt_addr - map->virt_addr != phys_addr - map->phys_addr;
}

/*
 * [map->virt_addr, map->virt_addr + map->length) and
 * [virt_addr, virt_addr + length) intersect, and the physical addresses in
 * the intersection are the same: extend map to cover the union of the two
 * virtual ranges.
 */
static inline void mapping_merge(MemoryMapping *map,
                                 hwaddr virt_addr,
                                 ram_addr_t length)
{
    if (virt_addr < map->virt_addr) {
        map->length += map->virt_addr - virt_addr;
        map->virt_addr = virt_addr;
    }

    if ((virt_addr + length) >
        (map->virt_addr + map->length)) {
        map->length = virt_addr + length - map->virt_addr;
    }
}
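
/*
 * Illustrative sketch (hypothetical values, not part of the build): assuming
 * a mapping that covers virtual [0x2000, 0x3000), merging an overlapping
 * range that starts earlier widens the mapping on the left:
 *
 *     MemoryMapping map = { .virt_addr = 0x2000, .length = 0x1000 };
 *     mapping_merge(&map, 0x1000, 0x1800);
 *     // map now covers [0x1000, 0x3000): virt_addr == 0x1000,
 *     // length == 0x2000, the union of the two virtual ranges.
 */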

void memory_mapping_list_add_merge_sorted(MemoryMappingList *list,
                                          hwaddr phys_addr,
                                          hwaddr virt_addr,
                                          ram_addr_t length)
{
    MemoryMapping *memory_mapping, *last_mapping;

    if (QTAILQ_EMPTY(&list->head)) {
        create_new_memory_mapping(list, phys_addr, virt_addr, length);
        return;
    }

    last_mapping = list->last_mapping;
    if (last_mapping) {
        if (mapping_contiguous(last_mapping, phys_addr, virt_addr)) {
            last_mapping->length += length;
            return;
        }
    }

    QTAILQ_FOREACH(memory_mapping, &list->head, next) {
        if (mapping_contiguous(memory_mapping, phys_addr, virt_addr)) {
            memory_mapping->length += length;
            list->last_mapping = memory_mapping;
            return;
        }

        if (phys_addr + length < memory_mapping->phys_addr) {
            /* create a new region before memory_mapping */
            break;
        }

        if (mapping_have_same_region(memory_mapping, phys_addr, length)) {
            if (mapping_conflict(memory_mapping, phys_addr, virt_addr)) {
                continue;
            }

            /* merge this region into memory_mapping */
            mapping_merge(memory_mapping, virt_addr, length);
            list->last_mapping = memory_mapping;
            return;
        }
    }

    /* this region cannot be merged into any existing memory mapping */
    create_new_memory_mapping(list, phys_addr, virt_addr, length);
}
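
/*
 * Usage sketch (hypothetical addresses): a physically and virtually
 * contiguous range is folded into the previous mapping, while a range with
 * a different virtual offset stays separate:
 *
 *     MemoryMappingList list;
 *
 *     memory_mapping_list_init(&list);
 *     memory_mapping_list_add_merge_sorted(&list, 0x1000, 0x7f001000, 0x1000);
 *     memory_mapping_list_add_merge_sorted(&list, 0x2000, 0x7f002000, 0x1000);
 *     // list.num == 1: the second call extended the first mapping to
 *     // length 0x2000 via the last_mapping fast path.
 *     memory_mapping_list_add_merge_sorted(&list, 0x3000, 0x40003000, 0x1000);
 *     // list.num == 2: physically adjacent, but the virtual offset differs.
 */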

void memory_mapping_list_free(MemoryMappingList *list)
{
    MemoryMapping *p, *q;

    QTAILQ_FOREACH_SAFE(p, &list->head, next, q) {
        QTAILQ_REMOVE(&list->head, p, next);
        g_free(p);
    }

    list->num = 0;
    list->last_mapping = NULL;
}

void memory_mapping_list_init(MemoryMappingList *list)
{
    list->num = 0;
    list->last_mapping = NULL;
    QTAILQ_INIT(&list->head);
}
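
/*
 * A typical consumer (sketch, continuing the hypothetical list above) walks
 * the mappings in phys_addr order and then releases them:
 *
 *     MemoryMapping *m;
 *
 *     QTAILQ_FOREACH(m, &list.head, next) {
 *         // mappings are sorted by m->phys_addr
 *     }
 *     memory_mapping_list_free(&list);  // frees entries, resets num
 */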

void guest_phys_blocks_free(GuestPhysBlockList *list)
{
    GuestPhysBlock *p, *q;

    QTAILQ_FOREACH_SAFE(p, &list->head, next, q) {
        QTAILQ_REMOVE(&list->head, p, next);
        memory_region_unref(p->mr);
        g_free(p);
    }
    list->num = 0;
}

void guest_phys_blocks_init(GuestPhysBlockList *list)
{
    list->num = 0;
    QTAILQ_INIT(&list->head);
}

typedef struct GuestPhysListener {
    GuestPhysBlockList *list;
    MemoryListener listener;
} GuestPhysListener;

static void guest_phys_block_add_section(GuestPhysListener *g,
                                         MemoryRegionSection *section)
{
    const hwaddr target_start = section->offset_within_address_space;
    const hwaddr target_end = target_start + int128_get64(section->size);
    uint8_t *host_addr = memory_region_get_ram_ptr(section->mr) +
                         section->offset_within_region;
    GuestPhysBlock *predecessor = NULL;

    /* find continuity in guest physical address space */
    if (!QTAILQ_EMPTY(&g->list->head)) {
        hwaddr predecessor_size;

        predecessor = QTAILQ_LAST(&g->list->head);
        predecessor_size = predecessor->target_end - predecessor->target_start;

        /* the memory API guarantees monotonically increasing traversal */
        g_assert(predecessor->target_end <= target_start);

        /* we want continuity in both guest-physical and host-virtual memory */
        if (predecessor->target_end < target_start ||
            predecessor->host_addr + predecessor_size != host_addr ||
            predecessor->mr != section->mr) {
            predecessor = NULL;
        }
    }

    if (predecessor == NULL) {
        /* isolated mapping, allocate it and add it to the list */
        GuestPhysBlock *block = g_malloc0(sizeof *block);

        block->target_start = target_start;
        block->target_end = target_end;
        block->host_addr = host_addr;
        block->mr = section->mr;
        memory_region_ref(section->mr);

        QTAILQ_INSERT_TAIL(&g->list->head, block, next);
        ++g->list->num;
    } else {
        /* expand predecessor until @target_end; predecessor's start doesn't
         * change
         */
        predecessor->target_end = target_end;
    }

#ifdef DEBUG_GUEST_PHYS_REGION_ADD
    fprintf(stderr, "%s: target_start=" HWADDR_FMT_plx " target_end="
            HWADDR_FMT_plx ": %s (count: %u)\n", __func__, target_start,
            target_end, predecessor ? "joined" : "added", g->list->num);
#endif
}

static int guest_phys_ram_populate_cb(MemoryRegionSection *section,
                                      void *opaque)
{
    GuestPhysListener *g = opaque;

    guest_phys_block_add_section(g, section);
    return 0;
}

static void guest_phys_blocks_region_add(MemoryListener *listener,
                                         MemoryRegionSection *section)
{
    GuestPhysListener *g = container_of(listener, GuestPhysListener, listener);

    /* we only care about RAM */
    if (!memory_region_is_ram(section->mr) ||
        memory_region_is_ram_device(section->mr) ||
        memory_region_is_nonvolatile(section->mr)) {
        return;
    }

    /* for special sparse regions, only add populated parts */
    if (memory_region_has_ram_discard_manager(section->mr)) {
        RamDiscardManager *rdm;

        rdm = memory_region_get_ram_discard_manager(section->mr);
        ram_discard_manager_replay_populated(rdm, section,
                                             guest_phys_ram_populate_cb, g);
        return;
    }

    guest_phys_block_add_section(g, section);
}

void guest_phys_blocks_append(GuestPhysBlockList *list)
{
    GuestPhysListener g = { 0 };

    g.list = list;
    g.listener.region_add = &guest_phys_blocks_region_add;
    memory_listener_register(&g.listener, &address_space_memory);
    memory_listener_unregister(&g.listener);
}
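
/*
 * Usage sketch (hypothetical caller): snapshot the populated guest RAM
 * layout, inspect it, then drop the MemoryRegion references taken on each
 * block:
 *
 *     GuestPhysBlockList blocks;
 *     GuestPhysBlock *b;
 *
 *     guest_phys_blocks_init(&blocks);
 *     guest_phys_blocks_append(&blocks);
 *     QTAILQ_FOREACH(b, &blocks.head, next) {
 *         // [b->target_start, b->target_end) is guest-physical RAM backed
 *         // by host memory starting at b->host_addr
 *     }
 *     guest_phys_blocks_free(&blocks);
 */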

static CPUState *find_paging_enabled_cpu(CPUState *start_cpu)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu_paging_enabled(cpu)) {
            return cpu;
        }
    }

    return NULL;
}

void qemu_get_guest_memory_mapping(MemoryMappingList *list,
                                   const GuestPhysBlockList *guest_phys_blocks,
                                   Error **errp)
{
    CPUState *cpu, *first_paging_enabled_cpu;
    GuestPhysBlock *block;
    ram_addr_t offset, length;

    first_paging_enabled_cpu = find_paging_enabled_cpu(first_cpu);
    if (first_paging_enabled_cpu) {
        for (cpu = first_paging_enabled_cpu; cpu != NULL;
             cpu = CPU_NEXT(cpu)) {
            Error *err = NULL;
            cpu_get_memory_mapping(cpu, list, &err);
            if (err) {
                error_propagate(errp, err);
                return;
            }
        }
        return;
    }

    /*
     * If the guest doesn't use paging, the virtual address is equal to the
     * physical address.
     */
    QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) {
        offset = block->target_start;
        length = block->target_end - block->target_start;
        create_new_memory_mapping(list, offset, offset, length);
    }
}
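
/*
 * Caller sketch (assumed error handling; the dump code is the main consumer
 * of this interface):
 *
 *     Error *local_err = NULL;
 *     MemoryMappingList list;
 *
 *     memory_mapping_list_init(&list);
 *     qemu_get_guest_memory_mapping(&list, &blocks, &local_err);
 *     if (local_err) {
 *         error_report_err(local_err);
 *     }
 */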

void qemu_get_guest_simple_memory_mapping(MemoryMappingList *list,
                                   const GuestPhysBlockList *guest_phys_blocks)
{
    GuestPhysBlock *block;

    QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) {
        create_new_memory_mapping(list, block->target_start, 0,
                                  block->target_end - block->target_start);
    }
}

void memory_mapping_filter(MemoryMappingList *list, int64_t begin,
                           int64_t length)
{
    MemoryMapping *cur, *next;

    QTAILQ_FOREACH_SAFE(cur, &list->head, next, next) {
        if (cur->phys_addr >= begin + length ||
            cur->phys_addr + cur->length <= begin) {
            QTAILQ_REMOVE(&list->head, cur, next);
            g_free(cur);
            list->num--;
            continue;
        }

        if (cur->phys_addr < begin) {
            cur->length -= begin - cur->phys_addr;
            if (cur->virt_addr) {
                cur->virt_addr += begin - cur->phys_addr;
            }
            cur->phys_addr = begin;
        }

        if (cur->phys_addr + cur->length > begin + length) {
            cur->length -= cur->phys_addr + cur->length - begin - length;
        }
    }
}
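
/*
 * Filter example (hypothetical values): keeping only the window
 * [0x2000, 0x3000) clips a mapping covering [0x1000, 0x4000) on both sides:
 *
 *     memory_mapping_filter(&list, 0x2000, 0x1000);
 *     // the mapping [0x1000, 0x4000) becomes [0x2000, 0x3000); mappings
 *     // entirely outside the window are removed and list.num drops.
 */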