memory_mapping.c

/*
 * QEMU memory mapping
 *
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * Authors:
 *     Wen Congyang <wency@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "cpu.h"
#include "cpu-all.h"
#include "memory_mapping.h"
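
/*
 * Insert the mapping into the list so that the list stays sorted by
 * ascending physical address.
 */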
static void memory_mapping_list_add_mapping_sorted(MemoryMappingList *list,
                                                   MemoryMapping *mapping)
{
    MemoryMapping *p;

    QTAILQ_FOREACH(p, &list->head, next) {
        if (p->phys_addr >= mapping->phys_addr) {
            QTAILQ_INSERT_BEFORE(p, mapping, next);
            return;
        }
    }
    QTAILQ_INSERT_TAIL(&list->head, mapping, next);
}
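
/*
 * Allocate a new MemoryMapping for [phys_addr, phys_addr + length) mapped at
 * virt_addr, remember it as the list's most recently touched mapping and
 * insert it in sorted order.
 */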
static void create_new_memory_mapping(MemoryMappingList *list,
                                      target_phys_addr_t phys_addr,
                                      target_phys_addr_t virt_addr,
                                      ram_addr_t length)
{
    MemoryMapping *memory_mapping;

    memory_mapping = g_malloc(sizeof(MemoryMapping));
    memory_mapping->phys_addr = phys_addr;
    memory_mapping->virt_addr = virt_addr;
    memory_mapping->length = length;
    list->last_mapping = memory_mapping;
    list->num++;
    memory_mapping_list_add_mapping_sorted(list, memory_mapping);
}
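
/*
 * Return true if the region starting at (phys_addr, virt_addr) begins exactly
 * where map ends, both physically and virtually.
 */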
static inline bool mapping_contiguous(MemoryMapping *map,
                                      target_phys_addr_t phys_addr,
                                      target_phys_addr_t virt_addr)
{
    return phys_addr == map->phys_addr + map->length &&
           virt_addr == map->virt_addr + map->length;
}

/*
 * Do [map->phys_addr, map->phys_addr + map->length) and
 * [phys_addr, phys_addr + length) intersect?
 */
static inline bool mapping_have_same_region(MemoryMapping *map,
                                            target_phys_addr_t phys_addr,
                                            ram_addr_t length)
{
    return !(phys_addr + length < map->phys_addr ||
             phys_addr >= map->phys_addr + map->length);
}

/*
 * [map->phys_addr, map->phys_addr + map->length) and
 * [phys_addr, phys_addr + length) intersect; return true if the virtual
 * addresses in the intersection do not match, i.e. the mappings conflict.
 */
static inline bool mapping_conflict(MemoryMapping *map,
                                    target_phys_addr_t phys_addr,
                                    target_phys_addr_t virt_addr)
{
    return virt_addr - map->virt_addr != phys_addr - map->phys_addr;
}

/*
 * [map->virt_addr, map->virt_addr + map->length) and
 * [virt_addr, virt_addr + length) intersect, and the physical addresses in
 * the intersection match; extend map to cover the union of the two ranges.
 */
static inline void mapping_merge(MemoryMapping *map,
                                 target_phys_addr_t virt_addr,
                                 ram_addr_t length)
{
    if (virt_addr < map->virt_addr) {
        map->length += map->virt_addr - virt_addr;
        map->virt_addr = virt_addr;
    }

    if ((virt_addr + length) >
        (map->virt_addr + map->length)) {
        map->length = virt_addr + length - map->virt_addr;
    }
}
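
/*
 * Add the mapping [phys_addr, phys_addr + length) at virt_addr to the list.
 * The new region is merged into an existing entry when it is contiguous with
 * it, or overlaps it with matching virtual addresses; otherwise a new entry
 * is created at the sorted position.  list->last_mapping is checked first as
 * a fast path for contiguous additions.
 */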
void memory_mapping_list_add_merge_sorted(MemoryMappingList *list,
                                          target_phys_addr_t phys_addr,
                                          target_phys_addr_t virt_addr,
                                          ram_addr_t length)
{
    MemoryMapping *memory_mapping, *last_mapping;

    if (QTAILQ_EMPTY(&list->head)) {
        create_new_memory_mapping(list, phys_addr, virt_addr, length);
        return;
    }

    last_mapping = list->last_mapping;
    if (last_mapping) {
        if (mapping_contiguous(last_mapping, phys_addr, virt_addr)) {
            last_mapping->length += length;
            return;
        }
    }

    QTAILQ_FOREACH(memory_mapping, &list->head, next) {
        if (mapping_contiguous(memory_mapping, phys_addr, virt_addr)) {
            memory_mapping->length += length;
            list->last_mapping = memory_mapping;
            return;
        }

        if (phys_addr + length < memory_mapping->phys_addr) {
            /* create a new region before memory_mapping */
            break;
        }

        if (mapping_have_same_region(memory_mapping, phys_addr, length)) {
            if (mapping_conflict(memory_mapping, phys_addr, virt_addr)) {
                continue;
            }

            /* merge this region into memory_mapping */
            mapping_merge(memory_mapping, virt_addr, length);
            list->last_mapping = memory_mapping;
            return;
        }
    }

    /* this region cannot be merged into any existing memory mapping */
    create_new_memory_mapping(list, phys_addr, virt_addr, length);
}
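
/* Remove and free every mapping in the list and reset its bookkeeping. */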
void memory_mapping_list_free(MemoryMappingList *list)
{
    MemoryMapping *p, *q;

    QTAILQ_FOREACH_SAFE(p, &list->head, next, q) {
        QTAILQ_REMOVE(&list->head, p, next);
        g_free(p);
    }

    list->num = 0;
    list->last_mapping = NULL;
}
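
/* Prepare an empty list before its first use. */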
void memory_mapping_list_init(MemoryMappingList *list)
{
    list->num = 0;
    list->last_mapping = NULL;
    QTAILQ_INIT(&list->head);
}
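
/*
 * Return the first CPU, starting from start_cpu, that currently has paging
 * enabled, or NULL if none does.
 */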
static CPUArchState *find_paging_enabled_cpu(CPUArchState *start_cpu)
{
    CPUArchState *env;

    for (env = start_cpu; env != NULL; env = env->next_cpu) {
        if (cpu_paging_enabled(env)) {
            return env;
        }
    }

    return NULL;
}
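
/*
 * Fill the list with the guest's virtual-to-physical mappings.  If at least
 * one CPU has paging enabled, the mappings are collected from the CPUs
 * starting with the first paging-enabled one; otherwise every RAM block is
 * added as an identity mapping.  Returns 0 on success, -1 if a CPU's
 * mappings cannot be fetched.
 */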
int qemu_get_guest_memory_mapping(MemoryMappingList *list)
{
    CPUArchState *env, *first_paging_enabled_cpu;
    RAMBlock *block;
    ram_addr_t offset, length;
    int ret;

    first_paging_enabled_cpu = find_paging_enabled_cpu(first_cpu);
    if (first_paging_enabled_cpu) {
        for (env = first_paging_enabled_cpu; env != NULL; env = env->next_cpu) {
            ret = cpu_get_memory_mapping(list, env);
            if (ret < 0) {
                return -1;
            }
        }
        return 0;
    }

    /*
     * If the guest doesn't use paging, the virtual address is equal to the
     * physical address.
     */
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = block->offset;
        length = block->length;
        create_new_memory_mapping(list, offset, offset, length);
    }

    return 0;
}
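
/*
 * Fill the list with one mapping per RAM block, using the physical layout
 * only (virt_addr is left as 0).
 */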
void qemu_get_guest_simple_memory_mapping(MemoryMappingList *list)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        create_new_memory_mapping(list, block->offset, 0, block->length);
    }
}
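
/*
 * Restrict the list to the physical window [begin, begin + length): drop
 * mappings that fall entirely outside it and clip mappings that cross its
 * edges.
 */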
void memory_mapping_filter(MemoryMappingList *list, int64_t begin,
                           int64_t length)
{
    MemoryMapping *cur, *next;

    QTAILQ_FOREACH_SAFE(cur, &list->head, next, next) {
        if (cur->phys_addr >= begin + length ||
            cur->phys_addr + cur->length <= begin) {
            /* entirely outside the window: unlink and free the entry */
            QTAILQ_REMOVE(&list->head, cur, next);
            g_free(cur);
            list->num--;
            continue;
        }

        if (cur->phys_addr < begin) {
            cur->length -= begin - cur->phys_addr;
            if (cur->virt_addr) {
                cur->virt_addr += begin - cur->phys_addr;
            }
            cur->phys_addr = begin;
        }

        if (cur->phys_addr + cur->length > begin + length) {
            cur->length -= cur->phys_addr + cur->length - begin - length;
        }
    }
}
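
/*
 * A minimal usage sketch (comment only, nothing here is compiled): it assumes
 * an enclosing caller that owns the list and supplies the begin/length window;
 * every call below is defined in this file or in memory_mapping.h.
 *
 *     MemoryMappingList list;
 *     MemoryMapping *m;
 *
 *     memory_mapping_list_init(&list);
 *     if (qemu_get_guest_memory_mapping(&list) < 0) {
 *         memory_mapping_list_free(&list);
 *         return -1;
 *     }
 *     memory_mapping_filter(&list, begin, length);  // optionally narrow the list
 *
 *     QTAILQ_FOREACH(m, &list.head, next) {
 *         // m->phys_addr, m->virt_addr and m->length describe one region
 *     }
 *     memory_mapping_list_free(&list);
 */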