hv-balloon-our_range_memslots.c

/*
 * QEMU Hyper-V Dynamic Memory Protocol driver
 *
 * Copyright (C) 2020-2023 Oracle and/or its affiliates.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "hv-balloon-internal.h"
#include "hv-balloon-our_range_memslots.h"
#include "trace.h"

/* OurRange */
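/*
 * Initialize an OurRange spanning @count pages starting at page @start,
 * with empty removed-page trees and nothing marked as added yet.
 */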
static void our_range_init(OurRange *our_range, uint64_t start, uint64_t count)
{
    assert(count <= UINT64_MAX - start);
    our_range->range.start = start;
    our_range->range.count = count;

    hvb_page_range_tree_init(&our_range->removed_guest);
    hvb_page_range_tree_init(&our_range->removed_both);

    /* mark the whole range as unused, but available for potential use */
    our_range->added = 0;
    our_range->unusable_tail = 0;
}

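/* Free the removed-page range trees of @our_range. */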
static void our_range_destroy(OurRange *our_range)
{
    hvb_page_range_tree_destroy(&our_range->removed_guest);
    hvb_page_range_tree_destroy(&our_range->removed_both);
}

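/*
 * Reset both removed-page range trees to an empty state by destroying
 * and re-initializing them.
 */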
void hvb_our_range_clear_removed_trees(OurRange *our_range)
{
    hvb_page_range_tree_destroy(&our_range->removed_guest);
    hvb_page_range_tree_destroy(&our_range->removed_both);
    hvb_page_range_tree_init(&our_range->removed_guest);
    hvb_page_range_tree_init(&our_range->removed_both);
}

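/*
 * Record that @additional_size more pages of this range have been added.
 * The asserts ensure the counter never overflows and that the added part
 * plus the unusable tail stays within the range.
 */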
void hvb_our_range_mark_added(OurRange *our_range, uint64_t additional_size)
{
    assert(additional_size <= UINT64_MAX - our_range->added);
    our_range->added += additional_size;
    assert(our_range->added <= UINT64_MAX - our_range->unusable_tail);
    assert(our_range->added + our_range->unusable_tail <=
           our_range->range.count);
}

/* OurRangeMemslots */
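/*
 * Create the alias MemoryRegions backing the individual memslots: each one
 * aliases a slice of @backing_mr that is memslots->size_each bytes long
 * (the last one possibly shorter), owned by @memslot_owner.  The aliases
 * are only initialized here, not mapped yet.
 */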
static void our_range_memslots_init_slots(OurRangeMemslots *our_range,
                                          MemoryRegion *backing_mr,
                                          Object *memslot_owner)
{
    OurRangeMemslotsSlots *memslots = &our_range->slots;
    unsigned int idx;
    uint64_t memslot_offset;

    assert(memslots->count > 0);
    memslots->slots = g_new0(MemoryRegion, memslots->count);

    /* Initialize our memslots, but don't map them yet. */
    assert(memslots->size_each > 0);
    for (idx = 0, memslot_offset = 0; idx < memslots->count;
         idx++, memslot_offset += memslots->size_each) {
        uint64_t memslot_size;
        g_autofree char *name = NULL;

        /* The size of the last memslot might be smaller. */
        if (idx == memslots->count - 1) {
            uint64_t region_size;

            assert(our_range->mr);
            region_size = memory_region_size(our_range->mr);
            memslot_size = region_size - memslot_offset;
        } else {
            memslot_size = memslots->size_each;
        }

        name = g_strdup_printf("memslot-%u", idx);
        memory_region_init_alias(&memslots->slots[idx], memslot_owner, name,
                                 backing_mr, memslot_offset, memslot_size);
        /*
         * We want to be able to atomically and efficiently activate/deactivate
         * individual memslots without affecting adjacent memslots in memory
         * notifiers.
         */
        memory_region_set_unmergeable(&memslots->slots[idx], true);
    }

    memslots->mapped_count = 0;
}

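/*
 * Allocate and initialize an OurRangeMemslots covering the page range that
 * starts at @addr and spans the size of @parent_mr, split into
 * @memslot_count alias memslots of @memslot_size bytes each.  No memslot
 * is mapped at this point.
 */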
OurRangeMemslots *hvb_our_range_memslots_new(uint64_t addr,
                                             MemoryRegion *parent_mr,
                                             MemoryRegion *backing_mr,
                                             Object *memslot_owner,
                                             unsigned int memslot_count,
                                             uint64_t memslot_size)
{
    OurRangeMemslots *our_range;

    our_range = g_malloc(sizeof(*our_range));
    our_range_init(&our_range->range,
                   addr / HV_BALLOON_PAGE_SIZE,
                   memory_region_size(parent_mr) / HV_BALLOON_PAGE_SIZE);
    our_range->slots.size_each = memslot_size;
    our_range->slots.count = memslot_count;
    our_range->mr = parent_mr;
    our_range_memslots_init_slots(our_range, backing_mr, memslot_owner);

    return our_range;
}

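/*
 * Unmap all currently mapped memslots from the parent region in a single
 * memory transaction, then unparent and free the alias MemoryRegions.
 */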
static void our_range_memslots_free_memslots(OurRangeMemslots *our_range)
{
    OurRangeMemslotsSlots *memslots = &our_range->slots;
    unsigned int idx;
    uint64_t offset;

    memory_region_transaction_begin();
    for (idx = 0, offset = 0; idx < memslots->mapped_count;
         idx++, offset += memslots->size_each) {
        trace_hv_balloon_unmap_slot(idx, memslots->count, offset);
        assert(memory_region_is_mapped(&memslots->slots[idx]));
        memory_region_del_subregion(our_range->mr, &memslots->slots[idx]);
    }
    memory_region_transaction_commit();

    for (idx = 0; idx < memslots->count; idx++) {
        object_unparent(OBJECT(&memslots->slots[idx]));
    }

    g_clear_pointer(&our_range->slots.slots, g_free);
}

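/*
 * Tear down @our_range: discard all pages of the backing RAM block, unmap
 * and free the memslots, destroy the range bookkeeping and free the
 * structure itself.
 */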
void hvb_our_range_memslots_free(OurRangeMemslots *our_range)
{
    OurRangeMemslotsSlots *memslots = &our_range->slots;
    MemoryRegion *hostmem_mr;
    RAMBlock *rb;

    assert(our_range->slots.count > 0);
    assert(our_range->slots.slots);

    hostmem_mr = memslots->slots[0].alias;
    rb = hostmem_mr->ram_block;
    ram_block_discard_range(rb, 0, qemu_ram_get_used_length(rb));

    our_range_memslots_free_memslots(our_range);
    our_range_destroy(&our_range->range);
    g_free(our_range);
}

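/*
 * Ensure that enough memslots are mapped to cover the already-added part
 * of the range plus @additional_map_size more pages.  Memslots that are
 * already mapped are left untouched; any additional ones needed are mapped
 * in a single memory transaction.
 */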
void hvb_our_range_memslots_ensure_mapped_additional(OurRangeMemslots *our_range,
                                                     uint64_t additional_map_size)
{
    OurRangeMemslotsSlots *memslots = &our_range->slots;
    uint64_t total_map_size;
    unsigned int idx;
    uint64_t offset;

    total_map_size = (our_range->range.added + additional_map_size) *
        HV_BALLOON_PAGE_SIZE;
    idx = memslots->mapped_count;
    assert(memslots->size_each > 0);
    offset = idx * memslots->size_each;

    /*
     * Activate all memslots covered by the newly added region in a single
     * transaction.
     */
    memory_region_transaction_begin();
    for ( ; idx < memslots->count;
          idx++, offset += memslots->size_each) {
        /*
         * If this memslot starts at or beyond the end of the range to map,
         * so does every subsequent one.
         */
        if (offset >= total_map_size) {
            break;
        }

        /*
         * Instead of enabling/disabling memslots, we add/remove them.  This
         * should make address space updates faster, because we don't have to
         * loop over many disabled subregions.
         */
        trace_hv_balloon_map_slot(idx, memslots->count, offset);
        assert(!memory_region_is_mapped(&memslots->slots[idx]));
        memory_region_add_subregion(our_range->mr, offset,
                                    &memslots->slots[idx]);
        memslots->mapped_count++;
    }
    memory_region_transaction_commit();
}