hv-balloon-our_range_memslots.h

/*
 * QEMU Hyper-V Dynamic Memory Protocol driver
 *
 * Copyright (C) 2020-2023 Oracle and/or its affiliates.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef HW_HYPERV_HV_BALLOON_OUR_RANGE_MEMSLOTS_H
#define HW_HYPERV_HV_BALLOON_OUR_RANGE_MEMSLOTS_H

#include "exec/memory.h"
#include "qom/object.h"
#include "hv-balloon-page_range_tree.h"

/* OurRange */
#define OUR_RANGE(ptr) ((OurRange *)(ptr))

/* "our range" means the memory range owned by this driver (for hot-adding) */
typedef struct OurRange {
    PageRange range;

    /* How many pages were hot-added to the guest */
    uint64_t added;

    /* Pages at the end not currently usable */
    uint64_t unusable_tail;

    /* Memory removed from the guest */
    PageRangeTree removed_guest, removed_both;
} OurRange;

static inline uint64_t our_range_get_remaining_start(OurRange *our_range)
{
    return our_range->range.start + our_range->added;
}

static inline uint64_t our_range_get_remaining_size(OurRange *our_range)
{
    return our_range->range.count - our_range->added - our_range->unusable_tail;
}
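
/*
 * Illustrative sketch, not part of the original header: a caller that
 * wants to hot-add the next chunk could combine the two getters above
 * and record success via hvb_our_range_mark_added() below.  'req_pages'
 * is a hypothetical variable standing in for the guest's pending request:
 *
 *     uint64_t start = our_range_get_remaining_start(our_range);
 *     uint64_t count = MIN(req_pages, our_range_get_remaining_size(our_range));
 *     // ... hot-add 'count' pages starting at page 'start', then ...
 *     hvb_our_range_mark_added(our_range, count);
 */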

void hvb_our_range_mark_added(OurRange *our_range, uint64_t additional_size);

static inline void our_range_mark_remaining_unusable(OurRange *our_range)
{
    our_range->unusable_tail = our_range->range.count - our_range->added;
}

static inline PageRangeTree our_range_get_removed_tree(OurRange *our_range,
                                                       bool both)
{
    if (both) {
        return our_range->removed_both;
    } else {
        return our_range->removed_guest;
    }
}

static inline bool our_range_is_removed_tree_empty(OurRange *our_range,
                                                   bool both)
{
    if (both) {
        return page_range_tree_is_empty(our_range->removed_both);
    } else {
        return page_range_tree_is_empty(our_range->removed_guest);
    }
}
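
/*
 * As the field names suggest, 'removed_guest' appears to track pages
 * removed only from the guest, while 'removed_both' tracks pages removed
 * from both the guest and the host backing.  A hedged sketch of draining
 * one of the trees, assuming an hvb_page_range_tree_pop() helper from
 * hv-balloon-page_range_tree.h:
 *
 *     PageRangeTree tree = our_range_get_removed_tree(our_range, both);
 *     PageRange range;
 *     while (hvb_page_range_tree_pop(tree, &range, UINT64_MAX)) {
 *         // ... reuse 'range' for a subsequent hot-add ...
 *     }
 */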

void hvb_our_range_clear_removed_trees(OurRange *our_range);

/* OurRangeMemslots */
typedef struct OurRangeMemslotsSlots {
    /* Nominal size of each memslot (the last one might be smaller) */
    uint64_t size_each;

    /* Slots array and its element count */
    MemoryRegion *slots;
    unsigned int count;

    /* How many slots are currently mapped */
    unsigned int mapped_count;
} OurRangeMemslotsSlots;

typedef struct OurRangeMemslots {
    OurRange range;

    /* Memslots covering our range */
    OurRangeMemslotsSlots slots;

    MemoryRegion *mr;
} OurRangeMemslots;

OurRangeMemslots *hvb_our_range_memslots_new(uint64_t addr,
                                             MemoryRegion *parent_mr,
                                             MemoryRegion *backing_mr,
                                             Object *memslot_owner,
                                             unsigned int memslot_count,
                                             uint64_t memslot_size);
void hvb_our_range_memslots_free(OurRangeMemslots *our_range);
G_DEFINE_AUTOPTR_CLEANUP_FUNC(OurRangeMemslots, hvb_our_range_memslots_free)
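
/*
 * The cleanup function registered above lets callers hold the returned
 * object in a g_autoptr() variable so it is freed automatically on scope
 * exit.  A hedged sketch (the argument values here are made up):
 *
 *     g_autoptr(OurRangeMemslots) r =
 *         hvb_our_range_memslots_new(addr, parent_mr, backing_mr,
 *                                    owner, 4, memslot_size);
 *     if (!r) {
 *         return;
 *     }
 *     // ... keep it alive past this scope with g_steal_pointer(&r) ...
 */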

void hvb_our_range_memslots_ensure_mapped_additional(OurRangeMemslots *our_range,
                                                     uint64_t additional_map_size);
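
/*
 * An inference from the declarations above, not a statement about the
 * actual implementation: mapping presumably proceeds in whole-memslot
 * units, so covering 'additional_map_size' more bytes means raising
 * slots.mapped_count to roughly
 *
 *     DIV_ROUND_UP(mapped_bytes + additional_map_size, slots.size_each)
 *
 * where 'mapped_bytes' is a hypothetical name for the already-mapped size.
 */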

#endif