/*
 * QEMU Hyper-V Dynamic Memory Protocol driver
 *
 * Copyright (C) 2020-2023 Oracle and/or its affiliates.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef HW_HYPERV_HV_BALLOON_OUR_RANGE_MEMSLOTS_H
#define HW_HYPERV_HV_BALLOON_OUR_RANGE_MEMSLOTS_H

#include "exec/memory.h"
#include "qom/object.h"
#include "hv-balloon-page_range_tree.h"

/* OurRange */
#define OUR_RANGE(ptr) ((OurRange *)(ptr))

/* "our range" means the memory range owned by this driver (for hot-adding) */
typedef struct OurRange {
    PageRange range;

    /* How many pages were hot-added to the guest */
    uint64_t added;

    /* Pages at the end not currently usable */
    uint64_t unusable_tail;

    /*
     * Memory removed from the guest: "removed_guest" tracks pages removed
     * only from the guest, while "removed_both" tracks pages removed from
     * both the guest and the host.
     */
    PageRangeTree removed_guest, removed_both;
} OurRange;
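
/*
 * Illustrative layout sketch (derived from the accounting fields above,
 * not an additional definition from this API): an OurRange of "count"
 * pages is partitioned into three consecutive areas:
 *
 *   [start, start + added)                 pages already hot-added
 *   [start + added, end - unusable_tail)   pages still available
 *   [end - unusable_tail, end)             currently unusable tail
 *
 * where end = start + count.  The helpers below return the start and the
 * size of the middle ("remaining") area.
 */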

static inline uint64_t our_range_get_remaining_start(OurRange *our_range)
{
    return our_range->range.start + our_range->added;
}

static inline uint64_t our_range_get_remaining_size(OurRange *our_range)
{
    return our_range->range.count - our_range->added - our_range->unusable_tail;
}

void hvb_our_range_mark_added(OurRange *our_range, uint64_t additional_size);

static inline void our_range_mark_remaining_unusable(OurRange *our_range)
{
    our_range->unusable_tail = our_range->range.count - our_range->added;
}

static inline PageRangeTree our_range_get_removed_tree(OurRange *our_range,
                                                       bool both)
{
    if (both) {
        return our_range->removed_both;
    } else {
        return our_range->removed_guest;
    }
}

static inline bool our_range_is_removed_tree_empty(OurRange *our_range,
                                                   bool both)
{
    if (both) {
        return page_range_tree_is_empty(our_range->removed_both);
    } else {
        return page_range_tree_is_empty(our_range->removed_guest);
    }
}
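
/*
 * Hypothetical usage sketch (the surrounding logic is a placeholder, not
 * part of this API): a caller flushing pending removal work could check
 * both trees before dropping the range:
 *
 *     if (!our_range_is_removed_tree_empty(our_range, false) ||
 *         !our_range_is_removed_tree_empty(our_range, true)) {
 *         ... removal ranges are still tracked, keep the range alive ...
 *     }
 */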

void hvb_our_range_clear_removed_trees(OurRange *our_range);

/* OurRangeMemslots */
typedef struct OurRangeMemslotsSlots {
    /* Nominal size of each memslot (the last one might be smaller) */
    uint64_t size_each;

    /* Slots array and its element count */
    MemoryRegion *slots;
    unsigned int count;

    /* How many slots are currently mapped */
    unsigned int mapped_count;
} OurRangeMemslotsSlots;
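
/*
 * Example geometry (illustrative numbers, not taken from this code):
 * covering a 6 GiB range with a 4 GiB size_each yields count == 2, where
 * the first slot spans the full 4 GiB and the last one only the remaining
 * 2 GiB.
 */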

typedef struct OurRangeMemslots {
    OurRange range;

    /* Memslots covering our range */
    OurRangeMemslotsSlots slots;

    MemoryRegion *mr;
} OurRangeMemslots;

OurRangeMemslots *hvb_our_range_memslots_new(uint64_t addr,
                                             MemoryRegion *parent_mr,
                                             MemoryRegion *backing_mr,
                                             Object *memslot_owner,
                                             unsigned int memslot_count,
                                             uint64_t memslot_size);
void hvb_our_range_memslots_free(OurRangeMemslots *our_range);

G_DEFINE_AUTOPTR_CLEANUP_FUNC(OurRangeMemslots, hvb_our_range_memslots_free)
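
/*
 * Hypothetical usage sketch enabled by the cleanup function registered
 * above; "dev", the memory regions and the size arguments below are
 * placeholders, not part of this API:
 *
 *     g_autoptr(OurRangeMemslots) r =
 *         hvb_our_range_memslots_new(addr, parent_mr, backing_mr,
 *                                    OBJECT(dev), memslot_count,
 *                                    memslot_size);
 *     ...
 *     hvb_our_range_memslots_free(r) then runs automatically when r
 *     goes out of scope.
 */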

void hvb_our_range_memslots_ensure_mapped_additional(OurRangeMemslots *our_range,
                                                     uint64_t additional_map_size);

#endif