1 /*
2  * QEMU Hyper-V Dynamic Memory Protocol driver
3  *
4  * Copyright (C) 2020-2023 Oracle and/or its affiliates.
5  *
6  * This work is licensed under the terms of the GNU GPL, version 2 or later.
7  * See the COPYING file in the top-level directory.
8  */
9 
10 #ifndef HW_HYPERV_HV_BALLOON_OUR_RANGE_MEMSLOTS_H
11 #define HW_HYPERV_HV_BALLOON_OUR_RANGE_MEMSLOTS_H
12 
13 #include "qemu/osdep.h"
14 
15 #include "exec/memory.h"
16 #include "qom/object.h"
17 #include "hv-balloon-page_range_tree.h"
18 
19 /* OurRange */
20 #define OUR_RANGE(ptr) ((OurRange *)(ptr))
21 
/* "our range" means the memory range owned by this driver (for hot-adding) */
typedef struct OurRange {
    /* The whole page range owned by the driver (start page + page count) */
    PageRange range;

    /* How many pages were hot-added to the guest */
    uint64_t added;

    /* Pages at the end not currently usable */
    uint64_t unusable_tail;

    /*
     * Memory removed from the guest.
     * NOTE(review): removed_guest appears to track pages released by the
     * guest only, while removed_both presumably also covers the backing
     * side — confirm against the tree accessors' callers.
     */
    PageRangeTree removed_guest, removed_both;
} OurRange;
35 
36 static inline uint64_t our_range_get_remaining_start(OurRange *our_range)
37 {
38     return our_range->range.start + our_range->added;
39 }
40 
41 static inline uint64_t our_range_get_remaining_size(OurRange *our_range)
42 {
43     return our_range->range.count - our_range->added - our_range->unusable_tail;
44 }
45 
46 void hvb_our_range_mark_added(OurRange *our_range, uint64_t additional_size);
47 
48 static inline void our_range_mark_remaining_unusable(OurRange *our_range)
49 {
50     our_range->unusable_tail = our_range->range.count - our_range->added;
51 }
52 
53 static inline PageRangeTree our_range_get_removed_tree(OurRange *our_range,
54                                                        bool both)
55 {
56     if (both) {
57         return our_range->removed_both;
58     } else {
59         return our_range->removed_guest;
60     }
61 }
62 
63 static inline bool our_range_is_removed_tree_empty(OurRange *our_range,
64                                                    bool both)
65 {
66     if (both) {
67         return page_range_tree_is_empty(our_range->removed_both);
68     } else {
69         return page_range_tree_is_empty(our_range->removed_guest);
70     }
71 }
72 
73 void hvb_our_range_clear_removed_trees(OurRange *our_range);
74 
75 /* OurRangeMemslots */
/* Bookkeeping for the array of memslots backing an OurRangeMemslots */
typedef struct OurRangeMemslotsSlots {
    /* Nominal size of each memslot (the last one might be smaller) */
    uint64_t size_each;

    /* Slots array and its element count */
    MemoryRegion *slots;
    unsigned int count;

    /* How many slots are currently mapped */
    unsigned int mapped_count;
} OurRangeMemslotsSlots;
87 
/* An OurRange whose backing memory is split into multiple memslots */
typedef struct OurRangeMemslots {
    /*
     * Embedded base.  Must remain the first member so that the
     * OUR_RANGE() cast macro is valid on an OurRangeMemslots pointer.
     */
    OurRange range;

    /* Memslots covering our range */
    OurRangeMemslotsSlots slots;

    /* NOTE(review): presumably the parent region the slots map into —
     * confirm against hvb_our_range_memslots_new() */
    MemoryRegion *mr;
} OurRangeMemslots;
96 
/*
 * Allocate a new OurRangeMemslots starting at guest address @addr,
 * carving @memslot_count memslots of (nominally) @memslot_size out of
 * @backing_mr under @parent_mr, owned by @memslot_owner.
 * NOTE(review): parameter semantics inferred from names — confirm in the
 * implementation file.  Caller owns the result; free it with
 * hvb_our_range_memslots_free() (or via g_autoptr, see below).
 */
OurRangeMemslots *hvb_our_range_memslots_new(uint64_t addr,
                                             MemoryRegion *parent_mr,
                                             MemoryRegion *backing_mr,
                                             Object *memslot_owner,
                                             unsigned int memslot_count,
                                             uint64_t memslot_size);
void hvb_our_range_memslots_free(OurRangeMemslots *our_range);

/* Enable g_autoptr(OurRangeMemslots) for automatic cleanup on scope exit */
G_DEFINE_AUTOPTR_CLEANUP_FUNC(OurRangeMemslots, hvb_our_range_memslots_free)

/*
 * Ensure that at least @additional_map_size more bytes/pages of the range
 * are covered by mapped memslots.  NOTE(review): units not visible here —
 * confirm in the implementation file.
 */
void hvb_our_range_memslots_ensure_mapped_additional(OurRangeMemslots *our_range,
                                                     uint64_t additional_map_size);
109 
110 #endif
111