/*
 * QEMU Hyper-V Dynamic Memory Protocol driver
 *
 * Copyright (C) 2020-2023 Oracle and/or its affiliates.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "hv-balloon-internal.h"
#include "hv-balloon-our_range_memslots.h"
#include "trace.h"

/* OurRange */
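/*
 * Initialize tracking of a range @count pages long starting at page @start:
 * nothing added yet, no unusable tail, and empty "removed" page range trees.
 */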
static void our_range_init(OurRange *our_range, uint64_t start, uint64_t count)
{
    assert(count <= UINT64_MAX - start);
    our_range->range.start = start;
    our_range->range.count = count;

    hvb_page_range_tree_init(&our_range->removed_guest);
    hvb_page_range_tree_init(&our_range->removed_both);

    /* mark the whole range as unused but available for potential use */
    our_range->added = 0;
    our_range->unusable_tail = 0;
}

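/* Free the "removed" page range trees belonging to @our_range. */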
static void our_range_destroy(OurRange *our_range)
{
    hvb_page_range_tree_destroy(&our_range->removed_guest);
    hvb_page_range_tree_destroy(&our_range->removed_both);
}

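/*
 * Reset both "removed" page range trees to an empty state by destroying and
 * re-initializing them.
 */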
void hvb_our_range_clear_removed_trees(OurRange *our_range)
{
    hvb_page_range_tree_destroy(&our_range->removed_guest);
    hvb_page_range_tree_destroy(&our_range->removed_both);
    hvb_page_range_tree_init(&our_range->removed_guest);
    hvb_page_range_tree_init(&our_range->removed_both);
}

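/*
 * Bump the count of pages added from this range by @additional_size,
 * asserting that the added part plus the unusable tail still fits within
 * the range.
 */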
void hvb_our_range_mark_added(OurRange *our_range, uint64_t additional_size)
{
    assert(additional_size <= UINT64_MAX - our_range->added);

    our_range->added += additional_size;

    assert(our_range->added <= UINT64_MAX - our_range->unusable_tail);
    assert(our_range->added + our_range->unusable_tail <=
           our_range->range.count);
}

/* OurRangeMemslots */
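/*
 * Create one alias MemoryRegion per memslot on top of @backing_mr.  Each
 * alias is memslots->size_each bytes long except possibly the last one,
 * which covers whatever is left of the parent region.  The aliases are
 * marked unmergeable and are not mapped here; mapping happens later via
 * hvb_our_range_memslots_ensure_mapped_additional().
 */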
static void our_range_memslots_init_slots(OurRangeMemslots *our_range,
                                          MemoryRegion *backing_mr,
                                          Object *memslot_owner)
{
    OurRangeMemslotsSlots *memslots = &our_range->slots;
    unsigned int idx;
    uint64_t memslot_offset;

    assert(memslots->count > 0);
    memslots->slots = g_new0(MemoryRegion, memslots->count);

    /* Initialize our memslots, but don't map them yet. */
    assert(memslots->size_each > 0);
    for (idx = 0, memslot_offset = 0; idx < memslots->count;
         idx++, memslot_offset += memslots->size_each) {
        uint64_t memslot_size;
        g_autofree char *name = NULL;

        /* The size of the last memslot might be smaller. */
        if (idx == memslots->count - 1) {
            uint64_t region_size;

            assert(our_range->mr);
            region_size = memory_region_size(our_range->mr);
            memslot_size = region_size - memslot_offset;
        } else {
            memslot_size = memslots->size_each;
        }

        name = g_strdup_printf("memslot-%u", idx);
        memory_region_init_alias(&memslots->slots[idx], memslot_owner, name,
                                 backing_mr, memslot_offset, memslot_size);
        /*
         * We want to be able to atomically and efficiently activate/deactivate
         * individual memslots without affecting adjacent memslots in memory
         * notifiers.
         */
        memory_region_set_unmergeable(&memslots->slots[idx], true);
    }

    memslots->mapped_count = 0;
}

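/*
 * Allocate and initialize an OurRangeMemslots covering @parent_mr: the
 * tracked page range is derived from @addr and the parent region size, and
 * @memslot_count alias memslots of up to @memslot_size bytes each are
 * created on top of @backing_mr, owned by @memslot_owner.  All memslots are
 * left unmapped.
 */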
OurRangeMemslots *hvb_our_range_memslots_new(uint64_t addr,
                                             MemoryRegion *parent_mr,
                                             MemoryRegion *backing_mr,
                                             Object *memslot_owner,
                                             unsigned int memslot_count,
                                             uint64_t memslot_size)
{
    OurRangeMemslots *our_range;

    our_range = g_malloc(sizeof(*our_range));
    our_range_init(&our_range->range,
                   addr / HV_BALLOON_PAGE_SIZE,
                   memory_region_size(parent_mr) / HV_BALLOON_PAGE_SIZE);
    our_range->slots.size_each = memslot_size;
    our_range->slots.count = memslot_count;
    our_range->mr = parent_mr;
    our_range_memslots_init_slots(our_range, backing_mr, memslot_owner);

    return our_range;
}

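/*
 * Unmap every currently mapped memslot from the parent region in a single
 * transaction, then unparent the memslot objects and free the array.
 */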
static void our_range_memslots_free_memslots(OurRangeMemslots *our_range)
{
    OurRangeMemslotsSlots *memslots = &our_range->slots;
    unsigned int idx;
    uint64_t offset;

    memory_region_transaction_begin();
    for (idx = 0, offset = 0; idx < memslots->mapped_count;
         idx++, offset += memslots->size_each) {
        trace_hv_balloon_unmap_slot(idx, memslots->count, offset);
        assert(memory_region_is_mapped(&memslots->slots[idx]));
        memory_region_del_subregion(our_range->mr, &memslots->slots[idx]);
    }
    memory_region_transaction_commit();

    for (idx = 0; idx < memslots->count; idx++) {
        object_unparent(OBJECT(&memslots->slots[idx]));
    }

    g_clear_pointer(&our_range->slots.slots, g_free);
}

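/*
 * Discard the RAM backing this range, unmap and destroy its memslots, then
 * free the tracking structure itself.
 */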
void hvb_our_range_memslots_free(OurRangeMemslots *our_range)
{
    OurRangeMemslotsSlots *memslots = &our_range->slots;
    MemoryRegion *hostmem_mr;
    RAMBlock *rb;

    assert(our_range->slots.count > 0);
    assert(our_range->slots.slots);

    hostmem_mr = memslots->slots[0].alias;
    rb = hostmem_mr->ram_block;
    ram_block_discard_range(rb, 0, qemu_ram_get_used_length(rb));

    our_range_memslots_free_memslots(our_range);
    our_range_destroy(&our_range->range);
    g_free(our_range);
}

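/*
 * Map enough memslots into the parent region to cover the already-added
 * part of the range plus @additional_map_size more pages.  Memslots that
 * are already mapped are left untouched; any newly needed ones are added in
 * one memory region transaction.
 */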
void hvb_our_range_memslots_ensure_mapped_additional(OurRangeMemslots *our_range,
                                                     uint64_t additional_map_size)
{
    OurRangeMemslotsSlots *memslots = &our_range->slots;
    uint64_t total_map_size;
    unsigned int idx;
    uint64_t offset;

    total_map_size = (our_range->range.added + additional_map_size) *
        HV_BALLOON_PAGE_SIZE;
    idx = memslots->mapped_count;
    assert(memslots->size_each > 0);
    offset = idx * memslots->size_each;

    /*
     * Activate all memslots covered by the newly added region in a single
     * transaction.
     */
    memory_region_transaction_begin();
    for ( ; idx < memslots->count;
          idx++, offset += memslots->size_each) {
        /*
         * If this memslot starts at or beyond the end of the range to map,
         * so does every subsequent one.
         */
        if (offset >= total_map_size) {
            break;
        }

        /*
         * Instead of enabling/disabling memslots, we add/remove them. This
         * should make address space updates faster, because we don't have to
         * loop over many disabled subregions.
         */
        trace_hv_balloon_map_slot(idx, memslots->count, offset);
        assert(!memory_region_is_mapped(&memslots->slots[idx]));
        memory_region_add_subregion(our_range->mr, offset,
                                    &memslots->slots[idx]);

        memslots->mapped_count++;
    }
    memory_region_transaction_commit();
}