/*
 * QEMU Hyper-V Dynamic Memory Protocol driver
 *
 * Copyright (C) 2020-2023 Oracle and/or its affiliates.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "hv-balloon-internal.h"
#include "hv-balloon-our_range_memslots.h"
#include "trace.h"

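/*
 * An OurRange tracks a guest memory range in HV_BALLOON_PAGE_SIZE units,
 * along with two trees of page sub-ranges that have been removed from it
 * (removed_guest and removed_both).
 *
 * An OurRangeMemslots additionally covers that range with a series of
 * memslots (aliases into a backing memory region) which are mapped into
 * the parent region on demand, as pages are added to the guest.
 */
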
/* OurRange */
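/*
 * Initialize a range covering @count pages starting at page @start, with
 * no pages added to the guest yet and both removed-page trees empty.
 */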
static void our_range_init(OurRange *our_range, uint64_t start, uint64_t count)
{
    assert(count <= UINT64_MAX - start);
    our_range->range.start = start;
    our_range->range.count = count;

    hvb_page_range_tree_init(&our_range->removed_guest);
    hvb_page_range_tree_init(&our_range->removed_both);

    /* mark the whole range as unused but available for potential use */
    our_range->added = 0;
    our_range->unusable_tail = 0;
}

static void our_range_destroy(OurRange *our_range)
{
    hvb_page_range_tree_destroy(&our_range->removed_guest);
    hvb_page_range_tree_destroy(&our_range->removed_both);
}

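/*
 * Reset both removed-page trees to an empty state by destroying and
 * re-initializing them.
 */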
void hvb_our_range_clear_removed_trees(OurRange *our_range)
{
    hvb_page_range_tree_destroy(&our_range->removed_guest);
    hvb_page_range_tree_destroy(&our_range->removed_both);
    hvb_page_range_tree_init(&our_range->removed_guest);
    hvb_page_range_tree_init(&our_range->removed_both);
}

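/*
 * Account for @additional_size more pages having been added to the guest,
 * asserting that the sum of added pages and the unusable tail still fits
 * within the range.
 */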
void hvb_our_range_mark_added(OurRange *our_range, uint64_t additional_size)
{
    assert(additional_size <= UINT64_MAX - our_range->added);

    our_range->added += additional_size;

    assert(our_range->added <= UINT64_MAX - our_range->unusable_tail);
    assert(our_range->added + our_range->unusable_tail <=
           our_range->range.count);
}

/* OurRangeMemslots */
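/*
 * Create the memory region aliases ("memslots") into @backing_mr that
 * together cover the parent memory region.  The aliases are only created
 * here, not mapped: mapping happens on demand, in
 * hvb_our_range_memslots_ensure_mapped_additional().
 */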
static void our_range_memslots_init_slots(OurRangeMemslots *our_range,
                                          MemoryRegion *backing_mr,
                                          Object *memslot_owner)
{
    OurRangeMemslotsSlots *memslots = &our_range->slots;
    unsigned int idx;
    uint64_t memslot_offset;

    assert(memslots->count > 0);
    memslots->slots = g_new0(MemoryRegion, memslots->count);

    /* Initialize our memslots, but don't map them yet. */
    assert(memslots->size_each > 0);
    for (idx = 0, memslot_offset = 0; idx < memslots->count;
         idx++, memslot_offset += memslots->size_each) {
        uint64_t memslot_size;
        g_autofree char *name = NULL;

        /* The size of the last memslot might be smaller. */
        if (idx == memslots->count - 1) {
            uint64_t region_size;

            assert(our_range->mr);
            region_size = memory_region_size(our_range->mr);
            memslot_size = region_size - memslot_offset;
        } else {
            memslot_size = memslots->size_each;
        }

        name = g_strdup_printf("memslot-%u", idx);
        memory_region_init_alias(&memslots->slots[idx], memslot_owner, name,
                                 backing_mr, memslot_offset, memslot_size);
        /*
         * We want to be able to atomically and efficiently activate/deactivate
         * individual memslots without affecting adjacent memslots in memory
         * notifiers.
         */
        memory_region_set_unmergeable(&memslots->slots[idx], true);
    }

    memslots->mapped_count = 0;
}

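/*
 * Allocate a new OurRangeMemslots representing @parent_mr at guest address
 * @addr, covered by @memslot_count memslots of @memslot_size bytes each,
 * all aliasing @backing_mr.  No memslot is mapped on return.
 */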
OurRangeMemslots *hvb_our_range_memslots_new(uint64_t addr,
                                             MemoryRegion *parent_mr,
                                             MemoryRegion *backing_mr,
                                             Object *memslot_owner,
                                             unsigned int memslot_count,
                                             uint64_t memslot_size)
{
    OurRangeMemslots *our_range;

    our_range = g_malloc(sizeof(*our_range));
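    /* The range start and count are tracked in HV_BALLOON_PAGE_SIZE units. */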
    our_range_init(&our_range->range,
                   addr / HV_BALLOON_PAGE_SIZE,
                   memory_region_size(parent_mr) / HV_BALLOON_PAGE_SIZE);
    our_range->slots.size_each = memslot_size;
    our_range->slots.count = memslot_count;
    our_range->mr = parent_mr;
    our_range_memslots_init_slots(our_range, backing_mr, memslot_owner);

    return our_range;
}

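/*
 * Unmap all currently mapped memslots in a single transaction, then
 * destroy the alias regions and free the slots array.
 */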
static void our_range_memslots_free_memslots(OurRangeMemslots *our_range)
{
    OurRangeMemslotsSlots *memslots = &our_range->slots;
    unsigned int idx;
    uint64_t offset;

    memory_region_transaction_begin();
    for (idx = 0, offset = 0; idx < memslots->mapped_count;
         idx++, offset += memslots->size_each) {
        trace_hv_balloon_unmap_slot(idx, memslots->count, offset);
        assert(memory_region_is_mapped(&memslots->slots[idx]));
        memory_region_del_subregion(our_range->mr, &memslots->slots[idx]);
    }
    memory_region_transaction_commit();

    for (idx = 0; idx < memslots->count; idx++) {
        object_unparent(OBJECT(&memslots->slots[idx]));
    }

    g_clear_pointer(&our_range->slots.slots, g_free);
}

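/*
 * Free an OurRangeMemslots: discard the backing RAM block so the host can
 * reclaim its pages, then tear down the memslots and the range itself.
 */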
void hvb_our_range_memslots_free(OurRangeMemslots *our_range)
{
    OurRangeMemslotsSlots *memslots = &our_range->slots;
    MemoryRegion *hostmem_mr;
    RAMBlock *rb;

    assert(our_range->slots.count > 0);
    assert(our_range->slots.slots);

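    /*
     * Every slot aliases the same backing memory region, so slot 0 is
     * enough to reach its RAMBlock and discard the whole used length.
     */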
    hostmem_mr = memslots->slots[0].alias;
    rb = hostmem_mr->ram_block;
    ram_block_discard_range(rb, 0, qemu_ram_get_used_length(rb));

    our_range_memslots_free_memslots(our_range);
    our_range_destroy(&our_range->range);
    g_free(our_range);
}

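/*
 * Ensure that every memslot needed to cover the already-added pages plus
 * @additional_map_size further pages is mapped into the parent memory
 * region.  Memslots that are already mapped are left untouched.
 */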
void hvb_our_range_memslots_ensure_mapped_additional(OurRangeMemslots *our_range,
                                                     uint64_t additional_map_size)
{
    OurRangeMemslotsSlots *memslots = &our_range->slots;
    uint64_t total_map_size;
    unsigned int idx;
    uint64_t offset;

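    /*
     * Convert the target amount of added pages into a byte size and resume
     * mapping at the first memslot that is not mapped yet.
     */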
    total_map_size = (our_range->range.added + additional_map_size) *
        HV_BALLOON_PAGE_SIZE;
    idx = memslots->mapped_count;
    assert(memslots->size_each > 0);
    offset = idx * memslots->size_each;

    /*
     * Activate all memslots covered by the newly added region in a single
     * transaction.
     */
    memory_region_transaction_begin();
    for ( ; idx < memslots->count;
          idx++, offset += memslots->size_each) {
        /*
         * If this memslot starts at or beyond the end of the range to map,
         * so does every subsequent one.
         */
        if (offset >= total_map_size) {
            break;
        }

        /*
         * Instead of enabling/disabling memslots, we add/remove them. This
         * should make address space updates faster, because we don't have to
         * loop over many disabled subregions.
         */
        trace_hv_balloon_map_slot(idx, memslots->count, offset);
        assert(!memory_region_is_mapped(&memslots->slots[idx]));
        memory_region_add_subregion(our_range->mr, offset,
                                    &memslots->slots[idx]);

        memslots->mapped_count++;
    }
    memory_region_transaction_commit();
}