/*
 * Memory Device Interface
 *
 * Copyright ProfitBricks GmbH 2012
 * Copyright (C) 2014 Red Hat Inc
 * Copyright (c) 2018 Red Hat Inc
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "hw/mem/memory-device.h"
#include "qapi/error.h"
#include "hw/boards.h"
#include "qemu/range.h"
#include "hw/virtio/vhost.h"
#include "sysemu/kvm.h"
#include "exec/address-spaces.h"
#include "trace.h"

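/*
 * GCompareFunc that orders memory devices by the guest physical address
 * they are mapped at, in ascending order.
 */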
static gint memory_device_addr_sort(gconstpointer a, gconstpointer b)
{
    const MemoryDeviceState *md_a = MEMORY_DEVICE(a);
    const MemoryDeviceState *md_b = MEMORY_DEVICE(b);
    const MemoryDeviceClass *mdc_a = MEMORY_DEVICE_GET_CLASS(a);
    const MemoryDeviceClass *mdc_b = MEMORY_DEVICE_GET_CLASS(b);
    const uint64_t addr_a = mdc_a->get_addr(md_a);
    const uint64_t addr_b = mdc_b->get_addr(md_b);

    if (addr_a > addr_b) {
        return 1;
    } else if (addr_a < addr_b) {
        return -1;
    }
    return 0;
}

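/*
 * object_child_foreach() callback: recursively collect all realized memory
 * devices below @obj into the GSList passed via @opaque, sorted by address.
 */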
static int memory_device_build_list(Object *obj, void *opaque)
{
    GSList **list = opaque;

    if (object_dynamic_cast(obj, TYPE_MEMORY_DEVICE)) {
        DeviceState *dev = DEVICE(obj);
        if (dev->realized) { /* only realized memory devices matter */
            *list = g_slist_insert_sorted(*list, dev, memory_device_addr_sort);
        }
    }

    object_child_foreach(obj, memory_device_build_list, opaque);
    return 0;
}

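/*
 * Check whether the region @mr can still be plugged: we need a free memory
 * slot for KVM and vhost, and the new region must not exceed the total
 * space reserved for memory devices (maxram_size - ram_size).
 */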
static void memory_device_check_addable(MachineState *ms, MemoryRegion *mr,
                                        Error **errp)
{
    const uint64_t used_region_size = ms->device_memory->used_region_size;
    const uint64_t size = memory_region_size(mr);

    /* we will need a new memory slot for kvm and vhost */
    if (kvm_enabled() && !kvm_has_free_slot(ms)) {
        error_setg(errp, "hypervisor has no free memory slots left");
        return;
    }
    if (!vhost_has_free_slot()) {
        error_setg(errp, "a used vhost backend has no free memory slots left");
        return;
    }

    /* will we exceed the total amount of memory specified */
    if (used_region_size + size < used_region_size ||
        used_region_size + size > ms->maxram_size - ms->ram_size) {
        error_setg(errp, "not enough space, currently 0x%" PRIx64
                   " in use of total space for memory devices 0x" RAM_ADDR_FMT,
                   used_region_size, ms->maxram_size - ms->ram_size);
        return;
    }
}

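/*
 * Find a free address for a memory device of @size bytes within the device
 * memory region. If @hint is given, only that address is considered;
 * otherwise, the lowest sufficiently aligned gap between already plugged
 * devices is used. Sets @errp if no suitable address can be found.
 */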
static uint64_t memory_device_get_free_addr(MachineState *ms,
                                            const uint64_t *hint,
                                            uint64_t align, uint64_t size,
                                            Error **errp)
{
    GSList *list = NULL, *item;
    Range as, new = range_empty;

    range_init_nofail(&as, ms->device_memory->base,
                      memory_region_size(&ms->device_memory->mr));

    /* start of address space indicates the maximum alignment we expect */
    if (!QEMU_IS_ALIGNED(range_lob(&as), align)) {
        warn_report("the alignment (0x%" PRIx64 ") exceeds the expected"
                    " maximum alignment, memory will get fragmented and not"
                    " all 'maxmem' might be usable for memory devices.",
                    align);
    }

    if (hint && !QEMU_IS_ALIGNED(*hint, align)) {
        error_setg(errp, "address must be aligned to 0x%" PRIx64 " bytes",
                   align);
        return 0;
    }

    if (!QEMU_IS_ALIGNED(size, align)) {
        error_setg(errp, "backend memory size must be multiple of 0x%"
                   PRIx64, align);
        return 0;
    }

    if (hint) {
        if (range_init(&new, *hint, size) || !range_contains_range(&as, &new)) {
            error_setg(errp, "can't add memory device [0x%" PRIx64 ":0x%" PRIx64
                       "], usable range for memory devices [0x%" PRIx64 ":0x%"
                       PRIx64 "]", *hint, size, range_lob(&as),
                       range_size(&as));
            return 0;
        }
    } else {
        if (range_init(&new, QEMU_ALIGN_UP(range_lob(&as), align), size)) {
            error_setg(errp, "can't add memory device, device too big");
            return 0;
        }
    }

    /* find address range that will fit new memory device */
    object_child_foreach(OBJECT(ms), memory_device_build_list, &list);
    for (item = list; item; item = g_slist_next(item)) {
        const MemoryDeviceState *md = item->data;
        const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(OBJECT(md));
        uint64_t next_addr;
        Range tmp;

        range_init_nofail(&tmp, mdc->get_addr(md),
                          memory_device_get_region_size(md, &error_abort));

        if (range_overlaps_range(&tmp, &new)) {
            if (hint) {
                const DeviceState *d = DEVICE(md);
                error_setg(errp, "address range conflicts with memory device"
                           " id='%s'", d->id ? d->id : "(unnamed)");
                goto out;
            }

            next_addr = QEMU_ALIGN_UP(range_upb(&tmp) + 1, align);
            if (!next_addr || range_init(&new, next_addr, range_size(&new))) {
                range_make_empty(&new);
                break;
            }
        } else if (range_lob(&tmp) > range_upb(&new)) {
            break;
        }
    }

    if (!range_contains_range(&as, &new)) {
        error_setg(errp, "could not find position in guest address space for "
                   "memory device - memory fragmented due to alignments");
    }
out:
    g_slist_free(list);
    return range_lob(&new);
}

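/*
 * Build a MemoryDeviceInfoList describing every realized memory device,
 * ordered by the address it is mapped at, for use by the QMP layer.
 */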
MemoryDeviceInfoList *qmp_memory_device_list(void)
{
    GSList *devices = NULL, *item;
    MemoryDeviceInfoList *list = NULL, **tail = &list;

    object_child_foreach(qdev_get_machine(), memory_device_build_list,
                         &devices);

    for (item = devices; item; item = g_slist_next(item)) {
        const MemoryDeviceState *md = MEMORY_DEVICE(item->data);
        const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(item->data);
        MemoryDeviceInfo *info = g_new0(MemoryDeviceInfo, 1);

        mdc->fill_device_info(md, info);

        QAPI_LIST_APPEND(tail, info);
    }

    g_slist_free(devices);

    return list;
}

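/*
 * object_child_foreach() callback: recursively sum up the plugged size of
 * all realized memory devices below @obj into the uint64_t behind @opaque.
 */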
static int memory_device_plugged_size(Object *obj, void *opaque)
{
    uint64_t *size = opaque;

    if (object_dynamic_cast(obj, TYPE_MEMORY_DEVICE)) {
        const DeviceState *dev = DEVICE(obj);
        const MemoryDeviceState *md = MEMORY_DEVICE(obj);
        const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(obj);

        if (dev->realized) {
            *size += mdc->get_plugged_size(md, &error_abort);
        }
    }

    object_child_foreach(obj, memory_device_plugged_size, opaque);
    return 0;
}

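/* Return the total amount of memory currently plugged via memory devices. */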
uint64_t get_plugged_memory_size(void)
{
    uint64_t size = 0;

    memory_device_plugged_size(qdev_get_machine(), &size);

    return size;
}

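/*
 * Pre-plug handler: verify that the device can still be plugged (free KVM
 * and vhost slots, enough space left in the device memory region), pick a
 * suitable address (honoring a user-provided one, if any) and assign it to
 * the device.
 */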
void memory_device_pre_plug(MemoryDeviceState *md, MachineState *ms,
                            const uint64_t *legacy_align, Error **errp)
{
    const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(md);
    Error *local_err = NULL;
    uint64_t addr, align = 0;
    MemoryRegion *mr;

    if (!ms->device_memory) {
        error_setg(errp, "the configuration is not prepared for memory devices"
                         " (e.g., for memory hotplug), consider specifying the"
                         " maxmem option");
        return;
    }

    mr = mdc->get_memory_region(md, &local_err);
    if (local_err) {
        goto out;
    }

    memory_device_check_addable(ms, mr, &local_err);
    if (local_err) {
        goto out;
    }

    if (legacy_align) {
        align = *legacy_align;
    } else {
        if (mdc->get_min_alignment) {
            align = mdc->get_min_alignment(md);
        }
        align = MAX(align, memory_region_get_alignment(mr));
    }
    addr = mdc->get_addr(md);
    addr = memory_device_get_free_addr(ms, !addr ? NULL : &addr, align,
                                       memory_region_size(mr), &local_err);
    if (local_err) {
        goto out;
    }
    mdc->set_addr(md, addr, &local_err);
    if (!local_err) {
        trace_memory_device_pre_plug(DEVICE(md)->id ? DEVICE(md)->id : "",
                                     addr);
    }
out:
    error_propagate(errp, local_err);
}

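/*
 * Plug handler: map the device's memory region into the device memory
 * region at the address assigned during pre-plug and account for its size.
 */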
void memory_device_plug(MemoryDeviceState *md, MachineState *ms)
{
    const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(md);
    const uint64_t addr = mdc->get_addr(md);
    MemoryRegion *mr;

    /*
     * We expect that a previous call to memory_device_pre_plug() succeeded, so
     * it can't fail at this point.
     */
    mr = mdc->get_memory_region(md, &error_abort);
    g_assert(ms->device_memory);

    ms->device_memory->used_region_size += memory_region_size(mr);
    memory_region_add_subregion(&ms->device_memory->mr,
                                addr - ms->device_memory->base, mr);
    trace_memory_device_plug(DEVICE(md)->id ? DEVICE(md)->id : "", addr);
}

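/*
 * Unplug handler: unmap the device's memory region and release the space it
 * occupied in the device memory region.
 */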
void memory_device_unplug(MemoryDeviceState *md, MachineState *ms)
{
    const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(md);
    MemoryRegion *mr;

    /*
     * We expect that a previous call to memory_device_pre_plug() succeeded, so
     * it can't fail at this point.
     */
    mr = mdc->get_memory_region(md, &error_abort);
    g_assert(ms->device_memory);

    memory_region_del_subregion(&ms->device_memory->mr, mr);
    ms->device_memory->used_region_size -= memory_region_size(mr);
    trace_memory_device_unplug(DEVICE(md)->id ? DEVICE(md)->id : "",
                               mdc->get_addr(md));
}

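/*
 * Return the size of the device's memory region in bytes, or 0 with @errp
 * set if the region cannot be obtained.
 */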
uint64_t memory_device_get_region_size(const MemoryDeviceState *md,
                                       Error **errp)
{
    const MemoryDeviceClass *mdc = MEMORY_DEVICE_GET_CLASS(md);
    MemoryRegion *mr;

    /* dropping const here is fine as we don't touch the memory region */
    mr = mdc->get_memory_region((MemoryDeviceState *)md, errp);
    if (!mr) {
        return 0;
    }

    return memory_region_size(mr);
}

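/*
 * Reserve the [base, base + size) area of guest physical address space for
 * memory devices and map the container region into system memory.
 */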
void machine_memory_devices_init(MachineState *ms, hwaddr base, uint64_t size)
{
    g_assert(size);
    g_assert(!ms->device_memory);
    ms->device_memory = g_new0(DeviceMemoryState, 1);
    ms->device_memory->base = base;

    memory_region_init(&ms->device_memory->mr, OBJECT(ms), "device-memory",
                       size);
    memory_region_add_subregion(get_system_memory(), ms->device_memory->base,
                                &ms->device_memory->mr);
}

static const TypeInfo memory_device_info = {
    .name          = TYPE_MEMORY_DEVICE,
    .parent        = TYPE_INTERFACE,
    .class_size    = sizeof(MemoryDeviceClass),
};

static void memory_device_register_types(void)
{
    type_register_static(&memory_device_info);
}

type_init(memory_device_register_types)