/*
 * Memory Device Interface
 *
 * Copyright (c) 2018 Red Hat, Inc.
 *
 * Authors:
 *  David Hildenbrand <david@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef MEMORY_DEVICE_H
#define MEMORY_DEVICE_H

#include "hw/qdev-core.h"
#include "qemu/typedefs.h"
#include "qapi/qapi-types-machine.h"
#include "qom/object.h"

#define TYPE_MEMORY_DEVICE "memory-device"

typedef struct MemoryDeviceClass MemoryDeviceClass;
DECLARE_CLASS_CHECKERS(MemoryDeviceClass, MEMORY_DEVICE,
                       TYPE_MEMORY_DEVICE)
#define MEMORY_DEVICE(obj) \
     INTERFACE_CHECK(MemoryDeviceState, (obj), TYPE_MEMORY_DEVICE)

typedef struct MemoryDeviceState MemoryDeviceState;

/**
 * MemoryDeviceClass:
 *
 * All memory devices need to implement TYPE_MEMORY_DEVICE as an interface.
 *
 * A memory device is a device that owns a memory region which is
 * mapped into guest physical address space at a certain address. The
 * address in guest physical memory can either be specified explicitly
 * or get assigned automatically.
 *
 * Some memory devices might not own a memory region in certain device
 * configurations. Such devices can logically get (un)plugged; however,
 * empty memory devices are mostly ignored by the memory device code.
 *
 * Conceptually, memory devices only span one memory region. If multiple
 * successive memory regions are used, a covering memory region has to
 * be provided. Scattered memory regions are not supported for single
 * devices.
 *
 * The device memory region returned via @get_memory_region may either be a
 * single RAM memory region or a memory region container with subregions
 * that are RAM memory regions or aliases to RAM memory regions. Other
 * memory regions or subregions are not supported.
 *
 * If the device memory region returned via @get_memory_region is a
 * memory region container, subregions may be dynamically (un)mapped as long
 * as the number of memslots returned by @get_memslots() is not exceeded and
 * as long as all memory regions are of the same kind (e.g., all RAM or all
 * ROM).
 *
 * An implementation sketch follows the struct definition below.
 */
struct MemoryDeviceClass {
    /* private */
    InterfaceClass parent_class;

    /*
     * Return the address of the memory device in guest physical memory.
     *
     * Called when (un)plugging a memory device or when iterating over
     * all memory devices mapped into guest physical address space.
     *
     * If "0" is returned, no address has been specified by the user and
     * no address has been assigned to this memory device yet.
     */
    uint64_t (*get_addr)(const MemoryDeviceState *md);

    /*
     * Set the address of the memory device in guest physical memory.
     *
     * Called when plugging the memory device to configure the determined
     * address in guest physical memory.
     */
    void (*set_addr)(MemoryDeviceState *md, uint64_t addr, Error **errp);

    /*
     * Return the amount of memory provided by the memory device currently
     * usable ("plugged") by the VM.
     *
     * Called when calculating the total amount of RAM available to the
     * VM (e.g., to report memory stats to the user).
     *
     * This is helpful for devices that dynamically manage the amount of
     * memory accessible by the guest via the reserved memory region. For
     * most devices, this corresponds to the size of the memory region.
     */
    uint64_t (*get_plugged_size)(const MemoryDeviceState *md, Error **errp);

    /*
     * Return the memory region of the memory device. If the device is
     * completely empty, return NULL without setting an error.
     *
     * Called when (un)plugging the memory device, to (un)map the
     * memory region in guest physical memory, but also to detect the
     * required alignment during address assignment or when the size of the
     * memory region is required.
     */
    MemoryRegion *(*get_memory_region)(MemoryDeviceState *md, Error **errp);

    /*
     * Optional: Instruct the memory device to decide how many memory slots
     * it requires, not exceeding the given limit.
     *
     * Called exactly once when pre-plugging the memory device, before
     * querying the number of memslots via @get_memslots for the first time.
     */
    void (*decide_memslots)(MemoryDeviceState *md, unsigned int limit);

    /*
     * Optional for memory devices that require only a single memslot,
     * required for all other memory devices: Return the number of memslots
     * (distinct RAM memory regions in the device memory region) that are
     * required by the device.
     *
     * If this function is not implemented, the assumption is "1".
     *
     * Called when (un)plugging the memory device, to check whether the
     * requirements can be satisfied, and to do proper accounting. A sketch
     * of a multi-memslot device follows the memslot limit definitions below.
     */
    unsigned int (*get_memslots)(MemoryDeviceState *md);

    /*
     * Optional: Return the desired minimum alignment of the device in guest
     * physical address space. The final alignment is computed based on this
     * alignment and the alignment requirements of the memory region.
     *
     * Called when plugging the memory device to detect the required alignment
     * during address assignment.
     */
    uint64_t (*get_min_alignment)(const MemoryDeviceState *md);

    /*
     * Translate the memory device into #MemoryDeviceInfo.
     */
    void (*fill_device_info)(const MemoryDeviceState *md,
                             MemoryDeviceInfo *info);
};
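
/*
 * Example: a hypothetical "my-memdev" device could implement this interface
 * roughly as sketched below. The type name, state struct and callback names
 * are purely illustrative; see hw/mem/pc-dimm.c or hw/virtio/virtio-mem.c
 * for real implementations. For a device whose whole memory region is always
 * usable, @get_plugged_size can simply be wired up to
 * memory_device_get_region_size(), declared at the end of this header.
 *
 *   static void my_memdev_class_init(ObjectClass *oc, void *data)
 *   {
 *       MemoryDeviceClass *mdc = MEMORY_DEVICE_CLASS(oc);
 *
 *       mdc->get_addr = my_memdev_get_addr;
 *       mdc->set_addr = my_memdev_set_addr;
 *       mdc->get_plugged_size = my_memdev_get_plugged_size;
 *       mdc->get_memory_region = my_memdev_get_memory_region;
 *       mdc->fill_device_info = my_memdev_fill_device_info;
 *   }
 *
 *   static const TypeInfo my_memdev_info = {
 *       .name          = "my-memdev",
 *       .parent        = TYPE_DEVICE,
 *       .instance_size = sizeof(MyMemdevState),
 *       .class_init    = my_memdev_class_init,
 *       .interfaces    = (InterfaceInfo[]) {
 *           { TYPE_MEMORY_DEVICE },
 *           { }
 *       },
 *   };
 */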

/*
 * Traditionally, KVM/vhost in many setups supported 509 memslots, whereby
 * 253 memslots were "reserved" for boot memory and other devices (such
 * as PCI BARs, which can get mapped dynamically) and 256 memslots were
 * dedicated to DIMMs. These magic numbers worked reliably in the past.
 *
 * Further, using many memslots can negatively affect performance, so setting
 * the soft limit of memslots used by memory devices to the traditional
 * DIMM limit of 256 sounds reasonable.
 *
 * If we have fewer than 509 memslots, we will instruct memory devices that
 * support automatically deciding how many memslots to use to only use a
 * single one.
 *
 * Hotplugging vhost devices with at least 509 memslots is not expected to
 * cause problems, not even when devices automatically decide how many
 * memslots to use.
 */
#define MEMORY_DEVICES_SOFT_MEMSLOT_LIMIT 256
#define MEMORY_DEVICES_SAFE_MAX_MEMSLOTS 509
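
/*
 * Example: a hypothetical device that spreads its memory over multiple
 * memslots could implement @decide_memslots and @get_memslots roughly as
 * follows: the decision caps the wanted number of memslots at the given
 * limit and is remembered, and @get_memslots simply reports it.
 * "MyMemdevState", "MY_MEMDEV" and the field names are illustrative only.
 *
 *   static void my_memdev_decide_memslots(MemoryDeviceState *md,
 *                                         unsigned int limit)
 *   {
 *       MyMemdevState *s = MY_MEMDEV(md);
 *
 *       s->nb_memslots = MIN(s->wanted_memslots, limit);
 *   }
 *
 *   static unsigned int my_memdev_get_memslots(MemoryDeviceState *md)
 *   {
 *       return MY_MEMDEV(md)->nb_memslots;
 *   }
 */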

MemoryDeviceInfoList *qmp_memory_device_list(void);
uint64_t get_plugged_memory_size(void);
unsigned int memory_devices_get_reserved_memslots(void);
bool memory_devices_memslot_auto_decision_active(void);
void memory_device_pre_plug(MemoryDeviceState *md, MachineState *ms,
                            const uint64_t *legacy_align, Error **errp);
void memory_device_plug(MemoryDeviceState *md, MachineState *ms);
void memory_device_unplug(MemoryDeviceState *md, MachineState *ms);
uint64_t memory_device_get_region_size(const MemoryDeviceState *md,
                                       Error **errp);
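
/*
 * Example: a machine whose hotplug handler deals with memory devices
 * typically wires its (pre_)plug/unplug callbacks up to the helpers above
 * roughly as sketched below. This assumes the machine itself acts as the
 * hotplug handler (so MACHINE(hotplug_dev) from hw/boards.h is valid), the
 * function names are illustrative, error handling is trimmed, and passing
 * NULL for legacy_align assumes no legacy alignment handling is needed.
 *
 *   static void my_machine_memory_device_pre_plug(HotplugHandler *hotplug_dev,
 *                                                 DeviceState *dev,
 *                                                 Error **errp)
 *   {
 *       memory_device_pre_plug(MEMORY_DEVICE(dev), MACHINE(hotplug_dev),
 *                              NULL, errp);
 *   }
 *
 *   static void my_machine_memory_device_plug(HotplugHandler *hotplug_dev,
 *                                             DeviceState *dev, Error **errp)
 *   {
 *       memory_device_plug(MEMORY_DEVICE(dev), MACHINE(hotplug_dev));
 *   }
 *
 *   static void my_machine_memory_device_unplug(HotplugHandler *hotplug_dev,
 *                                               DeviceState *dev,
 *                                               Error **errp)
 *   {
 *       memory_device_unplug(MEMORY_DEVICE(dev), MACHINE(hotplug_dev));
 *   }
 */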

#endif