/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef MEMORY_H
#define MEMORY_H

#ifndef CONFIG_USER_ONLY

#include "exec/cpu-common.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
#include "exec/memop.h"
#include "exec/ramlist.h"
#include "qemu/bswap.h"
#include "qemu/queue.h"
#include "qemu/int128.h"
#include "qemu/range.h"
#include "qemu/notify.h"
#include "qom/object.h"
#include "qemu/rcu.h"

#define RAM_ADDR_INVALID (~(ram_addr_t)0)

#define MAX_PHYS_ADDR_SPACE_BITS 62
#define MAX_PHYS_ADDR (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)

#define TYPE_MEMORY_REGION "memory-region"
DECLARE_INSTANCE_CHECKER(MemoryRegion, MEMORY_REGION,
                         TYPE_MEMORY_REGION)

#define TYPE_IOMMU_MEMORY_REGION "iommu-memory-region"
typedef struct IOMMUMemoryRegionClass IOMMUMemoryRegionClass;
DECLARE_OBJ_CHECKERS(IOMMUMemoryRegion, IOMMUMemoryRegionClass,
                     IOMMU_MEMORY_REGION, TYPE_IOMMU_MEMORY_REGION)

#define TYPE_RAM_DISCARD_MANAGER "ram-discard-manager"
typedef struct RamDiscardManagerClass RamDiscardManagerClass;
typedef struct RamDiscardManager RamDiscardManager;
DECLARE_OBJ_CHECKERS(RamDiscardManager, RamDiscardManagerClass,
                     RAM_DISCARD_MANAGER, TYPE_RAM_DISCARD_MANAGER);

#ifdef CONFIG_FUZZ
void fuzz_dma_read_cb(size_t addr,
                      size_t len,
                      MemoryRegion *mr);
#else
static inline void fuzz_dma_read_cb(size_t addr,
                                    size_t len,
                                    MemoryRegion *mr)
{
    /* Do Nothing */
}
#endif

/* Possible bits for global_dirty_log_{start|stop} */

/* Dirty tracking enabled because migration is running */
#define GLOBAL_DIRTY_MIGRATION  (1U << 0)

/* Dirty tracking enabled because measuring dirty rate */
#define GLOBAL_DIRTY_DIRTY_RATE (1U << 1)

/* Dirty tracking enabled because dirty limit */
#define GLOBAL_DIRTY_LIMIT      (1U << 2)

#define GLOBAL_DIRTY_MASK  (0x7)

extern unsigned int global_dirty_tracking;

typedef struct MemoryRegionOps MemoryRegionOps;

struct ReservedRegion {
    Range range;
    unsigned type;
};

/**
 * struct MemoryRegionSection: describes a fragment of a #MemoryRegion
 *
 * @mr: the region, or %NULL if empty
 * @fv: the flat view of the address space the region is mapped in
 * @offset_within_region: the beginning of the section, relative to @mr's start
 * @size: the size of the section; will not exceed @mr's boundaries
 * @offset_within_address_space: the address of the first byte of the section
 *     relative to the region's address space
 * @readonly: writes to this section are ignored
 * @nonvolatile: this section is non-volatile
 * @unmergeable: this section should not get merged with adjacent sections
 */
struct MemoryRegionSection {
    Int128 size;
    MemoryRegion *mr;
    FlatView *fv;
    hwaddr offset_within_region;
    hwaddr offset_within_address_space;
    bool readonly;
    bool nonvolatile;
    bool unmergeable;
};

typedef struct IOMMUTLBEntry IOMMUTLBEntry;

/* See address_space_translate: bit 0 is read, bit 1 is write. */
typedef enum {
    IOMMU_NONE = 0,
    IOMMU_RO   = 1,
    IOMMU_WO   = 2,
    IOMMU_RW   = 3,
} IOMMUAccessFlags;

#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0))

struct IOMMUTLBEntry {
    AddressSpace    *target_as;
    hwaddr           iova;
    hwaddr           translated_addr;
    hwaddr           addr_mask;  /* 0xfff = 4k translation */
    IOMMUAccessFlags perm;
};
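
/*
 * Illustrative sketch (not part of the API): how a translation might be
 * returned as an IOMMUTLBEntry for a 4KiB read-write mapping. The
 * variables (iova, pa) are hypothetical.
 *
 *     IOMMUTLBEntry entry = {
 *         .target_as = &address_space_memory,
 *         .iova = iova & ~(hwaddr)0xfff,
 *         .translated_addr = pa & ~(hwaddr)0xfff,
 *         .addr_mask = 0xfff,                      // 4K translation
 *         .perm = IOMMU_ACCESS_FLAG(true, true),   // IOMMU_RW
 *     };
 */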

/*
 * Bitmap for different IOMMUNotifier capabilities. Each notifier can
 * register with one or multiple IOMMU Notifier capability bit(s).
 *
 * Normally there are two use cases for the notifiers:
 *
 * (1) When the device needs accurate synchronizations of the vIOMMU page
 *     tables, it needs to register with both MAP|UNMAP notifies (which
 *     is defined as IOMMU_NOTIFIER_IOTLB_EVENTS below).
 *
 *     Regarding accurate synchronization, it's when the notified
 *     device maintains a shadow page table and must be notified on each
 *     guest MAP (page table entry creation) and UNMAP (invalidation)
 *     events (e.g. VFIO). Both notifications must be accurate so that
 *     the shadow page table is fully in sync with the guest view.
 *
 * (2) When the device doesn't need accurate synchronizations of the
 *     vIOMMU page tables, it needs to register only with UNMAP or
 *     DEVIOTLB_UNMAP notifies.
 *
 *     It's when the device maintains a cache of IOMMU translations
 *     (IOTLB) and is able to fill that cache by requesting translations
 *     from the vIOMMU through a protocol similar to ATS (Address
 *     Translation Service).
 *
 *     Note that in this mode the vIOMMU will not maintain a shadowed
 *     page table for the address space, and the UNMAP messages can cover
 *     more than the pages that used to get mapped. The IOMMU notifiee
 *     should be able to take care of over-sized invalidations.
 */
typedef enum {
    IOMMU_NOTIFIER_NONE = 0,
    /* Notify cache invalidations */
    IOMMU_NOTIFIER_UNMAP = 0x1,
    /* Notify entry changes (newly created entries) */
    IOMMU_NOTIFIER_MAP = 0x2,
    /* Notify changes on device IOTLB entries */
    IOMMU_NOTIFIER_DEVIOTLB_UNMAP = 0x04,
} IOMMUNotifierFlag;

#define IOMMU_NOTIFIER_IOTLB_EVENTS (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)
#define IOMMU_NOTIFIER_DEVIOTLB_EVENTS IOMMU_NOTIFIER_DEVIOTLB_UNMAP
#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_IOTLB_EVENTS | \
                            IOMMU_NOTIFIER_DEVIOTLB_EVENTS)

struct IOMMUNotifier;
typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
                            IOMMUTLBEntry *data);

struct IOMMUNotifier {
    IOMMUNotify notify;
    IOMMUNotifierFlag notifier_flags;
    /* Notify for address space range start <= addr <= end */
    hwaddr start;
    hwaddr end;
    int iommu_idx;
    QLIST_ENTRY(IOMMUNotifier) node;
};
typedef struct IOMMUNotifier IOMMUNotifier;

typedef struct IOMMUTLBEvent {
    IOMMUNotifierFlag type;
    IOMMUTLBEntry entry;
} IOMMUTLBEvent;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * Resizing RAM while migrating can result in the migration being canceled.
 */
#define RAM_RESIZEABLE (1 << 2)

/* UFFDIO_ZEROPAGE is available on this RAMBlock to atomically
 * zero the page and wake waiting processes.
 * (Set during postcopy)
 */
#define RAM_UF_ZEROPAGE (1 << 3)

/* RAM can be migrated */
#define RAM_MIGRATABLE (1 << 4)
/* RAM is a persistent kind of memory */
#define RAM_PMEM (1 << 5)


/*
 * UFFDIO_WRITEPROTECT is used on this RAMBlock to
 * support 'write-tracking' migration type.
 * Implies ram_state->ram_wt_enabled.
 */
#define RAM_UF_WRITEPROTECT (1 << 6)

/*
 * RAM is mmap-ed with MAP_NORESERVE. When set, reserving swap space (or huge
 * pages if applicable) is skipped: will bail out if not supported. When not
 * set, the OS will do the reservation, if supported for the memory type.
 */
#define RAM_NORESERVE (1 << 7)

/* RAM that isn't accessible through normal means. */
#define RAM_PROTECTED (1 << 8)

/* RAM is an mmap-ed named file */
#define RAM_NAMED_FILE (1 << 9)

/* RAM is mmap-ed read-only */
#define RAM_READONLY (1 << 10)

/* RAM FD is opened read-only */
#define RAM_READONLY_FD (1 << 11)

/* RAM can be private that has kvm guest memfd backend */
#define RAM_GUEST_MEMFD (1 << 12)
static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
                                       IOMMUNotifierFlag flags,
                                       hwaddr start, hwaddr end,
                                       int iommu_idx)
{
    n->notify = fn;
    n->notifier_flags = flags;
    n->start = start;
    n->end = end;
    n->iommu_idx = iommu_idx;
}
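
/*
 * Usage sketch: registering a notifier for unmap events over the whole
 * IOVA range. memory_region_register_iommu_notifier() is declared
 * elsewhere in this header; the callback below is hypothetical.
 *
 *     static void my_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *data)
 *     {
 *         // data describes the invalidated range; data->perm is IOMMU_NONE
 *     }
 *
 *     IOMMUNotifier n;
 *     iommu_notifier_init(&n, my_unmap_notify, IOMMU_NOTIFIER_UNMAP,
 *                         0, HWADDR_MAX, 0);
 *     memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr), &n,
 *                                           &error_fatal);
 */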

/*
 * Memory region callbacks
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     hwaddr addr,
                     unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque,
                  hwaddr addr,
                  uint64_t data,
                  unsigned size);

    MemTxResult (*read_with_attrs)(void *opaque,
                                   hwaddr addr,
                                   uint64_t *data,
                                   unsigned size,
                                   MemTxAttrs attrs);
    MemTxResult (*write_with_attrs)(void *opaque,
                                    hwaddr addr,
                                    uint64_t data,
                                    unsigned size,
                                    MemTxAttrs attrs);

    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specify bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported. Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
        /*
         * If present, and returns #false, the transaction is not accepted
         * by the device (and results in machine dependent behaviour such
         * as a machine check exception).
         */
        bool (*accepts)(void *opaque, hwaddr addr,
                        unsigned size, bool is_write,
                        MemTxAttrs attrs);
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented. Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented. Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported. Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;
};
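
/*
 * A minimal sketch of a MemoryRegionOps for a single 32-bit MMIO register;
 * the DemoState type and the behaviour are illustrative only.
 *
 *     static uint64_t demo_read(void *opaque, hwaddr addr, unsigned size)
 *     {
 *         DemoState *s = opaque;
 *         return s->reg;
 *     }
 *
 *     static void demo_write(void *opaque, hwaddr addr, uint64_t data,
 *                            unsigned size)
 *     {
 *         DemoState *s = opaque;
 *         s->reg = data;
 *     }
 *
 *     static const MemoryRegionOps demo_ops = {
 *         .read = demo_read,
 *         .write = demo_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *         .valid = {
 *             .min_access_size = 4,    // only 32-bit accesses are valid
 *             .max_access_size = 4,
 *         },
 *     };
 */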

typedef struct MemoryRegionClass {
    /* private */
    ObjectClass parent_class;
} MemoryRegionClass;


enum IOMMUMemoryRegionAttr {
    IOMMU_ATTR_SPAPR_TCE_FD
};

/*
 * IOMMUMemoryRegionClass:
 *
 * All IOMMU implementations need to subclass TYPE_IOMMU_MEMORY_REGION
 * and provide an implementation of at least the @translate method here
 * to handle requests to the memory region. Other methods are optional.
 *
 * The IOMMU implementation must use the IOMMU notifier infrastructure
 * to report whenever mappings are changed, by calling
 * memory_region_notify_iommu() (or, if necessary, by calling
 * memory_region_notify_iommu_one() for each registered notifier).
 *
 * Conceptually an IOMMU provides a mapping from input address
 * to an output TLB entry. If the IOMMU is aware of memory transaction
 * attributes and the output TLB entry depends on the transaction
 * attributes, we represent this using IOMMU indexes. Each index
 * selects a particular translation table that the IOMMU has:
 *
 *   @attrs_to_index returns the IOMMU index for a set of transaction attributes
 *
 *   @translate takes an input address and an IOMMU index
 *
 * and the mapping returned can only depend on the input address and the
 * IOMMU index.
 *
 * Most IOMMUs don't care about the transaction attributes and support
 * only a single IOMMU index. A more complex IOMMU might have one index
 * for secure transactions and one for non-secure transactions.
 */
struct IOMMUMemoryRegionClass {
    /* private: */
    MemoryRegionClass parent_class;

    /* public: */
    /**
     * @translate:
     *
     * Return a TLB entry that contains a given address.
     *
     * The IOMMUAccessFlags indicated via @flag are optional and may
     * be specified as IOMMU_NONE to indicate that the caller needs
     * the full translation information for both reads and writes. If
     * the access flags are specified then the IOMMU implementation
     * may use this as an optimization, to stop doing a page table
     * walk as soon as it knows that the requested permissions are not
     * allowed. If IOMMU_NONE is passed then the IOMMU must do the
     * full page table walk and report the permissions in the returned
     * IOMMUTLBEntry. (Note that this implies that an IOMMU may not
     * return different mappings for reads and writes.)
     *
     * The returned information remains valid while the caller is
     * holding the big QEMU lock or is inside an RCU critical section;
     * if the caller wishes to cache the mapping beyond that it must
     * register an IOMMU notifier so it can invalidate its cached
     * information when the IOMMU mapping changes.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @hwaddr: address to be translated within the memory region
     *
     * @flag: requested access permission
     *
     * @iommu_idx: IOMMU index for the translation
     */
    IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
                               IOMMUAccessFlags flag, int iommu_idx);
    /**
     * @get_min_page_size:
     *
     * Returns minimum supported page size in bytes.
     *
     * If this method is not provided then the minimum is assumed to
     * be TARGET_PAGE_SIZE.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu);
    /**
     * @notify_flag_changed:
     *
     * Called when IOMMU Notifier flag changes (ie when the set of
     * events which IOMMU users are requesting notification for changes).
     * Optional method -- need not be provided if the IOMMU does not
     * need to know exactly which events must be notified.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @old_flags: events which previously needed to be notified
     *
     * @new_flags: events which now need to be notified
     *
     * Returns 0 on success, or a negative errno; in particular
     * returns -EINVAL if the new flag bitmap is not supported by the
     * IOMMU memory region. In case of failure, the error object
     * must be created.
     */
    int (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
                               IOMMUNotifierFlag old_flags,
                               IOMMUNotifierFlag new_flags,
                               Error **errp);
    /**
     * @replay:
     *
     * Called to handle memory_region_iommu_replay().
     *
     * The default implementation of memory_region_iommu_replay() is to
     * call the IOMMU translate method for every page in the address space
     * with flag == IOMMU_NONE and then call the notifier if translate
     * returns a valid mapping. If this method is implemented then it
     * overrides the default behaviour, and must provide the full semantics
     * of memory_region_iommu_replay(), by calling @notifier for every
     * translation present in the IOMMU.
     *
     * Optional method -- an IOMMU only needs to provide this method
     * if the default is inefficient or produces undesirable side effects.
     *
     * Note: this is not related to record-and-replay functionality.
     */
    void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier);

    /**
     * @get_attr:
     *
     * Get IOMMU misc attributes. This is an optional method that
     * can be used to allow users of the IOMMU to get implementation-specific
     * information. The IOMMU implements this method to handle calls
     * by IOMMU users to memory_region_iommu_get_attr() by filling in
     * the arbitrary data pointer for any IOMMUMemoryRegionAttr values that
     * the IOMMU supports. If the method is unimplemented then
     * memory_region_iommu_get_attr() will always return -EINVAL.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @attr: attribute being queried
     *
     * @data: memory to fill in with the attribute data
     *
     * Returns 0 on success, or a negative errno; in particular
     * returns -EINVAL for unrecognized or unimplemented attribute types.
     */
    int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr attr,
                    void *data);

    /**
     * @attrs_to_index:
     *
     * Return the IOMMU index to use for a given set of transaction attributes.
     *
     * Optional method: if an IOMMU only supports a single IOMMU index then
     * the default implementation of memory_region_iommu_attrs_to_index()
     * will return 0.
     *
     * The indexes supported by an IOMMU must be contiguous, starting at 0.
     *
     * @iommu: the IOMMUMemoryRegion
     * @attrs: memory transaction attributes
     */
    int (*attrs_to_index)(IOMMUMemoryRegion *iommu, MemTxAttrs attrs);

    /**
     * @num_indexes:
     *
     * Return the number of IOMMU indexes this IOMMU supports.
     *
     * Optional method: if this method is not provided, then
     * memory_region_iommu_num_indexes() will return 1, indicating that
     * only a single IOMMU index is supported.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    int (*num_indexes)(IOMMUMemoryRegion *iommu);

    /**
     * @iommu_set_page_size_mask:
     *
     * Restrict the page size mask that can be supported with a given IOMMU
     * memory region. Used for example to propagate host physical IOMMU page
     * size mask limitations to the virtual IOMMU.
     *
     * Optional method: if this method is not provided, then the default global
     * page mask is used.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @page_size_mask: a bitmask of supported page sizes. At least one bit,
     * representing the smallest page size, must be set. Additional set bits
     * represent supported block sizes. For example a host physical IOMMU that
     * uses page tables with a page size of 4kB, and supports 2MB and 4GB
     * blocks, will set mask 0x40201000. A granule of 4kB with indiscriminate
     * block sizes is specified with mask 0xfffffffffffff000.
     *
     * Returns 0 on success, or a negative error. In case of failure, the error
     * object must be created.
     */
    int (*iommu_set_page_size_mask)(IOMMUMemoryRegion *iommu,
                                    uint64_t page_size_mask,
                                    Error **errp);
    /**
     * @iommu_set_iova_ranges:
     *
     * Propagate information about the usable IOVA ranges for a given IOMMU
     * memory region. Used for example to propagate host physical device
     * reserved memory region constraints to the virtual IOMMU.
     *
     * Optional method: if this method is not provided, then the default IOVA
     * aperture is used.
     *
     * @iommu: the IOMMUMemoryRegion
     *
     * @iova_ranges: list of ordered IOVA ranges (at least one range)
     *
     * Returns 0 on success, or a negative error. In case of failure, the error
     * object must be created.
     */
    int (*iommu_set_iova_ranges)(IOMMUMemoryRegion *iommu,
                                 GList *iova_ranges,
                                 Error **errp);
};
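
/*
 * Skeleton of a @translate implementation for an IOMMU subclass. The page
 * table walk is elided; everything here is an illustrative sketch, not a
 * definitive implementation.
 *
 *     static IOMMUTLBEntry my_iommu_translate(IOMMUMemoryRegion *iommu,
 *                                             hwaddr addr,
 *                                             IOMMUAccessFlags flag,
 *                                             int iommu_idx)
 *     {
 *         IOMMUTLBEntry entry = {
 *             .target_as = &address_space_memory,
 *             .iova = addr & ~(hwaddr)0xfff,
 *             .addr_mask = 0xfff,
 *             .perm = IOMMU_NONE,    // no mapping unless the walk succeeds
 *         };
 *
 *         // ... walk the translation table selected by iommu_idx and,
 *         // on success, fill in entry.translated_addr and entry.perm ...
 *
 *         return entry;
 *     }
 */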

typedef struct RamDiscardListener RamDiscardListener;
typedef int (*NotifyRamPopulate)(RamDiscardListener *rdl,
                                 MemoryRegionSection *section);
typedef void (*NotifyRamDiscard)(RamDiscardListener *rdl,
                                 MemoryRegionSection *section);

struct RamDiscardListener {
    /*
     * @notify_populate:
     *
     * Notification that previously discarded memory is about to get populated.
     * Listeners are able to object. If any listener objects, already
     * successfully notified listeners are notified about a discard again.
     *
     * @rdl: the #RamDiscardListener getting notified
     * @section: the #MemoryRegionSection to get populated. The section
     *           is aligned within the memory region to the minimum granularity
     *           unless it would exceed the registered section.
     *
     * Returns 0 on success. If the notification is rejected by the listener,
     * an error is returned.
     */
    NotifyRamPopulate notify_populate;

    /*
     * @notify_discard:
     *
     * Notification that previously populated memory was discarded successfully
     * and listeners should drop all references to such memory and prevent
     * new population (e.g., unmap).
     *
     * @rdl: the #RamDiscardListener getting notified
     * @section: the #MemoryRegionSection to get discarded. The section
     *           is aligned within the memory region to the minimum granularity
     *           unless it would exceed the registered section.
     */
    NotifyRamDiscard notify_discard;

    /*
     * @double_discard_supported:
     *
     * The listener supports getting @notify_discard notifications that span
     * already discarded parts.
     */
    bool double_discard_supported;

    MemoryRegionSection *section;
    QLIST_ENTRY(RamDiscardListener) next;
};

static inline void ram_discard_listener_init(RamDiscardListener *rdl,
                                             NotifyRamPopulate populate_fn,
                                             NotifyRamDiscard discard_fn,
                                             bool double_discard_supported)
{
    rdl->notify_populate = populate_fn;
    rdl->notify_discard = discard_fn;
    rdl->double_discard_supported = double_discard_supported;
}
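
/*
 * Usage sketch: initialize a listener before handing it to
 * ram_discard_manager_register_listener() (declared below). The callbacks
 * are hypothetical.
 *
 *     static int my_populate(RamDiscardListener *rdl,
 *                            MemoryRegionSection *section)
 *     {
 *         // e.g. map/pin the section; return a negative errno to object
 *         return 0;
 *     }
 *
 *     static void my_discard(RamDiscardListener *rdl,
 *                            MemoryRegionSection *section)
 *     {
 *         // drop all references to the section, e.g. unmap/unpin
 *     }
 *
 *     RamDiscardListener rdl;
 *     ram_discard_listener_init(&rdl, my_populate, my_discard, false);
 */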

typedef int (*ReplayRamPopulate)(MemoryRegionSection *section, void *opaque);
typedef void (*ReplayRamDiscard)(MemoryRegionSection *section, void *opaque);

/*
 * RamDiscardManagerClass:
 *
 * A #RamDiscardManager coordinates which parts of specific RAM #MemoryRegion
 * regions are currently populated to be used/accessed by the VM, notifying
 * after parts were discarded (freeing up memory) and before parts will be
 * populated (consuming memory), to be used/accessed by the VM.
 *
 * A #RamDiscardManager can only be set for a RAM #MemoryRegion while the
 * #MemoryRegion isn't mapped into an address space yet (either directly
 * or via an alias); it cannot change while the #MemoryRegion is
 * mapped into an address space.
 *
 * The #RamDiscardManager is intended to be used by technologies that are
 * incompatible with discarding of RAM (e.g., VFIO, which may pin all
 * memory inside a #MemoryRegion), and require proper coordination to only
 * map the currently populated parts, to hinder parts that are expected to
 * remain discarded from silently getting populated and consuming memory.
 * Technologies that support discarding of RAM don't have to bother and can
 * simply map the whole #MemoryRegion.
 *
 * An example #RamDiscardManager is virtio-mem, which logically (un)plugs
 * memory within an assigned RAM #MemoryRegion, coordinated with the VM.
 * Logically unplugging memory consists of discarding RAM. The VM agreed to not
 * access unplugged (discarded) memory - especially via DMA. virtio-mem will
 * properly coordinate with listeners before memory is plugged (populated),
 * and after memory is unplugged (discarded).
 *
 * Listeners are called in multiples of the minimum granularity (unless it
 * would exceed the registered range) and changes are aligned to the minimum
 * granularity within the #MemoryRegion. Listeners have to prepare for memory
 * becoming discarded in a different granularity than it was populated and the
 * other way around.
 */
struct RamDiscardManagerClass {
    /* private */
    InterfaceClass parent_class;

    /* public */

    /**
     * @get_min_granularity:
     *
     * Get the minimum granularity in which listeners will get notified
     * about changes within the #MemoryRegion via the #RamDiscardManager.
     *
     * @rdm: the #RamDiscardManager
     * @mr: the #MemoryRegion
     *
     * Returns the minimum granularity.
     */
    uint64_t (*get_min_granularity)(const RamDiscardManager *rdm,
                                    const MemoryRegion *mr);

    /**
     * @is_populated:
     *
     * Check whether the given #MemoryRegionSection is completely populated
     * (i.e., no parts are currently discarded) via the #RamDiscardManager.
     * There are no alignment requirements.
     *
     * @rdm: the #RamDiscardManager
     * @section: the #MemoryRegionSection
     *
     * Returns whether the given range is completely populated.
     */
    bool (*is_populated)(const RamDiscardManager *rdm,
                         const MemoryRegionSection *section);

    /**
     * @replay_populated:
     *
     * Call the #ReplayRamPopulate callback for all populated parts within the
     * #MemoryRegionSection via the #RamDiscardManager.
     *
     * In case any call fails, no further calls are made.
     *
     * @rdm: the #RamDiscardManager
     * @section: the #MemoryRegionSection
     * @replay_fn: the #ReplayRamPopulate callback
     * @opaque: pointer to forward to the callback
     *
     * Returns 0 on success, or a negative error if any notification failed.
     */
    int (*replay_populated)(const RamDiscardManager *rdm,
                            MemoryRegionSection *section,
                            ReplayRamPopulate replay_fn, void *opaque);

    /**
     * @replay_discarded:
     *
     * Call the #ReplayRamDiscard callback for all discarded parts within the
     * #MemoryRegionSection via the #RamDiscardManager.
     *
     * @rdm: the #RamDiscardManager
     * @section: the #MemoryRegionSection
     * @replay_fn: the #ReplayRamDiscard callback
     * @opaque: pointer to forward to the callback
     */
    void (*replay_discarded)(const RamDiscardManager *rdm,
                             MemoryRegionSection *section,
                             ReplayRamDiscard replay_fn, void *opaque);

    /**
     * @register_listener:
     *
     * Register a #RamDiscardListener for the given #MemoryRegionSection and
     * immediately notify the #RamDiscardListener about all populated parts
     * within the #MemoryRegionSection via the #RamDiscardManager.
     *
     * In case any notification fails, no further notifications are triggered
     * and an error is logged.
     *
     * @rdm: the #RamDiscardManager
     * @rdl: the #RamDiscardListener
     * @section: the #MemoryRegionSection
     */
    void (*register_listener)(RamDiscardManager *rdm,
                              RamDiscardListener *rdl,
                              MemoryRegionSection *section);

    /**
     * @unregister_listener:
     *
     * Unregister a previously registered #RamDiscardListener via the
     * #RamDiscardManager after notifying the #RamDiscardListener about all
     * populated parts becoming unpopulated within the registered
     * #MemoryRegionSection.
     *
     * @rdm: the #RamDiscardManager
     * @rdl: the #RamDiscardListener
     */
    void (*unregister_listener)(RamDiscardManager *rdm,
                                RamDiscardListener *rdl);
};

uint64_t ram_discard_manager_get_min_granularity(const RamDiscardManager *rdm,
                                                 const MemoryRegion *mr);

bool ram_discard_manager_is_populated(const RamDiscardManager *rdm,
                                      const MemoryRegionSection *section);

int ram_discard_manager_replay_populated(const RamDiscardManager *rdm,
                                         MemoryRegionSection *section,
                                         ReplayRamPopulate replay_fn,
                                         void *opaque);

void ram_discard_manager_replay_discarded(const RamDiscardManager *rdm,
                                          MemoryRegionSection *section,
                                          ReplayRamDiscard replay_fn,
                                          void *opaque);

void ram_discard_manager_register_listener(RamDiscardManager *rdm,
                                           RamDiscardListener *rdl,
                                           MemoryRegionSection *section);

void ram_discard_manager_unregister_listener(RamDiscardManager *rdm,
                                             RamDiscardListener *rdl);
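
/*
 * Usage sketch for the replay helpers: add up the bytes that are currently
 * populated within a section. The callback name is hypothetical.
 *
 *     static int count_populated(MemoryRegionSection *s, void *opaque)
 *     {
 *         *(uint64_t *)opaque += int128_get64(s->size);
 *         return 0;
 *     }
 *
 *     uint64_t bytes = 0;
 *     ram_discard_manager_replay_populated(rdm, section,
 *                                          count_populated, &bytes);
 */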

/**
 * memory_get_xlat_addr: Extract addresses from a TLB entry
 *
 * @iotlb: pointer to an #IOMMUTLBEntry
 * @vaddr: virtual address
 * @ram_addr: RAM address
 * @read_only: indicates if writes are allowed
 * @mr_has_discard_manager: indicates memory is controlled by a
 *     RamDiscardManager
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
                          ram_addr_t *ram_addr, bool *read_only,
                          bool *mr_has_discard_manager, Error **errp);

typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;

/** MemoryRegion:
 *
 * A struct representing a memory region.
 */
struct MemoryRegion {
    Object parent_obj;

    /* private: */

    /* The following fields should fit in a cache line */
    bool romd_mode;
    bool ram;
    bool subpage;
    bool readonly; /* For RAM regions */
    bool nonvolatile;
    bool rom_device;
    bool flush_coalesced_mmio;
    bool unmergeable;
    uint8_t dirty_log_mask;
    bool is_iommu;
    RAMBlock *ram_block;
    Object *owner;
    /* owner as TYPE_DEVICE. Used for re-entrancy checks in MR access hotpath */
    DeviceState *dev;

    const MemoryRegionOps *ops;
    void *opaque;
    MemoryRegion *container;
    int mapped_via_alias; /* Mapped via an alias, container might be NULL */
    Int128 size;
    hwaddr addr;
    void (*destructor)(MemoryRegion *mr);
    uint64_t align;
    bool terminates;
    bool ram_device;
    bool enabled;
    bool warning_printed; /* For reservations */
    uint8_t vga_logging_count;
    MemoryRegion *alias;
    hwaddr alias_offset;
    int32_t priority;
    QTAILQ_HEAD(, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(, CoalescedMemoryRange) coalesced;
    const char *name;
    unsigned ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
    RamDiscardManager *rdm; /* Only for RAM */

    /* For devices designed to perform re-entrant IO into their own IO MRs */
    bool disable_reentrancy_guard;
};

struct IOMMUMemoryRegion {
    MemoryRegion parent_obj;

    QLIST_HEAD(, IOMMUNotifier) iommu_notify;
    IOMMUNotifierFlag iommu_notify_flags;
};

#define IOMMU_NOTIFIER_FOREACH(n, mr) \
    QLIST_FOREACH((n), &(mr)->iommu_notify, node)

#define MEMORY_LISTENER_PRIORITY_MIN            0
#define MEMORY_LISTENER_PRIORITY_ACCEL          10
#define MEMORY_LISTENER_PRIORITY_DEV_BACKEND    10

/**
 * struct MemoryListener: callbacks structure for updates to the physical memory map
 *
 * Allows a component to adjust to changes in the guest-visible memory map.
 * Use with memory_listener_register() and memory_listener_unregister().
 */
struct MemoryListener {
    /**
     * @begin:
     *
     * Called at the beginning of an address space update transaction.
     * Followed by calls to #MemoryListener.region_add(),
     * #MemoryListener.region_del(), #MemoryListener.region_nop(),
     * #MemoryListener.log_start() and #MemoryListener.log_stop() in
     * increasing address order.
     *
     * @listener: The #MemoryListener.
     */
    void (*begin)(MemoryListener *listener);

    /**
     * @commit:
     *
     * Called at the end of an address space update transaction,
     * after the last call to #MemoryListener.region_add(),
     * #MemoryListener.region_del() or #MemoryListener.region_nop(),
     * #MemoryListener.log_start() and #MemoryListener.log_stop().
     *
     * @listener: The #MemoryListener.
     */
    void (*commit)(MemoryListener *listener);

    /**
     * @region_add:
     *
     * Called during an address space update transaction,
     * for a section of the address space that is new in this address
     * space since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     */
    void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @region_del:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has disappeared in the address
     * space since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The old #MemoryRegionSection.
     */
    void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @region_nop:
     *
     * Called during an address space update transaction,
     * for a section of the address space that is in the same place in the address
     * space as in the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     */
    void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @log_start:
     *
     * Called during an address space update transaction, after
     * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
     * #MemoryListener.region_nop(), if dirty memory logging clients have
     * become active since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     * @old: A bitmap of dirty memory logging clients that were active in
     *     the previous transaction.
     * @new: A bitmap of dirty memory logging clients that are active in
     *     the current transaction.
     */
    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
                      int old, int new);

    /**
     * @log_stop:
     *
     * Called during an address space update transaction, after
     * one of #MemoryListener.region_add(), #MemoryListener.region_del() or
     * #MemoryListener.region_nop() and possibly after
     * #MemoryListener.log_start(), if dirty memory logging clients have
     * become inactive since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     * @old: A bitmap of dirty memory logging clients that were active in
     *     the previous transaction.
     * @new: A bitmap of dirty memory logging clients that are active in
     *     the current transaction.
     */
    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
                     int old, int new);

    /**
     * @log_sync:
     *
     * Called by memory_region_snapshot_and_clear_dirty() and
     * memory_global_dirty_log_sync(), before accessing QEMU's "official"
     * copy of the dirty memory bitmap for a #MemoryRegionSection.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     */
    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @log_sync_global:
     *
     * This is the global version of @log_sync when the listener does
     * not have a way to synchronize the log with finer granularity.
     * When the listener registers with @log_sync_global defined, then
     * its @log_sync must be NULL. Vice versa.
     *
     * @listener: The #MemoryListener.
     * @last_stage: The last stage to synchronize the log during migration.
     *     The caller should guarantee that the synchronization with
     *     @last_stage set to true is triggered only once after all VCPUs
     *     have been stopped.
     */
    void (*log_sync_global)(MemoryListener *listener, bool last_stage);

    /**
     * @log_clear:
     *
     * Called before reading the dirty memory bitmap for a
     * #MemoryRegionSection.
     *
     * @listener: The #MemoryListener.
     * @section: The #MemoryRegionSection.
     */
    void (*log_clear)(MemoryListener *listener, MemoryRegionSection *section);

    /**
     * @log_global_start:
     *
     * Called by memory_global_dirty_log_start(), which
     * enables the %DIRTY_LOG_MIGRATION client on all memory regions in
     * the address space. #MemoryListener.log_global_start() is also
     * called when a #MemoryListener is added, if global dirty logging is
     * active at that time.
     *
     * @listener: The #MemoryListener.
     * @errp: pointer to Error*, to store an error if it happens.
     *
     * Return: true on success, else false setting @errp with error.
     */
    bool (*log_global_start)(MemoryListener *listener, Error **errp);

    /**
     * @log_global_stop:
     *
     * Called by memory_global_dirty_log_stop(), which
     * disables the %DIRTY_LOG_MIGRATION client on all memory regions in
     * the address space.
     *
     * @listener: The #MemoryListener.
     */
    void (*log_global_stop)(MemoryListener *listener);

    /**
     * @log_global_after_sync:
     *
     * Called after reading the dirty memory bitmap
     * for any #MemoryRegionSection.
     *
     * @listener: The #MemoryListener.
     */
    void (*log_global_after_sync)(MemoryListener *listener);

    /**
     * @eventfd_add:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has had a new ioeventfd
     * registration since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     * @match_data: The @match_data parameter for the new ioeventfd.
     * @data: The @data parameter for the new ioeventfd.
     * @e: The #EventNotifier parameter for the new ioeventfd.
     */
    void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);

    /**
     * @eventfd_del:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has dropped an ioeventfd
     * registration since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     * @match_data: The @match_data parameter for the dropped ioeventfd.
     * @data: The @data parameter for the dropped ioeventfd.
     * @e: The #EventNotifier parameter for the dropped ioeventfd.
     */
    void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);

    /**
     * @coalesced_io_add:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has had a new coalesced
     * MMIO range registration since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     * @addr: The starting address for the coalesced MMIO range.
     * @len: The length of the coalesced MMIO range.
     */
    void (*coalesced_io_add)(MemoryListener *listener, MemoryRegionSection *section,
                             hwaddr addr, hwaddr len);

    /**
     * @coalesced_io_del:
     *
     * Called during an address space update transaction,
     * for a section of the address space that has dropped a coalesced
     * MMIO range since the last transaction.
     *
     * @listener: The #MemoryListener.
     * @section: The new #MemoryRegionSection.
     * @addr: The starting address for the coalesced MMIO range.
     * @len: The length of the coalesced MMIO range.
     */
    void (*coalesced_io_del)(MemoryListener *listener, MemoryRegionSection *section,
                             hwaddr addr, hwaddr len);
    /**
     * @priority:
     *
     * Govern the order in which memory listeners are invoked. Lower priorities
     * are invoked earlier for "add" or "start" callbacks, and later for "delete"
     * or "stop" callbacks.
     */
    unsigned priority;

    /**
     * @name:
     *
     * Name of the listener. It can be used in contexts where we'd like to
     * distinguish one memory listener from the rest.
     */
    const char *name;

    /* private: */
    AddressSpace *address_space;
    QTAILQ_ENTRY(MemoryListener) link;
    QTAILQ_ENTRY(MemoryListener) link_as;
};
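
/*
 * A minimal sketch of a MemoryListener, assuming the
 * memory_listener_register() API declared elsewhere in QEMU; the callback
 * is illustrative.
 *
 *     static void my_region_add(MemoryListener *l, MemoryRegionSection *s)
 *     {
 *         // react to a section that is new in the address space
 *     }
 *
 *     static MemoryListener my_listener = {
 *         .name = "my-listener",
 *         .region_add = my_region_add,
 *         .priority = MEMORY_LISTENER_PRIORITY_MIN,
 *     };
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 */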

typedef struct AddressSpaceMapClient {
    QEMUBH *bh;
    QLIST_ENTRY(AddressSpaceMapClient) link;
} AddressSpaceMapClient;

typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

/**
 * struct AddressSpace: describes a mapping of addresses to #MemoryRegion objects
 */
struct AddressSpace {
    /* private: */
    struct rcu_head rcu;
    char *name;
    MemoryRegion *root;

    /* Accessed via RCU. */
    struct FlatView *current_map;

    int ioeventfd_nb;
    int ioeventfd_notifiers;
    struct MemoryRegionIoeventfd *ioeventfds;
    QTAILQ_HEAD(, MemoryListener) listeners;
    QTAILQ_ENTRY(AddressSpace) address_spaces_link;

    /* Bounce buffer to use for this address space. */
    BounceBuffer bounce;
    /* List of callbacks to invoke when buffers free up */
    QemuMutex map_client_list_lock;
    QLIST_HEAD(, AddressSpaceMapClient) map_client_list;
};

typedef struct AddressSpaceDispatch AddressSpaceDispatch;
typedef struct FlatRange FlatRange;

/* Flattened global view of current active memory hierarchy. Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
    struct AddressSpaceDispatch *dispatch;
    MemoryRegion *root;
};

static inline FlatView *address_space_to_flatview(AddressSpace *as)
{
    return qatomic_rcu_read(&as->current_map);
}
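
/*
 * The returned FlatView is only valid under RCU; a sketch using the
 * RCU_READ_LOCK_GUARD() helper from qemu/rcu.h:
 *
 *     RCU_READ_LOCK_GUARD();
 *     FlatView *fv = address_space_to_flatview(as);
 *     // ... use fv; it remains valid until the guard goes out of scope
 */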

/**
 * typedef flatview_cb: callback for flatview_for_each_range()
 *
 * @start: start address of the range within the FlatView
 * @len: length of the range in bytes
 * @mr: MemoryRegion covering this range
 * @offset_in_region: offset of the first byte of the range within @mr
 * @opaque: data pointer passed to flatview_for_each_range()
 *
 * Returns: true to stop the iteration, false to keep going.
 */
typedef bool (*flatview_cb)(Int128 start,
                            Int128 len,
                            const MemoryRegion *mr,
                            hwaddr offset_in_region,
                            void *opaque);

/**
 * flatview_for_each_range: Iterate through a FlatView
 * @fv: the FlatView to iterate through
 * @cb: function to call for each range
 * @opaque: opaque data pointer to pass to @cb
 *
 * A FlatView is made up of a list of non-overlapping ranges, each of
 * which is a slice of a MemoryRegion. This function iterates through
 * each range in @fv, calling @cb. The callback function can terminate
 * iteration early by returning 'true'.
 */
void flatview_for_each_range(FlatView *fv, flatview_cb cb, void *opaque);
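
/*
 * Example callback for flatview_for_each_range(): stop at the first
 * RAM-backed range. memory_region_is_ram() is declared elsewhere in this
 * header; the names are illustrative.
 *
 *     static bool find_first_ram(Int128 start, Int128 len,
 *                                const MemoryRegion *mr,
 *                                hwaddr offset_in_region, void *opaque)
 *     {
 *         if (memory_region_is_ram((MemoryRegion *)mr)) {
 *             *(Int128 *)opaque = start;
 *             return true;     // stop the iteration
 *         }
 *         return false;        // keep going
 *     }
 */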

static inline bool MemoryRegionSection_eq(MemoryRegionSection *a,
                                          MemoryRegionSection *b)
{
    return a->mr == b->mr &&
           a->fv == b->fv &&
           a->offset_within_region == b->offset_within_region &&
           a->offset_within_address_space == b->offset_within_address_space &&
           int128_eq(a->size, b->size) &&
           a->readonly == b->readonly &&
           a->nonvolatile == b->nonvolatile;
}

/**
 * memory_region_section_new_copy: Copy a memory region section
 *
 * Allocate memory for a new copy, copy the memory region section, and
 * properly take a reference on all relevant members.
 *
 * @s: the #MemoryRegionSection to copy
 */
MemoryRegionSection *memory_region_section_new_copy(MemoryRegionSection *s);

/**
 * memory_region_section_free_copy: Free a copied memory region section
 *
 * Free a copy of a memory section created via memory_region_section_new_copy(),
 * properly dropping references on all relevant members.
 *
 * @s: the #MemoryRegionSection to free
 */
void memory_region_section_free_copy(MemoryRegionSection *s);

/**
 * memory_region_init: Initialize a memory region
 *
 * The region typically acts as a container for other memory regions. Use
 * memory_region_add_subregion() to add subregions.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region; any subregions beyond this size will be clipped
 */
void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size);
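
/*
 * Typical container usage, assuming memory_region_add_subregion() as
 * declared elsewhere in this header; the device state and sizes are
 * illustrative.
 *
 *     memory_region_init(&s->container, OBJECT(s), "demo-container", 0x1000);
 *     memory_region_add_subregion(&s->container, 0x0, &s->mmio);
 */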

/**
 * memory_region_ref: Add 1 to a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug. MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function adds a reference to the owner.
 *
 * All MemoryRegions must have an owner if they can disappear, even if the
 * device they belong to operates exclusively under the BQL. This is because
 * the region could be returned at any time by memory_region_find, and this
 * is usually under guest control.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_ref(MemoryRegion *mr);

/**
 * memory_region_unref: Remove 1 from a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug. MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function removes a reference to the owner and possibly destroys it.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_unref(MemoryRegion *mr);

/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size);
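
/*
 * Usage sketch, continuing the hypothetical demo_ops example above: create
 * a 4KiB MMIO region backed by those callbacks, with @opaque pointing at
 * the device state.
 *
 *     memory_region_init_io(&s->mmio, OBJECT(s), &demo_ops, s,
 *                           "demo-mmio", 0x1000);
 */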

/**
 * memory_region_init_ram_nomigrate: Initialize RAM memory region. Accesses
 *                                   into the region will modify memory
 *                                   directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp);

/**
 * memory_region_init_ram_flags_nomigrate: Initialize RAM memory region.
 *                                         Accesses into the region will
 *                                         modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_NORESERVE,
 *             RAM_GUEST_MEMFD.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_ram_flags_nomigrate(MemoryRegion *mr,
                                            Object *owner,
                                            const char *name,
                                            uint64_t size,
                                            uint32_t ram_flags,
                                            Error **errp);

/**
 * memory_region_init_resizeable_ram: Initialize memory region with resizable
 *                                    RAM. Accesses into the region will
 *                                    modify memory directly. Only an initial
 *                                    portion of this RAM is actually used.
 *                                    Changing the size while migrating
 *                                    can result in the migration being
 *                                    canceled.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: used size of the region.
 * @max_size: max size of the region.
 * @resized: callback to notify owner about used size change.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp);
#ifdef CONFIG_POSIX

/**
 * memory_region_init_ram_from_file: Initialize RAM memory region with a
 *                                   mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @align: alignment of the region base address; if 0, the default alignment
 *         (getpagesize()) will be used.
 * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
 *             RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY,
 *             RAM_READONLY_FD, RAM_GUEST_MEMFD
 * @path: the path in which to allocate the RAM.
 * @offset: offset within the file referenced by path
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_ram_from_file(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      uint64_t align,
                                      uint32_t ram_flags,
                                      const char *path,
                                      ram_addr_t offset,
                                      Error **errp);

/**
 * memory_region_init_ram_from_fd: Initialize RAM memory region with a
 *                                 mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
 *             RAM_NORESERVE, RAM_PROTECTED, RAM_NAMED_FILE, RAM_READONLY,
 *             RAM_READONLY_FD, RAM_GUEST_MEMFD
 * @fd: the fd to mmap.
 * @offset: offset within the file referenced by fd
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 *
 * Return: true on success, else false setting @errp with error.
 */
bool memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    uint32_t ram_flags,
                                    int fd,
                                    ram_addr_t offset,
                                    Error **errp);
#endif

/**
 * memory_region_init_ram_ptr: Initialize RAM memory region from a
 *                             user-provided pointer. Accesses into the
 *                             region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr);

/**
 * memory_region_init_ram_device_ptr: Initialize RAM device memory region from
 *                                    a user-provided pointer.
 *
 * A RAM device represents a mapping to a physical device, such as to a PCI
 * MMIO BAR of a vfio-pci assigned device. The memory region may be mapped
 * into the VM address space and access to the region will modify memory
 * directly. However, the memory region should not be included in a memory
 * dump (device may not be enabled/mapped at the time of the dump), and
 * operations incompatible with manipulating MMIO should be avoided. Replaces
 * skip_dump flag.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 * (For RAM device memory regions, migrating the contents rarely makes sense.)
 */
void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr);

/**
 * memory_region_init_alias: Initialize a memory region that aliases all or a
 *                           part of another memory region.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @orig: the region to be referenced; @mr will be equivalent to
 *        @orig between @offset and @offset + @size - 1.
 * @offset: start of the section in @orig to be referenced.
 * @size: size of the region.
 */
void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size);
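
/*
 * Usage sketch: expose the low 1MiB of a RAM region at another address via
 * an alias (names are illustrative; MiB comes from qemu/units.h).
 *
 *     memory_region_init_alias(&s->ram_below_1m, OBJECT(s), "ram-below-1m",
 *                              ram_mr, 0, 1 * MiB);
 *     memory_region_add_subregion(system_memory, 0x100000,
 *                                 &s->ram_below_1m);
 */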
1528
1529 /**
1530 * memory_region_init_rom_nomigrate: Initialize a ROM memory region.
1531 *
1532 * This has the same effect as calling memory_region_init_ram_nomigrate()
1533 * and then marking the resulting region read-only with
1534 * memory_region_set_readonly().
1535 *
1536 * Note that this function does not do anything to cause the data in the
1537 * RAM side of the memory region to be migrated; that is the responsibility
1538 * of the caller.
1539 *
1540 * @mr: the #MemoryRegion to be initialized.
1541 * @owner: the object that tracks the region's reference count
1542 * @name: Region name, becomes part of RAMBlock name used in migration stream
1543 * must be unique within any device
1544 * @size: size of the region.
1545 * @errp: pointer to Error*, to store an error if it happens.
1546 *
1547 * Return: true on success, else false setting @errp with error.
1548 */
1549 bool memory_region_init_rom_nomigrate(MemoryRegion *mr,
1550 Object *owner,
1551 const char *name,
1552 uint64_t size,
1553 Error **errp);
1554
1555 /**
1556 * memory_region_init_rom_device_nomigrate: Initialize a ROM memory region.
1557 * Writes are handled via callbacks.
1558 *
1559 * Note that this function does not do anything to cause the data in the
1560 * RAM side of the memory region to be migrated; that is the responsibility
1561 * of the caller.
1562 *
1563 * @mr: the #MemoryRegion to be initialized.
1564 * @owner: the object that tracks the region's reference count
1565 * @ops: callbacks for write access handling (must not be NULL).
1566 * @opaque: passed to the read and write callbacks of the @ops structure.
1567 * @name: Region name, becomes part of RAMBlock name used in migration stream
1568 * must be unique within any device
1569 * @size: size of the region.
1570 * @errp: pointer to Error*, to store an error if it happens.
1571 *
1572 * Return: true on success, else false setting @errp with error.
1573 */
1574 bool memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
1575 Object *owner,
1576 const MemoryRegionOps *ops,
1577 void *opaque,
1578 const char *name,
1579 uint64_t size,
1580 Error **errp);
1581
1582 /**
1583 * memory_region_init_iommu: Initialize a memory region of a custom type
1584 * that translates addresses
1585 *
1586 * An IOMMU region translates addresses and forwards accesses to a target
1587 * memory region.
1588 *
1589 * The IOMMU implementation must define a subclass of TYPE_IOMMU_MEMORY_REGION.
1590 * @_iommu_mr should be a pointer to enough memory for an instance of
1591 * that subclass, @instance_size is the size of that subclass, and
1592 * @mrtypename is its name. This function will initialize @_iommu_mr as an
1593 * instance of the subclass, and its methods will then be called to handle
1594 * accesses to the memory region. See the documentation of
1595 * #IOMMUMemoryRegionClass for further details.
1596 *
1597 * @_iommu_mr: the #IOMMUMemoryRegion to be initialized
1598 * @instance_size: the IOMMUMemoryRegion subclass instance size
1599 * @mrtypename: the type name of the #IOMMUMemoryRegion
1600 * @owner: the object that tracks the region's reference count
1601 * @name: used for debugging; not visible to the user or ABI
1602 * @size: size of the region.
1603 */
1604 void memory_region_init_iommu(void *_iommu_mr,
1605 size_t instance_size,
1606 const char *mrtypename,
1607 Object *owner,
1608 const char *name,
1609 uint64_t size);
1610
1611 /**
1612 * memory_region_init_ram - Initialize RAM memory region. Accesses into the
1613 * region will modify memory directly.
1614 *
1615 * @mr: the #MemoryRegion to be initialized
1616 * @owner: the object that tracks the region's reference count (must be
1617 * TYPE_DEVICE or a subclass of TYPE_DEVICE, or NULL)
1618 * @name: name of the memory region
1619 * @size: size of the region in bytes
1620 * @errp: pointer to Error*, to store an error if it happens.
1621 *
1622 * This function allocates RAM for a board model or device, and
1623 * arranges for it to be migrated (by calling vmstate_register_ram()
1624 * if @owner is a DeviceState, or vmstate_register_ram_global() if
1625 * @owner is NULL).
1626 *
1627 * TODO: Currently we restrict @owner to being either NULL (for
1628 * global RAM regions with no owner) or devices, so that we can
1629 * give the RAM block a unique name for migration purposes.
1630 * We should lift this restriction and allow arbitrary Objects.
1631 * If you pass a non-NULL non-device @owner then we will assert.
1632 *
1633 * Return: true on success, else false setting @errp with error.
1634 */
1635 bool memory_region_init_ram(MemoryRegion *mr,
1636 Object *owner,
1637 const char *name,
1638 uint64_t size,
1639 Error **errp);
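/*
 * Illustrative sketch: a device realize method allocating migratable RAM
 * and propagating failure. MyDevState/MYDEV are hypothetical and MiB comes
 * from "qemu/units.h"; checking the return value and bailing out with
 * @errp set is the point of the example.
 *
 *     static void mydev_realize(DeviceState *dev, Error **errp)
 *     {
 *         MyDevState *s = MYDEV(dev);
 *
 *         if (!memory_region_init_ram(&s->ram, OBJECT(dev), "mydev.ram",
 *                                     4 * MiB, errp)) {
 *             return;
 *         }
 *     }
 */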
1640
1641 bool memory_region_init_ram_guest_memfd(MemoryRegion *mr,
1642 Object *owner,
1643 const char *name,
1644 uint64_t size,
1645 Error **errp);
1646
1647 /**
1648 * memory_region_init_rom: Initialize a ROM memory region.
1649 *
1650 * This has the same effect as calling memory_region_init_ram()
1651 * and then marking the resulting region read-only with
1652 * memory_region_set_readonly(). This includes arranging for the
1653 * contents to be migrated.
1654 *
1655 * TODO: Currently we restrict @owner to being either NULL (for
1656 * global RAM regions with no owner) or devices, so that we can
1657 * give the RAM block a unique name for migration purposes.
1658 * We should lift this restriction and allow arbitrary Objects.
1659 * If you pass a non-NULL non-device @owner then we will assert.
1660 *
1661 * @mr: the #MemoryRegion to be initialized.
1662 * @owner: the object that tracks the region's reference count
1663 * @name: Region name, becomes part of RAMBlock name used in migration stream;
1664 * must be unique within any device
1665 * @size: size of the region.
1666 * @errp: pointer to Error*, to store an error if it happens.
1667 *
1668 * Return: true on success, else false setting @errp with error.
1669 */
1670 bool memory_region_init_rom(MemoryRegion *mr,
1671 Object *owner,
1672 const char *name,
1673 uint64_t size,
1674 Error **errp);
1675
1676 /**
1677 * memory_region_init_rom_device: Initialize a ROM memory region.
1678 * Writes are handled via callbacks.
1679 *
1680 * This function initializes a memory region backed by RAM for reads
1681 * and callbacks for writes, and arranges for the RAM backing to
1682 * be migrated (by calling vmstate_register_ram()
1683 * if @owner is a DeviceState, or vmstate_register_ram_global() if
1684 * @owner is NULL).
1685 *
1686 * TODO: Currently we restrict @owner to being either NULL (for
1687 * global RAM regions with no owner) or devices, so that we can
1688 * give the RAM block a unique name for migration purposes.
1689 * We should lift this restriction and allow arbitrary Objects.
1690 * If you pass a non-NULL non-device @owner then we will assert.
1691 *
1692 * @mr: the #MemoryRegion to be initialized.
1693 * @owner: the object that tracks the region's reference count
1694 * @ops: callbacks for write access handling (must not be NULL).
1695 * @opaque: passed to the read and write callbacks of the @ops structure.
1696 * @name: Region name, becomes part of RAMBlock name used in migration stream;
1697 * must be unique within any device
1698 * @size: size of the region.
1699 * @errp: pointer to Error*, to store an error if it happens.
1700 *
1701 * Return: true on success, else false setting @errp with error.
1702 */
1703 bool memory_region_init_rom_device(MemoryRegion *mr,
1704 Object *owner,
1705 const MemoryRegionOps *ops,
1706 void *opaque,
1707 const char *name,
1708 uint64_t size,
1709 Error **errp);
1710
1711
1712 /**
1713 * memory_region_owner: get a memory region's owner.
1714 *
1715 * @mr: the memory region being queried.
1716 */
1717 Object *memory_region_owner(MemoryRegion *mr);
1718
1719 /**
1720 * memory_region_size: get a memory region's size.
1721 *
1722 * @mr: the memory region being queried.
1723 */
1724 uint64_t memory_region_size(MemoryRegion *mr);
1725
1726 /**
1727 * memory_region_is_ram: check whether a memory region is random access
1728 *
1729 * Returns %true if a memory region is random access.
1730 *
1731 * @mr: the memory region being queried
1732 */
1733 static inline bool memory_region_is_ram(MemoryRegion *mr)
1734 {
1735 return mr->ram;
1736 }
1737
1738 /**
1739 * memory_region_is_ram_device: check whether a memory region is a ram device
1740 *
1741 * Returns %true if a memory region is a device-backed RAM region.
1742 *
1743 * @mr: the memory region being queried
1744 */
1745 bool memory_region_is_ram_device(MemoryRegion *mr);
1746
1747 /**
1748 * memory_region_is_romd: check whether a memory region is in ROMD mode
1749 *
1750 * Returns %true if a memory region is a ROM device and currently set to allow
1751 * direct reads.
1752 *
1753 * @mr: the memory region being queried
1754 */
1755 static inline bool memory_region_is_romd(MemoryRegion *mr)
1756 {
1757 return mr->rom_device && mr->romd_mode;
1758 }
1759
1760 /**
1761 * memory_region_is_protected: check whether a memory region is protected
1762 *
1763 * Returns %true if a memory region is protected RAM and cannot be accessed
1764 * via standard mechanisms, e.g. DMA.
1765 *
1766 * @mr: the memory region being queried
1767 */
1768 bool memory_region_is_protected(MemoryRegion *mr);
1769
1770 /**
1771 * memory_region_has_guest_memfd: check whether a memory region has guest_memfd
1772 * associated
1773 *
1774 * Returns %true if a memory region's ram_block has valid guest_memfd assigned.
1775 *
1776 * @mr: the memory region being queried
1777 */
1778 bool memory_region_has_guest_memfd(MemoryRegion *mr);
1779
1780 /**
1781 * memory_region_get_iommu: check whether a memory region is an iommu
1782 *
1783 * Returns pointer to IOMMUMemoryRegion if a memory region is an iommu,
1784 * otherwise NULL.
1785 *
1786 * @mr: the memory region being queried
1787 */
1788 static inline IOMMUMemoryRegion *memory_region_get_iommu(MemoryRegion *mr)
1789 {
1790 if (mr->alias) {
1791 return memory_region_get_iommu(mr->alias);
1792 }
1793 if (mr->is_iommu) {
1794 return (IOMMUMemoryRegion *) mr;
1795 }
1796 return NULL;
1797 }
1798
1799 /**
1800 * memory_region_get_iommu_class_nocheck: returns iommu memory region class
1801 * if an iommu or NULL if not
1802 *
1803 * Returns pointer to IOMMUMemoryRegionClass if a memory region is an iommu,
1804 * otherwise NULL. This is a fast path that avoids QOM checking; use with caution.
1805 *
1806 * @iommu_mr: the memory region being queried
1807 */
1808 static inline IOMMUMemoryRegionClass *memory_region_get_iommu_class_nocheck(
1809 IOMMUMemoryRegion *iommu_mr)
1810 {
1811 return (IOMMUMemoryRegionClass *) (((Object *)iommu_mr)->class);
1812 }
1813
1814 #define memory_region_is_iommu(mr) (memory_region_get_iommu(mr) != NULL)
1815
1816 /**
1817 * memory_region_iommu_get_min_page_size: get minimum supported page size
1818 * for an iommu
1819 *
1820 * Returns minimum supported page size for an iommu.
1821 *
1822 * @iommu_mr: the memory region being queried
1823 */
1824 uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr);
1825
1826 /**
1827 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
1828 *
1829 * Note: for any IOMMU implementation, an in-place mapping change
1830 * should be notified with an UNMAP followed by a MAP.
1831 *
1832 * @iommu_mr: the memory region that was changed
1833 * @iommu_idx: the IOMMU index for the translation table which has changed
1834 * @event: TLB event with the new entry in the IOMMU translation table.
1835 * The entry replaces all old entries for the same virtual I/O address
1836 * range.
1837 */
1838 void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
1839 int iommu_idx,
1840 IOMMUTLBEvent event);
1841
1842 /**
1843 * memory_region_notify_iommu_one: notify a change in an IOMMU translation
1844 * entry to a single notifier
1845 *
1846 * This works just like memory_region_notify_iommu(), but it only
1847 * notifies a specific notifier, not all of them.
1848 *
1849 * @notifier: the notifier to be notified
1850 * @event: TLB event with the new entry in the IOMMU translation table.
1851 * The entry replaces all old entries for the same virtual I/O address
1852 * range.
1853 */
1854 void memory_region_notify_iommu_one(IOMMUNotifier *notifier,
1855 IOMMUTLBEvent *event);
1856
1857 /**
1858 * memory_region_unmap_iommu_notifier_range: notify an unmap for an IOMMU
1859 * translation that covers the
1860 * range of a notifier
1861 *
1862 * @notifier: the notifier to be notified
1863 */
1864 void memory_region_unmap_iommu_notifier_range(IOMMUNotifier *notifier);
1865
1866
1867 /**
1868 * memory_region_register_iommu_notifier: register a notifier for changes to
1869 * IOMMU translation entries.
1870 *
1871 * Returns 0 on success, or a negative errno otherwise. In particular,
1872 * -EINVAL indicates that at least one of the attributes of the notifier
1873 * is not supported (flag/range) by the IOMMU memory region. In case of error,
1874 * an error object is set in @errp.
1875 *
1876 * @mr: the memory region to observe
1877 * @n: the IOMMUNotifier to be added; the notify callback receives a
1878 * pointer to an #IOMMUTLBEntry as the opaque value; the pointer
1879 * ceases to be valid on exit from the notifier.
1880 * @errp: pointer to Error*, to store an error if it happens.
1881 */
1882 int memory_region_register_iommu_notifier(MemoryRegion *mr,
1883 IOMMUNotifier *n, Error **errp);
1884
1885 /**
1886 * memory_region_iommu_replay: replay existing IOMMU translations to
1887 * a notifier with the minimum page granularity supported by the IOMMU
1888 * region (see memory_region_iommu_get_min_page_size()).
1889 *
1890 * Note: this is not related to record-and-replay functionality.
1891 *
1892 * @iommu_mr: the memory region to observe
1893 * @n: the notifier to which to replay iommu mappings
1894 */
1895 void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n);
1896
1897 /**
1898 * memory_region_unregister_iommu_notifier: unregister a notifier for
1899 * changes to IOMMU translation entries.
1900 *
1901 * @mr: the memory region which was observed and for which notify_stopped()
1902 * needs to be called
1903 * @n: the notifier to be removed.
1904 */
1905 void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
1906 IOMMUNotifier *n);
1907
1908 /**
1909 * memory_region_iommu_get_attr: return an IOMMU attr if get_attr() is
1910 * defined on the IOMMU.
1911 *
1912 * Returns 0 on success, or a negative errno otherwise. In particular,
1913 * -EINVAL indicates that the IOMMU does not support the requested
1914 * attribute.
1915 *
1916 * @iommu_mr: the memory region
1917 * @attr: the requested attribute
1918 * @data: a pointer to the requested attribute data
1919 */
1920 int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
1921 enum IOMMUMemoryRegionAttr attr,
1922 void *data);
1923
1924 /**
1925 * memory_region_iommu_attrs_to_index: return the IOMMU index to
1926 * use for translations with the given memory transaction attributes.
1927 *
1928 * @iommu_mr: the memory region
1929 * @attrs: the memory transaction attributes
1930 */
1931 int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
1932 MemTxAttrs attrs);
1933
1934 /**
1935 * memory_region_iommu_num_indexes: return the total number of IOMMU
1936 * indexes that this IOMMU supports.
1937 *
1938 * @iommu_mr: the memory region
1939 */
1940 int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr);
1941
1942 /**
1943 * memory_region_iommu_set_page_size_mask: set the supported page
1944 * sizes for a given IOMMU memory region
1945 *
1946 * @iommu_mr: IOMMU memory region
1947 * @page_size_mask: supported page size mask
1948 * @errp: pointer to Error*, to store an error if it happens.
1949 */
1950 int memory_region_iommu_set_page_size_mask(IOMMUMemoryRegion *iommu_mr,
1951 uint64_t page_size_mask,
1952 Error **errp);
1953
1954 /**
1955 * memory_region_iommu_set_iova_ranges - Set the usable IOVA ranges
1956 * for a given IOMMU memory region
1957 *
1958 * @iommu: IOMMU memory region
1959 * @iova_ranges: list of ordered IOVA ranges (at least one range)
1960 * @errp: pointer to Error*, to store an error if it happens.
1961 */
1962 int memory_region_iommu_set_iova_ranges(IOMMUMemoryRegion *iommu,
1963 GList *iova_ranges,
1964 Error **errp);
1965
1966 /**
1967 * memory_region_name: get a memory region's name
1968 *
1969 * Returns the string that was used to initialize the memory region.
1970 *
1971 * @mr: the memory region being queried
1972 */
1973 const char *memory_region_name(const MemoryRegion *mr);
1974
1975 /**
1976 * memory_region_is_logging: return whether a memory region is logging writes
1977 *
1978 * Returns %true if the memory region is logging writes for the given client
1979 *
1980 * @mr: the memory region being queried
1981 * @client: the client being queried
1982 */
1983 bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);
1984
1985 /**
1986 * memory_region_get_dirty_log_mask: return the clients for which a
1987 * memory region is logging writes.
1988 *
1989 * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
1990 * are the bit indices.
1991 *
1992 * @mr: the memory region being queried
1993 */
1994 uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);
1995
1996 /**
1997 * memory_region_is_rom: check whether a memory region is ROM
1998 *
1999 * Returns %true if a memory region is read-only memory.
2000 *
2001 * @mr: the memory region being queried
2002 */
2003 static inline bool memory_region_is_rom(MemoryRegion *mr)
2004 {
2005 return mr->ram && mr->readonly;
2006 }
2007
2008 /**
2009 * memory_region_is_nonvolatile: check whether a memory region is non-volatile
2010 *
2011 * Returns %true if a memory region is non-volatile memory.
2012 *
2013 * @mr: the memory region being queried
2014 */
2015 static inline bool memory_region_is_nonvolatile(MemoryRegion *mr)
2016 {
2017 return mr->nonvolatile;
2018 }
2019
2020 /**
2021 * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
2022 *
2023 * Returns a file descriptor backing a file-based RAM memory region,
2024 * or -1 if the region is not a file-based RAM memory region.
2025 *
2026 * @mr: the RAM or alias memory region being queried.
2027 */
2028 int memory_region_get_fd(MemoryRegion *mr);
2029
2030 /**
2031 * memory_region_from_host: Convert a pointer into a RAM memory region
2032 * and an offset within it.
2033 *
2034 * Given a host pointer inside a RAM memory region (created with
2035 * memory_region_init_ram() or memory_region_init_ram_ptr()), return
2036 * the MemoryRegion and the offset within it.
2037 *
2038 * Use with care; by the time this function returns, the returned pointer is
2039 * not protected by RCU anymore. If the caller is not within an RCU critical
2040 * section and does not hold the BQL, it must have other means of
2041 * protecting the pointer, such as a reference to the region that includes
2042 * the incoming ram_addr_t.
2043 *
2044 * @ptr: the host pointer to be converted
2045 * @offset: the offset within memory region
2046 */
2047 MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);
2048
2049 /**
2050 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
2051 *
2052 * Returns a host pointer to a RAM memory region (created with
2053 * memory_region_init_ram() or memory_region_init_ram_ptr()).
2054 *
2055 * Use with care; by the time this function returns, the returned pointer is
2056 * not protected by RCU anymore. If the caller is not within an RCU critical
2057 * section and does not hold the BQL, it must have other means of
2058 * protecting the pointer, such as a reference to the region that includes
2059 * the incoming ram_addr_t.
2060 *
2061 * @mr: the memory region being queried.
2062 */
2063 void *memory_region_get_ram_ptr(MemoryRegion *mr);
2064
2065 /* memory_region_ram_resize: Resize a RAM region.
2066 *
2067 * Resizing RAM while migrating can result in the migration being canceled.
2068 * Care has to be taken if the guest might have already detected the memory.
2069 *
2070 * @mr: a memory region created with @memory_region_init_resizeable_ram.
2071 * @newsize: the new size of the region
2072 * @errp: pointer to Error*, to store an error if it happens.
2073 */
2074 void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
2075 Error **errp);
2076
2077 /**
2078 * memory_region_msync: Synchronize selected address range of
2079 * a memory mapped region
2080 *
2081 * @mr: the memory region to be synchronized
2082 * @addr: the initial address of the range to be synchronized
2083 * @size: the size of the range to be synchronized
2084 */
2085 void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size);
2086
2087 /**
2088 * memory_region_writeback: Trigger cache writeback for
2089 * selected address range
2090 *
2091 * @mr: the memory region to be updated
2092 * @addr: the initial address of the range to be written back
2093 * @size: the size of the range to be written back
2094 */
2095 void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size);
2096
2097 /**
2098 * memory_region_set_log: Turn dirty logging on or off for a region.
2099 *
2100 * Turns dirty logging on or off for a specified client (display, migration).
2101 * Only meaningful for RAM regions.
2102 *
2103 * @mr: the memory region being updated.
2104 * @log: whether dirty logging is to be enabled or disabled.
2105 * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
2106 */
2107 void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
2108
2109 /**
2110 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
2111 *
2112 * Marks a range of bytes as dirty, after it has been dirtied outside
2113 * guest code.
2114 *
2115 * @mr: the memory region being dirtied.
2116 * @addr: the address (relative to the start of the region) being dirtied.
2117 * @size: size of the range being dirtied.
2118 */
2119 void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
2120 hwaddr size);
2121
2122 /**
2123 * memory_region_clear_dirty_bitmap - clear dirty bitmap for memory range
2124 *
2125 * This function is called when the caller wants to clear the remote
2126 * dirty bitmap of a memory range within the memory region. This can
2127 * be used by e.g. KVM to manually clear the dirty log when
2128 * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT is declared as supported by the host
2129 * kernel.
2130 *
2131 * @mr: the memory region to clear the dirty log upon
2132 * @start: start address offset within the memory region
2133 * @len: length of the range whose dirty bitmap should be cleared
2134 */
2135 void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
2136 hwaddr len);
2137
2138 /**
2139 * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
2140 * bitmap and clear it.
2141 *
2142 * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and
2143 * returns the snapshot. The snapshot can then be used to query dirty
2144 * status, using memory_region_snapshot_get_dirty. Snapshotting allows
2145 * querying the same page multiple times, which is especially useful for
2146 * display updates where the scanlines often are not page aligned.
2147 *
2148 * The dirty bitmap region which gets copied into the snapshot (and
2149 * cleared afterwards) can be larger than requested. The boundaries
2150 * are rounded up/down so complete bitmap longs (covering 64 pages on
2151 * 64-bit hosts) can be copied over into the bitmap snapshot. This
2152 * isn't a problem for display updates as the extra pages are outside
2153 * the visible area, and in case the visible area changes a full
2154 * display redraw is due anyway. Should other use cases for this
2155 * function emerge we might have to revisit this implementation
2156 * detail.
2157 *
2158 * Use g_free to release DirtyBitmapSnapshot.
2159 *
2160 * @mr: the memory region being queried.
2161 * @addr: the address (relative to the start of the region) being queried.
2162 * @size: the size of the range being queried.
2163 * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA.
2164 */
2165 DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
2166 hwaddr addr,
2167 hwaddr size,
2168 unsigned client);
2169
2170 /**
2171 * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty
2172 * in the specified dirty bitmap snapshot.
2173 *
2174 * @mr: the memory region being queried.
2175 * @snap: the dirty bitmap snapshot
2176 * @addr: the address (relative to the start of the region) being queried.
2177 * @size: the size of the range being queried.
2178 */
2179 bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
2180 DirtyBitmapSnapshot *snap,
2181 hwaddr addr, hwaddr size);
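/*
 * Illustrative sketch: a display refresh testing each scanline against a
 * snapshot. fb_size, stride, height and redraw_scanline() are hypothetical.
 *
 *     DirtyBitmapSnapshot *snap;
 *
 *     snap = memory_region_snapshot_and_clear_dirty(mr, 0, fb_size,
 *                                                   DIRTY_MEMORY_VGA);
 *     for (int y = 0; y < height; y++) {
 *         if (memory_region_snapshot_get_dirty(mr, snap, y * stride,
 *                                              stride)) {
 *             redraw_scanline(y);
 *         }
 *     }
 *     g_free(snap);
 */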
2182
2183 /**
2184 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
2185 * client.
2186 *
2187 * Marks a range of pages as no longer dirty.
2188 *
2189 * @mr: the region being updated.
2190 * @addr: the start of the subrange being cleaned.
2191 * @size: the size of the subrange being cleaned.
2192 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
2193 * %DIRTY_MEMORY_VGA.
2194 */
2195 void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
2196 hwaddr size, unsigned client);
2197
2198 /**
2199 * memory_region_flush_rom_device: Mark a range of pages dirty and invalidate
2200 * TBs (for self-modifying code).
2201 *
2202 * The MemoryRegionOps->write() callback of a ROM device must use this function
2203 * to mark byte ranges that have been modified internally, such as by directly
2204 * accessing the memory returned by memory_region_get_ram_ptr().
2205 *
2206 * This function marks the range dirty and invalidates TBs so that TCG can
2207 * detect self-modifying code.
2208 *
2209 * @mr: the region being flushed.
2210 * @addr: the start, relative to the start of the region, of the range being
2211 * flushed.
2212 * @size: the size, in bytes, of the range being flushed.
2213 */
2214 void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size);
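/*
 * Illustrative sketch: the write path of a flash-like ROM device. The
 * FlashState type is hypothetical; the flush call is what keeps TCG's
 * translated code coherent with the modified bytes.
 *
 *     static void flash_write(void *opaque, hwaddr addr,
 *                             uint64_t data, unsigned size)
 *     {
 *         FlashState *s = opaque;
 *         uint8_t *p = memory_region_get_ram_ptr(&s->mr);
 *
 *         stn_le_p(p + addr, size, data);
 *         memory_region_flush_rom_device(&s->mr, addr, size);
 *     }
 */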
2215
2216 /**
2217 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
2218 *
2219 * Allows a memory region to be marked as read-only (turning it into a ROM).
2220 * Only useful on RAM regions.
2221 *
2222 * @mr: the region being updated.
2223 * @readonly: whether the region is to be ROM or RAM.
2224 */
2225 void memory_region_set_readonly(MemoryRegion *mr, bool readonly);
2226
2227 /**
2228 * memory_region_set_nonvolatile: Turn a memory region non-volatile
2229 *
2230 * Allows a memory region to be marked as non-volatile.
2231 * Only useful on RAM regions.
2232 *
2233 * @mr: the region being updated.
2234 * @nonvolatile: whether the region is to be non-volatile.
2235 */
2236 void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile);
2237
2238 /**
2239 * memory_region_rom_device_set_romd: enable/disable ROMD mode
2240 *
2241 * Allows a ROM device (initialized with memory_region_init_rom_device())
2242 * to be set to ROMD mode (default) or MMIO mode. When it is in ROMD mode, the
2243 * device is mapped to guest memory and satisfies read access directly.
2244 * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
2245 * Writes are always handled by the #MemoryRegion.write function.
2246 *
2247 * @mr: the memory region to be updated
2248 * @romd_mode: %true to put the region into ROMD mode
2249 */
2250 void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);
2251
2252 /**
2253 * memory_region_set_coalescing: Enable memory coalescing for the region.
2254 *
2255 * Enables writes to a region to be queued for later processing. MMIO ->write
2256 * callbacks may be delayed until a non-coalesced MMIO is issued.
2257 * Only useful for IO regions. Roughly similar to write-combining hardware.
2258 *
2259 * @mr: the memory region to be write coalesced
2260 */
2261 void memory_region_set_coalescing(MemoryRegion *mr);
2262
2263 /**
2264 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
2265 * a region.
2266 *
2267 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
2268 * Multiple calls can be issued to coalesce disjoint ranges.
2269 *
2270 * @mr: the memory region to be updated.
2271 * @offset: the start of the range within the region to be coalesced.
2272 * @size: the size of the subrange to be coalesced.
2273 */
2274 void memory_region_add_coalescing(MemoryRegion *mr,
2275 hwaddr offset,
2276 uint64_t size);
2277
2278 /**
2279 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
2280 *
2281 * Disables any coalescing caused by memory_region_set_coalescing() or
2282 * memory_region_add_coalescing(). Roughly equivalent to uncacheable memory
2283 * hardware.
2284 *
2285 * @mr: the memory region to be updated.
2286 */
2287 void memory_region_clear_coalescing(MemoryRegion *mr);
2288
2289 /**
2290 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
2291 * accesses.
2292 *
2293 * Ensure that pending coalesced MMIO requests are flushed before the memory
2294 * region is accessed. This property is automatically enabled for all regions
2295 * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
2296 *
2297 * @mr: the memory region to be updated.
2298 */
2299 void memory_region_set_flush_coalesced(MemoryRegion *mr);
2300
2301 /**
2302 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
2303 * accesses.
2304 *
2305 * Clear the automatic coalesced MMIO flushing enabled via
2306 * memory_region_set_flush_coalesced. Note that this service has no effect on
2307 * memory regions that have MMIO coalescing enabled for themselves. For them,
2308 * automatic flushing will stop once coalescing is disabled.
2309 *
2310 * @mr: the memory region to be updated.
2311 */
2312 void memory_region_clear_flush_coalesced(MemoryRegion *mr);
2313
2314 /**
2315 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
2316 * is written to a location.
2317 *
2318 * Marks a word in an IO region (initialized with memory_region_init_io())
2319 * as a trigger for an eventfd event. The I/O callback will not be called.
2320 * The caller must be prepared to handle failure (that is, take the required
2321 * action if the callback _is_ called).
2322 *
2323 * @mr: the memory region being updated.
2324 * @addr: the address within @mr that is to be monitored
2325 * @size: the size of the access to trigger the eventfd
2326 * @match_data: whether to match against @data, instead of just @addr
2327 * @data: the data to match against the guest write
2328 * @e: event notifier to be triggered when @addr, @size, and @data all match.
2329 */
2330 void memory_region_add_eventfd(MemoryRegion *mr,
2331 hwaddr addr,
2332 unsigned size,
2333 bool match_data,
2334 uint64_t data,
2335 EventNotifier *e);
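/*
 * Illustrative sketch: turn a 4-byte doorbell register at a hypothetical
 * offset 0x40 into an eventfd trigger, so a backend thread can poll the
 * notifier instead of the access going through the MMIO callbacks.
 *
 *     EventNotifier notifier;
 *
 *     event_notifier_init(&notifier, 0);
 *     memory_region_add_eventfd(mr, 0x40, 4, true, 1, &notifier);
 */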
2336
2337 /**
2338 * memory_region_del_eventfd: Cancel an eventfd.
2339 *
2340 * Cancels an eventfd trigger requested by a previous
2341 * memory_region_add_eventfd() call.
2342 *
2343 * @mr: the memory region being updated.
2344 * @addr: the address within @mr that is to be monitored
2345 * @size: the size of the access to trigger the eventfd
2346 * @match_data: whether to match against @data, instead of just @addr
2347 * @data: the data to match against the guest write
2348 * @e: event notifier to be triggered when @addr, @size, and @data all match.
2349 */
2350 void memory_region_del_eventfd(MemoryRegion *mr,
2351 hwaddr addr,
2352 unsigned size,
2353 bool match_data,
2354 uint64_t data,
2355 EventNotifier *e);
2356
2357 /**
2358 * memory_region_add_subregion: Add a subregion to a container.
2359 *
2360 * Adds a subregion at @offset. The subregion may not overlap with other
2361 * subregions (except for those explicitly marked as overlapping). A region
2362 * may only be added once as a subregion (unless removed with
2363 * memory_region_del_subregion()); use memory_region_init_alias() if you
2364 * want a region to be a subregion in multiple locations.
2365 *
2366 * @mr: the region to contain the new subregion; must be a container
2367 * initialized with memory_region_init().
2368 * @offset: the offset relative to @mr where @subregion is added.
2369 * @subregion: the subregion to be added.
2370 */
2371 void memory_region_add_subregion(MemoryRegion *mr,
2372 hwaddr offset,
2373 MemoryRegion *subregion);
2374 /**
2375 * memory_region_add_subregion_overlap: Add a subregion to a container
2376 * with overlap.
2377 *
2378 * Adds a subregion at @offset. The subregion may overlap with other
2379 * subregions. Conflicts are resolved by having a higher @priority hide a
2380 * lower @priority. Subregions without priority are taken as @priority 0.
2381 * A region may only be added once as a subregion (unless removed with
2382 * memory_region_del_subregion()); use memory_region_init_alias() if you
2383 * want a region to be a subregion in multiple locations.
2384 *
2385 * @mr: the region to contain the new subregion; must be a container
2386 * initialized with memory_region_init().
2387 * @offset: the offset relative to @mr where @subregion is added.
2388 * @subregion: the subregion to be added.
2389 * @priority: used for resolving overlaps; highest priority wins.
2390 */
2391 void memory_region_add_subregion_overlap(MemoryRegion *mr,
2392 hwaddr offset,
2393 MemoryRegion *subregion,
2394 int priority);
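/*
 * Illustrative sketch: overlay a small MMIO window on top of RAM inside
 * one container; priority 1 makes the window hide the RAM where the two
 * overlap, while the rest of the RAM stays visible. All names are made up.
 *
 *     memory_region_add_subregion(container, 0x0, &ram);
 *     memory_region_add_subregion_overlap(container, 0xf000,
 *                                         &mmio_window, 1);
 */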
2395
2396 /**
2397 * memory_region_get_ram_addr: Get the ram address associated with a memory
2398 * region
2399 *
2400 * @mr: the region to be queried
2401 */
2402 ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
2403
2404 uint64_t memory_region_get_alignment(const MemoryRegion *mr);
2405 /**
2406 * memory_region_del_subregion: Remove a subregion.
2407 *
2408 * Removes a subregion from its container.
2409 *
2410 * @mr: the container to be updated.
2411 * @subregion: the region being removed; must be a current subregion of @mr.
2412 */
2413 void memory_region_del_subregion(MemoryRegion *mr,
2414 MemoryRegion *subregion);
2415
2416 /*
2417 * memory_region_set_enabled: dynamically enable or disable a region
2418 *
2419 * Enables or disables a memory region. A disabled memory region
2420 * ignores all accesses to itself and its subregions. It does not
2421 * obscure sibling subregions with lower priority - it simply behaves as
2422 * if it was removed from the hierarchy.
2423 *
2424 * Regions default to being enabled.
2425 *
2426 * @mr: the region to be updated
2427 * @enabled: whether to enable or disable the region
2428 */
2429 void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
2430
2431 /*
2432 * memory_region_set_address: dynamically update the address of a region
2433 *
2434 * Dynamically updates the address of a region, relative to its container.
2435 * May be used on regions that are currently part of a memory hierarchy.
2436 *
2437 * @mr: the region to be updated
2438 * @addr: new address, relative to container region
2439 */
2440 void memory_region_set_address(MemoryRegion *mr, hwaddr addr);
2441
2442 /*
2443 * memory_region_set_size: dynamically update the size of a region.
2444 *
2445 * Dynamically updates the size of a region.
2446 *
2447 * @mr: the region to be updated
2448 * @size: the new size of the region.
2449 */
2450 void memory_region_set_size(MemoryRegion *mr, uint64_t size);
2451
2452 /*
2453 * memory_region_set_alias_offset: dynamically update a memory alias's offset
2454 *
2455 * Dynamically updates the offset into the target region that an alias points
2456 * to, as if the @offset argument to memory_region_init_alias() had changed.
2457 *
2458 * @mr: the #MemoryRegion to be updated; should be an alias.
2459 * @offset: the new offset into the target memory region
2460 */
2461 void memory_region_set_alias_offset(MemoryRegion *mr,
2462 hwaddr offset);
2463
2464 /*
2465 * memory_region_set_unmergeable: Set a memory region unmergeable
2466 *
2467 * Mark a memory region unmergeable, resulting in the memory region (or
2468 * everything contained in a memory region container) not getting merged when
2469 * simplifying the address space and notifying memory listeners. Consequently,
2470 * memory listeners will never get notified about ranges that are larger than
2471 * the original memory regions.
2472 *
2473 * This is primarily useful when multiple aliases to a RAM memory region are
2474 * mapped into a memory region container, and updates (e.g., enable/disable or
2475 * map/unmap) of individual memory region aliases are not supposed to affect
2476 * other memory regions in the same container.
2477 *
2478 * @mr: the #MemoryRegion to be updated
2479 * @unmergeable: whether to mark the #MemoryRegion unmergeable
2480 */
2481 void memory_region_set_unmergeable(MemoryRegion *mr, bool unmergeable);
2482
2483 /**
2484 * memory_region_present: checks if an address relative to a @container
2485 * translates into a #MemoryRegion within @container
2486 *
2487 * Answer whether a #MemoryRegion within @container covers the address
2488 * @addr.
2489 *
2490 * @container: a #MemoryRegion within which @addr is a relative address
2491 * @addr: the area within @container to be searched
2492 */
2493 bool memory_region_present(MemoryRegion *container, hwaddr addr);
2494
2495 /**
2496 * memory_region_is_mapped: returns true if #MemoryRegion is mapped
2497 * into another memory region, which does not necessarily imply that it is
2498 * mapped into an address space.
2499 *
2500 * @mr: a #MemoryRegion which should be checked if it's mapped
2501 */
2502 bool memory_region_is_mapped(MemoryRegion *mr);
2503
2504 /**
2505 * memory_region_get_ram_discard_manager: get the #RamDiscardManager for a
2506 * #MemoryRegion
2507 *
2508 * The #RamDiscardManager cannot change while a memory region is mapped.
2509 *
2510 * @mr: the #MemoryRegion
2511 */
2512 RamDiscardManager *memory_region_get_ram_discard_manager(MemoryRegion *mr);
2513
2514 /**
2515 * memory_region_has_ram_discard_manager: check whether a #MemoryRegion has a
2516 * #RamDiscardManager assigned
2517 *
2518 * @mr: the #MemoryRegion
2519 */
2520 static inline bool memory_region_has_ram_discard_manager(MemoryRegion *mr)
2521 {
2522 return !!memory_region_get_ram_discard_manager(mr);
2523 }
2524
2525 /**
2526 * memory_region_set_ram_discard_manager: set the #RamDiscardManager for a
2527 * #MemoryRegion
2528 *
2529 * This function must not be called for a mapped #MemoryRegion, a #MemoryRegion
2530 * that does not cover RAM, or a #MemoryRegion that already has a
2531 * #RamDiscardManager assigned.
2532 *
2533 * @mr: the #MemoryRegion
2534 * @rdm: #RamDiscardManager to set
2535 */
2536 void memory_region_set_ram_discard_manager(MemoryRegion *mr,
2537 RamDiscardManager *rdm);
2538
2539 /**
2540 * memory_region_find: translate an address/size relative to a
2541 * MemoryRegion into a #MemoryRegionSection.
2542 *
2543 * Locates the first #MemoryRegion within @mr that overlaps the range
2544 * given by @addr and @size.
2545 *
2546 * Returns a #MemoryRegionSection that describes a contiguous overlap.
2547 * It will have the following characteristics:
2548 * - @size = 0 iff no overlap was found
2549 * - @mr is non-%NULL iff an overlap was found
2550 *
2551 * Remember that in the return value the @offset_within_region is
2552 * relative to the returned region (in the .@mr field), not to the
2553 * @mr argument.
2554 *
2555 * Similarly, the .@offset_within_address_space is relative to the
2556 * address space that contains both regions, the passed and the
2557 * returned one. However, in the special case where the @mr argument
2558 * has no container (and thus is the root of the address space), the
2559 * following will hold:
2560 * - @offset_within_address_space >= @addr
2561 * - @offset_within_address_space + .@size <= @addr + @size
2562 *
2563 * @mr: a MemoryRegion within which @addr is a relative address
2564 * @addr: start of the area within @mr to be searched
2565 * @size: size of the area to be searched
2566 */
2567 MemoryRegionSection memory_region_find(MemoryRegion *mr,
2568 hwaddr addr, uint64_t size);
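/*
 * Illustrative sketch: probe what backs an address under @sysmem (a
 * hypothetical root region). The returned section holds a reference on
 * its .mr, so release it with memory_region_unref() when done.
 *
 *     MemoryRegionSection sec = memory_region_find(sysmem, addr, 4);
 *
 *     if (sec.mr) {
 *         ... inspect sec.offset_within_region, int128_get64(sec.size) ...
 *         memory_region_unref(sec.mr);
 *     }
 */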
2569
2570 /**
2571 * memory_global_dirty_log_sync: synchronize the dirty log for all memory
2572 *
2573 * Synchronizes the dirty page log for all address spaces.
2574 *
2575 * @last_stage: whether this is the last stage of live migration
2576 */
2577 void memory_global_dirty_log_sync(bool last_stage);
2578
2579 /**
2580 * memory_global_after_dirty_log_sync: synchronize vCPUs after a dirty log sync
2581 *
2582 * Synchronizes the vCPUs with a thread that is reading the dirty bitmap.
2583 * This function must be called after the dirty log bitmap is cleared, and
2584 * before dirty guest memory pages are read. If you are using
2585 * #DirtyBitmapSnapshot, memory_region_snapshot_and_clear_dirty() takes
2586 * care of doing this.
2587 */
2588 void memory_global_after_dirty_log_sync(void);
2589
2590 /**
2591 * memory_region_transaction_begin: Start a transaction.
2592 *
2593 * During a transaction, changes will be accumulated and made visible
2594 * only when the transaction ends (is committed).
2595 */
2596 void memory_region_transaction_begin(void);
2597
2598 /**
2599 * memory_region_transaction_commit: Commit a transaction and make changes
2600 * visible to the guest.
2601 */
2602 void memory_region_transaction_commit(void);
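/*
 * Illustrative sketch: moving a BAR-like region atomically. Batching the
 * disable/move/enable means listeners (e.g. the KVM memory slot code) see
 * one consistent update instead of three. s and new_base are hypothetical.
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(&s->bar_mr, false);
 *     memory_region_set_address(&s->bar_mr, new_base);
 *     memory_region_set_enabled(&s->bar_mr, true);
 *     memory_region_transaction_commit();
 */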
2603
2604 /**
2605 * memory_listener_register: register callbacks to be called when memory
2606 * sections are mapped into or unmapped from an address
2607 * space
2608 *
2609 * @listener: an object containing the callbacks to be called
2610 * @filter: if non-%NULL, only regions in this address space will be observed
2611 */
2612 void memory_listener_register(MemoryListener *listener, AddressSpace *filter);
2613
2614 /**
2615 * memory_listener_unregister: undo the effect of memory_listener_register()
2616 *
2617 * @listener: an object containing the callbacks to be removed
2618 */
2619 void memory_listener_unregister(MemoryListener *listener);
2620
2621 /**
2622 * memory_global_dirty_log_start: begin dirty logging for all regions
2623 *
2624 * @flags: purpose of starting dirty log, migration or dirty rate
2625 * @errp: pointer to Error*, to store an error if it happens.
2626 *
2627 * Return: true on success, else false setting @errp with error.
2628 */
2629 bool memory_global_dirty_log_start(unsigned int flags, Error **errp);
2630
2631 /**
2632 * memory_global_dirty_log_stop: end dirty logging for all regions
2633 *
2634 * @flags: purpose of stopping dirty log, migration or dirty rate
2635 */
2636 void memory_global_dirty_log_stop(unsigned int flags);
2637
2638 void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled);
2639
2640 bool memory_region_access_valid(MemoryRegion *mr, hwaddr addr,
2641 unsigned size, bool is_write,
2642 MemTxAttrs attrs);
2643
2644 /**
2645 * memory_region_dispatch_read: perform a read directly to the specified
2646 * MemoryRegion.
2647 *
2648 * @mr: #MemoryRegion to access
2649 * @addr: address within that region
2650 * @pval: pointer to uint64_t which the data is written to
2651 * @op: size, sign, and endianness of the memory operation
2652 * @attrs: memory transaction attributes to use for the access
2653 */
2654 MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
2655 hwaddr addr,
2656 uint64_t *pval,
2657 MemOp op,
2658 MemTxAttrs attrs);
2659 /**
2660 * memory_region_dispatch_write: perform a write directly to the specified
2661 * MemoryRegion.
2662 *
2663 * @mr: #MemoryRegion to access
2664 * @addr: address within that region
2665 * @data: data to write
2666 * @op: size, sign, and endianness of the memory operation
2667 * @attrs: memory transaction attributes to use for the access
2668 */
2669 MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
2670 hwaddr addr,
2671 uint64_t data,
2672 MemOp op,
2673 MemTxAttrs attrs);
2674
2675 /**
2676 * address_space_init: initializes an address space
2677 *
2678 * @as: an uninitialized #AddressSpace
2679 * @root: a #MemoryRegion that routes addresses for the address space
2680 * @name: an address space name. The name is only used for debugging
2681 * output.
2682 */
2683 void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);
2684
2685 /**
2686 * address_space_destroy: destroy an address space
2687 *
2688 * Releases all resources associated with an address space. After an address space
2689 * is destroyed, its root memory region (given by address_space_init()) may be destroyed
2690 * as well.
2691 *
2692 * @as: address space to be destroyed
2693 */
2694 void address_space_destroy(AddressSpace *as);
2695
2696 /**
2697 * address_space_remove_listeners: unregister all listeners of an address space
2698 *
2699 * Removes all callbacks previously registered with memory_listener_register()
2700 * for @as.
2701 *
2702 * @as: an initialized #AddressSpace
2703 */
2704 void address_space_remove_listeners(AddressSpace *as);
2705
2706 /**
2707 * address_space_rw: read from or write to an address space.
2708 *
2709 * Return a MemTxResult indicating whether the operation succeeded
2710 * or failed (eg unassigned memory, device rejected the transaction,
2711 * IOMMU fault).
2712 *
2713 * @as: #AddressSpace to be accessed
2714 * @addr: address within that address space
2715 * @attrs: memory transaction attributes
2716 * @buf: buffer with the data transferred
2717 * @len: the number of bytes to read or write
2718 * @is_write: indicates the transfer direction
2719 */
2720 MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
2721 MemTxAttrs attrs, void *buf,
2722 hwaddr len, bool is_write);
2723
2724 /**
2725 * address_space_write: write to address space.
2726 *
2727 * Return a MemTxResult indicating whether the operation succeeded
2728 * or failed (eg unassigned memory, device rejected the transaction,
2729 * IOMMU fault).
2730 *
2731 * @as: #AddressSpace to be accessed
2732 * @addr: address within that address space
2733 * @attrs: memory transaction attributes
2734 * @buf: buffer with the data transferred
2735 * @len: the number of bytes to write
2736 */
2737 MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
2738 MemTxAttrs attrs,
2739 const void *buf, hwaddr len);
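/*
 * Illustrative sketch: a device DMA-ing a completion record into guest
 * memory and checking the transaction result. completion and desc_gpa
 * are hypothetical.
 *
 *     MemTxResult res;
 *
 *     res = address_space_write(&address_space_memory, desc_gpa,
 *                               MEMTXATTRS_UNSPECIFIED,
 *                               &completion, sizeof(completion));
 *     if (res != MEMTX_OK) {
 *         qemu_log_mask(LOG_GUEST_ERROR, "completion DMA failed\n");
 *     }
 */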
2740
2741 /**
2742 * address_space_write_rom: write to address space, including ROM.
2743 *
2744 * This function writes to the specified address space, but will
2745 * write data to both ROM and RAM. This is used for non-guest
2746 * writes like writes from the gdb debug stub or initial loading
2747 * of ROM contents.
2748 *
2749 * Note that portions of the write which attempt to write data to
2750 * a device will be silently ignored -- only real RAM and ROM will
2751 * be written to.
2752 *
2753 * Return a MemTxResult indicating whether the operation succeeded
2754 * or failed (eg unassigned memory, device rejected the transaction,
2755 * IOMMU fault).
2756 *
2757 * @as: #AddressSpace to be accessed
2758 * @addr: address within that address space
2759 * @attrs: memory transaction attributes
2760 * @buf: buffer with the data transferred
2761 * @len: the number of bytes to write
2762 */
2763 MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
2764 MemTxAttrs attrs,
2765 const void *buf, hwaddr len);
2766
2767 /* address_space_ld*: load from an address space
2768 * address_space_st*: store to an address space
2769 *
2770 * These functions perform a load or store of the byte, word,
2771 * longword or quad to the specified address within the AddressSpace.
2772 * The _le suffixed functions treat the data as little endian;
2773 * _be indicates big endian; no suffix indicates "same endianness
2774 * as guest CPU".
2775 *
2776 * The "guest CPU endianness" accessors are deprecated for use outside
2777 * target-* code; devices should be CPU-agnostic and use either the LE
2778 * or the BE accessors.
2779 *
2780 * @as: #AddressSpace to be accessed
2781 * @addr: address within that address space
2782 * @val: data value, for stores
2783 * @attrs: memory transaction attributes
2784 * @result: location to write the success/failure of the transaction;
2785 * if NULL, this information is discarded
2786 */
2787
2788 #define SUFFIX
2789 #define ARG1 as
2790 #define ARG1_DECL AddressSpace *as
2791 #include "exec/memory_ldst.h.inc"
2792
2793 #define SUFFIX
2794 #define ARG1 as
2795 #define ARG1_DECL AddressSpace *as
2796 #include "exec/memory_ldst_phys.h.inc"
2797
2798 struct MemoryRegionCache {
2799 void *ptr;
2800 hwaddr xlat;
2801 hwaddr len;
2802 FlatView *fv;
2803 MemoryRegionSection mrs;
2804 bool is_write;
2805 };
2806
2807 /* address_space_ld*_cached: load from a cached #MemoryRegion
2808 * address_space_st*_cached: store into a cached #MemoryRegion
2809 *
2810 * These functions perform a load or store of the byte, word,
2811 * longword or quad to the specified address. The address is
2812 * a physical address in the AddressSpace, but it must lie within
2813 * a #MemoryRegion that was mapped with address_space_cache_init.
2814 *
2815 * The _le suffixed functions treat the data as little endian;
2816 * _be indicates big endian; no suffix indicates "same endianness
2817 * as guest CPU".
2818 *
2819 * The "guest CPU endianness" accessors are deprecated for use outside
2820 * target-* code; devices should be CPU-agnostic and use either the LE
2821 * or the BE accessors.
2822 *
2823 * @cache: previously initialized #MemoryRegionCache to be accessed
2824 * @addr: address within the address space
2825 * @val: data value, for stores
2826 * @attrs: memory transaction attributes
2827 * @result: location to write the success/failure of the transaction;
2828 * if NULL, this information is discarded
2829 */
2830
2831 #define SUFFIX _cached_slow
2832 #define ARG1 cache
2833 #define ARG1_DECL MemoryRegionCache *cache
2834 #include "exec/memory_ldst.h.inc"
2835
2836 /* Inline fast path for direct RAM access. */
2837 static inline uint8_t address_space_ldub_cached(MemoryRegionCache *cache,
2838 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
2839 {
2840 assert(addr < cache->len);
2841 if (likely(cache->ptr)) {
2842 return ldub_p(cache->ptr + addr);
2843 } else {
2844 return address_space_ldub_cached_slow(cache, addr, attrs, result);
2845 }
2846 }
2847
2848 static inline void address_space_stb_cached(MemoryRegionCache *cache,
2849 hwaddr addr, uint8_t val, MemTxAttrs attrs, MemTxResult *result)
2850 {
2851 assert(addr < cache->len);
2852 if (likely(cache->ptr)) {
2853 stb_p(cache->ptr + addr, val);
2854 } else {
2855 address_space_stb_cached_slow(cache, addr, val, attrs, result);
2856 }
2857 }
2858
2859 #define ENDIANNESS _le
2860 #include "exec/memory_ldst_cached.h.inc"
2861
2862 #define ENDIANNESS _be
2863 #include "exec/memory_ldst_cached.h.inc"
2864
2865 #define SUFFIX _cached
2866 #define ARG1 cache
2867 #define ARG1_DECL MemoryRegionCache *cache
2868 #include "exec/memory_ldst_phys.h.inc"
2869
2870 /* address_space_cache_init: prepare for repeated access to a physical
2871 * memory region
2872 *
2873 * @cache: #MemoryRegionCache to be filled
2874 * @as: #AddressSpace to be accessed
2875 * @addr: address within that address space
2876 * @len: length of buffer
2877 * @is_write: indicates the transfer direction
2878 *
2879 * Will only work with RAM, and may map a subset of the requested range by
2880 * returning a value that is less than @len. On failure, a negative errno
2881 * value is returned.
2882 *
2883 * Because it only works with RAM, this function can be used for
2884 * read-modify-write operations. In this case, is_write should be %true.
2885 *
2886 * Note that addresses passed to the address_space_*_cached functions
2887 * are relative to @addr.
2888 */
2889 int64_t address_space_cache_init(MemoryRegionCache *cache,
2890 AddressSpace *as,
2891 hwaddr addr,
2892 hwaddr len,
2893 bool is_write);
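/*
 * Illustrative sketch: map a guest descriptor once, then issue cheap
 * repeated accesses relative to the cached base. desc and desc_gpa are
 * hypothetical; the cache is destroyed only after a successful init.
 *
 *     MemoryRegionCache cache;
 *     int64_t mapped;
 *
 *     mapped = address_space_cache_init(&cache, as, desc_gpa,
 *                                       sizeof(desc), false);
 *     if (mapped == (int64_t)sizeof(desc)) {
 *         address_space_read_cached(&cache, 0, &desc, sizeof(desc));
 *         address_space_cache_destroy(&cache);
 *     }
 */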
2894
2895 /**
2896 * address_space_cache_init_empty: Initialize empty #MemoryRegionCache
2897 *
2898 * @cache: The #MemoryRegionCache to operate on.
2899 *
2900 * Initializes a #MemoryRegionCache structure without a memory region attached.
2901 * A cache initialized this way can only be safely destroyed; it cannot be used.
2902 */
2903 static inline void address_space_cache_init_empty(MemoryRegionCache *cache)
2904 {
2905 cache->mrs.mr = NULL;
2906 /* There is no real need to initialize fv, but it makes Coverity happy. */
2907 cache->fv = NULL;
2908 }
2909
2910 /**
2911 * address_space_cache_invalidate: complete a write to a #MemoryRegionCache
2912 *
2913 * @cache: The #MemoryRegionCache to operate on.
2914 * @addr: The first physical address that was written, relative to the
2915 * address that was passed to @address_space_cache_init.
2916 * @access_len: The number of bytes that were written starting at @addr.
2917 */
2918 void address_space_cache_invalidate(MemoryRegionCache *cache,
2919 hwaddr addr,
2920 hwaddr access_len);
2921
2922 /**
2923 * address_space_cache_destroy: free a #MemoryRegionCache
2924 *
2925 * @cache: The #MemoryRegionCache whose memory should be released.
2926 */
2927 void address_space_cache_destroy(MemoryRegionCache *cache);
2928
2929 /* address_space_get_iotlb_entry: translate an address into an IOTLB
2930 * entry. Should be called from an RCU critical section.
2931 */
2932 IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
2933 bool is_write, MemTxAttrs attrs);
2934
2935 /* address_space_translate: translate an address range in an address space
2936 * into a MemoryRegion and an address range within that region. Should be
2937 * called from an RCU critical section, so that the last reference to the
2938 * returned region does not disappear after address_space_translate returns.
2939 *
2940 * @fv: #FlatView to be accessed
2941 * @addr: address within that address space
2942 * @xlat: pointer to address within the returned memory region section's
2943 * #MemoryRegion.
2944 * @len: pointer to length
2945 * @is_write: indicates the transfer direction
2946 * @attrs: memory attributes
2947 */
2948 MemoryRegion *flatview_translate(FlatView *fv,
2949 hwaddr addr, hwaddr *xlat,
2950 hwaddr *len, bool is_write,
2951 MemTxAttrs attrs);
2952
2953 static inline MemoryRegion *address_space_translate(AddressSpace *as,
2954 hwaddr addr, hwaddr *xlat,
2955 hwaddr *len, bool is_write,
2956 MemTxAttrs attrs)
2957 {
2958 return flatview_translate(address_space_to_flatview(as),
2959 addr, xlat, len, is_write, attrs);
2960 }
2961
2962 /* address_space_access_valid: check for validity of accessing an address
2963 * space range
2964 *
2965 * Check whether memory is assigned to the given address space range, and
2966 * access is permitted by any IOMMU regions that are active for the address
2967 * space.
2968 *
2969 * For now, addr and len should be aligned to a page size. This limitation
2970 * will be lifted in the future.
2971 *
2972 * @as: #AddressSpace to be accessed
2973 * @addr: address within that address space
2974 * @len: length of the area to be checked
2975 * @is_write: indicates the transfer direction
2976 * @attrs: memory attributes
2977 */
2978 bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len,
2979 bool is_write, MemTxAttrs attrs);
2980
2981 /* address_space_map: map a physical memory region into a host virtual address
2982 *
2983 * May map a subset of the requested range, given by and returned in @plen.
2984 * May return %NULL and set *@plen to zero, if resources needed to perform
2985 * the mapping are exhausted.
2986 * Use only for reads OR writes - not for read-modify-write operations.
2987 * Use address_space_register_map_client() to know when retrying the map
2988 * operation is likely to succeed.
2989 *
2990 * @as: #AddressSpace to be accessed
2991 * @addr: address within that address space
2992 * @plen: pointer to length of buffer; updated on return
2993 * @is_write: indicates the transfer direction
2994 * @attrs: memory attributes
2995 */
2996 void *address_space_map(AddressSpace *as, hwaddr addr,
2997 hwaddr *plen, bool is_write, MemTxAttrs attrs);
2998
2999 /* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
3000 *
3001 * Will also mark the memory as dirty if @is_write == %true. @access_len gives
3002 * the amount of memory that was actually read or written by the caller.
3003 *
3004 * @as: #AddressSpace used
3005 * @buffer: host pointer as returned by address_space_map()
3006 * @len: buffer length as returned by address_space_map()
3007 * @access_len: amount of data actually transferred
3008 * @is_write: indicates the transfer direction
3009 */
3010 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
3011 bool is_write, hwaddr access_len);
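/*
 * Illustrative sketch: zero a guest-physical buffer through a temporary
 * host mapping. gpa and len are hypothetical; note that plen may come
 * back smaller than requested and that the mapping must be unmapped with
 * the length that was actually used.
 *
 *     hwaddr plen = len;
 *     void *host = address_space_map(as, gpa, &plen, true,
 *                                    MEMTXATTRS_UNSPECIFIED);
 *
 *     if (host) {
 *         memset(host, 0, plen);
 *         address_space_unmap(as, host, plen, true, plen);
 *     }
 */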
3012
3013 /*
3014 * address_space_register_map_client: Register a callback to invoke when
3015 * resources for address_space_map() are available again.
3016 *
3017 * address_space_map may fail when there are not enough resources available,
3018 * such as when bounce buffer memory would exceed the limit. The callback can
3019 * be used to retry the address_space_map operation. Note that the callback
3020 * gets automatically removed after firing.
3021 *
3022 * @as: #AddressSpace to be accessed
3023 * @bh: callback to invoke when address_space_map() retry is appropriate
3024 */
3025 void address_space_register_map_client(AddressSpace *as, QEMUBH *bh);
3026
3027 /*
3028 * address_space_unregister_map_client: Unregister a callback that has
3029 * previously been registered and not fired yet.
3030 *
3031 * @as: #AddressSpace to be accessed
3032 * @bh: callback to unregister
3033 */
3034 void address_space_unregister_map_client(AddressSpace *as, QEMUBH *bh);
3035
3036 /* Internal functions, part of the implementation of address_space_read. */
3037 MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
3038 MemTxAttrs attrs, void *buf, hwaddr len);
3039 MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
3040 MemTxAttrs attrs, void *buf,
3041 hwaddr len, hwaddr addr1, hwaddr l,
3042 MemoryRegion *mr);
3043 void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);
3044
/* Internal functions, part of the implementation of address_space_read_cached
 * and address_space_write_cached. */
MemTxResult address_space_read_cached_slow(MemoryRegionCache *cache,
                                           hwaddr addr, void *buf, hwaddr len);
MemTxResult address_space_write_cached_slow(MemoryRegionCache *cache,
                                            hwaddr addr, const void *buf,
                                            hwaddr len);

int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr);
bool prepare_mmio_access(MemoryRegion *mr);

/*
 * Return true if @mr allows direct host access (plain loads and stores to
 * the underlying RAM) for the given transfer direction, i.e. the access
 * does not have to go through the MMIO dispatch path.
 */
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (is_write) {
        return memory_region_is_ram(mr) && !mr->readonly &&
               !mr->rom_device && !memory_region_is_ram_device(mr);
    } else {
        return (memory_region_is_ram(mr) && !memory_region_is_ram_device(mr)) ||
               memory_region_is_romd(mr);
    }
}

/**
 * address_space_read: read from an address space.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (e.g. unassigned memory, device rejected the transaction,
 * IOMMU fault). Called within RCU critical section.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline __attribute__((__always_inline__))
MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, void *buf,
                               hwaddr len)
{
    MemTxResult result = MEMTX_OK;
    hwaddr l, addr1;
    void *ptr;
    MemoryRegion *mr;
    FlatView *fv;

    if (__builtin_constant_p(len)) {
        if (len) {
            RCU_READ_LOCK_GUARD();
            fv = address_space_to_flatview(as);
            l = len;
            mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
            if (len == l && memory_access_is_direct(mr, false)) {
                /* Fast path: the whole access hits directly mapped RAM. */
                ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
                memcpy(buf, ptr, len);
            } else {
                /* Slow path: MMIO dispatch and/or a multi-region access. */
                result = flatview_read_continue(fv, addr, attrs, buf, len,
                                                addr1, l, mr);
            }
        }
    } else {
        result = address_space_read_full(as, addr, attrs, buf, len);
    }
    return result;
}

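/*
 * Usage sketch (illustrative; desc_addr is an assumed guest address): read
 * a little-endian 32-bit descriptor field and check the transaction result
 * before trusting the data.
 *
 *   uint32_t desc;
 *
 *   if (address_space_read(as, desc_addr, MEMTXATTRS_UNSPECIFIED,
 *                          &desc, sizeof(desc)) != MEMTX_OK) {
 *       return;
 *   }
 *   desc = le32_to_cpu(desc);
 */
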
/**
 * address_space_read_cached: read from a cached RAM region
 *
 * @cache: Cached region to be addressed
 * @addr: address relative to the base of the RAM region
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline MemTxResult
address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
                          void *buf, hwaddr len)
{
    assert(addr < cache->len && len <= cache->len - addr);
    fuzz_dma_read_cb(cache->xlat + addr, len, cache->mrs.mr);
    if (likely(cache->ptr)) {
        memcpy(buf, cache->ptr + addr, len);
        return MEMTX_OK;
    } else {
        return address_space_read_cached_slow(cache, addr, buf, len);
    }
}

/**
 * address_space_write_cached: write to a cached RAM region
 *
 * @cache: Cached region to be addressed
 * @addr: address relative to the base of the RAM region
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline MemTxResult
address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
                           const void *buf, hwaddr len)
{
    assert(addr < cache->len && len <= cache->len - addr);
    if (likely(cache->ptr)) {
        memcpy(cache->ptr + addr, buf, len);
        return MEMTX_OK;
    } else {
        return address_space_write_cached_slow(cache, addr, buf, len);
    }
}

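/*
 * Sketch of the cached-access pattern (illustrative; ring_addr and ring_len
 * are assumed, and address_space_cache_init()/address_space_cache_destroy()
 * are the cache setup/teardown declared elsewhere in this header): set the
 * cache up once, then issue many cheap accesses against it.
 *
 *   MemoryRegionCache cache = MEMORY_REGION_CACHE_INVALID;
 *   uint16_t idx;
 *
 *   if (address_space_cache_init(&cache, as, ring_addr, ring_len,
 *                                false) >= 0) {
 *       address_space_read_cached(&cache, 2, &idx, sizeof(idx));
 *       address_space_cache_destroy(&cache);
 *   }
 */
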
/**
 * address_space_set: Fill address space with a constant byte.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (e.g. unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @c: constant byte to fill the memory
 * @len: the number of bytes to fill with the constant byte
 * @attrs: memory transaction attributes
 */
MemTxResult address_space_set(AddressSpace *as, hwaddr addr,
                              uint8_t c, hwaddr len, MemTxAttrs attrs);

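/*
 * Usage sketch (illustrative; page_addr is an assumed guest address): zero
 * a 4KiB guest page and propagate a transaction failure to the caller.
 *
 *   MemTxResult r = address_space_set(as, page_addr, 0, 4096,
 *                                     MEMTXATTRS_UNSPECIFIED);
 *   if (r != MEMTX_OK) {
 *       return r;
 *   }
 */
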
#ifdef COMPILING_PER_TARGET
/* enum device_endian to MemOp. */
static inline MemOp devend_memop(enum device_endian end)
{
    QEMU_BUILD_BUG_ON(DEVICE_HOST_ENDIAN != DEVICE_LITTLE_ENDIAN &&
                      DEVICE_HOST_ENDIAN != DEVICE_BIG_ENDIAN);

#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
    /* Swap if non-host endianness or native (target) endianness */
    return (end == DEVICE_HOST_ENDIAN) ? 0 : MO_BSWAP;
#else
    const int non_host_endianness =
        DEVICE_LITTLE_ENDIAN ^ DEVICE_BIG_ENDIAN ^ DEVICE_HOST_ENDIAN;

    /* In this case, native (target) endianness needs no swap. */
    return (end == non_host_endianness) ? MO_BSWAP : 0;
#endif
}
#endif /* COMPILING_PER_TARGET */

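/*
 * Usage sketch (illustrative; mr and addr are assumed): a per-target caller
 * can combine an access size with the byte-swap implied by a device_endian
 * value when dispatching directly to a region.
 *
 *   uint64_t val;
 *   memory_region_dispatch_read(mr, addr, &val,
 *                               MO_32 | devend_memop(DEVICE_LITTLE_ENDIAN),
 *                               MEMTXATTRS_UNSPECIFIED);
 */
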
/*
 * Inhibit technologies that require discarding of pages in RAM blocks, e.g.,
 * to manage the actual amount of memory consumed by the VM (then, the memory
 * provided by RAM blocks might be bigger than the desired memory consumption).
 * This *must* be set if:
 * - Discarding parts of a RAM block does not result in the change being
 *   reflected in the VM and the pages getting freed.
 * - All memory in RAM blocks is pinned or duplicated, blindly invalidating
 *   any previous discards.
 * - Discarding parts of a RAM block will result in integrity issues (e.g.,
 *   encrypted VMs).
 * Technologies that only temporarily pin the current working set of a
 * driver are fine, because we don't expect such pages to be discarded
 * (esp. based on guest action like balloon inflation).
 *
 * This is *not* to be used to protect from concurrent discards (esp.,
 * postcopy).
 *
 * Returns 0 if successful. Returns -EBUSY if a technology that relies on
 * discards to work reliably is active.
 */
int ram_block_discard_disable(bool state);

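/*
 * Usage sketch (illustrative): a device model that pins all guest RAM, such
 * that later discards would go unnoticed, disables discards while it is
 * realized and re-enables them on unrealize.
 *
 *   if (ram_block_discard_disable(true)) {
 *       error_setg(errp, "a technology relying on RAM discards is active");
 *       return;
 *   }
 *   (... device lifetime ...)
 *   ram_block_discard_disable(false);
 */
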
/*
 * See ram_block_discard_disable(): only disable uncoordinated discards,
 * keeping coordinated discards (via the RamDiscardManager) enabled.
 */
int ram_block_uncoordinated_discard_disable(bool state);

/*
 * Inhibit technologies that disable discarding of pages in RAM blocks.
 *
 * Returns 0 if successful. Returns -EBUSY if discarding of pages has
 * already been disabled.
 */
int ram_block_discard_require(bool state);

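/*
 * Usage sketch (illustrative): a technology that depends on discards being
 * functional declares that requirement up front and fails cleanly if
 * discarding was already disabled.
 *
 *   if (ram_block_discard_require(true)) {
 *       error_setg(errp, "discarding RAM is currently disabled");
 *       return;
 *   }
 *   (... technology active ...)
 *   ram_block_discard_require(false);
 */
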
/*
 * See ram_block_discard_require(): only inhibit technologies that disable
 * uncoordinated discarding of pages in RAM blocks, allowing co-existence with
 * technologies that only inhibit uncoordinated discards (via the
 * RamDiscardManager).
 */
int ram_block_coordinated_discard_require(bool state);

/*
 * Test if any discarding of memory in ram blocks is disabled.
 */
bool ram_block_discard_is_disabled(void);

/*
 * Test if any discarding of memory in ram blocks is required to work reliably.
 */
bool ram_block_discard_is_required(void);

#endif /* !CONFIG_USER_ONLY */

#endif /* MEMORY_H */