/* xref: /qemu/include/exec/memory.h (revision ab9056ff) */
/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef MEMORY_H
#define MEMORY_H

#ifndef CONFIG_USER_ONLY

#include "exec/cpu-common.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
#include "exec/memop.h"
#include "exec/ramlist.h"
#include "qemu/bswap.h"
#include "qemu/queue.h"
#include "qemu/int128.h"
#include "qemu/notify.h"
#include "qom/object.h"
#include "qemu/rcu.h"

#define RAM_ADDR_INVALID (~(ram_addr_t)0)

#define MAX_PHYS_ADDR_SPACE_BITS 62
#define MAX_PHYS_ADDR            (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)

#define TYPE_MEMORY_REGION "qemu:memory-region"
#define MEMORY_REGION(obj) \
        OBJECT_CHECK(MemoryRegion, (obj), TYPE_MEMORY_REGION)

#define TYPE_IOMMU_MEMORY_REGION "qemu:iommu-memory-region"
#define IOMMU_MEMORY_REGION(obj) \
        OBJECT_CHECK(IOMMUMemoryRegion, (obj), TYPE_IOMMU_MEMORY_REGION)
#define IOMMU_MEMORY_REGION_CLASS(klass) \
        OBJECT_CLASS_CHECK(IOMMUMemoryRegionClass, (klass), \
                         TYPE_IOMMU_MEMORY_REGION)
#define IOMMU_MEMORY_REGION_GET_CLASS(obj) \
        OBJECT_GET_CLASS(IOMMUMemoryRegionClass, (obj), \
                         TYPE_IOMMU_MEMORY_REGION)

extern bool global_dirty_log;

typedef struct MemoryRegionOps MemoryRegionOps;
typedef struct MemoryRegionMmio MemoryRegionMmio;

struct MemoryRegionMmio {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
};

typedef struct IOMMUTLBEntry IOMMUTLBEntry;

/* See address_space_translate: bit 0 is read, bit 1 is write.  */
typedef enum {
    IOMMU_NONE = 0,
    IOMMU_RO   = 1,
    IOMMU_WO   = 2,
    IOMMU_RW   = 3,
} IOMMUAccessFlags;

#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0))

struct IOMMUTLBEntry {
    AddressSpace    *target_as;
    hwaddr           iova;
    hwaddr           translated_addr;
    hwaddr           addr_mask;  /* 0xfff = 4k translation */
    IOMMUAccessFlags perm;
};

/*
 * Bitmap for different IOMMUNotifier capabilities. Each notifier can
 * register with one or more IOMMU notifier capability bits.
 */
typedef enum {
    IOMMU_NOTIFIER_NONE = 0,
    /* Notify cache invalidations */
    IOMMU_NOTIFIER_UNMAP = 0x1,
    /* Notify entry changes (newly created entries) */
    IOMMU_NOTIFIER_MAP = 0x2,
} IOMMUNotifierFlag;

#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)

struct IOMMUNotifier;
typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
                            IOMMUTLBEntry *data);

struct IOMMUNotifier {
    IOMMUNotify notify;
    IOMMUNotifierFlag notifier_flags;
    /* Notify for address space range start <= addr <= end */
    hwaddr start;
    hwaddr end;
    int iommu_idx;
    QLIST_ENTRY(IOMMUNotifier) node;
};
typedef struct IOMMUNotifier IOMMUNotifier;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

/* UFFDIO_ZEROPAGE is available on this RAMBlock to atomically
 * zero the page and wake waiting processes.
 * (Set during postcopy)
 */
#define RAM_UF_ZEROPAGE (1 << 3)

/* RAM can be migrated */
#define RAM_MIGRATABLE (1 << 4)

/* RAM is a persistent kind of memory */
#define RAM_PMEM (1 << 5)

static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
                                       IOMMUNotifierFlag flags,
                                       hwaddr start, hwaddr end,
                                       int iommu_idx)
{
    n->notify = fn;
    n->notifier_flags = flags;
    n->start = start;
    n->end = end;
    n->iommu_idx = iommu_idx;
}

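/*
 * Example (editor's illustrative sketch, not part of the original API
 * documentation): a device that mirrors IOMMU mappings could set up a
 * notifier as below.  The "my_iommu_map_notify" callback and the
 * "giommu"/"attrs" variables are hypothetical; HWADDR_MAX comes from
 * "exec/hwaddr.h" and the helpers used are declared later in this header.
 *
 *     static void my_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
 *     {
 *         if (iotlb->perm == IOMMU_NONE) {
 *             // UNMAP notification: invalidate any cached translation
 *         } else {
 *             // MAP notification: record the new translation
 *         }
 *     }
 *
 *     IOMMUNotifier n;
 *     int idx = memory_region_iommu_attrs_to_index(giommu, attrs);
 *
 *     iommu_notifier_init(&n, my_iommu_map_notify, IOMMU_NOTIFIER_ALL,
 *                         0, HWADDR_MAX, idx);
 */
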
/*
 * Memory region callbacks
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     hwaddr addr,
                     unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque,
                  hwaddr addr,
                  uint64_t data,
                  unsigned size);

    MemTxResult (*read_with_attrs)(void *opaque,
                                   hwaddr addr,
                                   uint64_t *data,
                                   unsigned size,
                                   MemTxAttrs attrs);
    MemTxResult (*write_with_attrs)(void *opaque,
                                    hwaddr addr,
                                    uint64_t data,
                                    unsigned size,
                                    MemTxAttrs attrs);

    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specify bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
        /*
         * If present, and returns #false, the transaction is not accepted
         * by the device (and results in machine dependent behaviour such
         * as a machine check exception).
         */
        bool (*accepts)(void *opaque, hwaddr addr,
                        unsigned size, bool is_write,
                        MemTxAttrs attrs);
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented.  Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;
};

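/*
 * Example (editor's illustrative sketch; the "mydev" names are
 * hypothetical): a minimal MemoryRegionOps for a bank of 32-bit MMIO
 * registers that only accepts aligned 4-byte accesses.
 *
 *     static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *         return s->regs[addr >> 2];       // @addr is relative to the region
 *     }
 *
 *     static void mydev_write(void *opaque, hwaddr addr,
 *                             uint64_t data, unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *         s->regs[addr >> 2] = data;
 *     }
 *
 *     static const MemoryRegionOps mydev_ops = {
 *         .read = mydev_read,
 *         .write = mydev_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *         .valid = {
 *             .min_access_size = 4,
 *             .max_access_size = 4,
 *         },
 *     };
 */
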
typedef struct MemoryRegionClass {
    /* private */
    ObjectClass parent_class;
} MemoryRegionClass;


enum IOMMUMemoryRegionAttr {
    IOMMU_ATTR_SPAPR_TCE_FD
};

/**
 * IOMMUMemoryRegionClass:
 *
 * All IOMMU implementations need to subclass TYPE_IOMMU_MEMORY_REGION
 * and provide an implementation of at least the @translate method here
 * to handle requests to the memory region. Other methods are optional.
 *
 * The IOMMU implementation must use the IOMMU notifier infrastructure
 * to report whenever mappings are changed, by calling
 * memory_region_notify_iommu() (or, if necessary, by calling
 * memory_region_notify_one() for each registered notifier).
 *
 * Conceptually an IOMMU provides a mapping from input address
 * to an output TLB entry. If the IOMMU is aware of memory transaction
 * attributes and the output TLB entry depends on the transaction
 * attributes, we represent this using IOMMU indexes. Each index
 * selects a particular translation table that the IOMMU has:
 *   @attrs_to_index returns the IOMMU index for a set of transaction attributes
 *   @translate takes an input address and an IOMMU index
 * and the mapping returned can only depend on the input address and the
 * IOMMU index.
 *
 * Most IOMMUs don't care about the transaction attributes and support
 * only a single IOMMU index. A more complex IOMMU might have one index
 * for secure transactions and one for non-secure transactions.
 */
typedef struct IOMMUMemoryRegionClass {
    /* private */
    MemoryRegionClass parent_class;

    /*
     * Return a TLB entry that contains a given address.
     *
     * The IOMMUAccessFlags indicated via @flag are optional and may
     * be specified as IOMMU_NONE to indicate that the caller needs
     * the full translation information for both reads and writes. If
     * the access flags are specified then the IOMMU implementation
     * may use this as an optimization, to stop doing a page table
     * walk as soon as it knows that the requested permissions are not
     * allowed. If IOMMU_NONE is passed then the IOMMU must do the
     * full page table walk and report the permissions in the returned
     * IOMMUTLBEntry. (Note that this implies that an IOMMU may not
     * return different mappings for reads and writes.)
     *
     * The returned information remains valid while the caller is
     * holding the big QEMU lock or is inside an RCU critical section;
     * if the caller wishes to cache the mapping beyond that it must
     * register an IOMMU notifier so it can invalidate its cached
     * information when the IOMMU mapping changes.
     *
     * @iommu: the IOMMUMemoryRegion
     * @addr: address to be translated within the memory region
     * @flag: requested access permissions
     * @iommu_idx: IOMMU index for the translation
     */
    IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
                               IOMMUAccessFlags flag, int iommu_idx);
    /* Returns minimum supported page size in bytes.
     * If this method is not provided then the minimum is assumed to
     * be TARGET_PAGE_SIZE.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu);
    /* Called when IOMMU Notifier flag changes (i.e. when the set of
     * events which IOMMU users are requesting notification for changes).
     * Optional method -- need not be provided if the IOMMU does not
     * need to know exactly which events must be notified.
     *
     * @iommu: the IOMMUMemoryRegion
     * @old_flags: events which previously needed to be notified
     * @new_flags: events which now need to be notified
     *
     * Returns 0 on success, or a negative errno; in particular
     * returns -EINVAL if the new flag bitmap is not supported by the
     * IOMMU memory region. On failure, the method must set @errp.
     */
    int (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
                               IOMMUNotifierFlag old_flags,
                               IOMMUNotifierFlag new_flags,
                               Error **errp);
    /* Called to handle memory_region_iommu_replay().
     *
     * The default implementation of memory_region_iommu_replay() is to
     * call the IOMMU translate method for every page in the address space
     * with flag == IOMMU_NONE and then call the notifier if translate
     * returns a valid mapping. If this method is implemented then it
     * overrides the default behaviour, and must provide the full semantics
     * of memory_region_iommu_replay(), by calling @notifier for every
     * translation present in the IOMMU.
     *
     * Optional method -- an IOMMU only needs to provide this method
     * if the default is inefficient or produces undesirable side effects.
     *
     * Note: this is not related to record-and-replay functionality.
     */
    void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier);

    /* Get IOMMU misc attributes. This is an optional method that
     * can be used to allow users of the IOMMU to get implementation-specific
     * information. The IOMMU implements this method to handle calls
     * by IOMMU users to memory_region_iommu_get_attr() by filling in
     * the arbitrary data pointer for any IOMMUMemoryRegionAttr values that
     * the IOMMU supports. If the method is unimplemented then
     * memory_region_iommu_get_attr() will always return -EINVAL.
     *
     * @iommu: the IOMMUMemoryRegion
     * @attr: attribute being queried
     * @data: memory to fill in with the attribute data
     *
     * Returns 0 on success, or a negative errno; in particular
     * returns -EINVAL for unrecognized or unimplemented attribute types.
     */
    int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr attr,
                    void *data);

    /* Return the IOMMU index to use for a given set of transaction attributes.
     *
     * Optional method: if an IOMMU only supports a single IOMMU index then
     * the default implementation of memory_region_iommu_attrs_to_index()
     * will return 0.
     *
     * The indexes supported by an IOMMU must be contiguous, starting at 0.
     *
     * @iommu: the IOMMUMemoryRegion
     * @attrs: memory transaction attributes
     */
    int (*attrs_to_index)(IOMMUMemoryRegion *iommu, MemTxAttrs attrs);

    /* Return the number of IOMMU indexes this IOMMU supports.
     *
     * Optional method: if this method is not provided, then
     * memory_region_iommu_num_indexes() will return 1, indicating that
     * only a single IOMMU index is supported.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    int (*num_indexes)(IOMMUMemoryRegion *iommu);
} IOMMUMemoryRegionClass;

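/*
 * Example (editor's illustrative sketch, hypothetical "my_iommu" names):
 * the shape of a minimal @translate implementation that maps every
 * address 1:1 at 4K granularity; address_space_memory is declared in
 * "exec/address-spaces.h".
 *
 *     static IOMMUTLBEntry my_iommu_translate(IOMMUMemoryRegion *iommu,
 *                                             hwaddr addr,
 *                                             IOMMUAccessFlags flag,
 *                                             int iommu_idx)
 *     {
 *         IOMMUTLBEntry entry = {
 *             .target_as = &address_space_memory,
 *             .iova = addr & ~(hwaddr)0xfff,
 *             .translated_addr = addr & ~(hwaddr)0xfff,
 *             .addr_mask = 0xfff,              // 4K translation
 *             .perm = IOMMU_RW,
 *         };
 *         return entry;
 *     }
 */
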
typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;

struct MemoryRegion {
    Object parent_obj;

    /* All fields are private - violators will be prosecuted */

    /* The following fields should fit in a cache line */
    bool romd_mode;
    bool ram;
    bool subpage;
    bool readonly; /* For RAM regions */
    bool nonvolatile;
    bool rom_device;
    bool flush_coalesced_mmio;
    bool global_locking;
    uint8_t dirty_log_mask;
    bool is_iommu;
    RAMBlock *ram_block;
    Object *owner;

    const MemoryRegionOps *ops;
    void *opaque;
    MemoryRegion *container;
    Int128 size;
    hwaddr addr;
    void (*destructor)(MemoryRegion *mr);
    uint64_t align;
    bool terminates;
    bool ram_device;
    bool enabled;
    bool warning_printed; /* For reservations */
    uint8_t vga_logging_count;
    MemoryRegion *alias;
    hwaddr alias_offset;
    int32_t priority;
    QTAILQ_HEAD(, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(, CoalescedMemoryRange) coalesced;
    const char *name;
    unsigned ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
};

struct IOMMUMemoryRegion {
    MemoryRegion parent_obj;

    QLIST_HEAD(, IOMMUNotifier) iommu_notify;
    IOMMUNotifierFlag iommu_notify_flags;
};

#define IOMMU_NOTIFIER_FOREACH(n, mr) \
    QLIST_FOREACH((n), &(mr)->iommu_notify, node)

/**
 * MemoryListener: callbacks structure for updates to the physical memory map
 *
 * Allows a component to adjust to changes in the guest-visible memory map.
 * Use with memory_listener_register() and memory_listener_unregister().
 */
struct MemoryListener {
    void (*begin)(MemoryListener *listener);
    void (*commit)(MemoryListener *listener);
    void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
                      int old, int new);
    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
                     int old, int new);
    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_clear)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_global_start)(MemoryListener *listener);
    void (*log_global_stop)(MemoryListener *listener);
    void (*log_global_after_sync)(MemoryListener *listener);
    void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*coalesced_io_add)(MemoryListener *listener, MemoryRegionSection *section,
                               hwaddr addr, hwaddr len);
    void (*coalesced_io_del)(MemoryListener *listener, MemoryRegionSection *section,
                               hwaddr addr, hwaddr len);
    /* Lower = earlier (during add), later (during del) */
    unsigned priority;
    AddressSpace *address_space;
    QTAILQ_ENTRY(MemoryListener) link;
    QTAILQ_ENTRY(MemoryListener) link_as;
};

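/*
 * Example (editor's illustrative sketch; the listener and callback are
 * hypothetical): a component that only cares about new mappings could
 * register a listener like this, using memory_listener_register()
 * (declared later in this header) and address_space_memory (declared in
 * "exec/address-spaces.h").
 *
 *     static void my_region_add(MemoryListener *listener,
 *                               MemoryRegionSection *section)
 *     {
 *         // called for each flat range when the memory map changes
 *     }
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,
 *         .priority = 10,
 *     };
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 */
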
/**
 * AddressSpace: describes a mapping of addresses to #MemoryRegion objects
 */
struct AddressSpace {
    /* All fields are private. */
    struct rcu_head rcu;
    char *name;
    MemoryRegion *root;

    /* Accessed via RCU.  */
    struct FlatView *current_map;

    int ioeventfd_nb;
    struct MemoryRegionIoeventfd *ioeventfds;
    QTAILQ_HEAD(, MemoryListener) listeners;
    QTAILQ_ENTRY(AddressSpace) address_spaces_link;
};

typedef struct AddressSpaceDispatch AddressSpaceDispatch;
typedef struct FlatRange FlatRange;

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
    struct AddressSpaceDispatch *dispatch;
    MemoryRegion *root;
};

static inline FlatView *address_space_to_flatview(AddressSpace *as)
{
    return atomic_rcu_read(&as->current_map);
}


/**
 * MemoryRegionSection: describes a fragment of a #MemoryRegion
 *
 * @mr: the region, or %NULL if empty
 * @fv: the flat view of the address space the region is mapped in
 * @offset_within_region: the beginning of the section, relative to @mr's start
 * @size: the size of the section; will not exceed @mr's boundaries
 * @offset_within_address_space: the address of the first byte of the section
 *     relative to the region's address space
 * @readonly: writes to this section are ignored
 * @nonvolatile: this section is non-volatile
 */
struct MemoryRegionSection {
    Int128 size;
    MemoryRegion *mr;
    FlatView *fv;
    hwaddr offset_within_region;
    hwaddr offset_within_address_space;
    bool readonly;
    bool nonvolatile;
};

static inline bool MemoryRegionSection_eq(MemoryRegionSection *a,
                                          MemoryRegionSection *b)
{
    return a->mr == b->mr &&
           a->fv == b->fv &&
           a->offset_within_region == b->offset_within_region &&
           a->offset_within_address_space == b->offset_within_address_space &&
           int128_eq(a->size, b->size) &&
           a->readonly == b->readonly &&
           a->nonvolatile == b->nonvolatile;
}

/**
 * memory_region_init: Initialize a memory region
 *
 * The region typically acts as a container for other memory regions.  Use
 * memory_region_add_subregion() to add subregions.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region; any subregions beyond this size will be clipped
 */
void memory_region_init(MemoryRegion *mr,
                        struct Object *owner,
                        const char *name,
                        uint64_t size);

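/*
 * Example (editor's illustrative sketch): building a container with one
 * subregion.  memory_region_add_subregion() is declared later in this
 * header; "dev", "s" and the sizes are hypothetical.
 *
 *     memory_region_init(&s->container, OBJECT(dev), "mydev-mmio", 0x1000);
 *     memory_region_add_subregion(&s->container, 0x0, &s->regs_region);
 */
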
/**
 * memory_region_ref: Add 1 to a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function adds a reference to the owner.
 *
 * All MemoryRegions must have an owner if they can disappear, even if the
 * device they belong to operates exclusively under the BQL.  This is because
 * the region could be returned at any time by memory_region_find, and this
 * is usually under guest control.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_ref(MemoryRegion *mr);

/**
 * memory_region_unref: Subtract 1 from a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function removes a reference to the owner and possibly destroys it.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_unref(MemoryRegion *mr);

/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_io(MemoryRegion *mr,
                           struct Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size);

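/*
 * Example (editor's illustrative sketch, continuing the hypothetical
 * "mydev_ops" sketch above): initializing the MMIO region in a device's
 * realize function.
 *
 *     memory_region_init_io(&s->iomem, OBJECT(s), &mydev_ops, s,
 *                           "mydev-regs", 0x100);
 */
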
/**
 * memory_region_init_ram_nomigrate:  Initialize RAM memory region.  Accesses
 *                                    into the region will modify memory
 *                                    directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp);

/**
 * memory_region_init_ram_shared_nomigrate:  Initialize RAM memory region.
 *                                           Accesses into the region will
 *                                           modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @share: allow remapping RAM to different addresses
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function is similar to memory_region_init_ram_nomigrate.
 * The only difference is that part of the RAM region can be remapped.
 */
void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
                                             struct Object *owner,
                                             const char *name,
                                             uint64_t size,
                                             bool share,
                                             Error **errp);

/**
 * memory_region_init_resizeable_ram:  Initialize memory region with resizeable
 *                                     RAM.  Accesses into the region will
 *                                     modify memory directly.  Only an initial
 *                                     portion of this RAM is actually used.
 *                                     The used size can change across reboots.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: used size of the region.
 * @max_size: max size of the region.
 * @resized: callback to notify owner about used size change.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       struct Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp);
#ifdef CONFIG_POSIX

/**
 * memory_region_init_ram_from_file:  Initialize RAM memory region with a
 *                                    mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @align: alignment of the region base address; if 0, the default alignment
 *         (getpagesize()) will be used.
 * @ram_flags: Memory region features:
 *             - RAM_SHARED: memory must be mmap-ed with the MAP_SHARED flag
 *             - RAM_PMEM: the memory is persistent memory
 *             Other bits are ignored now.
 * @path: the path in which to allocate the RAM.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      uint64_t align,
                                      uint32_t ram_flags,
                                      const char *path,
                                      Error **errp);

/**
 * memory_region_init_ram_from_fd:  Initialize RAM memory region with a
 *                                  mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @share: %true if memory must be mmap-ed with the MAP_SHARED flag
 * @fd: the fd to mmap.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    struct Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    bool share,
                                    int fd,
                                    Error **errp);
#endif

/**
 * memory_region_init_ram_ptr:  Initialize RAM memory region from a
 *                              user-provided pointer.  Accesses into the
 *                              region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                struct Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr);

/**
 * memory_region_init_ram_device_ptr:  Initialize RAM device memory region from
 *                                     a user-provided pointer.
 *
 * A RAM device represents a mapping to a physical device, such as to a PCI
 * MMIO BAR of a vfio-pci assigned device.  The memory region may be mapped
 * into the VM address space and access to the region will modify memory
 * directly.  However, the memory region should not be included in a memory
 * dump (device may not be enabled/mapped at the time of the dump), and
 * operations incompatible with manipulating MMIO should be avoided.  Replaces
 * the skip_dump flag.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 * (For RAM device memory regions, migrating the contents rarely makes sense.)
 */
void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       struct Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr);

/**
 * memory_region_init_alias: Initialize a memory region that aliases all or a
 *                           part of another memory region.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @orig: the region to be referenced; @mr will be equivalent to
 *        @orig between @offset and @offset + @size - 1.
 * @offset: start of the section in @orig to be referenced.
 * @size: size of the region.
 */
void memory_region_init_alias(MemoryRegion *mr,
                              struct Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size);

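/*
 * Example (editor's illustrative sketch, hypothetical names): exposing
 * the first megabyte of an existing RAM region at a second guest-physical
 * address, in the style of a PC's low-memory alias.
 *
 *     memory_region_init_alias(&s->ram_below_1m, OBJECT(s), "ram-below-1m",
 *                              s->ram, 0, 0x100000);
 */
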
/**
 * memory_region_init_rom_nomigrate: Initialize a ROM memory region.
 *
 * This has the same effect as calling memory_region_init_ram_nomigrate()
 * and then marking the resulting region read-only with
 * memory_region_set_readonly().
 *
 * Note that this function does not do anything to cause the data in the
 * RAM side of the memory region to be migrated; that is the responsibility
 * of the caller.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp);

/**
 * memory_region_init_rom_device_nomigrate:  Initialize a ROM memory region.
 *                                 Writes are handled via callbacks.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM side of the memory region to be migrated; that is the responsibility
 * of the caller.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling (must not be NULL).
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             struct Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp);

/**
 * memory_region_init_iommu: Initialize a memory region of a custom type
 * that translates addresses
 *
 * An IOMMU region translates addresses and forwards accesses to a target
 * memory region.
 *
 * The IOMMU implementation must define a subclass of TYPE_IOMMU_MEMORY_REGION.
 * @_iommu_mr should be a pointer to enough memory for an instance of
 * that subclass, @instance_size is the size of that subclass, and
 * @mrtypename is its name. This function will initialize @_iommu_mr as an
 * instance of the subclass, and its methods will then be called to handle
 * accesses to the memory region. See the documentation of
 * #IOMMUMemoryRegionClass for further details.
 *
 * @_iommu_mr: the #IOMMUMemoryRegion to be initialized
 * @instance_size: the IOMMUMemoryRegion subclass instance size
 * @mrtypename: the type name of the #IOMMUMemoryRegion
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size);

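/*
 * Example (editor's illustrative sketch, hypothetical type and field
 * names): a device embeds its IOMMUMemoryRegion subclass instance and
 * initializes it with the QOM type that provides the @translate method.
 *
 *     #define TYPE_MY_IOMMU_MEMORY_REGION "my-iommu-memory-region"
 *
 *     memory_region_init_iommu(&s->iommu_mr, sizeof(s->iommu_mr),
 *                              TYPE_MY_IOMMU_MEMORY_REGION, OBJECT(s),
 *                              "my-iommu", UINT64_MAX);
 */
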
/**
 * memory_region_init_ram - Initialize RAM memory region.  Accesses into the
 *                          region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count (must be
 *         TYPE_DEVICE or a subclass of TYPE_DEVICE, or NULL)
 * @name: name of the memory region
 * @size: size of the region in bytes
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * This function allocates RAM for a board model or device, and
 * arranges for it to be migrated (by calling vmstate_register_ram()
 * if @owner is a DeviceState, or vmstate_register_ram_global() if
 * @owner is NULL).
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 */
void memory_region_init_ram(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);

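/*
 * Example (editor's illustrative sketch): a board model allocating and
 * mapping its main RAM.  get_system_memory() is declared in
 * "exec/address-spaces.h", error_fatal in "qapi/error.h", and
 * memory_region_add_subregion() later in this header.
 *
 *     memory_region_init_ram(ram, NULL, "mach.ram", machine->ram_size,
 *                            &error_fatal);
 *     memory_region_add_subregion(get_system_memory(), 0, ram);
 */
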
/**
 * memory_region_init_rom: Initialize a ROM memory region.
 *
 * This has the same effect as calling memory_region_init_ram()
 * and then marking the resulting region read-only with
 * memory_region_set_readonly(). This includes arranging for the
 * contents to be migrated.
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);

/**
 * memory_region_init_rom_device:  Initialize a ROM memory region.
 *                                 Writes are handled via callbacks.
 *
 * This function initializes a memory region backed by RAM for reads
 * and callbacks for writes, and arranges for the RAM backing to
 * be migrated (by calling vmstate_register_ram()
 * if @owner is a DeviceState, or vmstate_register_ram_global() if
 * @owner is NULL).
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling (must not be NULL).
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_device(MemoryRegion *mr,
                                   struct Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp);


/**
 * memory_region_owner: get a memory region's owner.
 *
 * @mr: the memory region being queried.
 */
struct Object *memory_region_owner(MemoryRegion *mr);

/**
 * memory_region_size: get a memory region's size.
 *
 * @mr: the memory region being queried.
 */
uint64_t memory_region_size(MemoryRegion *mr);

/**
 * memory_region_is_ram: check whether a memory region is random access
 *
 * Returns %true if a memory region is random access.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_ram(MemoryRegion *mr)
{
    return mr->ram;
}

/**
 * memory_region_is_ram_device: check whether a memory region is a ram device
 *
 * Returns %true if a memory region is a device backed ram region
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_ram_device(MemoryRegion *mr);

/**
 * memory_region_is_romd: check whether a memory region is in ROMD mode
 *
 * Returns %true if a memory region is a ROM device and currently set to allow
 * direct reads.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_romd(MemoryRegion *mr)
{
    return mr->rom_device && mr->romd_mode;
}

/**
 * memory_region_get_iommu: check whether a memory region is an iommu
 *
 * Returns pointer to IOMMUMemoryRegion if a memory region is an iommu,
 * otherwise NULL.
 *
 * @mr: the memory region being queried
 */
static inline IOMMUMemoryRegion *memory_region_get_iommu(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_iommu(mr->alias);
    }
    if (mr->is_iommu) {
        return (IOMMUMemoryRegion *) mr;
    }
    return NULL;
}

/**
 * memory_region_get_iommu_class_nocheck: returns iommu memory region class
 *   if an iommu or NULL if not
 *
 * Returns pointer to IOMMUMemoryRegionClass if a memory region is an iommu,
 * otherwise NULL. This is a fast path that avoids QOM checking; use with
 * caution.
 *
 * @iommu_mr: the memory region being queried
 */
static inline IOMMUMemoryRegionClass *memory_region_get_iommu_class_nocheck(
        IOMMUMemoryRegion *iommu_mr)
{
    return (IOMMUMemoryRegionClass *) (((Object *)iommu_mr)->class);
}

#define memory_region_is_iommu(mr) (memory_region_get_iommu(mr) != NULL)

/**
 * memory_region_iommu_get_min_page_size: get minimum supported page size
 * for an iommu
 *
 * Returns minimum supported page size for an iommu.
 *
 * @iommu_mr: the memory region being queried
 */
uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr);

/**
 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
 *
 * The notification type will be decided by entry.perm bits:
 *
 * - For UNMAP (cache invalidation) notifications: set entry.perm to IOMMU_NONE.
 * - For MAP (newly added entry) notifications: set entry.perm to the
 *   permission of the page (which is definitely !IOMMU_NONE).
 *
 * Note: for any IOMMU implementation, an in-place mapping change
 * should be notified with an UNMAP followed by a MAP.
 *
 * @iommu_mr: the memory region that was changed
 * @iommu_idx: the IOMMU index for the translation table which has changed
 * @entry: the new entry in the IOMMU translation table.  The entry
 *         replaces all old entries for the same virtual I/O address range.
 *         Deleted entries have @perm == 0.
 */
void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
                                int iommu_idx,
                                IOMMUTLBEntry entry);

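/*
 * Example (editor's illustrative sketch; "iommu_mr" and "iova" are
 * hypothetical): notifying the invalidation of a single 4K page,
 * following the UNMAP convention described above.
 *
 *     IOMMUTLBEntry entry = {
 *         .target_as = &address_space_memory,
 *         .iova = iova & ~(hwaddr)0xfff,
 *         .translated_addr = 0,
 *         .addr_mask = 0xfff,
 *         .perm = IOMMU_NONE,              // UNMAP notification
 *     };
 *
 *     memory_region_notify_iommu(iommu_mr, 0, entry);
 */
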
/**
 * memory_region_notify_one: notify a change in an IOMMU translation
 *                           entry to a single notifier
 *
 * This works just like memory_region_notify_iommu(), but it only
 * notifies a specific notifier, not all of them.
 *
 * @notifier: the notifier to be notified
 * @entry: the new entry in the IOMMU translation table.  The entry
 *         replaces all old entries for the same virtual I/O address range.
 *         Deleted entries have @perm == 0.
 */
void memory_region_notify_one(IOMMUNotifier *notifier,
                              IOMMUTLBEntry *entry);

/**
 * memory_region_register_iommu_notifier: register a notifier for changes to
 * IOMMU translation entries.
 *
 * Returns 0 on success, or a negative errno otherwise. In particular,
 * -EINVAL indicates that at least one of the attributes of the notifier
 * is not supported (flag/range) by the IOMMU memory region. On error,
 * an error object is set in @errp.
 *
 * @mr: the memory region to observe
 * @n: the IOMMUNotifier to be added; the notify callback receives a
 *     pointer to an #IOMMUTLBEntry as the opaque value; the pointer
 *     ceases to be valid on exit from the notifier.
 * @errp: pointer to Error*, to store an error if it happens.
 */
int memory_region_register_iommu_notifier(MemoryRegion *mr,
                                          IOMMUNotifier *n, Error **errp);

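/*
 * Example (editor's illustrative sketch, reusing the hypothetical
 * notifier from the iommu_notifier_init() sketch above): a typical
 * register / replay / unregister sequence.
 *
 *     Error *err = NULL;
 *
 *     if (memory_region_register_iommu_notifier(MEMORY_REGION(giommu),
 *                                               &n, &err) < 0) {
 *         // registration failed; err describes why
 *     }
 *     memory_region_iommu_replay(giommu, &n);  // push current mappings
 *     ...
 *     memory_region_unregister_iommu_notifier(MEMORY_REGION(giommu), &n);
 */
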
/**
 * memory_region_iommu_replay: replay existing IOMMU translations to
 * a notifier with the minimum page granularity returned by
 * the IOMMU's get_min_page_size() method.
 *
 * Note: this is not related to record-and-replay functionality.
 *
 * @iommu_mr: the memory region to observe
 * @n: the notifier to which to replay iommu mappings
 */
void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n);

/**
 * memory_region_unregister_iommu_notifier: unregister a notifier for
 * changes to IOMMU translation entries.
 *
 * @mr: the memory region which was observed and for which notify_stopped()
 *      needs to be called
 * @n: the notifier to be removed.
 */
void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n);

/**
 * memory_region_iommu_get_attr: return an IOMMU attr if get_attr() is
 * defined on the IOMMU.
 *
 * Returns 0 on success, or a negative errno otherwise. In particular,
 * -EINVAL indicates that the IOMMU does not support the requested
 * attribute.
 *
 * @iommu_mr: the memory region
 * @attr: the requested attribute
 * @data: a pointer to the requested attribute data
 */
int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
                                 enum IOMMUMemoryRegionAttr attr,
                                 void *data);

/**
 * memory_region_iommu_attrs_to_index: return the IOMMU index to
 * use for translations with the given memory transaction attributes.
 *
 * @iommu_mr: the memory region
 * @attrs: the memory transaction attributes
 */
int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
                                       MemTxAttrs attrs);

/**
 * memory_region_iommu_num_indexes: return the total number of IOMMU
 * indexes that this IOMMU supports.
 *
 * @iommu_mr: the memory region
 */
int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr);

/**
 * memory_region_name: get a memory region's name
 *
 * Returns the string that was used to initialize the memory region.
 *
 * @mr: the memory region being queried
 */
const char *memory_region_name(const MemoryRegion *mr);

/**
 * memory_region_is_logging: return whether a memory region is logging writes
 *
 * Returns %true if the memory region is logging writes for the given client
 *
 * @mr: the memory region being queried
 * @client: the client being queried
 */
bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);

/**
 * memory_region_get_dirty_log_mask: return the clients for which a
 * memory region is logging writes.
 *
 * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
 * are the bit indices.
 *
 * @mr: the memory region being queried
 */
uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);

/**
 * memory_region_is_rom: check whether a memory region is ROM
 *
 * Returns %true if a memory region is read-only memory.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_rom(MemoryRegion *mr)
{
    return mr->ram && mr->readonly;
}

/**
 * memory_region_is_nonvolatile: check whether a memory region is non-volatile
 *
 * Returns %true if a memory region is non-volatile memory.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_nonvolatile(MemoryRegion *mr)
{
    return mr->nonvolatile;
}

/**
 * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
 *
 * Returns a file descriptor backing a file-based RAM memory region,
 * or -1 if the region is not a file-based RAM memory region.
 *
 * @mr: the RAM or alias memory region being queried.
 */
int memory_region_get_fd(MemoryRegion *mr);

/**
 * memory_region_from_host: Convert a pointer into a RAM memory region
 * and an offset within it.
 *
 * Given a host pointer inside a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()), return
 * the MemoryRegion and the offset within it.
 *
 * Use with care; by the time this function returns, the returned pointer is
 * not protected by RCU anymore.  If the caller is not within an RCU critical
 * section and does not hold the iothread lock, it must have other means of
 * protecting the pointer, such as a reference to the region that includes
 * the incoming ram_addr_t.
 *
 * @ptr: the host pointer to be converted
 * @offset: the offset within memory region
 */
MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);

/**
 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
 *
 * Returns a host pointer to a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()).
 *
 * Use with care; by the time this function returns, the returned pointer is
 * not protected by RCU anymore.  If the caller is not within an RCU critical
 * section and does not hold the iothread lock, it must have other means of
 * protecting the pointer, such as a reference to the region that includes
 * the incoming ram_addr_t.
 *
 * @mr: the memory region being queried.
 */
void *memory_region_get_ram_ptr(MemoryRegion *mr);

/* memory_region_ram_resize: Resize a RAM region.
 *
 * Only legal before the guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * @mr: a memory region created with @memory_region_init_resizeable_ram.
 * @newsize: the new size of the region
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
                              Error **errp);

/**
 * memory_region_set_log: Turn dirty logging on or off for a region.
 *
 * Turns dirty logging on or off for a specified client (display, migration).
 * Only meaningful for RAM regions.
 *
 * @mr: the memory region being updated.
 * @log: whether dirty logging is to be enabled or disabled.
 * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
 */
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);

/**
 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
 *
 * Marks a range of bytes as dirty, after it has been dirtied outside
 * guest code.
 *
 * @mr: the memory region being dirtied.
 * @addr: the address (relative to the start of the region) being dirtied.
 * @size: size of the range being dirtied.
 */
void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size);

/**
 * memory_region_clear_dirty_bitmap - clear dirty bitmap for memory range
 *
 * This function is called when the caller wants to clear the remote
 * dirty bitmap of a memory range within the memory region.  This can
 * be used by e.g. KVM to manually clear the dirty log when
 * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT is declared as supported by the host
 * kernel.
 *
 * @mr:     the memory region to clear the dirty log upon
 * @start:  start address offset within the memory region
 * @len:    length of the range whose dirty bitmap is cleared
 */
void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
                                      hwaddr len);

/**
 * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
 *                                         bitmap and clear it.
 *
 * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and
 * returns the snapshot.  The snapshot can then be used to query dirty
 * status, using memory_region_snapshot_get_dirty.  Snapshotting allows
 * querying the same page multiple times, which is especially useful for
 * display updates where the scanlines often are not page aligned.
 *
 * The dirty bitmap region which gets copied into the snapshot (and
 * cleared afterwards) can be larger than requested.  The boundaries
 * are rounded up/down so complete bitmap longs (covering 64 pages on
 * 64-bit hosts) can be copied over into the bitmap snapshot; this
 * isn't a problem for display updates as the extra pages are outside
 * the visible area, and in case the visible area changes a full
 * display redraw is due anyway.  Should other use cases for this
 * function emerge we might have to revisit this implementation
 * detail.
 *
 * Use g_free to release DirtyBitmapSnapshot.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA.
 */
DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            hwaddr size,
                                                            unsigned client);

1342 /**
1343  * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty
1344  *                                   in the specified dirty bitmap snapshot.
1345  *
1346  * @mr: the memory region being queried.
1347  * @snap: the dirty bitmap snapshot
1348  * @addr: the address (relative to the start of the region) being queried.
1349  * @size: the size of the range being queried.
1350  */
1351 bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
1352                                       DirtyBitmapSnapshot *snap,
1353                                       hwaddr addr, hwaddr size);
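
/*
 * Illustrative sketch (not part of the API contract): a display update
 * loop can snapshot the VGA dirty bitmap once and then test individual
 * scanlines against the snapshot.  "fb_mr", "fb_size", "stride",
 * "height" and "redraw_line" are hypothetical.
 *
 *     DirtyBitmapSnapshot *snap;
 *     int y;
 *
 *     snap = memory_region_snapshot_and_clear_dirty(fb_mr, 0, fb_size,
 *                                                   DIRTY_MEMORY_VGA);
 *     for (y = 0; y < height; y++) {
 *         if (memory_region_snapshot_get_dirty(fb_mr, snap,
 *                                              y * stride, stride)) {
 *             redraw_line(y);
 *         }
 *     }
 *     g_free(snap);
 */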
1354 
1355 /**
1356  * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
1357  *                            client.
1358  *
1359  * Marks a range of pages as no longer dirty.
1360  *
1361  * @mr: the region being updated.
1362  * @addr: the start of the subrange being cleaned.
1363  * @size: the size of the subrange being cleaned.
1364  * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
1365  *          %DIRTY_MEMORY_VGA.
1366  */
1367 void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
1368                                hwaddr size, unsigned client);
1369 
1370 /**
1371  * memory_region_flush_rom_device: Mark a range of pages dirty and invalidate
1372  *                                 TBs (for self-modifying code).
1373  *
1374  * The MemoryRegionOps->write() callback of a ROM device must use this function
1375  * to mark byte ranges that have been modified internally, such as by directly
1376  * accessing the memory returned by memory_region_get_ram_ptr().
1377  *
1378  * This function marks the range dirty and invalidates TBs so that TCG can
1379  * detect self-modifying code.
1380  *
1381  * @mr: the region being flushed.
1382  * @addr: the start, relative to the start of the region, of the range being
1383  *        flushed.
1384  * @size: the size, in bytes, of the range being flushed.
1385  */
1386 void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size);
1387 
1388 /**
1389  * memory_region_set_readonly: Turn a memory region read-only (or read-write)
1390  *
1391  * Allows a memory region to be marked as read-only (turning it into a ROM).
1392  * Only useful on RAM regions.
1393  *
1394  * @mr: the region being updated.
1395  * @readonly: whether the region is to be ROM or RAM.
1396  */
1397 void memory_region_set_readonly(MemoryRegion *mr, bool readonly);
1398 
1399 /**
1400  * memory_region_set_nonvolatile: Turn a memory region non-volatile
1401  *
1402  * Allows a memory region to be marked as non-volatile.
1403  * Only useful on RAM regions.
1404  *
1405  * @mr: the region being updated.
1406  * @nonvolatile: whether the region is to be non-volatile.
1407  */
1408 void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile);
1409 
1410 /**
1411  * memory_region_rom_device_set_romd: enable/disable ROMD mode
1412  *
1413  * Allows a ROM device (initialized with memory_region_init_rom_device()) to
1414  * be set to ROMD mode (default) or MMIO mode.  When it is in ROMD mode, the
1415  * device is mapped to guest memory and satisfies read access directly.
1416  * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
1417  * Writes are always handled by the #MemoryRegion.write function.
1418  *
1419  * @mr: the memory region to be updated
1420  * @romd_mode: %true to put the region into ROMD mode
1421  */
1422 void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);
1423 
1424 /**
1425  * memory_region_set_coalescing: Enable memory coalescing for the region.
1426  *
1427  * Enables writes to a region to be queued for later processing. MMIO ->write
1428  * callbacks may be delayed until a non-coalesced MMIO is issued.
1429  * Only useful for IO regions.  Roughly similar to write-combining hardware.
1430  *
1431  * @mr: the memory region to be write coalesced
1432  */
1433 void memory_region_set_coalescing(MemoryRegion *mr);
1434 
1435 /**
1436  * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
1437  *                               a region.
1438  *
1439  * Like memory_region_set_coalescing(), but works on a sub-range of a region.
1440  * Multiple calls can be issued to coalesce disjoint ranges.
1441  *
1442  * @mr: the memory region to be updated.
1443  * @offset: the start of the range within the region to be coalesced.
1444  * @size: the size of the subrange to be coalesced.
1445  */
1446 void memory_region_add_coalescing(MemoryRegion *mr,
1447                                   hwaddr offset,
1448                                   uint64_t size);
1449 
1450 /**
1451  * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
1452  *
1453  * Disables any coalescing caused by memory_region_set_coalescing() or
1454  * memory_region_add_coalescing().  Roughly equivalent to uncacheable memory
1455  * hardware.
1456  *
1457  * @mr: the memory region to be updated.
1458  */
1459 void memory_region_clear_coalescing(MemoryRegion *mr);
1460 
1461 /**
1462  * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
1463  *                                    accesses.
1464  *
1465  * Ensure that pending coalesced MMIO requests are flushed before the memory
1466  * region is accessed. This property is automatically enabled for all regions
1467  * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
1468  *
1469  * @mr: the memory region to be updated.
1470  */
1471 void memory_region_set_flush_coalesced(MemoryRegion *mr);
1472 
1473 /**
1474  * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
1475  *                                      accesses.
1476  *
1477  * Clear the automatic coalesced MMIO flushing enabled via
1478  * memory_region_set_flush_coalesced().  Note that this service has no effect on
1479  * memory regions that have MMIO coalescing enabled for themselves. For them,
1480  * automatic flushing will stop once coalescing is disabled.
1481  *
1482  * @mr: the memory region to be updated.
1483  */
1484 void memory_region_clear_flush_coalesced(MemoryRegion *mr);
1485 
1486 /**
1487  * memory_region_clear_global_locking: Declares that access processing does
1488  *                                     not depend on the QEMU global lock.
1489  *
1490  * By clearing this property, accesses to the memory region will be processed
1491  * outside of QEMU's global lock (unless the lock is held when issuing the
1492  * access request). In this case, the device model implementing the access
1493  * handlers is responsible for synchronizing concurrent accesses.
1494  *
1495  * @mr: the memory region to be updated.
1496  */
1497 void memory_region_clear_global_locking(MemoryRegion *mr);
1498 
1499 /**
1500  * memory_region_add_eventfd: Request an eventfd to be triggered when a word
1501  *                            is written to a location.
1502  *
1503  * Marks a word in an IO region (initialized with memory_region_init_io())
1504  * as a trigger for an eventfd event.  The I/O callback will not be called.
1505  * The caller must be prepared to handle failure (that is, take the required
1506  * action if the callback _is_ called).
1507  *
1508  * @mr: the memory region being updated.
1509  * @addr: the address within @mr that is to be monitored
1510  * @size: the size of the access to trigger the eventfd
1511  * @match_data: whether to match against @data, instead of just @addr
1512  * @data: the data to match against the guest write
1513  * @e: event notifier to be triggered when @addr, @size, and @data all match.
1514  */
1515 void memory_region_add_eventfd(MemoryRegion *mr,
1516                                hwaddr addr,
1517                                unsigned size,
1518                                bool match_data,
1519                                uint64_t data,
1520                                EventNotifier *e);
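
/*
 * Illustrative sketch: wiring a 4-byte doorbell register to an
 * EventNotifier, in the style of ioeventfd-backed devices.  "mmio_mr",
 * "DOORBELL" and "notifier" are hypothetical; error handling is
 * omitted.
 *
 *     static EventNotifier notifier;
 *
 *     event_notifier_init(&notifier, 0);
 *     memory_region_add_eventfd(mmio_mr, DOORBELL, 4,
 *                               false, 0, &notifier);
 *     // The notifier now fires on any 4-byte guest write to DOORBELL
 *     // instead of invoking the region's ->write callback.
 */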
1521 
1522 /**
1523  * memory_region_del_eventfd: Cancel an eventfd.
1524  *
1525  * Cancels an eventfd trigger requested by a previous
1526  * memory_region_add_eventfd() call.
1527  *
1528  * @mr: the memory region being updated.
1529  * @addr: the address within @mr that is to be monitored
1530  * @size: the size of the access to trigger the eventfd
1531  * @match_data: whether to match against @data, instead of just @addr
1532  * @data: the data to match against the guest write
1533  * @e: event notifier to be triggered when @addr, @size, and @data all match.
1534  */
1535 void memory_region_del_eventfd(MemoryRegion *mr,
1536                                hwaddr addr,
1537                                unsigned size,
1538                                bool match_data,
1539                                uint64_t data,
1540                                EventNotifier *e);
1541 
1542 /**
1543  * memory_region_add_subregion: Add a subregion to a container.
1544  *
1545  * Adds a subregion at @offset.  The subregion may not overlap with other
1546  * subregions (except for those explicitly marked as overlapping).  A region
1547  * may only be added once as a subregion (unless removed with
1548  * memory_region_del_subregion()); use memory_region_init_alias() if you
1549  * want a region to be a subregion in multiple locations.
1550  *
1551  * @mr: the region to contain the new subregion; must be a container
1552  *      initialized with memory_region_init().
1553  * @offset: the offset relative to @mr where @subregion is added.
1554  * @subregion: the subregion to be added.
1555  */
1556 void memory_region_add_subregion(MemoryRegion *mr,
1557                                  hwaddr offset,
1558                                  MemoryRegion *subregion);
1559 /**
1560  * memory_region_add_subregion_overlap: Add a subregion to a container
1561  *                                      with overlap.
1562  *
1563  * Adds a subregion at @offset.  The subregion may overlap with other
1564  * subregions.  Conflicts are resolved by having a higher @priority hide a
1565  * lower @priority. Subregions without priority are taken as @priority 0.
1566  * A region may only be added once as a subregion (unless removed with
1567  * memory_region_del_subregion()); use memory_region_init_alias() if you
1568  * want a region to be a subregion in multiple locations.
1569  *
1570  * @mr: the region to contain the new subregion; must be a container
1571  *      initialized with memory_region_init().
1572  * @offset: the offset relative to @mr where @subregion is added.
1573  * @subregion: the subregion to be added.
1574  * @priority: used for resolving overlaps; highest priority wins.
1575  */
1576 void memory_region_add_subregion_overlap(MemoryRegion *mr,
1577                                          hwaddr offset,
1578                                          MemoryRegion *subregion,
1579                                          int priority);
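
/*
 * Illustrative sketch: a RAM region placed in a container, with an
 * MMIO window overlapped on top at a higher priority so it hides the
 * RAM underneath.  All names are hypothetical; error handling uses
 * &error_fatal for brevity.
 *
 *     memory_region_init(&container, owner, "container", 0x10000);
 *     memory_region_init_ram(&ram, owner, "ram", 0x10000, &error_fatal);
 *     memory_region_add_subregion(&container, 0, &ram);
 *     memory_region_add_subregion_overlap(&container, 0x4000, &mmio, 1);
 *     // priority 1 > 0, so the MMIO window wins over the RAM below it
 */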
1580 
1581 /**
1582  * memory_region_get_ram_addr: Get the ram address associated with a memory
1583  *                             region
1584  */
1585 ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
1586 
1587 uint64_t memory_region_get_alignment(const MemoryRegion *mr);
1588 /**
1589  * memory_region_del_subregion: Remove a subregion.
1590  *
1591  * Removes a subregion from its container.
1592  *
1593  * @mr: the container to be updated.
1594  * @subregion: the region being removed; must be a current subregion of @mr.
1595  */
1596 void memory_region_del_subregion(MemoryRegion *mr,
1597                                  MemoryRegion *subregion);
1598 
1599 /*
1600  * memory_region_set_enabled: dynamically enable or disable a region
1601  *
1602  * Enables or disables a memory region.  A disabled memory region
1603  * ignores all accesses to itself and its subregions.  It does not
1604  * obscure sibling subregions with lower priority - it simply behaves as
1605  * if it was removed from the hierarchy.
1606  *
1607  * Regions default to being enabled.
1608  *
1609  * @mr: the region to be updated
1610  * @enabled: whether to enable or disable the region
1611  */
1612 void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
1613 
1614 /*
1615  * memory_region_set_address: dynamically update the address of a region
1616  *
1617  * Dynamically updates the address of a region, relative to its container.
1618  * May be used on regions that are currently part of a memory hierarchy.
1619  *
1620  * @mr: the region to be updated
1621  * @addr: new address, relative to container region
1622  */
1623 void memory_region_set_address(MemoryRegion *mr, hwaddr addr);
1624 
1625 /*
1626  * memory_region_set_size: dynamically update the size of a region.
1627  *
1628  * Dynamically updates the size of a region.
1629  *
1630  * @mr: the region to be updated
1631  * @size: the new size of the region.
1632  */
1633 void memory_region_set_size(MemoryRegion *mr, uint64_t size);
1634 
1635 /*
1636  * memory_region_set_alias_offset: dynamically update a memory alias's offset
1637  *
1638  * Dynamically updates the offset into the target region that an alias points
1639  * to, as if the fourth argument to memory_region_init_alias() had changed.
1640  *
1641  * @mr: the #MemoryRegion to be updated; should be an alias.
1642  * @offset: the new offset into the target memory region
1643  */
1644 void memory_region_set_alias_offset(MemoryRegion *mr,
1645                                     hwaddr offset);
1646 
1647 /**
1648  * memory_region_present: checks if an address relative to a @container
1649  * translates into a #MemoryRegion within @container
1650  *
1651  * Answer whether a #MemoryRegion within @container covers the address
1652  * @addr.
1653  *
1654  * @container: a #MemoryRegion within which @addr is a relative address
1655  * @addr: the address within @container to be searched
1656  */
1657 bool memory_region_present(MemoryRegion *container, hwaddr addr);
1658 
1659 /**
1660  * memory_region_is_mapped: returns true if #MemoryRegion is mapped
1661  * into any address space.
1662  *
1663  * @mr: a #MemoryRegion which should be checked if it's mapped
1664  */
1665 bool memory_region_is_mapped(MemoryRegion *mr);
1666 
1667 /**
1668  * memory_region_find: translate an address/size relative to a
1669  * MemoryRegion into a #MemoryRegionSection.
1670  *
1671  * Locates the first #MemoryRegion within @mr that overlaps the range
1672  * given by @addr and @size.
1673  *
1674  * Returns a #MemoryRegionSection that describes a contiguous overlap.
1675  * It will have the following characteristics:
1676  *    .@size = 0 iff no overlap was found
1677  *    .@mr is non-%NULL iff an overlap was found
1678  *
1679  * Remember that in the return value the @offset_within_region is
1680  * relative to the returned region (in the .@mr field), not to the
1681  * @mr argument.
1682  *
1683  * Similarly, the .@offset_within_address_space is relative to the
1684  * address space that contains both regions, the passed and the
1685  * returned one.  However, in the special case where the @mr argument
1686  * has no container (and thus is the root of the address space), the
1687  * following will hold:
1688  *    .@offset_within_address_space >= @addr
1689  *    .@offset_within_address_space + .@size <= @addr + @size
1690  *
1691  * @mr: a MemoryRegion within which @addr is a relative address
1692  * @addr: start of the area within @mr to be searched
1693  * @size: size of the area to be searched
1694  */
1695 MemoryRegionSection memory_region_find(MemoryRegion *mr,
1696                                        hwaddr addr, uint64_t size);
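
/*
 * Illustrative sketch: probing for an overlap with memory_region_find().
 * The returned section is assumed to hold a reference on .@mr that the
 * caller drops when done (verify against the implementation).
 *
 *     MemoryRegionSection section;
 *
 *     section = memory_region_find(root_mr, addr, 4);
 *     if (section.mr) {
 *         // found: section.offset_within_region is relative to section.mr
 *         memory_region_unref(section.mr);
 *     }
 */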
1697 
1698 /**
1699  * memory_global_dirty_log_sync: synchronize the dirty log for all memory
1700  *
1701  * Synchronizes the dirty page log for all address spaces.
1702  */
1703 void memory_global_dirty_log_sync(void);
1704 
1705 /**
1706  * memory_global_after_dirty_log_sync: synchronize the vCPUs after a dirty
1707  *                                     log sync
1707  *
1708  * Synchronizes the vCPUs with a thread that is reading the dirty bitmap.
1709  * This function must be called after the dirty log bitmap is cleared, and
1710  * before dirty guest memory pages are read.  If you are using
1711  * #DirtyBitmapSnapshot, memory_region_snapshot_and_clear_dirty() takes
1712  * care of doing this.
1713  */
1714 void memory_global_after_dirty_log_sync(void);
1715 
1716 /**
1717  * memory_region_transaction_begin: Start a transaction.
1718  *
1719  * During a transaction, changes will be accumulated and made visible
1720  * only when the transaction ends (is committed).
1721  */
1722 void memory_region_transaction_begin(void);
1723 
1724 /**
1725  * memory_region_transaction_commit: Commit a transaction and make changes
1726  *                                   visible to the guest.
1727  */
1728 void memory_region_transaction_commit(void);
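
/*
 * Illustrative sketch: batching several layout changes so the flat
 * views are rebuilt, and listeners notified, only once at commit time.
 * "bar_mr" and "new_base" are hypothetical.
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(bar_mr, false);
 *     memory_region_set_address(bar_mr, new_base);
 *     memory_region_set_enabled(bar_mr, true);
 *     memory_region_transaction_commit();
 */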
1729 
1730 /**
1731  * memory_listener_register: register callbacks to be called when memory
1732  *                           sections are mapped or unmapped into an address
1733  *                           space
1734  *
1735  * @listener: an object containing the callbacks to be called
1736  * @filter: if non-%NULL, only regions in this address space will be observed
1737  */
1738 void memory_listener_register(MemoryListener *listener, AddressSpace *filter);
1739 
1740 /**
1741  * memory_listener_unregister: undo the effect of memory_listener_register()
1742  *
1743  * @listener: an object containing the callbacks to be removed
1744  */
1745 void memory_listener_unregister(MemoryListener *listener);
1746 
1747 /**
1748  * memory_global_dirty_log_start: begin dirty logging for all regions
1749  */
1750 void memory_global_dirty_log_start(void);
1751 
1752 /**
1753  * memory_global_dirty_log_stop: end dirty logging for all regions
1754  */
1755 void memory_global_dirty_log_stop(void);
1756 
1757 void mtree_info(bool flatview, bool dispatch_tree, bool owner);
1758 
1759 /**
1760  * memory_region_dispatch_read: perform a read directly to the specified
1761  * MemoryRegion.
1762  *
1763  * @mr: #MemoryRegion to access
1764  * @addr: address within that region
1765  * @pval: pointer to uint64_t which the data is written to
1766  * @op: size, sign, and endianness of the memory operation
1767  * @attrs: memory transaction attributes to use for the access
1768  */
1769 MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
1770                                         hwaddr addr,
1771                                         uint64_t *pval,
1772                                         MemOp op,
1773                                         MemTxAttrs attrs);
1774 /**
1775  * memory_region_dispatch_write: perform a write directly to the specified
1776  * MemoryRegion.
1777  *
1778  * @mr: #MemoryRegion to access
1779  * @addr: address within that region
1780  * @data: data to write
1781  * @op: size, sign, and endianness of the memory operation
1782  * @attrs: memory transaction attributes to use for the access
1783  */
1784 MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
1785                                          hwaddr addr,
1786                                          uint64_t data,
1787                                          MemOp op,
1788                                          MemTxAttrs attrs);
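
/*
 * Illustrative sketch: issuing a 32-bit little-endian read directly to
 * a region whose identity is already known (e.g. after an IOMMU
 * translation), bypassing address space dispatch.
 *
 *     uint64_t val;
 *     MemTxResult res;
 *
 *     res = memory_region_dispatch_read(mr, offset, &val,
 *                                       MO_32 | MO_LE,
 *                                       MEMTXATTRS_UNSPECIFIED);
 */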
1789 
1790 /**
1791  * address_space_init: initializes an address space
1792  *
1793  * @as: an uninitialized #AddressSpace
1794  * @root: a #MemoryRegion that routes addresses for the address space
1795  * @name: an address space name.  The name is only used for debugging
1796  *        output.
1797  */
1798 void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);
1799 
1800 /**
1801  * address_space_destroy: destroy an address space
1802  *
1803  * Releases all resources associated with an address space.  After an address space
1804  * is destroyed, its root memory region (given by address_space_init()) may be destroyed
1805  * as well.
1806  *
1807  * @as: address space to be destroyed
1808  */
1809 void address_space_destroy(AddressSpace *as);
1810 
1811 /**
1812  * address_space_remove_listeners: unregister all listeners of an address space
1813  *
1814  * Removes all callbacks previously registered with memory_listener_register()
1815  * for @as.
1816  *
1817  * @as: an initialized #AddressSpace
1818  */
1819 void address_space_remove_listeners(AddressSpace *as);
1820 
1821 /**
1822  * address_space_rw: read from or write to an address space.
1823  *
1824  * Return a MemTxResult indicating whether the operation succeeded
1825  * or failed (eg unassigned memory, device rejected the transaction,
1826  * IOMMU fault).
1827  *
1828  * @as: #AddressSpace to be accessed
1829  * @addr: address within that address space
1830  * @attrs: memory transaction attributes
1831  * @buf: buffer with the data transferred
1832  * @len: the number of bytes to read or write
1833  * @is_write: indicates the transfer direction
1834  */
1835 MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
1836                              MemTxAttrs attrs, uint8_t *buf,
1837                              hwaddr len, bool is_write);
1838 
1839 /**
1840  * address_space_write: write to address space.
1841  *
1842  * Return a MemTxResult indicating whether the operation succeeded
1843  * or failed (eg unassigned memory, device rejected the transaction,
1844  * IOMMU fault).
1845  *
1846  * @as: #AddressSpace to be accessed
1847  * @addr: address within that address space
1848  * @attrs: memory transaction attributes
1849  * @buf: buffer with the data transferred
1850  * @len: the number of bytes to write
1851  */
1852 MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
1853                                 MemTxAttrs attrs,
1854                                 const uint8_t *buf, hwaddr len);
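
/*
 * Illustrative sketch: a DMA-style write with default transaction
 * attributes, checking the result.  "dma_addr" and "data" are
 * hypothetical.
 *
 *     MemTxResult res;
 *
 *     res = address_space_write(as, dma_addr, MEMTXATTRS_UNSPECIFIED,
 *                               data, sizeof(data));
 *     if (res != MEMTX_OK) {
 *         // report a device-specific DMA error
 *     }
 */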
1855 
1856 /**
1857  * address_space_write_rom: write to address space, including ROM.
1858  *
1859  * This function writes to the specified address space, but will
1860  * write data to both ROM and RAM. This is used for non-guest
1861  * writes like writes from the gdb debug stub or initial loading
1862  * of ROM contents.
1863  *
1864  * Note that portions of the write which attempt to write data to
1865  * a device will be silently ignored -- only real RAM and ROM will
1866  * be written to.
1867  *
1868  * Return a MemTxResult indicating whether the operation succeeded
1869  * or failed (eg unassigned memory, device rejected the transaction,
1870  * IOMMU fault).
1871  *
1872  * @as: #AddressSpace to be accessed
1873  * @addr: address within that address space
1874  * @attrs: memory transaction attributes
1875  * @buf: buffer with the data transferred
1876  * @len: the number of bytes to write
1877  */
1878 MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
1879                                     MemTxAttrs attrs,
1880                                     const uint8_t *buf, hwaddr len);
1881 
1882 /* address_space_ld*: load from an address space
1883  * address_space_st*: store to an address space
1884  *
1885  * These functions perform a load or store of the byte, word,
1886  * longword or quad to the specified address within the AddressSpace.
1887  * The _le suffixed functions treat the data as little endian;
1888  * _be indicates big endian; no suffix indicates "same endianness
1889  * as guest CPU".
1890  *
1891  * The "guest CPU endianness" accessors are deprecated for use outside
1892  * target-* code; devices should be CPU-agnostic and use either the LE
1893  * or the BE accessors.
1894  *
1895  * @as: #AddressSpace to be accessed
1896  * @addr: address within that address space
1897  * @val: data value, for stores
1898  * @attrs: memory transaction attributes
1899  * @result: location to write the success/failure of the transaction;
1900  *   if NULL, this information is discarded
1901  */
1902 
1903 #define SUFFIX
1904 #define ARG1         as
1905 #define ARG1_DECL    AddressSpace *as
1906 #include "exec/memory_ldst.inc.h"
1907 
1908 #define SUFFIX
1909 #define ARG1         as
1910 #define ARG1_DECL    AddressSpace *as
1911 #include "exec/memory_ldst_phys.inc.h"
1912 
1913 struct MemoryRegionCache {
1914     void *ptr;
1915     hwaddr xlat;
1916     hwaddr len;
1917     FlatView *fv;
1918     MemoryRegionSection mrs;
1919     bool is_write;
1920 };
1921 
1922 #define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .mrs.mr = NULL })
1923 
1924 
1925 /* address_space_ld*_cached: load from a cached #MemoryRegion
1926  * address_space_st*_cached: store into a cached #MemoryRegion
1927  *
1928  * These functions perform a load or store of the byte, word,
1929  * longword or quad to the specified address.  The address is
1930  * a physical address in the AddressSpace, but it must lie within
1931  * a #MemoryRegion that was mapped with address_space_cache_init.
1932  *
1933  * The _le suffixed functions treat the data as little endian;
1934  * _be indicates big endian; no suffix indicates "same endianness
1935  * as guest CPU".
1936  *
1937  * The "guest CPU endianness" accessors are deprecated for use outside
1938  * target-* code; devices should be CPU-agnostic and use either the LE
1939  * or the BE accessors.
1940  *
1941  * @cache: previously initialized #MemoryRegionCache to be accessed
1942  * @addr: address within the address space
1943  * @val: data value, for stores
1944  * @attrs: memory transaction attributes
1945  * @result: location to write the success/failure of the transaction;
1946  *   if NULL, this information is discarded
1947  */
1948 
1949 #define SUFFIX       _cached_slow
1950 #define ARG1         cache
1951 #define ARG1_DECL    MemoryRegionCache *cache
1952 #include "exec/memory_ldst.inc.h"
1953 
1954 /* Inline fast path for direct RAM access.  */
1955 static inline uint8_t address_space_ldub_cached(MemoryRegionCache *cache,
1956     hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
1957 {
1958     assert(addr < cache->len);
1959     if (likely(cache->ptr)) {
1960         return ldub_p(cache->ptr + addr);
1961     } else {
1962         return address_space_ldub_cached_slow(cache, addr, attrs, result);
1963     }
1964 }
1965 
1966 static inline void address_space_stb_cached(MemoryRegionCache *cache,
1967     hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
1968 {
1969     assert(addr < cache->len);
1970     if (likely(cache->ptr)) {
1971         stb_p(cache->ptr + addr, val);
1972     } else {
1973         address_space_stb_cached_slow(cache, addr, val, attrs, result);
1974     }
1975 }
1976 
1977 #define ENDIANNESS   _le
1978 #include "exec/memory_ldst_cached.inc.h"
1979 
1980 #define ENDIANNESS   _be
1981 #include "exec/memory_ldst_cached.inc.h"
1982 
1983 #define SUFFIX       _cached
1984 #define ARG1         cache
1985 #define ARG1_DECL    MemoryRegionCache *cache
1986 #include "exec/memory_ldst_phys.inc.h"
1987 
1988 /* address_space_cache_init: prepare for repeated access to a physical
1989  * memory region
1990  *
1991  * @cache: #MemoryRegionCache to be filled
1992  * @as: #AddressSpace to be accessed
1993  * @addr: address within that address space
1994  * @len: length of buffer
1995  * @is_write: indicates the transfer direction
1996  *
1997  * Will only work with RAM, and may map a subset of the requested range by
1998  * returning a value that is less than @len.  On failure, return a negative
1999  * errno value.
2000  *
2001  * Because it only works with RAM, this function can be used for
2002  * read-modify-write operations.  In this case, is_write should be %true.
2003  *
2004  * Note that addresses passed to the address_space_*_cached functions
2005  * are relative to @addr.
2006  */
2007 int64_t address_space_cache_init(MemoryRegionCache *cache,
2008                                  AddressSpace *as,
2009                                  hwaddr addr,
2010                                  hwaddr len,
2011                                  bool is_write);
2012 
2013 /**
2014  * address_space_cache_invalidate: complete a write to a #MemoryRegionCache
2015  *
2016  * @cache: The #MemoryRegionCache to operate on.
2017  * @addr: The first physical address that was written, relative to the
2018  * address that was passed to address_space_cache_init().
2019  * @access_len: The number of bytes that were written starting at @addr.
2020  */
2021 void address_space_cache_invalidate(MemoryRegionCache *cache,
2022                                     hwaddr addr,
2023                                     hwaddr access_len);
2024 
2025 /**
2026  * address_space_cache_destroy: free a #MemoryRegionCache
2027  *
2028  * @cache: The #MemoryRegionCache whose memory should be released.
2029  */
2030 void address_space_cache_destroy(MemoryRegionCache *cache);
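
/*
 * Illustrative sketch: the typical cache lifecycle, e.g. for a ring
 * buffer that is read and written repeatedly.  "ring_pa" and "len" are
 * hypothetical; only the init failure is checked.
 *
 *     MemoryRegionCache cache = MEMORY_REGION_CACHE_INVALID;
 *     uint16_t idx;
 *
 *     if (address_space_cache_init(&cache, as, ring_pa, len, true) < 0) {
 *         return;
 *     }
 *     idx = address_space_lduw_le_cached(&cache, 2,
 *                                        MEMTXATTRS_UNSPECIFIED, NULL);
 *     address_space_stw_le_cached(&cache, 2, idx + 1,
 *                                 MEMTXATTRS_UNSPECIFIED, NULL);
 *     address_space_cache_invalidate(&cache, 2, sizeof(idx));
 *     address_space_cache_destroy(&cache);
 */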
2031 
2032 /* address_space_get_iotlb_entry: translate an address into an IOTLB
2033  * entry. Should be called from an RCU critical section.
2034  */
2035 IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
2036                                             bool is_write, MemTxAttrs attrs);
2037 
2038 /* address_space_translate: translate an address range within an address
2039  * space into a MemoryRegion and an address range within that region.
2040  * Should be called from an RCU critical section, to avoid the last
2041  * reference to the returned region disappearing after it returns.
2042  *
2043  * @fv: #FlatView to be accessed
2044  * @addr: address within that address space
2045  * @xlat: pointer to address within the returned memory region section's
2046  * #MemoryRegion.
2047  * @len: pointer to length
2048  * @is_write: indicates the transfer direction
2049  * @attrs: memory attributes
2050  */
2051 MemoryRegion *flatview_translate(FlatView *fv,
2052                                  hwaddr addr, hwaddr *xlat,
2053                                  hwaddr *len, bool is_write,
2054                                  MemTxAttrs attrs);
2055 
2056 static inline MemoryRegion *address_space_translate(AddressSpace *as,
2057                                                     hwaddr addr, hwaddr *xlat,
2058                                                     hwaddr *len, bool is_write,
2059                                                     MemTxAttrs attrs)
2060 {
2061     return flatview_translate(address_space_to_flatview(as),
2062                               addr, xlat, len, is_write, attrs);
2063 }
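
/*
 * Illustrative sketch: translating under the RCU read lock, as
 * required above, before touching the returned region.
 *
 *     hwaddr xlat, l = size;
 *     MemoryRegion *mr;
 *
 *     rcu_read_lock();
 *     mr = address_space_translate(as, addr, &xlat, &l, is_write, attrs);
 *     // ... access mr at offset xlat, for at most l bytes ...
 *     rcu_read_unlock();
 */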
2064 
2065 /* address_space_access_valid: check for validity of accessing an address
2066  * space range
2067  *
2068  * Check whether memory is assigned to the given address space range, and
2069  * access is permitted by any IOMMU regions that are active for the address
2070  * space.
2071  *
2072  * For now, addr and len should be aligned to a page size.  This limitation
2073  * will be lifted in the future.
2074  *
2075  * @as: #AddressSpace to be accessed
2076  * @addr: address within that address space
2077  * @len: length of the area to be checked
2078  * @is_write: indicates the transfer direction
2079  * @attrs: memory attributes
2080  */
2081 bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len,
2082                                 bool is_write, MemTxAttrs attrs);
2083 
2084 /* address_space_map: map a physical memory region into a host virtual address
2085  *
2086  * May map a subset of the requested range, given by and returned in @plen.
2087  * May return %NULL if resources needed to perform the mapping are exhausted.
2088  * Use only for reads OR writes - not for read-modify-write operations.
2089  * Use cpu_register_map_client() to know when retrying the map operation is
2090  * likely to succeed.
2091  *
2092  * @as: #AddressSpace to be accessed
2093  * @addr: address within that address space
2094  * @plen: pointer to length of buffer; updated on return
2095  * @is_write: indicates the transfer direction
2096  * @attrs: memory attributes
2097  */
2098 void *address_space_map(AddressSpace *as, hwaddr addr,
2099                         hwaddr *plen, bool is_write, MemTxAttrs attrs);
2100 
2101 /* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
2102  *
2103  * Will also mark the memory as dirty if @is_write == %true.  @access_len gives
2104  * the amount of memory that was actually read or written by the caller.
2105  *
2106  * @as: #AddressSpace used
2107  * @buffer: host pointer as returned by address_space_map()
2108  * @len: buffer length as returned by address_space_map()
2109  * @access_len: amount of data actually transferred
2110  * @is_write: indicates the transfer direction
2111  */
2112 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2113                          int is_write, hwaddr access_len);
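
/*
 * Illustrative sketch: a bounded map/unmap sequence for a one-off
 * read; note the mapped length may come back smaller than requested.
 *
 *     hwaddr plen = len;
 *     void *p;
 *
 *     p = address_space_map(as, addr, &plen, false,
 *                           MEMTXATTRS_UNSPECIFIED);
 *     if (p) {
 *         // consume up to plen bytes at p
 *         address_space_unmap(as, p, plen, false, plen);
 *     }
 */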
2114 
2115 
2116 /* Internal functions, part of the implementation of address_space_read.  */
2117 MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2118                                     MemTxAttrs attrs, uint8_t *buf, hwaddr len);
2119 MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
2120                                    MemTxAttrs attrs, uint8_t *buf,
2121                                    hwaddr len, hwaddr addr1, hwaddr l,
2122                                    MemoryRegion *mr);
2123 void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);
2124 
2125 /* Internal functions, part of the implementation of address_space_read_cached
2126  * and address_space_write_cached.  */
2127 void address_space_read_cached_slow(MemoryRegionCache *cache,
2128                                     hwaddr addr, void *buf, hwaddr len);
2129 void address_space_write_cached_slow(MemoryRegionCache *cache,
2130                                      hwaddr addr, const void *buf, hwaddr len);
2131 
2132 static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
2133 {
2134     if (is_write) {
2135         return memory_region_is_ram(mr) &&
2136                !mr->readonly && !memory_region_is_ram_device(mr);
2137     } else {
2138         return (memory_region_is_ram(mr) && !memory_region_is_ram_device(mr)) ||
2139                memory_region_is_romd(mr);
2140     }
2141 }
2142 
2143 /**
2144  * address_space_read: read from an address space.
2145  *
2146  * Return a MemTxResult indicating whether the operation succeeded
2147  * or failed (eg unassigned memory, device rejected the transaction,
2148  * IOMMU fault).  Called within RCU critical section.
2149  *
2150  * @as: #AddressSpace to be accessed
2151  * @addr: address within that address space
2152  * @attrs: memory transaction attributes
2153  * @buf: buffer with the data transferred
2154  */
2155 static inline __attribute__((__always_inline__))
2156 MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
2157                                MemTxAttrs attrs, uint8_t *buf,
2158                                hwaddr len)
2159 {
2160     MemTxResult result = MEMTX_OK;
2161     hwaddr l, addr1;
2162     void *ptr;
2163     MemoryRegion *mr;
2164     FlatView *fv;
2165 
2166     if (__builtin_constant_p(len)) {
2167         if (len) {
2168             rcu_read_lock();
2169             fv = address_space_to_flatview(as);
2170             l = len;
2171             mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
2172             if (len == l && memory_access_is_direct(mr, false)) {
2173                 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
2174                 memcpy(buf, ptr, len);
2175             } else {
2176                 result = flatview_read_continue(fv, addr, attrs, buf, len,
2177                                                 addr1, l, mr);
2178             }
2179             rcu_read_unlock();
2180         }
2181     } else {
2182         result = address_space_read_full(as, addr, attrs, buf, len);
2183     }
2184     return result;
2185 }
2186 
2187 /**
2188  * address_space_read_cached: read from a cached RAM region
2189  *
2190  * @cache: Cached region to be addressed
2191  * @addr: address relative to the base of the RAM region
2192  * @buf: buffer with the data transferred
2193  * @len: length of the data transferred
2194  */
2195 static inline void
2196 address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
2197                           void *buf, hwaddr len)
2198 {
2199     assert(addr < cache->len && len <= cache->len - addr);
2200     if (likely(cache->ptr)) {
2201         memcpy(buf, cache->ptr + addr, len);
2202     } else {
2203         address_space_read_cached_slow(cache, addr, buf, len);
2204     }
2205 }
2206 
2207 /**
2208  * address_space_write_cached: write to a cached RAM region
2209  *
2210  * @cache: Cached region to be addressed
2211  * @addr: address relative to the base of the RAM region
2212  * @buf: buffer with the data transferred
2213  * @len: length of the data transferred
2214  */
2215 static inline void
2216 address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
2217                            void *buf, hwaddr len)
2218 {
2219     assert(addr < cache->len && len <= cache->len - addr);
2220     if (likely(cache->ptr)) {
2221         memcpy(cache->ptr + addr, buf, len);
2222     } else {
2223         address_space_write_cached_slow(cache, addr, buf, len);
2224     }
2225 }
2226 
2227 #ifdef NEED_CPU_H
2228 /* enum device_endian to MemOp.  */
2229 static inline MemOp devend_memop(enum device_endian end)
2230 {
2231     QEMU_BUILD_BUG_ON(DEVICE_HOST_ENDIAN != DEVICE_LITTLE_ENDIAN &&
2232                       DEVICE_HOST_ENDIAN != DEVICE_BIG_ENDIAN);
2233 
2234 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
2235     /* Swap if non-host endianness or native (target) endianness */
2236     return (end == DEVICE_HOST_ENDIAN) ? 0 : MO_BSWAP;
2237 #else
2238     const int non_host_endianness =
2239         DEVICE_LITTLE_ENDIAN ^ DEVICE_BIG_ENDIAN ^ DEVICE_HOST_ENDIAN;
2240 
2241     /* In this case, native (target) endianness needs no swap.  */
2242     return (end == non_host_endianness) ? MO_BSWAP : 0;
2243 #endif
2244 }
2245 #endif
2246 
2247 #endif
2248 
2249 #endif
2250