xref: /qemu/include/qemu/mmap-alloc.h (revision c5955f4f)
#ifndef QEMU_MMAP_ALLOC_H
#define QEMU_MMAP_ALLOC_H


/*
 * Return the page size of the file system backing @fd, e.g. the huge page
 * size if @fd refers to a file on hugetlbfs.
 */
size_t qemu_fd_getpagesize(int fd);

/*
 * Like qemu_fd_getpagesize(), but for a filesystem path such as a mem-path
 * memory backend location.
 */
size_t qemu_mempath_getpagesize(const char *mem_path);

/**
 * qemu_ram_mmap: mmap anonymous memory or the specified file or device.
 *
 * mmap() abstraction to map guest RAM, simplifying flag handling, taking
 * care of alignment requirements and installing guard pages.
 *
 * Parameters:
 *  @fd: the file or the device to mmap
 *  @size: the number of bytes to be mmapped
 *  @align: if not zero, specifies the alignment of the starting mapping
 *          address; otherwise, the alignment in use is determined by QEMU.
 *  @qemu_map_flags: QEMU_MAP_* flags
 *  @map_offset: the mapping starts at offset @map_offset from the start of @fd
 *
 * Internally, MAP_PRIVATE, MAP_ANONYMOUS and MAP_SHARED_VALIDATE are set
 * implicitly based on the other parameters.
 *
 * Return:
 *  On success, return a pointer to the mapped area.
 *  On failure, return MAP_FAILED.
 */
void *qemu_ram_mmap(int fd,
                    size_t size,
                    size_t align,
                    uint32_t qemu_map_flags,
                    off_t map_offset);
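
/*
 * Usage sketch, assuming @fd and @size describe a backing file supplied by
 * the caller: map the region as shared guest RAM without reserving swap
 * space, check for failure, and unmap it again via qemu_ram_munmap().
 *
 *     void *ptr = qemu_ram_mmap(fd, size, 0,
 *                               QEMU_MAP_SHARED | QEMU_MAP_NORESERVE, 0);
 *     if (ptr == MAP_FAILED) {
 *         return -errno;
 *     }
 *     ...
 *     qemu_ram_munmap(fd, ptr, size);
 */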

/*
 * qemu_ram_munmap: unmap a mapping previously returned by qemu_ram_mmap(),
 * including the guard page installed behind it.
 */
void qemu_ram_munmap(int fd, void *ptr, size_t size);

/*
 * Abstraction of PROT_ and MAP_ flags as passed to mmap(); consumed, for
 * example, by qemu_ram_mmap().
 */

/* Map PROT_READ instead of PROT_READ | PROT_WRITE. */
#define QEMU_MAP_READONLY   (1 << 0)

/* Use MAP_SHARED instead of MAP_PRIVATE. */
#define QEMU_MAP_SHARED     (1 << 1)

/*
 * Use MAP_SYNC | MAP_SHARED_VALIDATE if supported. Ignored without
 * QEMU_MAP_SHARED. If mapping fails, warn and fall back to !QEMU_MAP_SYNC.
 */
#define QEMU_MAP_SYNC       (1 << 2)

/*
 * Use MAP_NORESERVE to skip reservation of swap space (or huge pages, if
 * applicable). Bail out if this is not supported or not effective.
 */
#define QEMU_MAP_NORESERVE  (1 << 3)
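
/*
 * Usage sketch, assuming @fd refers to a file that supports synchronous
 * page faults (e.g. DAX): request a MAP_SYNC mapping; if that is not
 * supported, the mapping falls back to a plain shared mapping with a
 * warning, as described for QEMU_MAP_SYNC above.
 *
 *     void *ptr = qemu_ram_mmap(fd, size, 0,
 *                               QEMU_MAP_SHARED | QEMU_MAP_SYNC, 0);
 */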

#endif