/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include <sys/shm.h>
#include "trace.h"
#include "exec/log.h"
#include "qemu.h"
#include "user-internals.h"
#include "user-mmap.h"
#include "target_mman.h"
#include "qemu/interval-tree.h"

#ifdef TARGET_ARM
#include "target/arm/cpu-features.h"
#endif

static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    assert(mmap_lock_count > 0);
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

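/* Return whether the calling thread currently holds mmap_lock. */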
bool have_mmap_lock(void)
{
    return mmap_lock_count > 0;
}

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count) {
        abort();
    }
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
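    /*
     * After fork, the child inherits the mutex in whatever state the
     * parent held it, with no valid owner thread; reinitialize it
     * rather than unlocking.
     */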
    if (child) {
        pthread_mutex_init(&mmap_mutex, NULL);
    } else {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

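/*
 * Regions created by shmat are tracked here so that the target's
 * shmdt, which is given only a start address, can determine the
 * full extent to detach.
 */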
/* Protected by mmap_lock. */
static IntervalTreeRoot shm_regions;

static void shm_region_add(abi_ptr start, abi_ptr last)
{
    IntervalTreeNode *i = g_new0(IntervalTreeNode, 1);

    i->start = start;
    i->last = last;
    interval_tree_insert(i, &shm_regions);
}

static abi_ptr shm_region_find(abi_ptr start)
{
    IntervalTreeNode *i;

    for (i = interval_tree_iter_first(&shm_regions, start, start); i;
         i = interval_tree_iter_next(i, start, start)) {
        if (i->start == start) {
            return i->last;
        }
    }
    return 0;
}

static void shm_region_rm_complete(abi_ptr start, abi_ptr last)
{
    IntervalTreeNode *i, *n;

    for (i = interval_tree_iter_first(&shm_regions, start, last); i; i = n) {
        n = interval_tree_iter_next(i, start, last);
        if (i->start >= start && i->last <= last) {
            interval_tree_remove(i, &shm_regions);
            g_free(i);
        }
    }
}

/*
 * Validate target prot bitmask.
 * Return 0 if the target prot bitmask is invalid, otherwise
 * the internal qemu page_flags (which will include PAGE_VALID).
 */
static int validate_prot_to_pageflags(int prot)
{
    int valid = PROT_READ | PROT_WRITE | PROT_EXEC | TARGET_PROT_SEM;
    int page_flags = (prot & PAGE_BITS) | PAGE_VALID;

#ifdef TARGET_AARCH64
    {
        ARMCPU *cpu = ARM_CPU(thread_cpu);

        /*
         * The PROT_BTI bit is only accepted if the cpu supports the feature.
         * Since this is the unusual case, don't bother checking unless
         * the bit has been requested.  If set and valid, record the bit
         * within QEMU's page_flags.
         */
        if ((prot & TARGET_PROT_BTI) && cpu_isar_feature(aa64_bti, cpu)) {
            valid |= TARGET_PROT_BTI;
            page_flags |= PAGE_BTI;
        }
        /* Similarly for the PROT_MTE bit. */
        if ((prot & TARGET_PROT_MTE) && cpu_isar_feature(aa64_mte, cpu)) {
            valid |= TARGET_PROT_MTE;
            page_flags |= PAGE_MTE;
        }
    }
#elif defined(TARGET_HPPA)
    valid |= PROT_GROWSDOWN | PROT_GROWSUP;
#endif

    return prot & ~valid ? 0 : page_flags;
}

/*
 * For the host, we need not pass anything except read/write/exec.
 * While PROT_SEM is allowed by all hosts, it is also ignored, so
 * don't bother transforming guest bit to host bit.  Any other
 * target-specific prot bits will not be understood by the host
 * and will need to be encoded into page_flags for qemu emulation.
 *
 * Pages that are executable by the guest will never be executed
 * by the host, but the host will need to be able to read them.
 */
static int target_to_host_prot(int prot)
{
    return (prot & (PROT_READ | PROT_WRITE)) |
           (prot & PROT_EXEC ? PROT_READ : 0);
}

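/*
 * When host pages are larger than target pages, one host page can
 * contain target pages with different protections.  target_mprotect
 * therefore splits the request into at most three host ranges: an
 * unaligned head and tail, each protected with the union of the old
 * and new guest protections on that host page, and an aligned middle.
 */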
/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
{
    int host_page_size = qemu_real_host_page_size();
    abi_ulong starts[3];
    abi_ulong lens[3];
    int prots[3];
    abi_ulong host_start, host_last, last;
    int prot1, ret, page_flags, nranges;

    trace_target_mprotect(start, len, target_prot);

    if ((start & ~TARGET_PAGE_MASK) != 0) {
        return -TARGET_EINVAL;
    }
    page_flags = validate_prot_to_pageflags(target_prot);
    if (!page_flags) {
        return -TARGET_EINVAL;
    }
    if (len == 0) {
        return 0;
    }
    len = TARGET_PAGE_ALIGN(len);
    if (!guest_range_valid_untagged(start, len)) {
        return -TARGET_ENOMEM;
    }

    last = start + len - 1;
    host_start = start & -host_page_size;
    host_last = ROUND_UP(last, host_page_size) - 1;
    nranges = 0;

    mmap_lock();

    if (host_last - host_start < host_page_size) {
        /* Single host page contains all guest pages: sum the prot. */
        prot1 = target_prot;
        for (abi_ulong a = host_start; a < start; a += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(a);
        }
        for (abi_ulong a = last; a < host_last; a += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(a + 1);
        }
        starts[nranges] = host_start;
        lens[nranges] = host_page_size;
        prots[nranges] = prot1;
        nranges++;
    } else {
        if (host_start < start) {
            /* Host page contains more than one guest page: sum the prot. */
            prot1 = target_prot;
            for (abi_ulong a = host_start; a < start; a += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(a);
            }
            /* If the resulting sum differs, create a new range. */
            if (prot1 != target_prot) {
                starts[nranges] = host_start;
                lens[nranges] = host_page_size;
                prots[nranges] = prot1;
                nranges++;
                host_start += host_page_size;
            }
        }

        if (last < host_last) {
            /* Host page contains more than one guest page: sum the prot. */
            prot1 = target_prot;
            for (abi_ulong a = last; a < host_last; a += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(a + 1);
            }
            /* If the resulting sum differs, create a new range. */
            if (prot1 != target_prot) {
                host_last -= host_page_size;
                starts[nranges] = host_last + 1;
                lens[nranges] = host_page_size;
                prots[nranges] = prot1;
                nranges++;
            }
        }

        /* Create a range for the middle, if any remains. */
        if (host_start < host_last) {
            starts[nranges] = host_start;
            lens[nranges] = host_last - host_start + 1;
            prots[nranges] = target_prot;
            nranges++;
        }
    }

    for (int i = 0; i < nranges; ++i) {
        ret = mprotect(g2h_untagged(starts[i]), lens[i],
                       target_to_host_prot(prots[i]));
        if (ret != 0) {
            goto error;
        }
    }

    page_set_flags(start, last, page_flags);
    ret = 0;

 error:
    mmap_unlock();
    return ret;
}

/*
 * Perform munmap on behalf of the target, with host parameters.
 * If reserved_va, we must replace the memory reservation.
 */
static int do_munmap(void *addr, size_t len)
{
    if (reserved_va) {
        void *ptr = mmap(addr, len, PROT_NONE,
                         MAP_FIXED | MAP_ANONYMOUS
                         | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
        return ptr == addr ? 0 : -1;
    }
    return munmap(addr, len);
}

/*
 * Map an incomplete host page.
 *
 * Here be dragons.  This case will not work if there is an existing
 * overlapping host page, which is file mapped, and for which the mapping
 * is beyond the end of the file.  In that case, we will see SIGBUS when
 * trying to write a portion of this page.
 *
 * FIXME: Work around this with a temporary signal handler and longjmp.
 */
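/*
 * Map the guest range [start, last], which lies within the host page
 * beginning at real_start, while preserving any guest pages already
 * present elsewhere in that same host page.
 */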
static bool mmap_frag(abi_ulong real_start, abi_ulong start, abi_ulong last,
                      int prot, int flags, int fd, off_t offset)
{
    int host_page_size = qemu_real_host_page_size();
    abi_ulong real_last;
    void *host_start;
    int prot_old, prot_new;
    int host_prot_old, host_prot_new;

    if (!(flags & MAP_ANONYMOUS)
        && (flags & MAP_TYPE) == MAP_SHARED
        && (prot & PROT_WRITE)) {
        /*
         * msync() won't work with the partial page, so we return an
         * error if write is possible while it is a shared mapping.
         */
        errno = EINVAL;
        return false;
    }

    real_last = real_start + host_page_size - 1;
    host_start = g2h_untagged(real_start);

    /* Get the protection of the target pages outside the mapping. */
    prot_old = 0;
    for (abi_ulong a = real_start; a < start; a += TARGET_PAGE_SIZE) {
        prot_old |= page_get_flags(a);
    }
    for (abi_ulong a = real_last; a > last; a -= TARGET_PAGE_SIZE) {
        prot_old |= page_get_flags(a);
    }

    if (prot_old == 0) {
        /*
         * Since !(prot_old & PAGE_VALID), there were no guest pages
         * outside of the fragment we need to map.  Allocate a new host
         * page to cover, discarding whatever else may have been present.
         */
        void *p = mmap(host_start, host_page_size,
                       target_to_host_prot(prot),
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p != host_start) {
            if (p != MAP_FAILED) {
                do_munmap(p, host_page_size);
                errno = EEXIST;
            }
            return false;
        }
        prot_old = prot;
    }
    prot_new = prot | prot_old;

    host_prot_old = target_to_host_prot(prot_old);
    host_prot_new = target_to_host_prot(prot_new);

    /* Adjust protection to be able to write. */
    if (!(host_prot_old & PROT_WRITE)) {
        host_prot_old |= PROT_WRITE;
        mprotect(host_start, host_page_size, host_prot_old);
    }

    /* Read or zero the new guest pages. */
    if (flags & MAP_ANONYMOUS) {
        memset(g2h_untagged(start), 0, last - start + 1);
    } else {
        if (pread(fd, g2h_untagged(start), last - start + 1, offset) == -1) {
            return false;
        }
    }

    /* Put final protection */
    if (host_prot_new != host_prot_old) {
        mprotect(host_start, host_page_size, host_prot_new);
    }
    return true;
}

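/*
 * Guest address-space layout bases, initialized during image load;
 * mmap_next_start tracks where the next free-area search begins.
 */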
abi_ulong task_unmapped_base;
abi_ulong elf_et_dyn_base;
abi_ulong mmap_next_start;

/*
 * Subroutine of mmap_find_vma, used when we have pre-allocated
 * a chunk of guest address space.
 */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong align)
{
    target_ulong ret;

    ret = page_find_range_empty(start, reserved_va, size, align);
    if (ret == -1 && start > mmap_min_addr) {
        /* Restart at the beginning of the address space. */
        ret = page_find_range_empty(mmap_min_addr, start - 1, size, align);
    }

    return ret;
}

/*
 * Find and reserve a free memory area of size 'size'.  The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
{
    int host_page_size = qemu_real_host_page_size();
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    align = MAX(align, host_page_size);

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= -host_page_size;
    }
    start = ROUND_UP(start, align);
    size = ROUND_UP(size, host_page_size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size, align);
    }

    addr = start;
    wrapped = repeat = 0;
    prev = 0;

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
                   MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /*
         * Count the number of sequential returns of the same address.
         * This is used to modify the search algorithm below.
         */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & (align - 1)) == 0) {
                /* Success. */
                if (start == mmap_next_start && addr >= task_unmapped_base) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target. */
            switch (repeat) {
            case 0:
                /*
                 * Assume the result that the kernel gave us is the
                 * first with enough free space, so start again at the
                 * next higher target page.
                 */
                addr = ROUND_UP(addr, align);
                break;
            case 1:
                /*
                 * Sometimes the kernel decides to perform the allocation
                 * at the top end of memory instead.
                 */
                addr &= -align;
                break;
            case 2:
                /* Start over at low memory. */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last. */
                addr = -1;
                break;
            }
        } else {
            /*
             * Since the result the kernel gave didn't fit, start
             * again at low memory.  If any repetition, fail.
             */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again. */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space. */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /*
             * Don't actually use 0 when wrapping, instead indicate
             * that we'd truly like an allocation in low memory.
             */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                    ? TARGET_PAGE_ALIGN(mmap_min_addr)
                    : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}

/*
 * Record a successful mmap within the user-exec interval tree.
 */
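/*
 * The passthrough range covers pages backed 1:1 by the host mapping,
 * with no fragment emulation, which permits later operations (e.g.
 * madvise) to be passed through to the host.  An inverted range
 * (passthrough_start > passthrough_last) marks it empty.
 */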
static abi_long mmap_end(abi_ulong start, abi_ulong last,
                         abi_ulong passthrough_start,
                         abi_ulong passthrough_last,
                         int flags, int page_flags)
{
    if (flags & MAP_ANONYMOUS) {
        page_flags |= PAGE_ANON;
    }
    page_flags |= PAGE_RESET;
    if (passthrough_start > passthrough_last) {
        page_set_flags(start, last, page_flags);
    } else {
        if (start < passthrough_start) {
            page_set_flags(start, passthrough_start - 1, page_flags);
        }
        page_set_flags(passthrough_start, passthrough_last,
                       page_flags | PAGE_PASSTHROUGH);
        if (passthrough_last < last) {
            page_set_flags(passthrough_last + 1, last, page_flags);
        }
    }
    shm_region_rm_complete(start, last);
    trace_target_mmap_complete(start);
    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        FILE *f = qemu_log_trylock();
        if (f) {
            fprintf(f, "page layout changed following mmap\n");
            page_dump(f);
            qemu_log_unlock(f);
        }
    }
    return start;
}

/*
 * Special case host page size == target page size,
 * where there are no edge conditions.
 */
static abi_long mmap_h_eq_g(abi_ulong start, abi_ulong len,
                            int host_prot, int flags, int page_flags,
                            int fd, off_t offset)
{
    void *p, *want_p = g2h_untagged(start);
    abi_ulong last;

    p = mmap(want_p, len, host_prot, flags, fd, offset);
    if (p == MAP_FAILED) {
        return -1;
    }
    /* If the host kernel does not support MAP_FIXED_NOREPLACE, emulate. */
    if ((flags & MAP_FIXED_NOREPLACE) && p != want_p) {
        do_munmap(p, len);
        errno = EEXIST;
        return -1;
    }

    start = h2g(p);
    last = start + len - 1;
    return mmap_end(start, last, start, last, flags, page_flags);
}

/*
 * Special case host page size < target page size.
 *
 * The two special cases are increased guest alignment, and mapping
 * past the end of a file.
 *
 * When mapping files into a memory area larger than the file,
 * accesses to pages beyond the file size will cause a SIGBUS.
 *
 * For example, if mmaping a file of 100 bytes on a host with 4K
 * pages emulating a target with 8K pages, the target expects to
 * be able to access the first 8K.  But the host will trap us on
 * any access beyond 4K.
 *
 * When emulating a target with a larger page size than the host's,
 * we may need to truncate file maps at EOF and add extra anonymous
 * pages up to the target's page boundary.
 *
 * This workaround only works for files that do not change.
 * If the file is later extended (e.g. ftruncate), the SIGBUS
 * vanishes and the proper behaviour is that changes within the
 * anon page should be reflected in the file.
 *
 * However, this case is rather common with executable images,
 * so the workaround is important for even trivial tests, whereas
 * the mmap of a file being extended is less common.
 */
static abi_long mmap_h_lt_g(abi_ulong start, abi_ulong len, int host_prot,
                            int mmap_flags, int page_flags, int fd,
                            off_t offset, int host_page_size)
{
    void *p, *want_p = g2h_untagged(start);
    off_t fileend_adj = 0;
    int flags = mmap_flags;
    abi_ulong last, pass_last;

    if (!(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat(fd, &sb) == -1) {
            return -1;
        }
        if (offset >= sb.st_size) {
            /*
             * The entire map is beyond the end of the file.
             * Transform it to an anonymous mapping.
             */
            flags |= MAP_ANONYMOUS;
            fd = -1;
            offset = 0;
        } else if (offset + len > sb.st_size) {
            /*
             * A portion of the map is beyond the end of the file.
             * Truncate the file portion of the allocation.
             */
            fileend_adj = offset + len - sb.st_size;
        }
    }

    if (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE)) {
        if (fileend_adj) {
            p = mmap(want_p, len, host_prot, flags | MAP_ANONYMOUS, -1, 0);
        } else {
            p = mmap(want_p, len, host_prot, flags, fd, offset);
        }
        if (p != want_p) {
            if (p != MAP_FAILED) {
                /* Host does not support MAP_FIXED_NOREPLACE: emulate. */
                do_munmap(p, len);
                errno = EEXIST;
            }
            return -1;
        }

        if (fileend_adj) {
            void *t = mmap(p, len - fileend_adj, host_prot,
                           (flags & ~MAP_FIXED_NOREPLACE) | MAP_FIXED,
                           fd, offset);

            if (t == MAP_FAILED) {
                int save_errno = errno;

                /*
                 * We failed a map over the top of the successful anonymous
                 * mapping above. The only failure mode is running out of VMAs,
                 * and there's nothing that we can do to detect that earlier.
                 * If we have replaced an existing mapping with MAP_FIXED,
                 * then we cannot properly recover.  It's a coin toss whether
                 * it would be better to exit or continue here.
                 */
                if (!(flags & MAP_FIXED_NOREPLACE) &&
                    !page_check_range_empty(start, start + len - 1)) {
                    qemu_log("QEMU target_mmap late failure: %s",
                             strerror(save_errno));
                }

                do_munmap(want_p, len);
                errno = save_errno;
                return -1;
            }
        }
    } else {
        size_t host_len, part_len;

        /*
         * Take care to align the host memory.  Perform a larger anonymous
         * allocation and extract the aligned portion.  Remap the file on
         * top of that.
         */
        host_len = len + TARGET_PAGE_SIZE - host_page_size;
        p = mmap(want_p, host_len, host_prot, flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            return -1;
        }

        part_len = (uintptr_t)p & (TARGET_PAGE_SIZE - 1);
        if (part_len) {
            part_len = TARGET_PAGE_SIZE - part_len;
            do_munmap(p, part_len);
            p += part_len;
            host_len -= part_len;
        }
        if (len < host_len) {
            do_munmap(p + len, host_len - len);
        }

        if (!(flags & MAP_ANONYMOUS)) {
            void *t = mmap(p, len - fileend_adj, host_prot,
                           flags | MAP_FIXED, fd, offset);

            if (t == MAP_FAILED) {
                int save_errno = errno;
                do_munmap(p, len);
                errno = save_errno;
                return -1;
            }
        }

        start = h2g(p);
    }

    last = start + len - 1;
    if (fileend_adj) {
        pass_last = ROUND_UP(last - fileend_adj, host_page_size) - 1;
    } else {
        pass_last = last;
    }
    return mmap_end(start, last, start, pass_last, mmap_flags, page_flags);
}

/*
 * Special case host page size > target page size.
 *
 * The two special cases are address and file offsets that are valid
 * for the guest that cannot be directly represented by the host.
 */
static abi_long mmap_h_gt_g(abi_ulong start, abi_ulong len,
                            int target_prot, int host_prot,
                            int flags, int page_flags, int fd,
                            off_t offset, int host_page_size)
{
    void *p, *want_p = g2h_untagged(start);
    off_t host_offset = offset & -host_page_size;
    abi_ulong last, real_start, real_last;
    bool misaligned_offset = false;
    size_t host_len;

    if (!(flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
        /*
         * Adjust the offset to something representable on the host.
         */
        host_len = len + offset - host_offset;
        p = mmap(want_p, host_len, host_prot, flags, fd, host_offset);
        if (p == MAP_FAILED) {
            return -1;
        }

        /* Update start to the file position at offset. */
        p += offset - host_offset;

        start = h2g(p);
        last = start + len - 1;
        return mmap_end(start, last, start, last, flags, page_flags);
    }

    if (!(flags & MAP_ANONYMOUS)) {
        misaligned_offset = (start ^ offset) & (host_page_size - 1);

        /*
         * The fallback for misalignment is a private mapping + read.
         * This carries none of the semantics required of MAP_SHARED.
         */
        if (misaligned_offset && (flags & MAP_TYPE) != MAP_PRIVATE) {
            errno = EINVAL;
            return -1;
        }
    }

    last = start + len - 1;
    real_start = start & -host_page_size;
    real_last = ROUND_UP(last, host_page_size) - 1;

    /*
     * Handle the start and end of the mapping.
     */
    if (real_start < start) {
        abi_ulong real_page_last = real_start + host_page_size - 1;
        if (last <= real_page_last) {
            /* Entire allocation a subset of one host page. */
            if (!mmap_frag(real_start, start, last, target_prot,
                           flags, fd, offset)) {
                return -1;
            }
            return mmap_end(start, last, -1, 0, flags, page_flags);
        }

        if (!mmap_frag(real_start, start, real_page_last, target_prot,
                       flags, fd, offset)) {
            return -1;
        }
        real_start = real_page_last + 1;
    }

    if (last < real_last) {
        abi_ulong real_page_start = real_last - host_page_size + 1;
        if (!mmap_frag(real_page_start, real_page_start, last,
                       target_prot, flags, fd,
                       offset + real_page_start - start)) {
            return -1;
        }
        real_last = real_page_start - 1;
    }

    if (real_start > real_last) {
        return mmap_end(start, last, -1, 0, flags, page_flags);
    }

    /*
     * Handle the middle of the mapping.
     */

    host_len = real_last - real_start + 1;
    want_p += real_start - start;

    if (flags & MAP_ANONYMOUS) {
        p = mmap(want_p, host_len, host_prot, flags, -1, 0);
    } else if (!misaligned_offset) {
        p = mmap(want_p, host_len, host_prot, flags, fd,
                 offset + real_start - start);
    } else {
        p = mmap(want_p, host_len, host_prot | PROT_WRITE,
                 flags | MAP_ANONYMOUS, -1, 0);
    }
    if (p != want_p) {
        if (p != MAP_FAILED) {
            do_munmap(p, host_len);
            errno = EEXIST;
        }
        return -1;
    }

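    /*
     * For a misaligned offset, the anonymous mapping above was created
     * writable so the file contents can be copied in by hand; drop
     * PROT_WRITE afterwards if the guest did not request it.
     */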
    if (misaligned_offset) {
        /* TODO: The read could be short. */
        if (pread(fd, p, host_len, offset + real_start - start) != host_len) {
            do_munmap(p, host_len);
            return -1;
        }
        if (!(host_prot & PROT_WRITE)) {
            mprotect(p, host_len, host_prot);
        }
    }

    return mmap_end(start, last, -1, 0, flags, page_flags);
}

static abi_long target_mmap__locked(abi_ulong start, abi_ulong len,
                                    int target_prot, int flags, int page_flags,
                                    int fd, off_t offset)
{
    int host_page_size = qemu_real_host_page_size();
    int host_prot;

    /*
     * For reserved_va, we are in full control of the allocation.
     * Find a suitable hole and convert to MAP_FIXED.
     */
    if (reserved_va) {
        if (flags & MAP_FIXED_NOREPLACE) {
            /* Validate that the chosen range is empty. */
            if (!page_check_range_empty(start, start + len - 1)) {
                errno = EEXIST;
                return -1;
            }
            flags = (flags & ~MAP_FIXED_NOREPLACE) | MAP_FIXED;
        } else if (!(flags & MAP_FIXED)) {
            abi_ulong real_start = start & -host_page_size;
            off_t host_offset = offset & -host_page_size;
            size_t real_len = len + offset - host_offset;
            abi_ulong align = MAX(host_page_size, TARGET_PAGE_SIZE);

            start = mmap_find_vma(real_start, real_len, align);
            if (start == (abi_ulong)-1) {
                errno = ENOMEM;
                return -1;
            }
            start += offset - host_offset;
            flags |= MAP_FIXED;
        }
    }

    host_prot = target_to_host_prot(target_prot);

    if (host_page_size == TARGET_PAGE_SIZE) {
        return mmap_h_eq_g(start, len, host_prot, flags,
                           page_flags, fd, offset);
    } else if (host_page_size < TARGET_PAGE_SIZE) {
        return mmap_h_lt_g(start, len, host_prot, flags,
                           page_flags, fd, offset, host_page_size);
    } else {
        return mmap_h_gt_g(start, len, target_prot, host_prot, flags,
                           page_flags, fd, offset, host_page_size);
    }
}

/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
                     int flags, int fd, off_t offset)
{
    abi_long ret;
    int page_flags;

    trace_target_mmap(start, len, target_prot, flags, fd, offset);

    if (!len) {
        errno = EINVAL;
        return -1;
    }

    page_flags = validate_prot_to_pageflags(target_prot);
    if (!page_flags) {
        errno = EINVAL;
        return -1;
    }

    /* Also check for overflows... */
    len = TARGET_PAGE_ALIGN(len);
    if (!len || len != (size_t)len) {
        errno = ENOMEM;
        return -1;
    }

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        return -1;
    }
    if (flags & (MAP_FIXED | MAP_FIXED_NOREPLACE)) {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            return -1;
        }
        if (!guest_range_valid_untagged(start, len)) {
            errno = ENOMEM;
            return -1;
        }
    }

    mmap_lock();

    ret = target_mmap__locked(start, len, target_prot, flags,
                              page_flags, fd, offset);

    mmap_unlock();

    /*
     * If we're mapping shared memory, ensure we generate code for parallel
     * execution and flush old translations.  This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (ret != -1 && (flags & MAP_TYPE) != MAP_PRIVATE) {
        CPUState *cpu = thread_cpu;
        if (!(cpu->tcg_cflags & CF_PARALLEL)) {
            cpu->tcg_cflags |= CF_PARALLEL;
            tb_flush(cpu);
        }
    }

    return ret;
}

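/*
 * Release the host mapping behind [start, start+len), but keep any
 * host page that still contains live guest pages outside the range.
 * With reserved_va the area is re-reserved rather than unmapped.
 */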
static int mmap_reserve_or_unmap(abi_ulong start, abi_ulong len)
{
    int host_page_size = qemu_real_host_page_size();
    abi_ulong real_start;
    abi_ulong real_last;
    abi_ulong real_len;
    abi_ulong last;
    abi_ulong a;
    void *host_start;
    int prot;

    last = start + len - 1;
    real_start = start & -host_page_size;
    real_last = ROUND_UP(last, host_page_size) - 1;

    /*
     * If guest pages remain on the first or last host pages,
     * adjust the deallocation to retain those guest pages.
     * The single page special case is required for the last page,
     * lest real_start overflow to zero.
     */
    if (real_last - real_start < host_page_size) {
        prot = 0;
        for (a = real_start; a < start; a += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(a);
        }
        for (a = last; a < real_last; a += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(a + 1);
        }
        if (prot != 0) {
            return 0;
        }
    } else {
        for (prot = 0, a = real_start; a < start; a += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(a);
        }
        if (prot != 0) {
            real_start += host_page_size;
        }

        for (prot = 0, a = last; a < real_last; a += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(a + 1);
        }
        if (prot != 0) {
            real_last -= host_page_size;
        }

        if (real_last < real_start) {
            return 0;
        }
    }

    real_len = real_last - real_start + 1;
    host_start = g2h_untagged(real_start);

    return do_munmap(host_start, real_len);
}

int target_munmap(abi_ulong start, abi_ulong len)
{
    int ret;

    trace_target_munmap(start, len);

    if (start & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        return -1;
    }
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0 || !guest_range_valid_untagged(start, len)) {
        errno = EINVAL;
        return -1;
    }

    mmap_lock();
    ret = mmap_reserve_or_unmap(start, len);
    if (likely(ret == 0)) {
        page_set_flags(start, start + len - 1, 0);
        shm_region_rm_complete(start, start + len - 1);
    }
    mmap_unlock();

    return ret;
}

1056992f48a0Sblueswir1 abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
1057992f48a0Sblueswir1 abi_ulong new_size, unsigned long flags,
1058992f48a0Sblueswir1 abi_ulong new_addr)
105954936004Sbellard {
106054936004Sbellard int prot;
1061f19412a2Saurel32 void *host_addr;
106254936004Sbellard
106346b12f46SRichard Henderson if (!guest_range_valid_untagged(old_addr, old_size) ||
1064ebf9a363SMax Filippov ((flags & MREMAP_FIXED) &&
106546b12f46SRichard Henderson !guest_range_valid_untagged(new_addr, new_size)) ||
1066ccc5ccc1SRichard Purdie ((flags & MREMAP_MAYMOVE) == 0 &&
106746b12f46SRichard Henderson !guest_range_valid_untagged(old_addr, new_size))) {
1068ebf9a363SMax Filippov errno = ENOMEM;
1069ebf9a363SMax Filippov return -1;
1070ebf9a363SMax Filippov }
1071ebf9a363SMax Filippov
1072c8a706feSpbrook mmap_lock();
1073f19412a2Saurel32
107468a1c816SPaul Brook if (flags & MREMAP_FIXED) {
10753e8f1628SRichard Henderson host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
10763e8f1628SRichard Henderson flags, g2h_untagged(new_addr));
107768a1c816SPaul Brook
1078b76f21a7SLaurent Vivier if (reserved_va && host_addr != MAP_FAILED) {
10792b730f79SRichard Henderson /*
10802b730f79SRichard Henderson * If new and old addresses overlap then the above mremap will
10812b730f79SRichard Henderson * already have failed with EINVAL.
10822b730f79SRichard Henderson */
1083558a4411SRichard Henderson mmap_reserve_or_unmap(old_addr, old_size);
108468a1c816SPaul Brook }
108568a1c816SPaul Brook } else if (flags & MREMAP_MAYMOVE) {
1086f19412a2Saurel32 abi_ulong mmap_start;
1087f19412a2Saurel32
108830ab9ef2SRichard Henderson mmap_start = mmap_find_vma(0, new_size, TARGET_PAGE_SIZE);
1089f19412a2Saurel32
1090f19412a2Saurel32 if (mmap_start == -1) {
1091f19412a2Saurel32 errno = ENOMEM;
1092f19412a2Saurel32 host_addr = MAP_FAILED;
109368a1c816SPaul Brook } else {
10943e8f1628SRichard Henderson host_addr = mremap(g2h_untagged(old_addr), old_size, new_size,
10953e8f1628SRichard Henderson flags | MREMAP_FIXED,
10963e8f1628SRichard Henderson g2h_untagged(mmap_start));
1097b76f21a7SLaurent Vivier if (reserved_va) {
1098558a4411SRichard Henderson mmap_reserve_or_unmap(old_addr, old_size);
109968a1c816SPaul Brook }
1100c65ffe6dSamateur }
11013af72a4dSblueswir1 } else {
1102ea800033SLaurent Vivier int page_flags = 0;
1103b76f21a7SLaurent Vivier if (reserved_va && old_size < new_size) {
110468a1c816SPaul Brook abi_ulong addr;
110568a1c816SPaul Brook for (addr = old_addr + old_size;
110668a1c816SPaul Brook addr < old_addr + new_size;
110768a1c816SPaul Brook addr++) {
1108ea800033SLaurent Vivier page_flags |= page_get_flags(addr);
110968a1c816SPaul Brook }
111068a1c816SPaul Brook }
1111ea800033SLaurent Vivier if (page_flags == 0) {
11123e8f1628SRichard Henderson host_addr = mremap(g2h_untagged(old_addr),
11133e8f1628SRichard Henderson old_size, new_size, flags);
111456d19084STobias Koch
111556d19084STobias Koch if (host_addr != MAP_FAILED) {
111656d19084STobias Koch /* Check that the address fits the target address space. */
111746b12f46SRichard Henderson if (!guest_range_valid_untagged(h2g(host_addr), new_size)) {
111856d19084STobias Koch /* Revert mremap() changes */
11193e8f1628SRichard Henderson host_addr = mremap(g2h_untagged(old_addr),
11203e8f1628SRichard Henderson new_size, old_size, flags);
112168a1c816SPaul Brook errno = ENOMEM;
112268a1c816SPaul Brook host_addr = MAP_FAILED;
112356d19084STobias Koch } else if (reserved_va && old_size > new_size) {
1124558a4411SRichard Henderson mmap_reserve_or_unmap(old_addr + old_size,
1125558a4411SRichard Henderson old_size - new_size);
112668a1c816SPaul Brook }
112756d19084STobias Koch }
112856d19084STobias Koch } else {
1129f19412a2Saurel32 errno = ENOMEM;
1130f19412a2Saurel32 host_addr = MAP_FAILED;
1131f19412a2Saurel32 }
1132f19412a2Saurel32 }
1133f19412a2Saurel32
1134f19412a2Saurel32 if (host_addr == MAP_FAILED) {
1135c8a706feSpbrook new_addr = -1;
1136c8a706feSpbrook } else {
1137a5b85f79Sths new_addr = h2g(host_addr);
113854936004Sbellard prot = page_get_flags(old_addr);
113949840a4aSRichard Henderson page_set_flags(old_addr, old_addr + old_size - 1, 0);
1140044e95c8SRichard Henderson shm_region_rm_complete(old_addr, old_addr + old_size - 1);
114149840a4aSRichard Henderson page_set_flags(new_addr, new_addr + new_size - 1,
1142d9c58585SRichard Henderson prot | PAGE_VALID | PAGE_RESET);
1143044e95c8SRichard Henderson shm_region_rm_complete(new_addr, new_addr + new_size - 1);
1144c8a706feSpbrook }
1145c8a706feSpbrook mmap_unlock();
114654936004Sbellard return new_addr;
114754936004Sbellard }
1148892a4f6aSIlya Leoshkevich
1149892a4f6aSIlya Leoshkevich abi_long target_madvise(abi_ulong start, abi_ulong len_in, int advice)
1150892a4f6aSIlya Leoshkevich {
1151e230ec09SRichard Henderson abi_ulong len;
1152892a4f6aSIlya Leoshkevich int ret = 0;
1153892a4f6aSIlya Leoshkevich
1154892a4f6aSIlya Leoshkevich if (start & ~TARGET_PAGE_MASK) {
1155892a4f6aSIlya Leoshkevich return -TARGET_EINVAL;
1156892a4f6aSIlya Leoshkevich }
1157e230ec09SRichard Henderson if (len_in == 0) {
1158892a4f6aSIlya Leoshkevich return 0;
1159892a4f6aSIlya Leoshkevich }
1160e230ec09SRichard Henderson len = TARGET_PAGE_ALIGN(len_in);
1161e230ec09SRichard Henderson if (len == 0 || !guest_range_valid_untagged(start, len)) {
1162892a4f6aSIlya Leoshkevich return -TARGET_EINVAL;
1163892a4f6aSIlya Leoshkevich }
1164892a4f6aSIlya Leoshkevich
11654530deb1SHelge Deller /* Translate for some architectures which have different MADV_xxx values */
11664530deb1SHelge Deller switch (advice) {
11674530deb1SHelge Deller case TARGET_MADV_DONTNEED: /* alpha */
11684530deb1SHelge Deller advice = MADV_DONTNEED;
11694530deb1SHelge Deller break;
11704530deb1SHelge Deller case TARGET_MADV_WIPEONFORK: /* parisc */
11714530deb1SHelge Deller advice = MADV_WIPEONFORK;
11724530deb1SHelge Deller break;
11734530deb1SHelge Deller case TARGET_MADV_KEEPONFORK: /* parisc */
11744530deb1SHelge Deller advice = MADV_KEEPONFORK;
11754530deb1SHelge Deller break;
11764530deb1SHelge Deller /* we do not care about the other MADV_xxx values yet */
11774530deb1SHelge Deller }
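/*
 * For example (illustrative): Alpha defines MADV_DONTNEED as 6
 * where most architectures use 4, so the guest value cannot be
 * handed to the host syscall unchanged.
 */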
11784530deb1SHelge Deller
1179892a4f6aSIlya Leoshkevich /*
11804530deb1SHelge Deller * Most advice values are hints, so ignoring and returning success is ok.
1181892a4f6aSIlya Leoshkevich *
11824530deb1SHelge Deller * However, some advice values such as MADV_DONTNEED, MADV_WIPEONFORK and
11834530deb1SHelge Deller * MADV_KEEPONFORK are not hints and need to be emulated.
1184892a4f6aSIlya Leoshkevich *
11854530deb1SHelge Deller * A straight passthrough for those may not be safe because qemu sometimes
11864530deb1SHelge Deller * turns private file-backed mappings into anonymous mappings.
1187ecb796dbSRichard Henderson * If all guest pages have PAGE_PASSTHROUGH set, mappings have the
1188ecb796dbSRichard Henderson * same semantics for the host as for the guest.
11894530deb1SHelge Deller *
11904530deb1SHelge Deller * We pass through MADV_WIPEONFORK and MADV_KEEPONFORK if possible and
11914530deb1SHelge Deller * return failure if not.
11924530deb1SHelge Deller *
11934530deb1SHelge Deller * MADV_DONTNEED is passed through as well, if possible.
11944530deb1SHelge Deller * If passthrough isn't possible, we nevertheless (wrongly!) return
11954530deb1SHelge Deller * success: that is broken, but some userspace programs fail to work
11964530deb1SHelge Deller * otherwise. Emulating MADV_DONTNEED fully would be quite
11974530deb1SHelge Deller * complicated, though.
1198892a4f6aSIlya Leoshkevich */
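/*
 * Illustrative scenario (an assumption, not from the original source):
 * had qemu replaced a guest's private file-backed mapping with an
 * anonymous host mapping, a passed-through MADV_DONTNEED would refill
 * the range with zero pages instead of the file's contents.
 * PAGE_PASSTHROUGH marks ranges where no such substitution occurred,
 * so passthrough is safe there.
 */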
1199892a4f6aSIlya Leoshkevich mmap_lock();
12004530deb1SHelge Deller switch (advice) {
12014530deb1SHelge Deller case MADV_WIPEONFORK:
12024530deb1SHelge Deller case MADV_KEEPONFORK:
12034530deb1SHelge Deller ret = -EINVAL;
12044530deb1SHelge Deller /* fall through */
12054530deb1SHelge Deller case MADV_DONTNEED:
1206ecb796dbSRichard Henderson if (page_check_range(start, len, PAGE_PASSTHROUGH)) {
12074530deb1SHelge Deller ret = get_errno(madvise(g2h_untagged(start), len, advice));
12084530deb1SHelge Deller if ((advice == MADV_DONTNEED) && (ret == 0)) {
120910310cbdSRichard Henderson page_reset_target_data(start, start + len - 1);
1210dbbf8975SVitaly Buka }
1211892a4f6aSIlya Leoshkevich }
12124530deb1SHelge Deller }
1213892a4f6aSIlya Leoshkevich mmap_unlock();
1214892a4f6aSIlya Leoshkevich
1215892a4f6aSIlya Leoshkevich return ret;
1216892a4f6aSIlya Leoshkevich }
1217225a206cSRichard Henderson
1218225a206cSRichard Henderson #ifndef TARGET_FORCE_SHMLBA
1219225a206cSRichard Henderson /*
1220225a206cSRichard Henderson * For most architectures, SHMLBA is the same as the page size;
1221225a206cSRichard Henderson * some architectures have larger values, in which case they should
1222225a206cSRichard Henderson * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
1223225a206cSRichard Henderson * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
1224225a206cSRichard Henderson * and defining its own value for SHMLBA.
1225225a206cSRichard Henderson *
1226225a206cSRichard Henderson * The kernel also permits SHMLBA to be set by the architecture to a
1227225a206cSRichard Henderson * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
1228225a206cSRichard Henderson * this means that addresses are rounded to the large size if
1229225a206cSRichard Henderson * SHM_RND is set but addresses not aligned to that size are not rejected
1230225a206cSRichard Henderson * as long as they are at least page-aligned. Since the only architecture
1231225a206cSRichard Henderson * which uses this is ia64, this code doesn't provide for that oddity.
1232225a206cSRichard Henderson */
1233225a206cSRichard Henderson static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
1234225a206cSRichard Henderson {
1235225a206cSRichard Henderson return TARGET_PAGE_SIZE;
1236225a206cSRichard Henderson }
1237225a206cSRichard Henderson #endif
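/*
 * A hypothetical sketch (not part of this file) of what a
 * TARGET_FORCE_SHMLBA architecture would provide instead, e.g. a
 * MIPS-like target whose SHMLBA is 256K regardless of page size:
 *
 *   #define TARGET_FORCE_SHMLBA 1
 *   static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 *   {
 *       return 0x40000;
 *   }
 */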
1238225a206cSRichard Henderson
123978bc8ed9SRichard Henderson #if defined(__arm__) || defined(__mips__) || defined(__sparc__)
124078bc8ed9SRichard Henderson #define HOST_FORCE_SHMLBA 1
124178bc8ed9SRichard Henderson #else
124278bc8ed9SRichard Henderson #define HOST_FORCE_SHMLBA 0
124378bc8ed9SRichard Henderson #endif
124478bc8ed9SRichard Henderson
1245225a206cSRichard Henderson abi_ulong target_shmat(CPUArchState *cpu_env, int shmid,
1246225a206cSRichard Henderson abi_ulong shmaddr, int shmflg)
1247225a206cSRichard Henderson {
1248225a206cSRichard Henderson CPUState *cpu = env_cpu(cpu_env);
1249225a206cSRichard Henderson struct shmid_ds shm_info;
125069fa2708SRichard Henderson int ret;
125178bc8ed9SRichard Henderson int h_pagesize;
125278bc8ed9SRichard Henderson int t_shmlba, h_shmlba, m_shmlba;
125378bc8ed9SRichard Henderson size_t t_len, h_len, m_len;
1254225a206cSRichard Henderson
1255225a206cSRichard Henderson /* shmat pointers are always untagged */
1256225a206cSRichard Henderson
125778bc8ed9SRichard Henderson /*
125878bc8ed9SRichard Henderson * Because we can't use host shmat() unless the address is sufficiently
125978bc8ed9SRichard Henderson * aligned for the host, we'll need to check both.
126078bc8ed9SRichard Henderson * TODO: Could be fixed with softmmu.
126178bc8ed9SRichard Henderson */
126278bc8ed9SRichard Henderson t_shmlba = target_shmlba(cpu_env);
126378bc8ed9SRichard Henderson h_pagesize = qemu_real_host_page_size();
126478bc8ed9SRichard Henderson h_shmlba = (HOST_FORCE_SHMLBA ? SHMLBA : h_pagesize);
126578bc8ed9SRichard Henderson m_shmlba = MAX(t_shmlba, h_shmlba);
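/*
 * Example (illustrative): for a 4K-page guest on an arm host,
 * HOST_FORCE_SHMLBA is set and the host SHMLBA is typically 16K,
 * so m_shmlba = 16K and shmaddr must be 16K-aligned unless
 * SHM_RND was given.
 */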
126678bc8ed9SRichard Henderson
126778bc8ed9SRichard Henderson if (shmaddr) {
126878bc8ed9SRichard Henderson if (shmaddr & (m_shmlba - 1)) {
126978bc8ed9SRichard Henderson if (shmflg & SHM_RND) {
127078bc8ed9SRichard Henderson /*
127178bc8ed9SRichard Henderson * The guest is allowing the kernel to round the address.
127278bc8ed9SRichard Henderson * Assume that the guest is also ok with us rounding to the
127378bc8ed9SRichard Henderson * host's required alignment; if it is not, the kernel will
127478bc8ed9SRichard Henderson * report an error anyway.
127578bc8ed9SRichard Henderson */
127678bc8ed9SRichard Henderson shmaddr &= ~(m_shmlba - 1);
127778bc8ed9SRichard Henderson if (shmaddr == 0 && (shmflg & SHM_REMAP)) {
127878bc8ed9SRichard Henderson return -TARGET_EINVAL;
127978bc8ed9SRichard Henderson }
128078bc8ed9SRichard Henderson } else {
128178bc8ed9SRichard Henderson int require = TARGET_PAGE_SIZE;
128278bc8ed9SRichard Henderson #ifdef TARGET_FORCE_SHMLBA
128378bc8ed9SRichard Henderson require = t_shmlba;
128478bc8ed9SRichard Henderson #endif
128578bc8ed9SRichard Henderson /*
128678bc8ed9SRichard Henderson * Include the host's required alignment as well, since
128778bc8ed9SRichard Henderson * otherwise we cannot use host shmat at all.
128878bc8ed9SRichard Henderson */
128978bc8ed9SRichard Henderson require = MAX(require, h_shmlba);
129078bc8ed9SRichard Henderson if (shmaddr & (require - 1)) {
129178bc8ed9SRichard Henderson return -TARGET_EINVAL;
129278bc8ed9SRichard Henderson }
129378bc8ed9SRichard Henderson }
129478bc8ed9SRichard Henderson }
129578bc8ed9SRichard Henderson } else {
129678bc8ed9SRichard Henderson if (shmflg & SHM_REMAP) {
129778bc8ed9SRichard Henderson return -TARGET_EINVAL;
129878bc8ed9SRichard Henderson }
129978bc8ed9SRichard Henderson }
130078bc8ed9SRichard Henderson /* All rounding has now been done explicitly. */
130178bc8ed9SRichard Henderson shmflg &= ~SHM_RND;
130278bc8ed9SRichard Henderson
130378bc8ed9SRichard Henderson /* Find out the length of the shared memory segment. */
1304225a206cSRichard Henderson ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
1305225a206cSRichard Henderson if (is_error(ret)) {
1306225a206cSRichard Henderson /* can't get length, bail out */
1307225a206cSRichard Henderson return ret;
1308225a206cSRichard Henderson }
130978bc8ed9SRichard Henderson t_len = TARGET_PAGE_ALIGN(shm_info.shm_segsz);
131078bc8ed9SRichard Henderson h_len = ROUND_UP(shm_info.shm_segsz, h_pagesize);
131178bc8ed9SRichard Henderson m_len = MAX(t_len, h_len);
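/*
 * Example (illustrative): a 5K segment with 4K guest pages on a
 * 64K-page host gives t_len = 8K, h_len = 64K, m_len = 64K; the
 * host will map a full 64K even though the guest expects 8K.
 */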
1312225a206cSRichard Henderson
131378bc8ed9SRichard Henderson if (!guest_range_valid_untagged(shmaddr, m_len)) {
1314225a206cSRichard Henderson return -TARGET_EINVAL;
1315225a206cSRichard Henderson }
1316225a206cSRichard Henderson
131769fa2708SRichard Henderson WITH_MMAP_LOCK_GUARD() {
131878bc8ed9SRichard Henderson bool mapped = false;
131978bc8ed9SRichard Henderson void *want, *test;
1320044e95c8SRichard Henderson abi_ulong last;
132169fa2708SRichard Henderson
132278bc8ed9SRichard Henderson if (!shmaddr) {
132378bc8ed9SRichard Henderson shmaddr = mmap_find_vma(0, m_len, m_shmlba);
132478bc8ed9SRichard Henderson if (shmaddr == -1) {
132569fa2708SRichard Henderson return -TARGET_ENOMEM;
132669fa2708SRichard Henderson }
132778bc8ed9SRichard Henderson mapped = !reserved_va;
132878bc8ed9SRichard Henderson } else if (shmflg & SHM_REMAP) {
132978bc8ed9SRichard Henderson /*
133078bc8ed9SRichard Henderson * If host page size > target page size, the host shmat may map
133178bc8ed9SRichard Henderson * more memory than the guest expects. Reject a mapping that
133278bc8ed9SRichard Henderson * would replace memory in the unexpected gap.
133378bc8ed9SRichard Henderson * TODO: Could be fixed with softmmu.
133478bc8ed9SRichard Henderson */
133578bc8ed9SRichard Henderson if (t_len < h_len &&
133678bc8ed9SRichard Henderson !page_check_range_empty(shmaddr + t_len,
133778bc8ed9SRichard Henderson shmaddr + h_len - 1)) {
133878bc8ed9SRichard Henderson return -TARGET_EINVAL;
133978bc8ed9SRichard Henderson }
134078bc8ed9SRichard Henderson } else {
134178bc8ed9SRichard Henderson if (!page_check_range_empty(shmaddr, shmaddr + m_len - 1)) {
134278bc8ed9SRichard Henderson return -TARGET_EINVAL;
134378bc8ed9SRichard Henderson }
134469fa2708SRichard Henderson }
134569fa2708SRichard Henderson
134678bc8ed9SRichard Henderson /* All placement is now complete. */
134778bc8ed9SRichard Henderson want = (void *)g2h_untagged(shmaddr);
134869fa2708SRichard Henderson
134978bc8ed9SRichard Henderson /*
135078bc8ed9SRichard Henderson * Map anonymous pages across the entire range, then remap with
135178bc8ed9SRichard Henderson * the shared memory. This is required for a number of corner
135278bc8ed9SRichard Henderson * cases for which host and guest page sizes differ.
135378bc8ed9SRichard Henderson */
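/*
 * For instance (illustrative): with t_len = 8K and h_len = 64K,
 * the anonymous mapping reserves the 56K tail the guest did not
 * ask for, so the host shmat below cannot clobber unrelated
 * guest mappings in that gap.
 */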
135478bc8ed9SRichard Henderson if (h_len != t_len) {
135578bc8ed9SRichard Henderson int mmap_p = PROT_READ | (shmflg & SHM_RDONLY ? 0 : PROT_WRITE);
135678bc8ed9SRichard Henderson int mmap_f = MAP_PRIVATE | MAP_ANONYMOUS
1357*fa527b44SIlya Leoshkevich | (reserved_va || mapped || (shmflg & SHM_REMAP)
135878bc8ed9SRichard Henderson ? MAP_FIXED : MAP_FIXED_NOREPLACE);
135978bc8ed9SRichard Henderson
136078bc8ed9SRichard Henderson test = mmap(want, m_len, mmap_p, mmap_f, -1, 0);
136178bc8ed9SRichard Henderson if (unlikely(test != want)) {
136278bc8ed9SRichard Henderson /* shmat returns EINVAL, not EEXIST as mmap does. */
136378bc8ed9SRichard Henderson ret = (test == MAP_FAILED && errno != EEXIST
136478bc8ed9SRichard Henderson ? get_errno(-1) : -TARGET_EINVAL);
136578bc8ed9SRichard Henderson if (mapped) {
136678bc8ed9SRichard Henderson do_munmap(want, m_len);
136778bc8ed9SRichard Henderson }
136878bc8ed9SRichard Henderson return ret;
136978bc8ed9SRichard Henderson }
137078bc8ed9SRichard Henderson mapped = true;
137178bc8ed9SRichard Henderson }
137278bc8ed9SRichard Henderson
137378bc8ed9SRichard Henderson if (reserved_va || mapped) {
137478bc8ed9SRichard Henderson shmflg |= SHM_REMAP;
137578bc8ed9SRichard Henderson }
137678bc8ed9SRichard Henderson test = shmat(shmid, want, shmflg);
137778bc8ed9SRichard Henderson if (test == MAP_FAILED) {
137878bc8ed9SRichard Henderson ret = get_errno(-1);
137978bc8ed9SRichard Henderson if (mapped) {
138078bc8ed9SRichard Henderson do_munmap(want, m_len);
138178bc8ed9SRichard Henderson }
138278bc8ed9SRichard Henderson return ret;
138378bc8ed9SRichard Henderson }
138478bc8ed9SRichard Henderson assert(test == want);
138578bc8ed9SRichard Henderson
138678bc8ed9SRichard Henderson last = shmaddr + m_len - 1;
138778bc8ed9SRichard Henderson page_set_flags(shmaddr, last,
138869fa2708SRichard Henderson PAGE_VALID | PAGE_RESET | PAGE_READ |
138978bc8ed9SRichard Henderson (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE) |
139078bc8ed9SRichard Henderson (shmflg & SHM_EXEC ? PAGE_EXEC : 0));
139169fa2708SRichard Henderson
139278bc8ed9SRichard Henderson shm_region_rm_complete(shmaddr, last);
139378bc8ed9SRichard Henderson shm_region_add(shmaddr, last);
139469fa2708SRichard Henderson }
1395225a206cSRichard Henderson
1396225a206cSRichard Henderson /*
1397225a206cSRichard Henderson * We're mapping shared memory, so ensure we generate code for parallel
1398225a206cSRichard Henderson * execution and flush old translations. This will work up to the level
1399225a206cSRichard Henderson * supported by the host -- anything that requires EXCP_ATOMIC will not
1400225a206cSRichard Henderson * be atomic with respect to an external process.
1401225a206cSRichard Henderson */
1402225a206cSRichard Henderson if (!(cpu->tcg_cflags & CF_PARALLEL)) {
1403225a206cSRichard Henderson cpu->tcg_cflags |= CF_PARALLEL;
1404225a206cSRichard Henderson tb_flush(cpu);
1405225a206cSRichard Henderson }
1406225a206cSRichard Henderson
140778bc8ed9SRichard Henderson if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
140878bc8ed9SRichard Henderson FILE *f = qemu_log_trylock();
140978bc8ed9SRichard Henderson if (f) {
141078bc8ed9SRichard Henderson fprintf(f, "page layout changed following shmat\n");
141178bc8ed9SRichard Henderson page_dump(f);
141278bc8ed9SRichard Henderson qemu_log_unlock(f);
141378bc8ed9SRichard Henderson }
141478bc8ed9SRichard Henderson }
141578bc8ed9SRichard Henderson return shmaddr;
1416225a206cSRichard Henderson }
1417225a206cSRichard Henderson
1418225a206cSRichard Henderson abi_long target_shmdt(abi_ulong shmaddr)
1419225a206cSRichard Henderson {
1420225a206cSRichard Henderson abi_long rv;
1421225a206cSRichard Henderson
1422225a206cSRichard Henderson /* shmdt pointers are always untagged */
1423225a206cSRichard Henderson
142469fa2708SRichard Henderson WITH_MMAP_LOCK_GUARD() {
1425044e95c8SRichard Henderson abi_ulong last = shm_region_find(shmaddr);
1426044e95c8SRichard Henderson if (last == 0) {
1427ceda5688SRichard Henderson return -TARGET_EINVAL;
1428ceda5688SRichard Henderson }
1429ceda5688SRichard Henderson
1430225a206cSRichard Henderson rv = get_errno(shmdt(g2h_untagged(shmaddr)));
1431ceda5688SRichard Henderson if (rv == 0) {
1432044e95c8SRichard Henderson abi_ulong size = last - shmaddr + 1;
1433ceda5688SRichard Henderson
1434044e95c8SRichard Henderson page_set_flags(shmaddr, last, 0);
1435044e95c8SRichard Henderson shm_region_rm_complete(shmaddr, last);
1436ceda5688SRichard Henderson mmap_reserve_or_unmap(shmaddr, size);
1437ceda5688SRichard Henderson }
143869fa2708SRichard Henderson }
1439225a206cSRichard Henderson return rv;
1440225a206cSRichard Henderson }
1441