xref: /qemu/linux-user/mmap.c (revision 56d19084)
154936004Sbellard /*
254936004Sbellard  *  mmap support for qemu
354936004Sbellard  *
454936004Sbellard  *  Copyright (c) 2003 Fabrice Bellard
554936004Sbellard  *
654936004Sbellard  *  This program is free software; you can redistribute it and/or modify
754936004Sbellard  *  it under the terms of the GNU General Public License as published by
854936004Sbellard  *  the Free Software Foundation; either version 2 of the License, or
954936004Sbellard  *  (at your option) any later version.
1054936004Sbellard  *
1154936004Sbellard  *  This program is distributed in the hope that it will be useful,
1254936004Sbellard  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
1354936004Sbellard  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
1454936004Sbellard  *  GNU General Public License for more details.
1554936004Sbellard  *
1654936004Sbellard  *  You should have received a copy of the GNU General Public License
178167ee88SBlue Swirl  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
1854936004Sbellard  */
19d39594e9SPeter Maydell #include "qemu/osdep.h"
2011d96056SAlex Bennée #include "trace.h"
2110d0d505SAlex Bennée #include "exec/log.h"
2254936004Sbellard #include "qemu.h"
2354936004Sbellard 
241e6eec8bSBlue Swirl static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
25dfd3f85cSJuan Quintela static __thread int mmap_lock_count;
26c8a706feSpbrook 
27c8a706feSpbrook void mmap_lock(void)
28c8a706feSpbrook {
29c8a706feSpbrook     if (mmap_lock_count++ == 0) {
30c8a706feSpbrook         pthread_mutex_lock(&mmap_mutex);
31c8a706feSpbrook     }
32c8a706feSpbrook }
33c8a706feSpbrook 
34c8a706feSpbrook void mmap_unlock(void)
35c8a706feSpbrook {
36c8a706feSpbrook     if (--mmap_lock_count == 0) {
37c8a706feSpbrook         pthread_mutex_unlock(&mmap_mutex);
38c8a706feSpbrook     }
39c8a706feSpbrook }
40d5975363Spbrook 
41301e40edSAlex Bennée bool have_mmap_lock(void)
42301e40edSAlex Bennée {
43301e40edSAlex Bennée     return mmap_lock_count > 0 ? true : false;
44301e40edSAlex Bennée }
45301e40edSAlex Bennée 
46d5975363Spbrook /* Grab lock to make sure things are in a consistent state after fork().  */
47d5975363Spbrook void mmap_fork_start(void)
48d5975363Spbrook {
49d5975363Spbrook     if (mmap_lock_count)
50d5975363Spbrook         abort();
51d5975363Spbrook     pthread_mutex_lock(&mmap_mutex);
52d5975363Spbrook }
53d5975363Spbrook 
54d5975363Spbrook void mmap_fork_end(int child)
55d5975363Spbrook {
56d5975363Spbrook     if (child)
57d5975363Spbrook         pthread_mutex_init(&mmap_mutex, NULL);
58d5975363Spbrook     else
59d5975363Spbrook         pthread_mutex_unlock(&mmap_mutex);
60d5975363Spbrook }
61c8a706feSpbrook 
/*
 * Validate target prot bitmask.
 * Return the prot bitmask for the host in *HOST_PROT.
 * Return 0 if the target prot bitmask is invalid, otherwise
 * the internal qemu page_flags (which will include PAGE_VALID).
 */
static int validate_prot_to_pageflags(int *host_prot, int prot)
{
    /* Bits a guest may legitimately pass; anything else makes prot invalid. */
    int valid = PROT_READ | PROT_WRITE | PROT_EXEC | TARGET_PROT_SEM;
    int page_flags = (prot & PAGE_BITS) | PAGE_VALID;

    /*
     * For the host, we need not pass anything except read/write/exec.
     * While PROT_SEM is allowed by all hosts, it is also ignored, so
     * don't bother transforming guest bit to host bit.  Any other
     * target-specific prot bits will not be understood by the host
     * and will need to be encoded into page_flags for qemu emulation.
     *
     * Pages that are executable by the guest will never be executed
     * by the host, but the host will need to be able to read them.
     */
    *host_prot = (prot & (PROT_READ | PROT_WRITE))
               | (prot & PROT_EXEC ? PROT_READ : 0);

#ifdef TARGET_AARCH64
    /*
     * The PROT_BTI bit is only accepted if the cpu supports the feature.
     * Since this is the unusual case, don't bother checking unless
     * the bit has been requested.  If set and valid, record the bit
     * within QEMU's page_flags.
     */
    if (prot & TARGET_PROT_BTI) {
        ARMCPU *cpu = ARM_CPU(thread_cpu);
        if (cpu_isar_feature(aa64_bti, cpu)) {
            valid |= TARGET_PROT_BTI;
            page_flags |= PAGE_BTI;
        }
    }
#endif

    /* Any bit outside the accepted set invalidates the whole request. */
    return prot & ~valid ? 0 : page_flags;
}
1049dba3ca5SRichard Henderson 
/* NOTE: all the constants are the HOST ones, but addresses are target. */
/*
 * Implement guest mprotect().  Because the target page size may be
 * smaller than the host page size, a host page at either edge of the
 * range can contain guest pages that keep their old protection; those
 * edge host pages get the OR of all guest protections within them.
 * Returns 0 or a negative target errno.
 */
int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret, page_flags, host_prot;

    trace_target_mprotect(start, len, target_prot);

    /* start must be target-page aligned, as the kernel requires. */
    if ((start & ~TARGET_PAGE_MASK) != 0) {
        return -TARGET_EINVAL;
    }
    page_flags = validate_prot_to_pageflags(&host_prot, target_prot);
    if (!page_flags) {
        return -TARGET_EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (!guest_range_valid(start, len)) {
        return -TARGET_ENOMEM;
    }
    /* Zero-length after alignment: nothing to do. */
    if (len == 0) {
        return 0;
    }

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = host_prot;
        /* Merge protections of guest pages before start in the same host page. */
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            /* Range fits in one host page: also merge guest pages after end. */
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            /* The single host page is fully handled below; skip the tail case. */
            end = host_end;
        }
        ret = mprotect(g2h(host_start), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        /* Handle the host page containing the unaligned end of the range. */
        prot1 = host_prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h(host_end - qemu_host_page_size),
                       qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h(host_start), host_end - host_start, host_prot);
        if (ret != 0) {
            goto error;
        }
    }
    /* Record the new guest-visible protection in QEMU's page tables. */
    page_set_flags(start, start + len, page_flags);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}
17854936004Sbellard 
/* map an incomplete host page */
/*
 * Map the guest range [start, end) which lies inside the single host
 * page beginning at real_start.  Because other guest pages in that
 * host page may already be mapped, we cannot simply mmap() the file:
 * file-backed data is pread() into place instead, and the host page's
 * protection becomes the union of all guest protections within it.
 * Returns 0 on success, -1 on failure (errno set by the failing call).
 */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for(addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    /* Final protection: union of the new mapping and existing neighbors. */
    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -1;

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        if (pread(fd, g2h(start), end - start, offset) == -1)
            return -1;

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
        /* Anonymous memory must read as zero, as a fresh mapping would. */
        if (prot_new & PROT_WRITE) {
            memset(g2h(start), 0, end - start);
        }
    }
    return 0;
}
23754936004Sbellard 
/*
 * Default base address for mmap() allocations when the guest does not
 * request a specific address.  Chosen per host/target width; the
 * AArch64 value mirrors the Linux kernel's layout for that target.
 */
#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
#ifdef TARGET_AARCH64
# define TASK_UNMAPPED_BASE  0x5500000000
#else
# define TASK_UNMAPPED_BASE  (1ul << 38)
#endif
#else
# define TASK_UNMAPPED_BASE  0x40000000
#endif
/* Next candidate address for mmap_find_vma(); advances as maps succeed. */
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

/* Most recent guest brk value, tracked for the emulated brk() syscall. */
unsigned long last_brk;
2500776590dSpbrook 
/* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk
   of guest address space.  */
/*
 * Search QEMU's page tables (not the host mmap space) for a free,
 * suitably aligned region of SIZE bytes within the reserved_va area.
 * The search walks downward from START + SIZE, wrapping once to the
 * top of the reserved area before giving up.
 * Returns the guest address, or (abi_ulong)-1 on failure.
 */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong align)
{
    abi_ulong addr, end_addr, incr = qemu_host_page_size;
    int prot;
    bool looped = false;

    /* Request larger than the whole reserved area can never succeed. */
    if (size > reserved_va) {
        return (abi_ulong)-1;
    }

    /* Note that start and size have already been aligned by mmap_find_vma. */

    end_addr = start + size;
    if (start > reserved_va - size) {
        /* Start at the top of the address space.  */
        end_addr = ((reserved_va - size) & -align) + size;
        looped = true;
    }

    /* Search downward from END_ADDR, checking to see if a page is in use.  */
    addr = end_addr;
    while (1) {
        addr -= incr;
        /* Unsigned underflow past 0 makes addr > end_addr: wrapped below 0. */
        if (addr > end_addr) {
            if (looped) {
                /* Failure.  The entire address space has been searched.  */
                return (abi_ulong)-1;
            }
            /* Re-start at the top of the address space.  */
            addr = end_addr = ((reserved_va - size) & -align) + size;
            looped = true;
        } else {
            prot = page_get_flags(addr);
            if (prot) {
                /* Page in use.  Restart below this page.  */
                addr = end_addr = ((addr - size) & -align) + size;
            } else if (addr && addr + size == end_addr) {
                /* Success!  All pages between ADDR and END_ADDR are free.  */
                if (start == mmap_next_start) {
                    /* Remember where we got to for the next default search. */
                    mmap_next_start = addr;
                }
                return addr;
            }
        }
    }
}
30068a1c816SPaul Brook 
/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 */
/*
 * The returned region is reserved with a PROT_NONE host mapping to
 * avoid races; the caller is expected to replace it (MAP_FIXED mmap,
 * MREMAP_FIXED mremap, or SHM_REMAP shmat).  'align' is rounded up to
 * at least the host page size.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
{
    void *ptr, *prev;
    abi_ulong addr;
    /* repeat counts consecutive identical kernel answers; wrapped marks
       that the search has already restarted from low memory once. */
    int wrapped, repeat;

    align = MAX(align, qemu_host_page_size);

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }
    start = ROUND_UP(start, align);

    size = HOST_PAGE_ALIGN(size);

    /* With a pre-reserved guest area, search QEMU's own page tables. */
    if (reserved_va) {
        return mmap_find_vma_reserved(start, size, align);
    }

    addr = start;
    wrapped = repeat = 0;
    prev = 0;

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h(addr), size, PROT_NONE,
                   MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /* Count the number of sequential returns of the same address.
           This is used to modify the search algorithm below.  */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & (align - 1)) == 0) {
                /* Success.  */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    /* Advance the default start so future searches skip this. */
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target.  */
            switch (repeat) {
            case 0:
                /* Assume the result that the kernel gave us is the
                   first with enough free space, so start again at the
                   next higher target page.  */
                addr = ROUND_UP(addr, align);
                break;
            case 1:
                /* Sometimes the kernel decides to perform the allocation
                   at the top end of memory instead.  */
                addr &= -align;
                break;
            case 2:
                /* Start over at low memory.  */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must the last.  */
                addr = -1;
                break;
            }
        } else {
            /* Since the result the kernel gave didn't fit, start
               again at low memory.  If any repetition, fail.  */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again.  */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space.  */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /* Don't actually use 0 when wrapping, instead indicate
               that we'd truly like an allocation in low memory.  */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                     ? TARGET_PAGE_ALIGN(mmap_min_addr)
                     : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}
413a03e2d42Sbellard 
41454936004Sbellard /* NOTE: all the constants are the HOST ones */
4159dba3ca5SRichard Henderson abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
416992f48a0Sblueswir1                      int flags, int fd, abi_ulong offset)
41754936004Sbellard {
418992f48a0Sblueswir1     abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
4199dba3ca5SRichard Henderson     int page_flags, host_prot;
42054936004Sbellard 
421c8a706feSpbrook     mmap_lock();
4229dba3ca5SRichard Henderson     trace_target_mmap(start, len, target_prot, flags, fd, offset);
42354936004Sbellard 
42438138fabSAlex Bennée     if (!len) {
42538138fabSAlex Bennée         errno = EINVAL;
42638138fabSAlex Bennée         goto fail;
42738138fabSAlex Bennée     }
42838138fabSAlex Bennée 
4299dba3ca5SRichard Henderson     page_flags = validate_prot_to_pageflags(&host_prot, target_prot);
4309dba3ca5SRichard Henderson     if (!page_flags) {
4319dba3ca5SRichard Henderson         errno = EINVAL;
4329dba3ca5SRichard Henderson         goto fail;
4339dba3ca5SRichard Henderson     }
4349dba3ca5SRichard Henderson 
43538138fabSAlex Bennée     /* Also check for overflows... */
43638138fabSAlex Bennée     len = TARGET_PAGE_ALIGN(len);
43738138fabSAlex Bennée     if (!len) {
43838138fabSAlex Bennée         errno = ENOMEM;
43938138fabSAlex Bennée         goto fail;
44038138fabSAlex Bennée     }
44138138fabSAlex Bennée 
442e89f07d3Spbrook     if (offset & ~TARGET_PAGE_MASK) {
443e89f07d3Spbrook         errno = EINVAL;
444c8a706feSpbrook         goto fail;
445e89f07d3Spbrook     }
44654936004Sbellard 
44753a5960aSpbrook     real_start = start & qemu_host_page_mask;
448a5e7ee46SRichard Henderson     host_offset = offset & qemu_host_page_mask;
449a5e7ee46SRichard Henderson 
450a5e7ee46SRichard Henderson     /* If the user is asking for the kernel to find a location, do that
451a5e7ee46SRichard Henderson        before we truncate the length for mapping files below.  */
452a5e7ee46SRichard Henderson     if (!(flags & MAP_FIXED)) {
453a5e7ee46SRichard Henderson         host_len = len + offset - host_offset;
454a5e7ee46SRichard Henderson         host_len = HOST_PAGE_ALIGN(host_len);
45530ab9ef2SRichard Henderson         start = mmap_find_vma(real_start, host_len, TARGET_PAGE_SIZE);
456a5e7ee46SRichard Henderson         if (start == (abi_ulong)-1) {
457a5e7ee46SRichard Henderson             errno = ENOMEM;
458a5e7ee46SRichard Henderson             goto fail;
459a5e7ee46SRichard Henderson         }
460a5e7ee46SRichard Henderson     }
46154936004Sbellard 
46254c5a2aeSedgar_igl     /* When mapping files into a memory area larger than the file, accesses
46354c5a2aeSedgar_igl        to pages beyond the file size will cause a SIGBUS.
46454c5a2aeSedgar_igl 
46554c5a2aeSedgar_igl        For example, if mmaping a file of 100 bytes on a host with 4K pages
46654c5a2aeSedgar_igl        emulating a target with 8K pages, the target expects to be able to
46754c5a2aeSedgar_igl        access the first 8K. But the host will trap us on any access beyond
46854c5a2aeSedgar_igl        4K.
46954c5a2aeSedgar_igl 
47054c5a2aeSedgar_igl        When emulating a target with a larger page-size than the hosts, we
47154c5a2aeSedgar_igl        may need to truncate file maps at EOF and add extra anonymous pages
47254c5a2aeSedgar_igl        up to the targets page boundary.  */
47354c5a2aeSedgar_igl 
47435f2fd04SMarc-André Lureau     if ((qemu_real_host_page_size < qemu_host_page_size) &&
47535f2fd04SMarc-André Lureau         !(flags & MAP_ANONYMOUS)) {
47654c5a2aeSedgar_igl         struct stat sb;
47754c5a2aeSedgar_igl 
47854c5a2aeSedgar_igl        if (fstat (fd, &sb) == -1)
47954c5a2aeSedgar_igl            goto fail;
48054c5a2aeSedgar_igl 
48154c5a2aeSedgar_igl        /* Are we trying to create a map beyond EOF?.  */
48254c5a2aeSedgar_igl        if (offset + len > sb.st_size) {
48354c5a2aeSedgar_igl            /* If so, truncate the file map at eof aligned with
48454c5a2aeSedgar_igl               the hosts real pagesize. Additional anonymous maps
48554c5a2aeSedgar_igl               will be created beyond EOF.  */
4860c2d70c4SPaolo Bonzini            len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
48754c5a2aeSedgar_igl        }
48854c5a2aeSedgar_igl     }
48954c5a2aeSedgar_igl 
49054936004Sbellard     if (!(flags & MAP_FIXED)) {
491a5e7ee46SRichard Henderson         unsigned long host_start;
492a03e2d42Sbellard         void *p;
493a5e7ee46SRichard Henderson 
49454936004Sbellard         host_len = len + offset - host_offset;
495a03e2d42Sbellard         host_len = HOST_PAGE_ALIGN(host_len);
496a5e7ee46SRichard Henderson 
497a03e2d42Sbellard         /* Note: we prefer to control the mapping address. It is
498a03e2d42Sbellard            especially important if qemu_host_page_size >
499a03e2d42Sbellard            qemu_real_host_page_size */
5009dba3ca5SRichard Henderson         p = mmap(g2h(start), host_len, host_prot,
501a5e7ee46SRichard Henderson                  flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
5029dba3ca5SRichard Henderson         if (p == MAP_FAILED) {
503c8a706feSpbrook             goto fail;
5049dba3ca5SRichard Henderson         }
50554936004Sbellard         /* update start so that it points to the file position at 'offset' */
50680210bcdSths         host_start = (unsigned long)p;
50754c5a2aeSedgar_igl         if (!(flags & MAP_ANONYMOUS)) {
5089dba3ca5SRichard Henderson             p = mmap(g2h(start), len, host_prot,
50954c5a2aeSedgar_igl                      flags | MAP_FIXED, fd, host_offset);
5108384274eSJürg Billeter             if (p == MAP_FAILED) {
5118384274eSJürg Billeter                 munmap(g2h(start), host_len);
5128384274eSJürg Billeter                 goto fail;
5138384274eSJürg Billeter             }
51453a5960aSpbrook             host_start += offset - host_offset;
51554c5a2aeSedgar_igl         }
51653a5960aSpbrook         start = h2g(host_start);
517a03e2d42Sbellard     } else {
518e89f07d3Spbrook         if (start & ~TARGET_PAGE_MASK) {
519e89f07d3Spbrook             errno = EINVAL;
520c8a706feSpbrook             goto fail;
521e89f07d3Spbrook         }
52254936004Sbellard         end = start + len;
52353a5960aSpbrook         real_end = HOST_PAGE_ALIGN(end);
52454936004Sbellard 
52545bc1f52Saurel32         /*
52645bc1f52Saurel32          * Test if requested memory area fits target address space
52745bc1f52Saurel32          * It can fail only on 64-bit host with 32-bit target.
52845bc1f52Saurel32          * On any other target/host host mmap() handles this error correctly.
52945bc1f52Saurel32          */
5308ef61885SAlex Bennée         if (end < start || !guest_range_valid(start, len)) {
531ebf9a363SMax Filippov             errno = ENOMEM;
53245bc1f52Saurel32             goto fail;
53345bc1f52Saurel32         }
53445bc1f52Saurel32 
53554936004Sbellard         /* worst case: we cannot map the file because the offset is not
53654936004Sbellard            aligned, so we read it */
53754936004Sbellard         if (!(flags & MAP_ANONYMOUS) &&
53883fb7adfSbellard             (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
53954936004Sbellard             /* msync() won't work here, so we return an error if write is
54054936004Sbellard                possible while it is a shared mapping */
54154936004Sbellard             if ((flags & MAP_TYPE) == MAP_SHARED &&
5429dba3ca5SRichard Henderson                 (host_prot & PROT_WRITE)) {
543e89f07d3Spbrook                 errno = EINVAL;
544c8a706feSpbrook                 goto fail;
545e89f07d3Spbrook             }
5469dba3ca5SRichard Henderson             retaddr = target_mmap(start, len, target_prot | PROT_WRITE,
54754936004Sbellard                                   MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
54854936004Sbellard                                   -1, 0);
54954936004Sbellard             if (retaddr == -1)
550c8a706feSpbrook                 goto fail;
551fb7e378cSKirill A. Shutemov             if (pread(fd, g2h(start), len, offset) == -1)
552fb7e378cSKirill A. Shutemov                 goto fail;
5539dba3ca5SRichard Henderson             if (!(host_prot & PROT_WRITE)) {
5549dba3ca5SRichard Henderson                 ret = target_mprotect(start, len, target_prot);
55586abac06SPaolo Bonzini                 assert(ret == 0);
55654936004Sbellard             }
55754936004Sbellard             goto the_end;
55854936004Sbellard         }
55954936004Sbellard 
56054936004Sbellard         /* handle the start of the mapping */
56153a5960aSpbrook         if (start > real_start) {
56253a5960aSpbrook             if (real_end == real_start + qemu_host_page_size) {
56354936004Sbellard                 /* one single host page */
56453a5960aSpbrook                 ret = mmap_frag(real_start, start, end,
5659dba3ca5SRichard Henderson                                 host_prot, flags, fd, offset);
56654936004Sbellard                 if (ret == -1)
567c8a706feSpbrook                     goto fail;
56854936004Sbellard                 goto the_end1;
56954936004Sbellard             }
57053a5960aSpbrook             ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
5719dba3ca5SRichard Henderson                             host_prot, flags, fd, offset);
57254936004Sbellard             if (ret == -1)
573c8a706feSpbrook                 goto fail;
57453a5960aSpbrook             real_start += qemu_host_page_size;
57554936004Sbellard         }
57654936004Sbellard         /* handle the end of the mapping */
57753a5960aSpbrook         if (end < real_end) {
57853a5960aSpbrook             ret = mmap_frag(real_end - qemu_host_page_size,
579530c0032SChen Gang                             real_end - qemu_host_page_size, end,
5809dba3ca5SRichard Henderson                             host_prot, flags, fd,
58153a5960aSpbrook                             offset + real_end - qemu_host_page_size - start);
58254936004Sbellard             if (ret == -1)
583c8a706feSpbrook                 goto fail;
58453a5960aSpbrook             real_end -= qemu_host_page_size;
58554936004Sbellard         }
58654936004Sbellard 
58754936004Sbellard         /* map the middle (easier) */
58853a5960aSpbrook         if (real_start < real_end) {
58980210bcdSths             void *p;
5904a585ccbSbellard             unsigned long offset1;
5914a585ccbSbellard             if (flags & MAP_ANONYMOUS)
5924a585ccbSbellard                 offset1 = 0;
5934a585ccbSbellard             else
59453a5960aSpbrook                 offset1 = offset + real_start - start;
59580210bcdSths             p = mmap(g2h(real_start), real_end - real_start,
5969dba3ca5SRichard Henderson                      host_prot, flags, fd, offset1);
59780210bcdSths             if (p == MAP_FAILED)
598c8a706feSpbrook                 goto fail;
59954936004Sbellard         }
600a03e2d42Sbellard     }
60154936004Sbellard  the_end1:
6029dba3ca5SRichard Henderson     page_set_flags(start, start + len, page_flags);
60354936004Sbellard  the_end:
604d0e165aeSAlex Bennée     trace_target_mmap_complete(start);
60510d0d505SAlex Bennée     if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
60610d0d505SAlex Bennée         log_page_dump(__func__);
60710d0d505SAlex Bennée     }
60835865339SPaolo Bonzini     tb_invalidate_phys_range(start, start + len);
609c8a706feSpbrook     mmap_unlock();
61054936004Sbellard     return start;
611c8a706feSpbrook fail:
612c8a706feSpbrook     mmap_unlock();
613c8a706feSpbrook     return -1;
61454936004Sbellard }
61554936004Sbellard 
61668a1c816SPaul Brook static void mmap_reserve(abi_ulong start, abi_ulong size)
61768a1c816SPaul Brook {
61868a1c816SPaul Brook     abi_ulong real_start;
61968a1c816SPaul Brook     abi_ulong real_end;
62068a1c816SPaul Brook     abi_ulong addr;
62168a1c816SPaul Brook     abi_ulong end;
62268a1c816SPaul Brook     int prot;
62368a1c816SPaul Brook 
62468a1c816SPaul Brook     real_start = start & qemu_host_page_mask;
62568a1c816SPaul Brook     real_end = HOST_PAGE_ALIGN(start + size);
62668a1c816SPaul Brook     end = start + size;
62768a1c816SPaul Brook     if (start > real_start) {
62868a1c816SPaul Brook         /* handle host page containing start */
62968a1c816SPaul Brook         prot = 0;
63068a1c816SPaul Brook         for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
63168a1c816SPaul Brook             prot |= page_get_flags(addr);
63268a1c816SPaul Brook         }
63368a1c816SPaul Brook         if (real_end == real_start + qemu_host_page_size) {
63468a1c816SPaul Brook             for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
63568a1c816SPaul Brook                 prot |= page_get_flags(addr);
63668a1c816SPaul Brook             }
63768a1c816SPaul Brook             end = real_end;
63868a1c816SPaul Brook         }
63968a1c816SPaul Brook         if (prot != 0)
64068a1c816SPaul Brook             real_start += qemu_host_page_size;
64168a1c816SPaul Brook     }
64268a1c816SPaul Brook     if (end < real_end) {
64368a1c816SPaul Brook         prot = 0;
64468a1c816SPaul Brook         for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
64568a1c816SPaul Brook             prot |= page_get_flags(addr);
64668a1c816SPaul Brook         }
64768a1c816SPaul Brook         if (prot != 0)
64868a1c816SPaul Brook             real_end -= qemu_host_page_size;
64968a1c816SPaul Brook     }
65068a1c816SPaul Brook     if (real_start != real_end) {
65168a1c816SPaul Brook         mmap(g2h(real_start), real_end - real_start, PROT_NONE,
65268a1c816SPaul Brook                  MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
65368a1c816SPaul Brook                  -1, 0);
65468a1c816SPaul Brook     }
65568a1c816SPaul Brook }
65668a1c816SPaul Brook 
657992f48a0Sblueswir1 int target_munmap(abi_ulong start, abi_ulong len)
65854936004Sbellard {
659992f48a0Sblueswir1     abi_ulong end, real_start, real_end, addr;
66054936004Sbellard     int prot, ret;
66154936004Sbellard 
662b7b18d26SAlex Bennée     trace_target_munmap(start, len);
663b7b18d26SAlex Bennée 
66454936004Sbellard     if (start & ~TARGET_PAGE_MASK)
66578cf3390SMax Filippov         return -TARGET_EINVAL;
66654936004Sbellard     len = TARGET_PAGE_ALIGN(len);
667ebf9a363SMax Filippov     if (len == 0 || !guest_range_valid(start, len)) {
66878cf3390SMax Filippov         return -TARGET_EINVAL;
669ebf9a363SMax Filippov     }
670ebf9a363SMax Filippov 
671c8a706feSpbrook     mmap_lock();
67254936004Sbellard     end = start + len;
67353a5960aSpbrook     real_start = start & qemu_host_page_mask;
67453a5960aSpbrook     real_end = HOST_PAGE_ALIGN(end);
67554936004Sbellard 
67653a5960aSpbrook     if (start > real_start) {
67754936004Sbellard         /* handle host page containing start */
67854936004Sbellard         prot = 0;
67953a5960aSpbrook         for(addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
68054936004Sbellard             prot |= page_get_flags(addr);
68154936004Sbellard         }
68253a5960aSpbrook         if (real_end == real_start + qemu_host_page_size) {
68353a5960aSpbrook             for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
684d418c81eSbellard                 prot |= page_get_flags(addr);
685d418c81eSbellard             }
68653a5960aSpbrook             end = real_end;
687d418c81eSbellard         }
68854936004Sbellard         if (prot != 0)
68953a5960aSpbrook             real_start += qemu_host_page_size;
69054936004Sbellard     }
69153a5960aSpbrook     if (end < real_end) {
69254936004Sbellard         prot = 0;
69353a5960aSpbrook         for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
69454936004Sbellard             prot |= page_get_flags(addr);
69554936004Sbellard         }
69654936004Sbellard         if (prot != 0)
69753a5960aSpbrook             real_end -= qemu_host_page_size;
69854936004Sbellard     }
69954936004Sbellard 
700c8a706feSpbrook     ret = 0;
70154936004Sbellard     /* unmap what we can */
70253a5960aSpbrook     if (real_start < real_end) {
703b76f21a7SLaurent Vivier         if (reserved_va) {
70468a1c816SPaul Brook             mmap_reserve(real_start, real_end - real_start);
70568a1c816SPaul Brook         } else {
7064118a970Sj_mayer             ret = munmap(g2h(real_start), real_end - real_start);
70754936004Sbellard         }
70868a1c816SPaul Brook     }
70954936004Sbellard 
71077a8f1a5SAlexander Graf     if (ret == 0) {
71154936004Sbellard         page_set_flags(start, start + len, 0);
71235865339SPaolo Bonzini         tb_invalidate_phys_range(start, start + len);
71377a8f1a5SAlexander Graf     }
714c8a706feSpbrook     mmap_unlock();
715c8a706feSpbrook     return ret;
71654936004Sbellard }
71754936004Sbellard 
718992f48a0Sblueswir1 abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
719992f48a0Sblueswir1                        abi_ulong new_size, unsigned long flags,
720992f48a0Sblueswir1                        abi_ulong new_addr)
72154936004Sbellard {
72254936004Sbellard     int prot;
723f19412a2Saurel32     void *host_addr;
72454936004Sbellard 
725ebf9a363SMax Filippov     if (!guest_range_valid(old_addr, old_size) ||
726ebf9a363SMax Filippov         ((flags & MREMAP_FIXED) &&
727ebf9a363SMax Filippov          !guest_range_valid(new_addr, new_size))) {
728ebf9a363SMax Filippov         errno = ENOMEM;
729ebf9a363SMax Filippov         return -1;
730ebf9a363SMax Filippov     }
731ebf9a363SMax Filippov 
732c8a706feSpbrook     mmap_lock();
733f19412a2Saurel32 
73468a1c816SPaul Brook     if (flags & MREMAP_FIXED) {
73552956a9bSFelix Janda         host_addr = mremap(g2h(old_addr), old_size, new_size,
73652956a9bSFelix Janda                            flags, g2h(new_addr));
73768a1c816SPaul Brook 
738b76f21a7SLaurent Vivier         if (reserved_va && host_addr != MAP_FAILED) {
73968a1c816SPaul Brook             /* If new and old addresses overlap then the above mremap will
74068a1c816SPaul Brook                already have failed with EINVAL.  */
74168a1c816SPaul Brook             mmap_reserve(old_addr, old_size);
74268a1c816SPaul Brook         }
74368a1c816SPaul Brook     } else if (flags & MREMAP_MAYMOVE) {
744f19412a2Saurel32         abi_ulong mmap_start;
745f19412a2Saurel32 
74630ab9ef2SRichard Henderson         mmap_start = mmap_find_vma(0, new_size, TARGET_PAGE_SIZE);
747f19412a2Saurel32 
748f19412a2Saurel32         if (mmap_start == -1) {
749f19412a2Saurel32             errno = ENOMEM;
750f19412a2Saurel32             host_addr = MAP_FAILED;
75168a1c816SPaul Brook         } else {
75252956a9bSFelix Janda             host_addr = mremap(g2h(old_addr), old_size, new_size,
75352956a9bSFelix Janda                                flags | MREMAP_FIXED, g2h(mmap_start));
754b76f21a7SLaurent Vivier             if (reserved_va) {
75568a1c816SPaul Brook                 mmap_reserve(old_addr, old_size);
75668a1c816SPaul Brook             }
757c65ffe6dSamateur         }
7583af72a4dSblueswir1     } else {
75968a1c816SPaul Brook         int prot = 0;
760b76f21a7SLaurent Vivier         if (reserved_va && old_size < new_size) {
76168a1c816SPaul Brook             abi_ulong addr;
76268a1c816SPaul Brook             for (addr = old_addr + old_size;
76368a1c816SPaul Brook                  addr < old_addr + new_size;
76468a1c816SPaul Brook                  addr++) {
76568a1c816SPaul Brook                 prot |= page_get_flags(addr);
76668a1c816SPaul Brook             }
76768a1c816SPaul Brook         }
76868a1c816SPaul Brook         if (prot == 0) {
769f19412a2Saurel32             host_addr = mremap(g2h(old_addr), old_size, new_size, flags);
770*56d19084STobias Koch 
771*56d19084STobias Koch             if (host_addr != MAP_FAILED) {
772*56d19084STobias Koch                 /* Check if address fits target address space */
773*56d19084STobias Koch                 if (!guest_range_valid(h2g(host_addr), new_size)) {
774*56d19084STobias Koch                     /* Revert mremap() changes */
775*56d19084STobias Koch                     host_addr = mremap(g2h(old_addr), new_size, old_size,
776*56d19084STobias Koch                                        flags);
77768a1c816SPaul Brook                     errno = ENOMEM;
77868a1c816SPaul Brook                     host_addr = MAP_FAILED;
779*56d19084STobias Koch                 } else if (reserved_va && old_size > new_size) {
780*56d19084STobias Koch                     mmap_reserve(old_addr + old_size, old_size - new_size);
78168a1c816SPaul Brook                 }
782*56d19084STobias Koch             }
783*56d19084STobias Koch         } else {
784f19412a2Saurel32             errno = ENOMEM;
785f19412a2Saurel32             host_addr = MAP_FAILED;
786f19412a2Saurel32         }
787f19412a2Saurel32     }
788f19412a2Saurel32 
789f19412a2Saurel32     if (host_addr == MAP_FAILED) {
790c8a706feSpbrook         new_addr = -1;
791c8a706feSpbrook     } else {
792a5b85f79Sths         new_addr = h2g(host_addr);
79354936004Sbellard         prot = page_get_flags(old_addr);
79454936004Sbellard         page_set_flags(old_addr, old_addr + old_size, 0);
79554936004Sbellard         page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
796c8a706feSpbrook     }
79735865339SPaolo Bonzini     tb_invalidate_phys_range(new_addr, new_addr + new_size);
798c8a706feSpbrook     mmap_unlock();
79954936004Sbellard     return new_addr;
80054936004Sbellard }
801