/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "trace.h"
#include "exec/log.h"
#include "qemu.h"

static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

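/*
 * mmap_lock() is recursive per thread: only the outermost lock/unlock
 * pair actually takes or releases mmap_mutex; nested calls simply
 * adjust the per-thread nesting count.
 */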
void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

bool have_mmap_lock(void)
{
    return mmap_lock_count > 0;
}

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count) {
        abort();
    }
    pthread_mutex_lock(&mmap_mutex);
}

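/*
 * In the parent, release the lock taken in mmap_fork_start(); in the
 * child, the inherited mutex state is not usable, so reinitialize it.
 */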
void mmap_fork_end(int child)
{
    if (child) {
        pthread_mutex_init(&mmap_mutex, NULL);
    } else {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

/*
 * Validate target prot bitmask.
 * Return the prot bitmask for the host in *HOST_PROT.
 * Return 0 if the target prot bitmask is invalid, otherwise
 * the internal qemu page_flags (which will include PAGE_VALID).
 */
static int validate_prot_to_pageflags(int *host_prot, int prot)
{
    int valid = PROT_READ | PROT_WRITE | PROT_EXEC | TARGET_PROT_SEM;
    int page_flags = (prot & PAGE_BITS) | PAGE_VALID;

    /*
     * For the host, we need not pass anything except read/write/exec.
     * While PROT_SEM is allowed by all hosts, it is also ignored, so
     * don't bother transforming guest bit to host bit.  Any other
     * target-specific prot bits will not be understood by the host
     * and will need to be encoded into page_flags for qemu emulation.
     *
     * Pages that are executable by the guest will never be executed
     * by the host, but the host will need to be able to read them.
     */
    *host_prot = (prot & (PROT_READ | PROT_WRITE))
               | (prot & PROT_EXEC ? PROT_READ : 0);

#ifdef TARGET_AARCH64
    /*
     * The PROT_BTI bit is only accepted if the cpu supports the feature.
     * Since this is the unusual case, don't bother checking unless
     * the bit has been requested.  If set and valid, record the bit
     * within QEMU's page_flags.
     */
    if (prot & TARGET_PROT_BTI) {
        ARMCPU *cpu = ARM_CPU(thread_cpu);
        if (cpu_isar_feature(aa64_bti, cpu)) {
            valid |= TARGET_PROT_BTI;
            page_flags |= PAGE_BTI;
        }
    }
#endif

    return prot & ~valid ? 0 : page_flags;
}

/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int target_prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret, page_flags, host_prot;

    trace_target_mprotect(start, len, target_prot);

    if ((start & ~TARGET_PAGE_MASK) != 0) {
        return -TARGET_EINVAL;
    }
    page_flags = validate_prot_to_pageflags(&host_prot, target_prot);
    if (!page_flags) {
        return -TARGET_EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (!guest_range_valid(start, len)) {
        return -TARGET_ENOMEM;
    }
    if (len == 0) {
        return 0;
    }

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = host_prot;
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h(host_start), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_start += qemu_host_page_size;
    }
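    /* handle the host page containing the end of the range */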
    if (end < host_end) {
        prot1 = host_prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h(host_end - qemu_host_page_size),
                       qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0) {
            goto error;
        }
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h(host_start), host_end - host_start, host_prot);
        if (ret != 0) {
            goto error;
        }
    }
    page_set_flags(start, start + len, page_flags);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}

/*
 * Map the fragment of a host page that contains the target range
 * [start, end).  Protections of target pages already present in the
 * same host page are preserved; for file-backed mappings the file
 * contents are read into the page with pread() rather than mapped
 * directly.
 */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for (addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end) {
            prot1 |= page_get_flags(addr);
        }
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            return -1;
        }
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE)) {
            return -1;
        }

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE)) {
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);
        }

        /* read the corresponding file data */
        if (pread(fd, g2h(start), end - start, offset) == -1) {
            return -1;
        }

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE)) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
    } else {
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
        if (prot_new & PROT_WRITE) {
            memset(g2h(start), 0, end - start);
        }
    }
    return 0;
}

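/*
 * Default starting address for guest mappings when the guest gives no
 * address hint; see mmap_find_vma() below.
 */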
#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
#ifdef TARGET_AARCH64
# define TASK_UNMAPPED_BASE  0x5500000000
#else
# define TASK_UNMAPPED_BASE  (1ul << 38)
#endif
#else
# define TASK_UNMAPPED_BASE  0x40000000
#endif
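/* Where the next default-placement search begins; updated by mmap_find_vma(). */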
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

unsigned long last_brk;

/* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk
   of guest address space.  */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong align)
{
    abi_ulong addr, end_addr, incr = qemu_host_page_size;
    int prot;
    bool looped = false;

    if (size > reserved_va) {
        return (abi_ulong)-1;
    }

    /* Note that start and size have already been aligned by mmap_find_vma. */

    end_addr = start + size;
    if (start > reserved_va - size) {
        /* Start at the top of the address space.  */
        end_addr = ((reserved_va - size) & -align) + size;
        looped = true;
    }

    /* Search downward from END_ADDR, checking to see if a page is in use.  */
    addr = end_addr;
    while (1) {
        addr -= incr;
        if (addr > end_addr) {
            if (looped) {
                /* Failure.  The entire address space has been searched.  */
                return (abi_ulong)-1;
            }
            /* Re-start at the top of the address space.  */
            addr = end_addr = ((reserved_va - size) & -align) + size;
            looped = true;
        } else {
            prot = page_get_flags(addr);
            if (prot) {
                /* Page in use.  Restart below this page.  */
                addr = end_addr = ((addr - size) & -align) + size;
            } else if (addr && addr + size == end_addr) {
                /* Success!  All pages between ADDR and END_ADDR are free.  */
                if (start == mmap_next_start) {
                    mmap_next_start = addr;
                }
                return addr;
            }
        }
    }
}

/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 on error.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
{
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    align = MAX(align, qemu_host_page_size);

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }
    start = ROUND_UP(start, align);

    size = HOST_PAGE_ALIGN(size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size, align);
    }

    addr = start;
    wrapped = repeat = 0;
    prev = NULL;

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h(addr), size, PROT_NONE,
                   MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /* Count the number of sequential returns of the same address.
           This is used to modify the search algorithm below.  */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & (align - 1)) == 0) {
                /* Success.  */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target.  */
            switch (repeat) {
            case 0:
                /* Assume the result that the kernel gave us is the
                   first with enough free space, so start again at the
                   next higher target page.  */
                addr = ROUND_UP(addr, align);
                break;
            case 1:
                /* Sometimes the kernel decides to perform the allocation
                   at the top end of memory instead.  */
                addr &= -align;
                break;
            case 2:
                /* Start over at low memory.  */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last.  */
                addr = -1;
                break;
            }
        } else {
            /* Since the result the kernel gave didn't fit, start
               again at low memory.  If any repetition, fail.  */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again.  */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space.  */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /* Don't actually use 0 when wrapping, instead indicate
               that we'd truly like an allocation in low memory.  */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                     ? TARGET_PAGE_ALIGN(mmap_min_addr)
                     : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}

/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
    int page_flags, host_prot;

    mmap_lock();
    trace_target_mmap(start, len, target_prot, flags, fd, offset);

    if (!len) {
        errno = EINVAL;
        goto fail;
    }

    page_flags = validate_prot_to_pageflags(&host_prot, target_prot);
    if (!page_flags) {
        errno = EINVAL;
        goto fail;
    }

    /* Also check for overflows... */
    len = TARGET_PAGE_ALIGN(len);
    if (!len) {
        errno = ENOMEM;
        goto fail;
    }

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;

    /* If the user is asking for the kernel to find a location, do that
       before we truncate the length for mapping files below.  */
    if (!(flags & MAP_FIXED)) {
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        start = mmap_find_vma(real_start, host_len, TARGET_PAGE_SIZE);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }

    /* When mapping files into a memory area larger than the file, accesses
       to pages beyond the file size will cause a SIGBUS.

       For example, if mmapping a file of 100 bytes on a host with 4K pages
       emulating a target with 8K pages, the target expects to be able to
       access the first 8K. But the host will trap us on any access beyond
       4K.

       When emulating a target with a larger page size than the host's, we
       may need to truncate file maps at EOF and add extra anonymous pages
       up to the target's page boundary.  */

    if ((qemu_real_host_page_size < qemu_host_page_size) &&
        !(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat(fd, &sb) == -1) {
            goto fail;
        }

        /* Are we trying to create a map beyond EOF?  */
        if (offset + len > sb.st_size) {
            /* If so, truncate the file map at EOF, aligned to the host's
               real page size.  Additional anonymous maps will be created
               beyond EOF.  */
            len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
        }
    }

    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /* Note: we prefer to control the mapping address. It is
           especially important if qemu_host_page_size >
           qemu_real_host_page_size */
        p = mmap(g2h(start), host_len, host_prot,
                 flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            goto fail;
        }
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANONYMOUS)) {
            p = mmap(g2h(start), len, host_prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if requested memory area fits target address space.
         * It can fail only on 64-bit host with 32-bit target.
         * On any other target/host combination, host mmap() handles
         * this error correctly.
         */
        if (end < start || !guest_range_valid(start, len)) {
            errno = ENOMEM;
            goto fail;
        }

        /* worst case: we cannot map the file because the offset is not
           aligned, so we read it */
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* msync() won't work here, so we return an error if write is
               possible while it is a shared mapping */
            if ((flags & MAP_TYPE) == MAP_SHARED &&
                (host_prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, target_prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1) {
                goto fail;
            }
            if (pread(fd, g2h(start), len, offset) == -1) {
                goto fail;
            }
            if (!(host_prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, target_prot);
                assert(ret == 0);
            }
            goto the_end;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                host_prot, flags, fd, offset);
                if (ret == -1) {
                    goto fail;
                }
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            host_prot, flags, fd, offset);
            if (ret == -1) {
                goto fail;
            }
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, end,
                            host_prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1) {
                goto fail;
            }
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANONYMOUS) {
                offset1 = 0;
            } else {
                offset1 = offset + real_start - start;
            }
            p = mmap(g2h(real_start), real_end - real_start,
                     host_prot, flags, fd, offset1);
            if (p == MAP_FAILED) {
                goto fail;
            }
        }
    }
 the_end1:
    page_set_flags(start, start + len, page_flags);
 the_end:
    trace_target_mmap_complete(start);
    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        log_page_dump(__func__);
    }
    tb_invalidate_phys_range(start, start + len);
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}

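/*
 * With reserved_va, we cannot return pages to the host: remap the freed
 * range as PROT_NONE so the reservation stays in place, but leave alone
 * any host page that still contains live guest pages.
 */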
static void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0) {
            real_start += qemu_host_page_size;
        }
    }
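    /* handle the host page containing the end */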
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0) {
            real_end -= qemu_host_page_size;
        }
    }
    if (real_start != real_end) {
        mmap(g2h(real_start), real_end - real_start, PROT_NONE,
             MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
             -1, 0);
    }
}

int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

    trace_target_munmap(start, len);

    if (start & ~TARGET_PAGE_MASK) {
        return -TARGET_EINVAL;
    }
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0 || !guest_range_valid(start, len)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0) {
            real_start += qemu_host_page_size;
        }
    }
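    /* handle the host page containing the end */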
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0) {
            real_end -= qemu_host_page_size;
        }
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (reserved_va) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len, 0);
        tb_invalidate_phys_range(start, start + len);
    }
    mmap_unlock();
    return ret;
}

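/*
 * Emulate mremap().  With reserved_va, any region vacated on the host
 * side is re-reserved with mmap_reserve() instead of being returned to
 * the host.
 */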
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    if (!guest_range_valid(old_addr, old_size) ||
        ((flags & MREMAP_FIXED) &&
         !guest_range_valid(new_addr, new_size))) {
        errno = ENOMEM;
        return -1;
    }

    mmap_lock();

    if (flags & MREMAP_FIXED) {
        host_addr = mremap(g2h(old_addr), old_size, new_size,
                           flags, g2h(new_addr));

        if (reserved_va && host_addr != MAP_FAILED) {
            /* If new and old addresses overlap then the above mremap will
               already have failed with EINVAL.  */
            mmap_reserve(old_addr, old_size);
        }
    } else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size, TARGET_PAGE_SIZE);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else {
            host_addr = mremap(g2h(old_addr), old_size, new_size,
                               flags | MREMAP_FIXED, g2h(mmap_start));
            if (reserved_va) {
                mmap_reserve(old_addr, old_size);
            }
        }
    } else {
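        /*
         * Neither MREMAP_FIXED nor MREMAP_MAYMOVE: the mapping may only
         * grow or shrink in place.  With reserved_va, refuse to grow
         * unless the pages following the old mapping are unused.
         */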
        int prot = 0;
        if (reserved_va && old_size < new_size) {
            abi_ulong addr;
            for (addr = old_addr + old_size;
                 addr < old_addr + new_size;
                 addr++) {
                prot |= page_get_flags(addr);
            }
        }
        if (prot == 0) {
            host_addr = mremap(g2h(old_addr), old_size, new_size, flags);
            if (host_addr != MAP_FAILED && reserved_va && old_size > new_size) {
                mmap_reserve(old_addr + new_size, old_size - new_size);
            }
        } else {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
        /* Check if address fits target address space */
        if ((unsigned long)host_addr + new_size > (abi_ulong)-1) {
            /* Revert mremap() changes */
            host_addr = mremap(g2h(old_addr), new_size, old_size, flags);
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size, 0);
        page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
    }
    tb_invalidate_phys_range(new_addr, new_addr + new_size);
    mmap_unlock();
    return new_addr;
}