/* xref: /qemu/linux-user/mmap.c (revision 7294e600) */
/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu.h"
#include "qemu-common.h"

//#define DEBUG_MMAP

static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

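/* The mmap lock is recursive within a thread: mmap_lock_count tracks
   the per-thread nesting depth, and only the outermost lock/unlock
   pair touches the underlying pthread mutex. */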
void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

bool have_mmap_lock(void)
{
    return mmap_lock_count > 0;
}

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count)
        abort();
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child)
        pthread_mutex_init(&mmap_mutex, NULL);
    else
        pthread_mutex_unlock(&mmap_mutex);
}

/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

#ifdef DEBUG_MMAP
    printf("mprotect: start=0x" TARGET_ABI_FMT_lx
           " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c\n", start, len,
           prot & PROT_READ ? 'r' : '-',
           prot & PROT_WRITE ? 'w' : '-',
           prot & PROT_EXEC ? 'x' : '-');
#endif

    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -TARGET_EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (!guest_range_valid(start, len)) {
        return -TARGET_ENOMEM;
    }
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0)
        return 0;

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
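    /* When the host page size exceeds the target page size, the first
       and last host pages may also contain target pages outside the
       requested range; merge their existing protections into prot1 so
       that mprotect() on the whole host page does not drop them. */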
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h(host_start), qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h(host_end - qemu_host_page_size), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h(host_start), host_end - host_start, prot);
        if (ret != 0)
            goto error;
    }
    page_set_flags(start, start + len, prot | PAGE_VALID);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}

/* map an incomplete host page */
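/* [start, end) must lie within the single host page that begins at
   real_start; target pages of that host page outside the range keep
   their existing protections. */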
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for (addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work here, so we return an error if the
           mapping is both shared and writable */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -1;

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        if (pread(fd, g2h(start), end - start, offset) == -1)
            return -1;

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
        if (prot_new & PROT_WRITE) {
            memset(g2h(start), 0, end - start);
        }
    }
    return 0;
}

#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
# define TASK_UNMAPPED_BASE  (1ul << 38)
#else
# define TASK_UNMAPPED_BASE  0x40000000
#endif
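/* Next candidate address for mappings where the guest specifies no
   address; advanced as mmap_find_vma() hands out ranges. */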
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

unsigned long last_brk;

/* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk
   of guest address space.  */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size)
{
    abi_ulong addr;
    abi_ulong end_addr;
    int prot;
    int looped = 0;

    if (size > reserved_va) {
        return (abi_ulong)-1;
    }

    size = HOST_PAGE_ALIGN(size);
    end_addr = start + size;
    if (end_addr > reserved_va) {
        end_addr = reserved_va;
    }
    addr = end_addr - qemu_host_page_size;

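    /* Scan downwards for a run of 'size' bytes of unmapped target
       pages, wrapping around to the top of the reserved area at most
       once. */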
    while (1) {
        if (addr > end_addr) {
            if (looped) {
                return (abi_ulong)-1;
            }
            end_addr = reserved_va;
            addr = end_addr - qemu_host_page_size;
            looped = 1;
            continue;
        }
        prot = page_get_flags(addr);
        if (prot) {
            end_addr = addr;
        }
        if (addr && addr + size == end_addr) {
            break;
        }
        addr -= qemu_host_page_size;
    }

    if (start == mmap_next_start) {
        mmap_next_start = addr;
    }

    return addr;
}

/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 on error.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
{
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }

    size = HOST_PAGE_ALIGN(size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size);
    }

    addr = start;
    wrapped = repeat = 0;
    prev = NULL;

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h(addr), size, PROT_NONE,
                   MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /* Count the number of sequential returns of the same address.
           This is used to modify the search algorithm below.  */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & ~TARGET_PAGE_MASK) == 0) {
                /* Success.  */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target.  */
            switch (repeat) {
            case 0:
                /* Assume the result that the kernel gave us is the
                   first with enough free space, so start again at the
                   next higher target page.  */
                addr = TARGET_PAGE_ALIGN(addr);
                break;
            case 1:
                /* Sometimes the kernel decides to perform the allocation
                   at the top end of memory instead.  */
                addr &= TARGET_PAGE_MASK;
                break;
            case 2:
                /* Start over at low memory.  */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last.  */
                addr = -1;
                break;
            }
        } else {
            /* Since the result the kernel gave didn't fit, start
               again at low memory.  If any repetition, fail.  */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again.  */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space.  */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /* Don't actually use 0 when wrapping, instead indicate
               that we'd truly like an allocation in low memory.  */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                     ? TARGET_PAGE_ALIGN(mmap_min_addr)
                     : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}

/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;

    mmap_lock();
#ifdef DEBUG_MMAP
    {
        printf("mmap: start=0x" TARGET_ABI_FMT_lx
               " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c flags=",
               start, len,
               prot & PROT_READ ? 'r' : '-',
               prot & PROT_WRITE ? 'w' : '-',
               prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_FIXED)
            printf("MAP_FIXED ");
        if (flags & MAP_ANONYMOUS)
            printf("MAP_ANON ");
        switch (flags & MAP_TYPE) {
        case MAP_PRIVATE:
            printf("MAP_PRIVATE ");
            break;
        case MAP_SHARED:
            printf("MAP_SHARED ");
            break;
        default:
            printf("[MAP_TYPE=0x%x] ", flags & MAP_TYPE);
            break;
        }
        printf("fd=%d offset=" TARGET_ABI_FMT_lx "\n", fd, offset);
    }
#endif

    if (!len) {
        errno = EINVAL;
        goto fail;
    }

    /* Also check for overflows... */
    len = TARGET_PAGE_ALIGN(len);
    if (!len) {
        errno = ENOMEM;
        goto fail;
    }

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;

    /* If the user is asking for the kernel to find a location, do that
       before we truncate the length for mapping files below.  */
    if (!(flags & MAP_FIXED)) {
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        start = mmap_find_vma(real_start, host_len);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }

    /* When mapping files into a memory area larger than the file, accesses
       to pages beyond the file size will cause a SIGBUS.

       For example, if mmapping a file of 100 bytes on a host with 4K pages
       emulating a target with 8K pages, the target expects to be able to
       access the first 8K. But the host will trap us on any access beyond
       4K.

       When emulating a target with a larger page size than the host's, we
       may need to truncate file maps at EOF and add extra anonymous pages
       up to the target's page boundary.  */

    if ((qemu_real_host_page_size < qemu_host_page_size) &&
        !(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat(fd, &sb) == -1)
            goto fail;

        /* Are we trying to create a map beyond EOF?  */
        if (offset + len > sb.st_size) {
            /* If so, truncate the file map at EOF, aligned to the
               host's real page size. Additional anonymous maps
               will be created beyond EOF.  */
            len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
        }
    }

    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /* Note: we prefer to control the mapping address. It is
           especially important if qemu_host_page_size >
           qemu_real_host_page_size */
        p = mmap(g2h(start), host_len, prot,
                 flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            goto fail;
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANONYMOUS)) {
            p = mmap(g2h(start), len, prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if requested memory area fits target address space.
         * It can fail only on a 64-bit host with a 32-bit target.
         * On any other target/host combination, host mmap() handles
         * this error correctly.
         */
        if (!guest_range_valid(start, len)) {
            errno = ENOMEM;
            goto fail;
        }

        /* worst case: we cannot map the file because the offset is not
           aligned, so we read it */
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* msync() won't work here, so we return an error if the
               mapping is both shared and writable */
            if ((flags & MAP_TYPE) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1)
                goto fail;
            if (pread(fd, g2h(start), len, offset) == -1)
                goto fail;
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
                assert(ret == 0);
            }
            goto the_end;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1)
                    goto fail;
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
                goto fail;
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANONYMOUS)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED)
                goto fail;
        }
    }
 the_end1:
    page_set_flags(start, start + len, prot | PAGE_VALID);
 the_end:
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_ABI_FMT_lx "\n", start);
    page_dump(stdout);
    printf("\n");
#endif
    tb_invalidate_phys_range(start, start + len);
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}

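/* With reserved_va the guest address space is one pre-reserved host
   mapping, so instead of unmapping we replace the range with a
   PROT_NONE reservation, leaving host pages that still back live
   target pages untouched. */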
static void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }
    if (real_start != real_end) {
        mmap(g2h(real_start), real_end - real_start, PROT_NONE,
             MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
             -1, 0);
    }
}

int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x" TARGET_ABI_FMT_lx " len=0x"
           TARGET_ABI_FMT_lx "\n",
           start, len);
#endif
    if (start & ~TARGET_PAGE_MASK)
        return -TARGET_EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0 || !guest_range_valid(start, len)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (reserved_va) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len, 0);
        tb_invalidate_phys_range(start, start + len);
    }
    mmap_unlock();
    return ret;
}

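/* Three cases: MREMAP_FIXED moves to a caller-supplied address,
   MREMAP_MAYMOVE lets us pick a new range via mmap_find_vma(), and
   otherwise the mapping may only be resized in place.  With
   reserved_va, any range given up must be re-reserved rather than
   unmapped. */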
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    if (!guest_range_valid(old_addr, old_size) ||
        ((flags & MREMAP_FIXED) &&
         !guest_range_valid(new_addr, new_size))) {
        errno = ENOMEM;
        return -1;
    }

    mmap_lock();

    if (flags & MREMAP_FIXED) {
        host_addr = mremap(g2h(old_addr), old_size, new_size,
                           flags, g2h(new_addr));

        if (reserved_va && host_addr != MAP_FAILED) {
            /* If new and old addresses overlap then the above mremap will
               already have failed with EINVAL.  */
            mmap_reserve(old_addr, old_size);
        }
    } else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else {
            host_addr = mremap(g2h(old_addr), old_size, new_size,
                               flags | MREMAP_FIXED, g2h(mmap_start));
            if (reserved_va) {
                mmap_reserve(old_addr, old_size);
            }
        }
    } else {
        int prot = 0;
        if (reserved_va && old_size < new_size) {
            abi_ulong addr;
            for (addr = old_addr + old_size;
                 addr < old_addr + new_size;
                 addr++) {
                prot |= page_get_flags(addr);
            }
        }
        if (prot == 0) {
            host_addr = mremap(g2h(old_addr), old_size, new_size, flags);
            if (host_addr != MAP_FAILED && reserved_va && old_size > new_size) {
                /* re-reserve the tail that shrinking released */
                mmap_reserve(old_addr + new_size, old_size - new_size);
            }
        } else {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
        /* Check if address fits target address space */
        if ((unsigned long)host_addr + new_size > (abi_ulong)-1) {
            /* Revert mremap() changes */
            host_addr = mremap(g2h(old_addr), new_size, old_size, flags);
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size, 0);
        page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
    }
    tb_invalidate_phys_range(new_addr, new_addr + new_size);
    mmap_unlock();
    return new_addr;
}
765