/*
 *  memory management system call shims and definitions
 *
 *  Copyright (c) 2013-15 Stacey D. Son
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Copyright (c) 1982, 1986, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef BSD_USER_BSD_MEM_H
#define BSD_USER_BSD_MEM_H

#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/mman.h>
#include <sys/shm.h>
#include <fcntl.h>

#include "qemu-bsd.h"

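/*
 * Shared state used by the shims below: bsd_shm_regions tracks shmat(2)
 * attachments so that shmdt(2) can clear the guest page flags again, while
 * target_brk and initial_target_brk hold the guest's current and initial
 * program break for do_obreak().
 */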
extern struct bsd_shm_regions bsd_shm_regions[];
extern abi_ulong target_brk;
extern abi_ulong initial_target_brk;

/* mmap(2) */
static inline abi_long do_bsd_mmap(void *cpu_env, abi_long arg1, abi_long arg2,
    abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6, abi_long arg7,
    abi_long arg8)
{
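    /*
     * On targets that pass 64-bit arguments in aligned register pairs, a
     * pad register shifts the file offset into arg7/arg8, so move it back
     * down before use.
     */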
    if (regpairs_aligned(cpu_env) != 0) {
        arg6 = arg7;
        arg7 = arg8;
    }
    return get_errno(target_mmap(arg1, arg2, arg3,
                                 target_to_host_bitmask(arg4, mmap_flags_tbl),
                                 arg5, target_arg64(arg6, arg7)));
}

/* munmap(2) */
static inline abi_long do_bsd_munmap(abi_long arg1, abi_long arg2)
{
    return get_errno(target_munmap(arg1, arg2));
}

/* mprotect(2) */
static inline abi_long do_bsd_mprotect(abi_long arg1, abi_long arg2,
        abi_long arg3)
{
    return get_errno(target_mprotect(arg1, arg2, arg3));
}

/* msync(2) */
static inline abi_long do_bsd_msync(abi_long addr, abi_long len, abi_long flags)
{
    if (!guest_range_valid_untagged(addr, len)) {
        /* It seems odd, but POSIX wants this to be ENOMEM */
        return -TARGET_ENOMEM;
    }

    return get_errno(msync(g2h_untagged(addr), len, flags));
}

/* mlock(2) */
static inline abi_long do_bsd_mlock(abi_long arg1, abi_long arg2)
{
    if (!guest_range_valid_untagged(arg1, arg2)) {
        return -TARGET_EINVAL;
    }
    return get_errno(mlock(g2h_untagged(arg1), arg2));
}

/* munlock(2) */
static inline abi_long do_bsd_munlock(abi_long arg1, abi_long arg2)
{
    if (!guest_range_valid_untagged(arg1, arg2)) {
        return -TARGET_EINVAL;
    }
    return get_errno(munlock(g2h_untagged(arg1), arg2));
}

/* mlockall(2) */
static inline abi_long do_bsd_mlockall(abi_long arg1)
{
    return get_errno(mlockall(arg1));
}

/* munlockall(2) */
static inline abi_long do_bsd_munlockall(void)
{
    return get_errno(munlockall());
}

/* madvise(2) */
static inline abi_long do_bsd_madvise(abi_long arg1, abi_long arg2,
        abi_long arg3)
{
    abi_ulong len;
    int ret = 0;
    abi_long start = arg1;
    abi_long len_in = arg2;
    abi_long advice = arg3;

    if (start & ~TARGET_PAGE_MASK) {
        return -TARGET_EINVAL;
    }
    if (len_in == 0) {
        return 0;
    }
    len = TARGET_PAGE_ALIGN(len_in);
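    /* A non-zero length that page-aligns to zero has wrapped around. */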
    if (len == 0 || !guest_range_valid_untagged(start, len)) {
        return -TARGET_EINVAL;
    }

    /*
     * Most advice values are hints, so ignoring them and returning success
     * is ok.
     *
     * However, some advice values, such as MADV_DONTNEED, are not mere hints
     * and need to be emulated.
     *
     * A straight passthrough for those may not be safe because qemu sometimes
     * turns private file-backed mappings into anonymous mappings. Only if all
     * guest pages have PAGE_PASSTHROUGH set do the mappings have the same
     * semantics for the host as for the guest.
     *
     * MADV_DONTNEED is therefore passed through when possible. When
     * passthrough isn't possible, we nevertheless (wrongly!) return success,
     * because some userspace programs fail to work otherwise; fully emulating
     * the advice would be quite complicated.
     */
    mmap_lock();
    switch (advice) {
    case MADV_DONTNEED:
        if (page_check_range(start, len, PAGE_PASSTHROUGH)) {
            ret = get_errno(madvise(g2h_untagged(start), len, advice));
            if (ret == 0) {
                page_reset_target_data(start, start + len - 1);
            }
        }
    }
    mmap_unlock();

    return ret;
}

/* minherit(2) */
static inline abi_long do_bsd_minherit(abi_long addr, abi_long len,
        abi_long inherit)
{
    return get_errno(minherit(g2h_untagged(addr), len, inherit));
}

/* mincore(2) */
static inline abi_long do_bsd_mincore(abi_ulong target_addr, abi_ulong len,
        abi_ulong target_vec)
{
    abi_long ret;
    void *p;
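    /* mincore(2) reports one status byte per page. */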
    abi_ulong vec_len = DIV_ROUND_UP(len, TARGET_PAGE_SIZE);

    if (!guest_range_valid_untagged(target_addr, len)
        || !page_check_range(target_addr, len, PAGE_VALID)) {
        return -TARGET_EFAULT;
    }

    p = lock_user(VERIFY_WRITE, target_vec, vec_len, 0);
    if (p == NULL) {
        return -TARGET_EFAULT;
    }
    ret = get_errno(mincore(g2h_untagged(target_addr), len, p));
    unlock_user(p, target_vec, vec_len);

    return ret;
}

/* do_obreak() must return target values and target errnos. */
static inline abi_long do_obreak(abi_ulong brk_val)
{
    abi_long mapped_addr;
    abi_ulong new_brk;
    abi_ulong old_brk;

    /* brk pointers are always untagged */

    /* Do not allow the break to shrink below its initial value. */
    if (brk_val < initial_target_brk) {
        return target_brk;
    }

    new_brk = TARGET_PAGE_ALIGN(brk_val);
    old_brk = TARGET_PAGE_ALIGN(target_brk);

    /* new and old target_brk might be on the same page */
    if (new_brk == old_brk) {
        target_brk = brk_val;
        return target_brk;
    }

    /* Release heap if necessary */
    if (new_brk < old_brk) {
        target_munmap(new_brk, old_brk - new_brk);

        target_brk = brk_val;
        return target_brk;
    }

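    /*
     * Try to grow the heap in place; MAP_EXCL makes the mapping fail rather
     * than clobber anything already mapped in the requested range.
     */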
    mapped_addr = target_mmap(old_brk, new_brk - old_brk,
                              PROT_READ | PROT_WRITE,
                              MAP_FIXED | MAP_EXCL | MAP_ANON | MAP_PRIVATE,
                              -1, 0);

    if (mapped_addr == old_brk) {
        target_brk = brk_val;
        return target_brk;
    }

    /* For everything else, return the previous break. */
    return target_brk;
}

/* shm_open(2) */
static inline abi_long do_bsd_shm_open(abi_ulong arg1, abi_long arg2,
        abi_long arg3)
{
    int ret;
    void *p;

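    /*
     * SHM_ANON is a magic pointer value, not a real path, so it must not be
     * run through lock_user_string().
     */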
    if (arg1 == (uintptr_t)SHM_ANON) {
        p = SHM_ANON;
    } else {
        p = lock_user_string(arg1);
        if (p == NULL) {
            return -TARGET_EFAULT;
        }
    }
    ret = get_errno(shm_open(p, target_to_host_bitmask(arg2, fcntl_flags_tbl),
                             arg3));

    if (p != SHM_ANON) {
        unlock_user(p, arg1, 0);
    }

    return ret;
}

/* shm_unlink(2) */
static inline abi_long do_bsd_shm_unlink(abi_ulong arg1)
{
    int ret;
    void *p;

    p = lock_user_string(arg1);
    if (p == NULL) {
        return -TARGET_EFAULT;
    }
    ret = get_errno(shm_unlink(p)); /* XXX path(p)? */
    unlock_user(p, arg1, 0);

    return ret;
}

/* shmget(2) */
static inline abi_long do_bsd_shmget(abi_long arg1, abi_ulong arg2,
        abi_long arg3)
{
    return get_errno(shmget(arg1, arg2, arg3));
}

/* shmctl(2) */
static inline abi_long do_bsd_shmctl(abi_long shmid, abi_long cmd,
        abi_ulong buff)
{
    struct shmid_ds dsarg;
    abi_long ret = -TARGET_EINVAL;

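    /* Keep only the low command byte; any flag bits are discarded. */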
    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
        if (target_to_host_shmid_ds(&dsarg, buff)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buff, &dsarg)) {
            return -TARGET_EFAULT;
        }
        break;

    case IPC_SET:
        if (target_to_host_shmid_ds(&dsarg, buff)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        break;

    case IPC_RMID:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;

    default:
        ret = -TARGET_EINVAL;
        break;
    }

    return ret;
}

/* shmat(2) */
static inline abi_long do_bsd_shmat(int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_ulong raddr;
    abi_long ret;
    struct shmid_ds shm_info;

    /* Find out the length of the shared memory segment. */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* Can't get the length */
        return ret;
    }

    if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    WITH_MMAP_LOCK_GUARD() {
        void *host_raddr;

        if (shmaddr) {
            host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
        } else {
            abi_ulong mmap_start;

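            /*
             * No address was requested: find a free guest range, then have
             * shmat() map there; SHM_REMAP lets the attach replace whatever
             * already covers that range.
             */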
            mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

            if (mmap_start == -1) {
                return -TARGET_ENOMEM;
            }
            host_raddr = shmat(shmid, g2h_untagged(mmap_start),
                               shmflg | SHM_REMAP);
        }

        if (host_raddr == (void *)-1) {
            return get_errno(-1);
        }
        raddr = h2g(host_raddr);

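        /* Mark the guest pages valid, and writable unless attached read-only. */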
        page_set_flags(raddr, raddr + shm_info.shm_segsz - 1,
                       PAGE_VALID | PAGE_RESET | PAGE_READ |
                       (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));

        for (int i = 0; i < N_BSD_SHM_REGIONS; i++) {
            if (bsd_shm_regions[i].start == 0) {
                bsd_shm_regions[i].start = raddr;
                bsd_shm_regions[i].size = shm_info.shm_segsz;
                break;
            }
        }
    }

    return raddr;
}

/* shmdt(2) */
static inline abi_long do_bsd_shmdt(abi_ulong shmaddr)
{
    abi_long ret;

    WITH_MMAP_LOCK_GUARD() {
        int i;

        for (i = 0; i < N_BSD_SHM_REGIONS; ++i) {
            if (bsd_shm_regions[i].start == shmaddr) {
                break;
            }
        }

        if (i == N_BSD_SHM_REGIONS) {
            return -TARGET_EINVAL;
        }

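        /*
         * Detach on the host, then clear the guest page flags and re-reserve
         * the range so the guest address space stays consistent.
         */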
        ret = get_errno(shmdt(g2h_untagged(shmaddr)));
        if (ret == 0) {
            abi_ulong size = bsd_shm_regions[i].size;

            bsd_shm_regions[i].start = 0;
            page_set_flags(shmaddr, shmaddr + size - 1, 0);
            mmap_reserve(shmaddr, size);
        }
    }

    return ret;
}

/* vadvise(2) */
static inline abi_long do_bsd_vadvise(void)
{
    /* See sys_ovadvise() in vm_unix.c */
    return -TARGET_EINVAL;
}

/* sbrk(2) */
static inline abi_long do_bsd_sbrk(void)
{
    /* See sys_sbrk() in vm_mmap.c */
    return -TARGET_EOPNOTSUPP;
}

/* sstk(2) */
static inline abi_long do_bsd_sstk(void)
{
    /* See sys_sstk() in vm_mmap.c */
    return -TARGET_EOPNOTSUPP;
}

#endif /* BSD_USER_BSD_MEM_H */