/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department. Originally from the University of Wisconsin.
 *
 * %sccs.include.proprietary.c%
 *
 * from: Utah $Hdr: uipc_shm.c 1.11 92/04/23$
 *
 *	@(#)sysv_shm.c	8.7 (Berkeley) 02/14/95
 */

/*
 * System V shared memory routines.
 * TEMPORARY, until mmap is in place;
 * needed now for HP-UX compatibility and X server (yech!).
 */
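
/*
 * Illustrative userland usage of the calls implemented below
 * (a sketch for orientation only, not code in this file):
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, (char *)0, 0);	(kernel picks the address)
 *	... read and write p[0] through p[4095] ...
 *	shmdt(p);
 *	shmctl(id, IPC_RMID, (struct shmid_ds *)0);
 */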

#ifdef SYSVSHM

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/shm.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_inherit.h>
#include <vm/vm_pager.h>

int shmat(), shmctl(), shmdt(), shmget();
int (*shmcalls[])() = { shmat, shmctl, shmdt, shmget };
int shmtot = 0;

/*
 * Per process internal structure for managing segments.
 * Each process using shm has an array of shminfo.shmseg of these.
 */
struct shmdesc {
        vm_offset_t     shmd_uva;
        int             shmd_id;
};

/*
 * Per segment internal structure (shm_handle).
 */
struct shmhandle {
        vm_offset_t     shmh_kva;
        caddr_t         shmh_id;
};

vm_map_t shm_map;       /* address space for shared memory segments */

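/*
 * Initialize the shared memory facility: carve a submap out of the
 * kernel map big enough for shminfo.shmall pages and mark every
 * segment table entry free.
 */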
shminit()
{
        register int i;
        vm_offset_t whocares1, whocares2;

        shm_map = kmem_suballoc(kernel_map, &whocares1, &whocares2,
                                shminfo.shmall * NBPG, TRUE);
        if (shminfo.shmmni > SHMMMNI)
                shminfo.shmmni = SHMMMNI;
        for (i = 0; i < shminfo.shmmni; i++) {
                shmsegs[i].shm_perm.mode = 0;
                shmsegs[i].shm_perm.seq = 0;
        }
}

/*
 * Entry point for all SHM calls
 */
struct shmsys_args {
        u_int which;
};
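/*
 * The arguments for the selected call sit immediately after ``which''
 * in the argument block, which is why the dispatched routine is handed
 * &uap[1] as its own argument structure.
 */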
compat_43_shmsys(p, uap, retval)
        struct proc *p;
        struct shmsys_args *uap;
        int *retval;
{

        if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
                return (EINVAL);
        return ((*shmcalls[uap->which])(p, &uap[1], retval));
}

/*
 * Get a shared memory segment
 */
struct shmget_args {
        key_t key;
        int size;
        int shmflg;
};
shmget(p, uap, retval)
        struct proc *p;
        register struct shmget_args *uap;
        int *retval;
{
        register struct shmid_ds *shp;
        register struct ucred *cred = p->p_ucred;
        register int i;
        int error, size, rval = 0;
        register struct shmhandle *shmh;

        /* look up the specified shm_id */
        if (uap->key != IPC_PRIVATE) {
                for (i = 0; i < shminfo.shmmni; i++)
                        if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) &&
                            shmsegs[i].shm_perm.key == uap->key) {
                                rval = i;
                                break;
                        }
        } else
                i = shminfo.shmmni;

        /* create a new shared segment if necessary */
        if (i == shminfo.shmmni) {
                if ((uap->shmflg & IPC_CREAT) == 0)
                        return (ENOENT);
                if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
                        return (EINVAL);
                for (i = 0; i < shminfo.shmmni; i++)
                        if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) == 0) {
                                rval = i;
                                break;
                        }
                if (i == shminfo.shmmni)
                        return (ENOSPC);
                size = clrnd(btoc(uap->size));
                if (shmtot + size > shminfo.shmall)
                        return (ENOMEM);
                shp = &shmsegs[rval];
                /*
                 * We need to do a couple of things to ensure consistency
                 * in case we sleep in malloc().  We mark the segment as
                 * allocated so that other shmget()s will not allocate it.
                 * We mark it as "destroyed" to ensure that shmvalid()
                 * fails, making most operations fail (XXX).  We set the
                 * key so that other shmget()s will fail.
                 */
                shp->shm_perm.mode = SHM_ALLOC | SHM_DEST;
                shp->shm_perm.key = uap->key;
                shmh = (struct shmhandle *)
                        malloc(sizeof(struct shmhandle), M_SHM, M_WAITOK);
                shmh->shmh_kva = 0;
                shmh->shmh_id = (caddr_t)(0xc0000000|rval);     /* XXX */
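                /*
                 * The fabricated id serves as the handle naming the
                 * anonymous object; shmat() later hands the same id to
                 * vm_mmap() so that attaches share this object.
                 */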
                error = vm_mmap(shm_map, &shmh->shmh_kva, ctob(size),
                                VM_PROT_ALL, VM_PROT_ALL,
                                MAP_ANON, shmh->shmh_id, 0);
                if (error) {
                        free((caddr_t)shmh, M_SHM);
                        shp->shm_perm.mode = 0;
                        return (ENOMEM);
                }
                shp->shm_handle = (void *) shmh;
                shmtot += size;
                shp->shm_perm.cuid = shp->shm_perm.uid = cred->cr_uid;
                shp->shm_perm.cgid = shp->shm_perm.gid = cred->cr_gid;
                shp->shm_perm.mode = SHM_ALLOC | (uap->shmflg & ACCESSPERMS);
                shp->shm_segsz = uap->size;
                shp->shm_cpid = p->p_pid;
                shp->shm_lpid = shp->shm_nattch = 0;
                shp->shm_atime = shp->shm_dtime = 0;
                shp->shm_ctime = time.tv_sec;
        } else {
                shp = &shmsegs[rval];
                /* XXX: probably not the right thing to do */
                if (shp->shm_perm.mode & SHM_DEST)
                        return (EBUSY);
                if (error = ipcaccess(&shp->shm_perm, uap->shmflg & ACCESSPERMS,
                    cred))
                        return (error);
                if (uap->size && uap->size > shp->shm_segsz)
                        return (EINVAL);
                if ((uap->shmflg & IPC_CREAT) && (uap->shmflg & IPC_EXCL))
                        return (EEXIST);
        }
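        /*
         * A shmid encodes both the table slot and that slot's sequence
         * number: id = seq * SHMMMNI + slot.  With SHMMMNI of, say, 32,
         * slot 5 in its third incarnation (seq 2) yields id 69;
         * shmvalid() recovers the slot as id % SHMMMNI and checks the
         * sequence as id / SHMMMNI.
         */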
        *retval = shp->shm_perm.seq * SHMMMNI + rval;
        return (0);
}

/*
 * Shared memory control
 */
struct shmctl_args {
        int shmid;
        int cmd;
        caddr_t buf;
};
/* ARGSUSED */
shmctl(p, uap, retval)
        struct proc *p;
        register struct shmctl_args *uap;
        int *retval;
{
        register struct shmid_ds *shp;
        register struct ucred *cred = p->p_ucred;
        struct shmid_ds sbuf;
        int error;

        if (error = shmvalid(uap->shmid))
                return (error);
        shp = &shmsegs[uap->shmid % SHMMMNI];
        switch (uap->cmd) {
        case IPC_STAT:
                if (error = ipcaccess(&shp->shm_perm, IPC_R, cred))
                        return (error);
                return (copyout((caddr_t)shp, uap->buf, sizeof(*shp)));

        case IPC_SET:
                if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
                    cred->cr_uid != shp->shm_perm.cuid)
                        return (EPERM);
                if (error = copyin(uap->buf, (caddr_t)&sbuf, sizeof sbuf))
                        return (error);
                shp->shm_perm.uid = sbuf.shm_perm.uid;
                shp->shm_perm.gid = sbuf.shm_perm.gid;
                shp->shm_perm.mode = (shp->shm_perm.mode & ~ACCESSPERMS)
                        | (sbuf.shm_perm.mode & ACCESSPERMS);
                shp->shm_ctime = time.tv_sec;
                break;

        case IPC_RMID:
                if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
                    cred->cr_uid != shp->shm_perm.cuid)
                        return (EPERM);
                /* set ctime? */
                shp->shm_perm.key = IPC_PRIVATE;
                shp->shm_perm.mode |= SHM_DEST;
                if (shp->shm_nattch <= 0)
                        shmfree(shp);
                break;

        default:
                return (EINVAL);
        }
        return (0);
}

/*
 * Attach to shared memory segment.
 */
struct shmat_args {
        int shmid;
        caddr_t shmaddr;
        int shmflg;
};
shmat(p, uap, retval)
        struct proc *p;
        register struct shmat_args *uap;
        int *retval;
{
        register struct shmid_ds *shp;
        register int size;
        caddr_t uva;
        int error;
        int flags;
        vm_prot_t prot;
        struct shmdesc *shmd;

        /*
         * Allocate descriptors now (before validity check)
         * in case malloc() blocks.
         */
        shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
        size = shminfo.shmseg * sizeof(struct shmdesc);
        if (shmd == NULL) {
                shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
                bzero((caddr_t)shmd, size);
                p->p_vmspace->vm_shm = (caddr_t)shmd;
        }
        if (error = shmvalid(uap->shmid))
                return (error);
        shp = &shmsegs[uap->shmid % SHMMMNI];
        if (shp->shm_handle == NULL)
                panic("shmat NULL handle");
        if (error = ipcaccess(&shp->shm_perm,
            (uap->shmflg&SHM_RDONLY) ? IPC_R : IPC_R|IPC_W, p->p_ucred))
                return (error);
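        /*
         * A caller-supplied address must be SHMLBA-aligned.  With
         * SHM_RND an unaligned address is rounded down to the previous
         * SHMLBA boundary (e.g., for an SHMLBA of 8192, 0x12345 becomes
         * 0x12000); otherwise it is rejected.
         */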
        uva = uap->shmaddr;
        if (uva && ((int)uva & (SHMLBA-1))) {
                if (uap->shmflg & SHM_RND)
                        uva = (caddr_t) ((int)uva & ~(SHMLBA-1));
                else
                        return (EINVAL);
        }
        /*
         * Make sure user doesn't use more than their fair share
         */
        for (size = 0; size < shminfo.shmseg; size++) {
                if (shmd->shmd_uva == 0)
                        break;
                shmd++;
        }
        if (size >= shminfo.shmseg)
                return (EMFILE);
        size = ctob(clrnd(btoc(shp->shm_segsz)));
        prot = VM_PROT_READ;
        if ((uap->shmflg & SHM_RDONLY) == 0)
                prot |= VM_PROT_WRITE;
        flags = MAP_ANON|MAP_SHARED;
        if (uva)
                flags |= MAP_FIXED;
        else
                uva = (caddr_t)0x1000000;       /* XXX */
        error = vm_mmap(&p->p_vmspace->vm_map, (vm_offset_t *)&uva,
                        (vm_size_t)size, prot, VM_PROT_ALL, flags,
                        ((struct shmhandle *)shp->shm_handle)->shmh_id, 0);
        if (error)
                return (error);
        shmd->shmd_uva = (vm_offset_t)uva;
        shmd->shmd_id = uap->shmid;
        /*
         * Fill in the remaining fields
         */
        shp->shm_lpid = p->p_pid;
        shp->shm_atime = time.tv_sec;
        shp->shm_nattch++;
        *retval = (int) uva;
        return (0);
}

/*
 * Detach from shared memory segment.
 */
struct shmdt_args {
        caddr_t shmaddr;
};
/* ARGSUSED */
shmdt(p, uap, retval)
        struct proc *p;
        struct shmdt_args *uap;
        int *retval;
{
        register struct shmdesc *shmd;
        register int i;

        shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
        for (i = 0; i < shminfo.shmseg; i++, shmd++)
                if (shmd->shmd_uva &&
                    shmd->shmd_uva == (vm_offset_t)uap->shmaddr)
                        break;
        if (i == shminfo.shmseg)
                return (EINVAL);
        /*
         * Record the last pid before calling shmufree(), which zeroes
         * shmd_id and would otherwise leave us updating segment 0.
         */
        shmsegs[shmd->shmd_id % SHMMMNI].shm_lpid = p->p_pid;
        shmufree(p, shmd);
        return (0);
}

shmfork(p1, p2, isvfork)
        struct proc *p1, *p2;
        int isvfork;
{
        register struct shmdesc *shmd;
        register int size;

        /*
         * Copy the parent's descriptor information.
         */
        size = shminfo.shmseg * sizeof(struct shmdesc);
        shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
        bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmd, size);
        p2->p_vmspace->vm_shm = (caddr_t)shmd;
        /*
         * Increment reference counts
         */
        for (size = 0; size < shminfo.shmseg; size++, shmd++)
                if (shmd->shmd_uva)
                        shmsegs[shmd->shmd_id % SHMMMNI].shm_nattch++;
}

shmexit(p)
        struct proc *p;
{
        register struct shmdesc *shmd;
        register int i;

        shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
        for (i = 0; i < shminfo.shmseg; i++, shmd++)
                if (shmd->shmd_uva)
                        shmufree(p, shmd);
        free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
        p->p_vmspace->vm_shm = NULL;
}

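/*
 * Check that a shmid refers to a live segment: the slot's sequence
 * number must match the one encoded in the id, and the segment must
 * be allocated but not marked for destruction.
 */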
shmvalid(id)
        register int id;
{
        register struct shmid_ds *shp;

        if (id < 0 || (id % SHMMMNI) >= shminfo.shmmni)
                return (EINVAL);
        shp = &shmsegs[id % SHMMMNI];
        if (shp->shm_perm.seq == (id / SHMMMNI) &&
            (shp->shm_perm.mode & (SHM_ALLOC|SHM_DEST)) == SHM_ALLOC)
                return (0);
        return (EINVAL);
}

/*
 * Free user resources associated with a shared memory segment
 */
shmufree(p, shmd)
        struct proc *p;
        struct shmdesc *shmd;
{
        register struct shmid_ds *shp;

        shp = &shmsegs[shmd->shmd_id % SHMMMNI];
        (void) vm_deallocate(&p->p_vmspace->vm_map, shmd->shmd_uva,
                             ctob(clrnd(btoc(shp->shm_segsz))));
        shmd->shmd_id = 0;
        shmd->shmd_uva = 0;
        shp->shm_dtime = time.tv_sec;
        if (--shp->shm_nattch <= 0 && (shp->shm_perm.mode & SHM_DEST))
                shmfree(shp);
}

/*
 * Deallocate resources associated with a shared memory segment
 */
shmfree(shp)
        register struct shmid_ds *shp;
{

        if (shp->shm_handle == NULL)
                panic("shmfree");
        /*
         * Lose our lingering object reference by deallocating space
         * in the kernel.  The pager will also be deallocated as a
         * side effect.
         */
        vm_deallocate(shm_map,
                      ((struct shmhandle *)shp->shm_handle)->shmh_kva,
                      ctob(clrnd(btoc(shp->shm_segsz))));
        free((caddr_t)shp->shm_handle, M_SHM);
        shp->shm_handle = NULL;
        shmtot -= clrnd(btoc(shp->shm_segsz));
        shp->shm_perm.mode = 0;
        /*
         * Increment the sequence number to ensure that outstanding
         * shmids for this segment will be invalid in the event that
         * the segment is reallocated.  Note that shmids must be
         * positive as decreed by SVID.
         */
        shp->shm_perm.seq++;
        if ((int)(shp->shm_perm.seq * SHMMMNI) < 0)
                shp->shm_perm.seq = 0;
}

/*
 * XXX This routine would be common to all sysV style IPC
 * (if the others were implemented).
 */
ipcaccess(ipc, mode, cred)
        register struct ipc_perm *ipc;
        int mode;
        register struct ucred *cred;
{
        register int m;

        if (cred->cr_uid == 0)
                return (0);
        /*
         * Access check is based on only one of owner, group, public.
         * If not owner, then check group.
         * If not a member of the group, then check public access.
         */
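        /*
         * The requested mode is masked to the owner bits (e.g. IPC_R
         * is 0400); the segment's mode is shifted left 3 bits for each
         * failed identity so the matching group or public bits line up
         * with the request.  E.g., a non-owner group member asking
         * IPC_R against mode 0640 tests 0400 against 0640 << 3 = 06400,
         * which succeeds.
         */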
        mode &= 0700;
        m = ipc->mode;
        if (cred->cr_uid != ipc->uid && cred->cr_uid != ipc->cuid) {
                m <<= 3;
                if (!groupmember(ipc->gid, cred) &&
                    !groupmember(ipc->cgid, cred))
                        m <<= 3;
        }
        if ((mode&m) == mode)
                return (0);
        return (EACCES);
}
#endif /* SYSVSHM */
