xref: /original-bsd/sys/kern/sysv_shm.c (revision f8013ff8)
/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department. Originally from University of Wisconsin.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: uipc_shm.c 1.9 89/08/14$
 *
 *	@(#)sysv_shm.c	7.12 (Berkeley) 03/17/91
 */

/*
 * System V shared memory routines.
 * TEMPORARY, until mmap is in place;
 * needed now for HP-UX compatibility and X server (yech!).
 */

#ifdef SYSVSHM

#include "param.h"
#include "systm.h"
#include "user.h"
#include "kernel.h"
#include "proc.h"
#include "shm.h"
#include "malloc.h"
#include "mman.h"
#include "../vm/vm_param.h"
#include "../vm/vm_map.h"
#include "../vm/vm_kern.h"
#include "../vm/vm_inherit.h"
#include "../vm/vm_pager.h"

#ifdef HPUXCOMPAT
#include "hp300/hpux/hpux.h"
#endif

int	shmat(), shmctl(), shmdt(), shmget();
int	(*shmcalls[])() = { shmat, shmctl, shmdt, shmget };
int	shmtot = 0;

/*
 * Per process internal structure for managing segments.
 * Each process using shm will have an array of ``shminfo.shmseg'' of these.
 */
struct	shmdesc {
	vm_offset_t	shmd_uva;
	int		shmd_id;
};

/*
 * Per segment internal structure (shm_handle).
 */
struct	shmhandle {
	vm_offset_t	shmh_kva;
	caddr_t		shmh_id;
};

vm_map_t shm_map;	/* address space for shared memory segments */

shminit()
{
	register int i;
	vm_offset_t whocares1, whocares2;

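	/*
	 * Carve a submap out of the kernel map for all shared memory
	 * segments.  shminfo.shmall is the system-wide limit in pages
	 * (NBPG bytes each); shmtot in shmget() is accounted in the
	 * same units.
	 */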
	shm_map = kmem_suballoc(kernel_map, &whocares1, &whocares2,
				shminfo.shmall * NBPG, FALSE);
	if (shminfo.shmmni > SHMMMNI)
		shminfo.shmmni = SHMMMNI;
	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = 0;
		shmsegs[i].shm_perm.seq = 0;
	}
}

/*
 * Entry point for all SHM calls
 */
shmsys(p, uap, retval)
	struct proc *p;
	struct args {
		u_int which;
	} *uap;
	int *retval;
{

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return (EINVAL);
	return ((*shmcalls[uap->which])(p, &uap[1], retval));
}
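
/*
 * The index in shmcalls[] above determines the ``which'' code a
 * user-level stub must pass to shmsys(): 0 = shmat, 1 = shmctl,
 * 2 = shmdt, 3 = shmget.  The remaining arguments follow ``which''
 * in the argument area, which is why the selected handler is simply
 * handed &uap[1].  (A hypothetical user-level shmget stub, for
 * example, would issue shmsys(3, key, size, shmflg).)
 */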

/*
 * Get a shared memory segment
 */
shmget(p, uap, retval)
	struct proc *p;
	register struct args {
		key_t key;
		int size;
		int shmflg;
	} *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = p->p_ucred;
	register int i;
	int error, size, rval = 0;
	register struct shmhandle *shmh;

	/* look up the specified shm_id */
	if (uap->key != IPC_PRIVATE) {
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) &&
			    shmsegs[i].shm_perm.key == uap->key) {
				rval = i;
				break;
			}
	} else
		i = shminfo.shmmni;

	/* create a new shared segment if necessary */
	if (i == shminfo.shmmni) {
		if ((uap->shmflg & IPC_CREAT) == 0)
			return (ENOENT);
		if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
			return (EINVAL);
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) == 0) {
				rval = i;
				break;
			}
		if (i == shminfo.shmmni)
			return (ENOSPC);
		size = clrnd(btoc(uap->size));
		if (shmtot + size > shminfo.shmall)
			return (ENOMEM);
		shp = &shmsegs[rval];
		/*
		 * We need to do a couple of things to ensure consistency
		 * in case we sleep in malloc().  We mark the segment as
		 * allocated so that other shmgets() will not allocate it.
		 * We mark it as "destroyed" to ensure that shmvalid() fails,
		 * making most operations fail (XXX).  We set the key,
		 * so that other shmget()s will fail.
		 */
		shp->shm_perm.mode = SHM_ALLOC | SHM_DEST;
		shp->shm_perm.key = uap->key;
		shmh = (struct shmhandle *)
			malloc(sizeof(struct shmhandle), M_SHM, M_WAITOK);
		shmh->shmh_kva = 0;
		shmh->shmh_id = (caddr_t)(0xc0000000|rval);	/* XXX */
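		/*
		 * The fabricated id above is handed to vm_mmap() as the
		 * MAP_ANON handle here and again in shmat(); mappings made
		 * with the same handle presumably share one underlying
		 * anonymous object, which is what makes the segment shared.
		 */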
		error = vm_mmap(shm_map, &shmh->shmh_kva, ctob(size),
				VM_PROT_ALL, MAP_ANON, shmh->shmh_id, 0);
		if (error) {
			free((caddr_t)shmh, M_SHM);
			shp->shm_perm.mode = 0;
			return(ENOMEM);
		}
		shp->shm_handle = (void *) shmh;
		shmtot += size;
		shp->shm_perm.cuid = shp->shm_perm.uid = cred->cr_uid;
		shp->shm_perm.cgid = shp->shm_perm.gid = cred->cr_gid;
		shp->shm_perm.mode = SHM_ALLOC | (uap->shmflg&0777);
		shp->shm_segsz = uap->size;
		shp->shm_cpid = p->p_pid;
		shp->shm_lpid = shp->shm_nattch = 0;
		shp->shm_atime = shp->shm_dtime = 0;
		shp->shm_ctime = time.tv_sec;
	} else {
		shp = &shmsegs[rval];
		/* XXX: probably not the right thing to do */
		if (shp->shm_perm.mode & SHM_DEST)
			return (EBUSY);
		if (error = ipcaccess(&shp->shm_perm, uap->shmflg&0777, cred))
			return (error);
		if (uap->size && uap->size > shp->shm_segsz)
			return (EINVAL);
		if ((uap->shmflg&IPC_CREAT) && (uap->shmflg&IPC_EXCL))
			return (EEXIST);
	}
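	/*
	 * The returned shmid encodes both the slot index (low part,
	 * modulo SHMMMNI) and the slot's sequence number (high part);
	 * shmvalid() later checks that the sequence number still matches,
	 * so stale ids are rejected after a slot is reused.
	 */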
	*retval = shp->shm_perm.seq * SHMMMNI + rval;
	return (0);
}

/*
 * Shared memory control
 */
/* ARGSUSED */
shmctl(p, uap, retval)
	struct proc *p;
	register struct args {
		int shmid;
		int cmd;
		caddr_t buf;
	} *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = p->p_ucred;
	struct shmid_ds sbuf;
	int error;

	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	switch (uap->cmd) {
	case IPC_STAT:
		if (error = ipcaccess(&shp->shm_perm, IPC_R, cred))
			return (error);
		return (copyout((caddr_t)shp, uap->buf, sizeof(*shp)));

	case IPC_SET:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		if (error = copyin(uap->buf, (caddr_t)&sbuf, sizeof sbuf))
			return (error);
		shp->shm_perm.uid = sbuf.shm_perm.uid;
		shp->shm_perm.gid = sbuf.shm_perm.gid;
		shp->shm_perm.mode = (shp->shm_perm.mode & ~0777)
			| (sbuf.shm_perm.mode & 0777);
		shp->shm_ctime = time.tv_sec;
		break;

	case IPC_RMID:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		/* set ctime? */
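		/*
		 * Removal is deferred: mark the segment SHM_DEST and make
		 * its key private so no new shmget() can find it; if nothing
		 * is attached it is freed right away, otherwise shmufree()
		 * frees it when the last attachment goes away.
		 */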
		shp->shm_perm.key = IPC_PRIVATE;
		shp->shm_perm.mode |= SHM_DEST;
		if (shp->shm_nattch <= 0)
			shmfree(shp);
		break;

#ifdef HPUXCOMPAT
	case SHM_LOCK:
	case SHM_UNLOCK:
		/* don't really do anything, but make them think we did */
		if ((p->p_flag & SHPUX) == 0)
			return (EINVAL);
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		break;
#endif

	default:
		return (EINVAL);
	}
	return (0);
}

/*
 * Attach to shared memory segment.
 */
shmat(p, uap, retval)
	struct proc *p;
	register struct args {
		int	shmid;
		caddr_t	shmaddr;
		int	shmflg;
	} *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register int size;
	caddr_t uva;
	int error;
	int flags;
	vm_prot_t prot;
	struct shmdesc *shmd;

	/*
	 * Allocate descriptors now (before validity check)
	 * in case malloc() blocks.
	 */
	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	size = shminfo.shmseg * sizeof(struct shmdesc);
	if (shmd == NULL) {
		shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
		bzero((caddr_t)shmd, size);
		p->p_vmspace->vm_shm = (caddr_t)shmd;
	}
	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	if (shp->shm_handle == NULL)
		panic("shmat NULL handle");
	if (error = ipcaccess(&shp->shm_perm,
	    (uap->shmflg&SHM_RDONLY) ? IPC_R : IPC_R|IPC_W, p->p_ucred))
		return (error);
	uva = uap->shmaddr;
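	/*
	 * A caller-supplied address must be aligned to SHMLBA bytes;
	 * with SHM_RND we round it down to the nearest boundary,
	 * otherwise an unaligned address is an error.
	 */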
	if (uva && ((int)uva & (SHMLBA-1))) {
		if (uap->shmflg & SHM_RND)
			uva = (caddr_t) ((int)uva & ~(SHMLBA-1));
		else
			return (EINVAL);
	}
	/*
	 * Make sure user doesn't use more than their fair share
	 */
	for (size = 0; size < shminfo.shmseg; size++) {
		if (shmd->shmd_uva == 0)
			break;
		shmd++;
	}
	if (size >= shminfo.shmseg)
		return (EMFILE);
	size = ctob(clrnd(btoc(shp->shm_segsz)));
	prot = VM_PROT_READ;
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON|MAP_SHARED;
	if (uva)
		flags |= MAP_FIXED;
	else
		uva = (caddr_t)0x1000000;	/* XXX */
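	/*
	 * With no address given we do not use MAP_FIXED; the hard-wired
	 * 16MB value above is presumably just a starting hint for
	 * vm_mmap() to find free space in the process map (XXX).
	 */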
	error = vm_mmap(p->p_vmspace->vm_map, &uva, (vm_size_t)size, prot,
	    flags, ((struct shmhandle *)shp->shm_handle)->shmh_id, 0);
	if (error)
		return(error);
	shmd->shmd_uva = (vm_offset_t)uva;
	shmd->shmd_id = uap->shmid;
	/*
	 * Fill in the remaining fields
	 */
	shp->shm_lpid = p->p_pid;
	shp->shm_atime = time.tv_sec;
	shp->shm_nattch++;
	*retval = (int) uva;
	return (0);
}

/*
 * Detach from shared memory segment.
 */
/* ARGSUSED */
shmdt(p, uap, retval)
	struct proc *p;
	struct args {
		caddr_t	shmaddr;
	} *uap;
	int *retval;
{
	register struct shmdesc *shmd;
	register int i;

	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmd++)
		if (shmd->shmd_uva &&
		    shmd->shmd_uva == (vm_offset_t)uap->shmaddr)
			break;
	if (i == shminfo.shmseg)
		return(EINVAL);
	/* record the pid before shmufree() clears shmd_id */
	shmsegs[shmd->shmd_id % SHMMMNI].shm_lpid = p->p_pid;
	shmufree(p, shmd);
	return (0);
}

shmfork(p1, p2, isvfork)
	struct proc *p1, *p2;
	int isvfork;
{
	register struct shmdesc *shmd;
	register int size;

	/*
	 * Copy the parent's descriptive information
	 */
	size = shminfo.shmseg * sizeof(struct shmdesc);
	shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmd, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmd;
	/*
	 * Increment reference counts
	 */
	for (size = 0; size < shminfo.shmseg; size++, shmd++)
		if (shmd->shmd_uva)
			shmsegs[shmd->shmd_id % SHMMMNI].shm_nattch++;
}

shmexit(p)
	struct proc *p;
{
	register struct shmdesc *shmd;
	register int i;

	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmd++)
		if (shmd->shmd_uva)
			shmufree(p, shmd);
	free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
	p->p_vmspace->vm_shm = NULL;
}

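/*
 * Check that a user-supplied shmid refers to a live segment:
 * the low part selects the slot, the high part must match that
 * slot's sequence number, and the slot must be allocated and not
 * marked for destruction.
 */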
shmvalid(id)
	register int id;
{
	register struct shmid_ds *shp;

	if (id < 0 || (id % SHMMMNI) >= shminfo.shmmni)
		return(EINVAL);
	shp = &shmsegs[id % SHMMMNI];
	if (shp->shm_perm.seq == (id / SHMMMNI) &&
	    (shp->shm_perm.mode & (SHM_ALLOC|SHM_DEST)) == SHM_ALLOC)
		return(0);
	return(EINVAL);
}

/*
 * Free user resources associated with a shared memory segment
 */
shmufree(p, shmd)
	struct proc *p;
	struct shmdesc *shmd;
{
	register struct shmid_ds *shp;

	shp = &shmsegs[shmd->shmd_id % SHMMMNI];
	(void) vm_deallocate(p->p_vmspace->vm_map, shmd->shmd_uva,
			     ctob(clrnd(btoc(shp->shm_segsz))));
	shmd->shmd_id = 0;
	shmd->shmd_uva = 0;
	shp->shm_dtime = time.tv_sec;
	if (--shp->shm_nattch <= 0 && (shp->shm_perm.mode & SHM_DEST))
		shmfree(shp);
}

/*
 * Deallocate resources associated with a shared memory segment
 */
shmfree(shp)
	register struct shmid_ds *shp;
{
	if (shp->shm_handle == NULL)
		panic("shmfree");
	/*
	 * Lose our lingering object reference by deallocating space
	 * in kernel.  Pager will also be deallocated as a side-effect.
	 */
	vm_deallocate(shm_map,
		      ((struct shmhandle *)shp->shm_handle)->shmh_kva,
		      ctob(clrnd(btoc(shp->shm_segsz))));
	free((caddr_t)shp->shm_handle, M_SHM);
	shp->shm_handle = NULL;
	shmtot -= clrnd(btoc(shp->shm_segsz));
	shp->shm_perm.mode = 0;
	/*
	 * Increment the sequence number to ensure that outstanding
	 * shmids for this segment will be invalid in the event that
	 * the segment is reallocated.  Note that shmids must be
	 * positive as decreed by SVID.
	 */
	shp->shm_perm.seq++;
	if ((int)(shp->shm_perm.seq * SHMMMNI) < 0)
		shp->shm_perm.seq = 0;
}

/*
 * XXX This routine would be common to all sysV style IPC
 *     (if the others were implemented).
 */
ipcaccess(ipc, mode, cred)
	register struct ipc_perm *ipc;
	int mode;
	register struct ucred *cred;
{
	register int m;

	if (cred->cr_uid == 0)
		return(0);
	/*
	 * Access check is based on only one of owner, group, public.
	 * If not owner, then check group.
	 * If not a member of the group, then check public access.
	 */
	mode &= 0700;
	m = ipc->mode;
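	/*
	 * The requested bits are kept in the owner (0700) position;
	 * each ``m <<= 3'' below promotes the segment's group and then
	 * world permission bits into that position for the final test.
	 */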
	if (cred->cr_uid != ipc->uid && cred->cr_uid != ipc->cuid) {
		m <<= 3;
		if (!groupmember(ipc->gid, cred) &&
		    !groupmember(ipc->cgid, cred))
			m <<= 3;
	}
	if ((mode&m) == mode)
		return (0);
	return (EACCES);
}
#endif /* SYSVSHM */