xref: /original-bsd/sys/kern/sysv_shm.c (revision 1ff91bf0)
1 /*
2  * Copyright (c) 1988 University of Utah.
3  * Copyright (c) 1990 The Regents of the University of California.
4  * All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * the Systems Programming Group of the University of Utah Computer
8  * Science Department. Originally from University of Wisconsin.
9  *
10  * %sccs.include.redist.c%
11  *
12  * from: Utah $Hdr: uipc_shm.c 1.9 89/08/14$
13  *
14  *	@(#)sysv_shm.c	7.13 (Berkeley) 04/20/91
15  */
16 
17 /*
18  * System V shared memory routines.
19  * TEMPORARY, until mmap is in place;
20  * needed now for HP-UX compatibility and X server (yech!).
21  */
22 
23 #ifdef SYSVSHM
24 
25 #include "param.h"
26 #include "systm.h"
27 #include "kernel.h"
28 #include "proc.h"
29 #include "shm.h"
30 #include "malloc.h"
31 #include "mman.h"
32 #include "vm/vm.h"
33 #include "vm/vm_kern.h"
34 #include "vm/vm_inherit.h"
35 #include "vm/vm_pager.h"
36 
37 #ifdef HPUXCOMPAT
38 #include "hp300/hpux/hpux.h"
39 #endif
40 
41 int	shmat(), shmctl(), shmdt(), shmget();
/*
 * Dispatch table for shmsys().  The index order here defines the
 * encoding of the ``which'' argument — do not reorder.
 */
42 int	(*shmcalls[])() = { shmat, shmctl, shmdt, shmget };
/* Total clicks currently allocated to all segments; bounded by shminfo.shmall. */
43 int	shmtot = 0;
44 
45 /*
46  * Per process internal structure for managing segments.
47  * Each process using shm will have an array of ``shmseg'' of these.
48  */
49 struct	shmdesc {
	/* User virtual address of the attachment; 0 marks a free slot. */
50 	vm_offset_t	shmd_uva;
	/* The shmid (seq * SHMMMNI + slot) this slot is attached to. */
51 	int		shmd_id;
52 };
53 
54 /*
55  * Per segment internal structure (shm_handle).
56  */
57 struct	shmhandle {
	/* Kernel virtual address of the segment within shm_map. */
58 	vm_offset_t	shmh_kva;
	/* Opaque pager id handed to vm_mmap() (0xc0000000 | slot — XXX). */
59 	caddr_t		shmh_id;
60 };
61 
62 vm_map_t shm_map;	/* address space for shared memory segments */
63 
/*
 * One-time initialization of the SysV shared memory subsystem:
 * carve a submap of the kernel map big enough for shminfo.shmall
 * pages of segment data and mark every shmsegs[] slot free.
 */
64 shminit()
65 {
66 	register int i;
67 	vm_offset_t whocares1, whocares2;
68 
	/* The submap's start/end addresses are not needed after this call. */
69 	shm_map = kmem_suballoc(kernel_map, &whocares1, &whocares2,
70 				shminfo.shmall * NBPG, FALSE);
	/* Never allow more segments than the statically-sized shmsegs[] table. */
71 	if (shminfo.shmmni > SHMMMNI)
72 		shminfo.shmmni = SHMMMNI;
	/* mode 0 marks a slot free; seq 0 starts the id generation counter. */
73 	for (i = 0; i < shminfo.shmmni; i++) {
74 		shmsegs[i].shm_perm.mode = 0;
75 		shmsegs[i].shm_perm.seq = 0;
76 	}
77 }
78 
79 /*
80  * Entry point for all SHM calls
 *
 * uap->which selects one of shmat/shmctl/shmdt/shmget (in the order
 * they appear in shmcalls[]).  The selected call's own arguments
 * follow ``which'' in the syscall argument block, hence the &uap[1]
 * pointer arithmetic below.
81  */
82 shmsys(p, uap, retval)
83 	struct proc *p;
84 	struct args {
85 		u_int which;
86 	} *uap;
87 	int *retval;
88 {
89 
	/* which is unsigned, so a single upper-bound check suffices. */
90 	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
91 		return (EINVAL);
92 	return ((*shmcalls[uap->which])(p, &uap[1], retval));
93 }
94 
95 /*
96  * Get a shared memory segment
 *
 * Looks up an existing segment by key, or creates a new one when the
 * key is not found (IPC_CREAT) or is IPC_PRIVATE.  On success the
 * shmid — shm_perm.seq * SHMMMNI + slot — is returned via *retval.
97  */
98 shmget(p, uap, retval)
99 	struct proc *p;
100 	register struct args {
101 		key_t key;
102 		int size;
103 		int shmflg;
104 	} *uap;
105 	int *retval;
106 {
107 	register struct shmid_ds *shp;
108 	register struct ucred *cred = p->p_ucred;
109 	register int i;
110 	int error, size, rval = 0;
111 	register struct shmhandle *shmh;
112 
113 	/* look up the specified shm_id */
114 	if (uap->key != IPC_PRIVATE) {
115 		for (i = 0; i < shminfo.shmmni; i++)
116 			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) &&
117 			    shmsegs[i].shm_perm.key == uap->key) {
118 				rval = i;
119 				break;
120 			}
121 	} else
		/* IPC_PRIVATE always creates: force the ``not found'' case. */
122 		i = shminfo.shmmni;
123 
124 	/* create a new shared segment if necessary */
125 	if (i == shminfo.shmmni) {
126 		if ((uap->shmflg & IPC_CREAT) == 0)
127 			return (ENOENT);
128 		if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
129 			return (EINVAL);
		/* Find a free slot in the segment table. */
130 		for (i = 0; i < shminfo.shmmni; i++)
131 			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) == 0) {
132 				rval = i;
133 				break;
134 			}
135 		if (i == shminfo.shmmni)
136 			return (ENOSPC);
		/* size is in clicks from here on, matching shmtot/shmall. */
137 		size = clrnd(btoc(uap->size));
138 		if (shmtot + size > shminfo.shmall)
139 			return (ENOMEM);
140 		shp = &shmsegs[rval];
141 		/*
142 		 * We need to do a couple of things to ensure consistency
143 		 * in case we sleep in malloc().  We mark segment as
144 		 * allocated so that other shmgets() will not allocate it.
145 		 * We mark it as "destroyed" to insure that shmvalid() is
146 		 * false making most operations fail (XXX).  We set the key,
147 		 * so that other shmget()s will fail.
148 		 */
149 		shp->shm_perm.mode = SHM_ALLOC | SHM_DEST;
150 		shp->shm_perm.key = uap->key;
151 		shmh = (struct shmhandle *)
152 			malloc(sizeof(struct shmhandle), M_SHM, M_WAITOK);
153 		shmh->shmh_kva = 0;
		/* Fabricated pager id: high bits tag it, low bits hold the slot. */
154 		shmh->shmh_id = (caddr_t)(0xc0000000|rval);	/* XXX */
		/* Map the segment into the kernel so it has a backing object. */
155 		error = vm_mmap(shm_map, &shmh->shmh_kva, ctob(size),
156 				VM_PROT_ALL, MAP_ANON, shmh->shmh_id, 0);
157 		if (error) {
158 			free((caddr_t)shmh, M_SHM);
			/* Roll back the reservation made above. */
159 			shp->shm_perm.mode = 0;
160 			return(ENOMEM);
161 		}
162 		shp->shm_handle = (void *) shmh;
163 		shmtot += size;
164 		shp->shm_perm.cuid = shp->shm_perm.uid = cred->cr_uid;
165 		shp->shm_perm.cgid = shp->shm_perm.gid = cred->cr_gid;
		/* Clearing SHM_DEST here makes the segment valid/visible. */
166 		shp->shm_perm.mode = SHM_ALLOC | (uap->shmflg&0777);
167 		shp->shm_segsz = uap->size;
168 		shp->shm_cpid = p->p_pid;
169 		shp->shm_lpid = shp->shm_nattch = 0;
170 		shp->shm_atime = shp->shm_dtime = 0;
171 		shp->shm_ctime = time.tv_sec;
172 	} else {
		/* Key matched an existing segment: validate access to it. */
173 		shp = &shmsegs[rval];
174 		/* XXX: probably not the right thing to do */
175 		if (shp->shm_perm.mode & SHM_DEST)
176 			return (EBUSY);
177 		if (error = ipcaccess(&shp->shm_perm, uap->shmflg&0777, cred))
178 			return (error);
179 		if (uap->size && uap->size > shp->shm_segsz)
180 			return (EINVAL);
181 		if ((uap->shmflg&IPC_CREAT) && (uap->shmflg&IPC_EXCL))
182 			return (EEXIST);
183 	}
	/* Encode generation and slot into the returned shmid. */
184 	*retval = shp->shm_perm.seq * SHMMMNI + rval;
185 	return (0);
186 }
187 
188 /*
189  * Shared memory control
 *
 * Implements IPC_STAT, IPC_SET and IPC_RMID (plus no-op SHM_LOCK /
 * SHM_UNLOCK for HP-UX binaries).  retval is unused; results go
 * through uap->buf or segment state.
190  */
191 /* ARGSUSED */
192 shmctl(p, uap, retval)
193 	struct proc *p;
194 	register struct args {
195 		int shmid;
196 		int cmd;
197 		caddr_t buf;
198 	} *uap;
199 	int *retval;
200 {
201 	register struct shmid_ds *shp;
202 	register struct ucred *cred = p->p_ucred;
203 	struct shmid_ds sbuf;
204 	int error;
205 
206 	if (error = shmvalid(uap->shmid))
207 		return (error);
208 	shp = &shmsegs[uap->shmid % SHMMMNI];
209 	switch (uap->cmd) {
210 	case IPC_STAT:
211 		if (error = ipcaccess(&shp->shm_perm, IPC_R, cred))
212 			return (error);
		/*
		 * NOTE(review): this copies the entire in-kernel shmid_ds,
		 * including the shm_handle kernel pointer, to user space —
		 * a (historical) kernel address leak.
		 */
213 		return (copyout((caddr_t)shp, uap->buf, sizeof(*shp)));
214 
215 	case IPC_SET:
		/* Only root, the owner, or the creator may change it. */
216 		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
217 		    cred->cr_uid != shp->shm_perm.cuid)
218 			return (EPERM);
219 		if (error = copyin(uap->buf, (caddr_t)&sbuf, sizeof sbuf))
220 			return (error);
		/* Only uid, gid and the permission bits are settable. */
221 		shp->shm_perm.uid = sbuf.shm_perm.uid;
222 		shp->shm_perm.gid = sbuf.shm_perm.gid;
223 		shp->shm_perm.mode = (shp->shm_perm.mode & ~0777)
224 			| (sbuf.shm_perm.mode & 0777);
225 		shp->shm_ctime = time.tv_sec;
226 		break;
227 
228 	case IPC_RMID:
229 		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
230 		    cred->cr_uid != shp->shm_perm.cuid)
231 			return (EPERM);
232 		/* set ctime? */
		/*
		 * Hide the key and mark the segment for destruction; the
		 * storage is freed now if nothing is attached, otherwise
		 * on the last detach (see shmufree()).
		 */
233 		shp->shm_perm.key = IPC_PRIVATE;
234 		shp->shm_perm.mode |= SHM_DEST;
235 		if (shp->shm_nattch <= 0)
236 			shmfree(shp);
237 		break;
238 
239 #ifdef HPUXCOMPAT
240 	case SHM_LOCK:
241 	case SHM_UNLOCK:
242 		/* don't really do anything, but make them think we did */
243 		if ((p->p_flag & SHPUX) == 0)
244 			return (EINVAL);
245 		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
246 		    cred->cr_uid != shp->shm_perm.cuid)
247 			return (EPERM);
248 		break;
249 #endif
250 
251 	default:
252 		return (EINVAL);
253 	}
254 	return (0);
255 }
256 
257 /*
258  * Attach to shared memory segment.
 *
 * Maps the segment into the calling process at uap->shmaddr (or a
 * kernel-chosen address when NULL) and records the attachment in the
 * per-process descriptor array.  The attach address is returned via
 * *retval.
259  */
260 shmat(p, uap, retval)
261 	struct proc *p;
262 	register struct args {
263 		int	shmid;
264 		caddr_t	shmaddr;
265 		int	shmflg;
266 	} *uap;
267 	int *retval;
268 {
269 	register struct shmid_ds *shp;
270 	register int size;
271 	caddr_t uva;
272 	int error;
273 	int flags;
274 	vm_prot_t prot;
275 	struct shmdesc *shmd;
276 
277 	/*
278 	 * Allocate descriptors now (before validity check)
279 	 * in case malloc() blocks.
280 	 */
281 	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
282 	size = shminfo.shmseg * sizeof(struct shmdesc);
283 	if (shmd == NULL) {
284 		shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
285 		bzero((caddr_t)shmd, size);
286 		p->p_vmspace->vm_shm = (caddr_t)shmd;
287 	}
288 	if (error = shmvalid(uap->shmid))
289 		return (error);
290 	shp = &shmsegs[uap->shmid % SHMMMNI];
291 	if (shp->shm_handle == NULL)
292 		panic("shmat NULL handle");
	/* Read-only attach needs IPC_R; read/write needs IPC_R|IPC_W. */
293 	if (error = ipcaccess(&shp->shm_perm,
294 	    (uap->shmflg&SHM_RDONLY) ? IPC_R : IPC_R|IPC_W, p->p_ucred))
295 		return (error);
296 	uva = uap->shmaddr;
	/* A caller-supplied address must be SHMLBA-aligned unless SHM_RND. */
297 	if (uva && ((int)uva & (SHMLBA-1))) {
298 		if (uap->shmflg & SHM_RND)
299 			uva = (caddr_t) ((int)uva & ~(SHMLBA-1));
300 		else
301 			return (EINVAL);
302 	}
303 	/*
304 	 * Make sure user doesn't use more than their fair share
	 * (``size'' is reused here as the free-descriptor index).
305 	 */
306 	for (size = 0; size < shminfo.shmseg; size++) {
307 		if (shmd->shmd_uva == 0)
308 			break;
309 		shmd++;
310 	}
311 	if (size >= shminfo.shmseg)
312 		return (EMFILE);
	/* From here ``size'' is the page-rounded segment size in bytes. */
313 	size = ctob(clrnd(btoc(shp->shm_segsz)));
314 	prot = VM_PROT_READ;
315 	if ((uap->shmflg & SHM_RDONLY) == 0)
316 		prot |= VM_PROT_WRITE;
317 	flags = MAP_ANON|MAP_SHARED;
318 	if (uva)
319 		flags |= MAP_FIXED;
320 	else
		/* Arbitrary hint where the kernel starts looking. */
321 		uva = (caddr_t)0x1000000;	/* XXX */
	/* The pager id ties this mapping to the kernel copy made in shmget(). */
322 	error = vm_mmap(p->p_vmspace->vm_map, &uva, (vm_size_t)size, prot,
323 	    flags, ((struct shmhandle *)shp->shm_handle)->shmh_id, 0);
324 	if (error)
325 		return(error);
326 	shmd->shmd_uva = (vm_offset_t)uva;
327 	shmd->shmd_id = uap->shmid;
328 	/*
329 	 * Fill in the remaining fields
330 	 */
331 	shp->shm_lpid = p->p_pid;
332 	shp->shm_atime = time.tv_sec;
333 	shp->shm_nattch++;
334 	*retval = (int) uva;
335 	return (0);
336 }
337 
338 /*
339  * Detach from shared memory segment.
 *
 * Finds the attachment whose user address matches uap->shmaddr,
 * unmaps it via shmufree(), and records the caller as the last pid
 * to operate on the segment.  Returns EINVAL if no such attachment.
340  */
341 /* ARGSUSED */
342 shmdt(p, uap, retval)
343 	struct proc *p;
344 	struct args {
345 		caddr_t	shmaddr;
346 	} *uap;
347 	int *retval;
348 {
349 	register struct shmdesc *shmd;
350 	register int i;
	int id;
351 
352 	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	/* No descriptor array means this process never attached anything. */
	if (shmd == NULL)
		return(EINVAL);
353 	for (i = 0; i < shminfo.shmseg; i++, shmd++)
354 		if (shmd->shmd_uva &&
355 		    shmd->shmd_uva == (vm_offset_t)uap->shmaddr)
356 			break;
357 	if (i == shminfo.shmseg)
358 		return(EINVAL);
	/*
	 * shmufree() clears the descriptor (zeroing shmd_id), so save
	 * the segment id first.  The previous code indexed shmsegs[]
	 * with the already-cleared id and always stamped shm_lpid on
	 * segment 0.
	 */
	id = shmd->shmd_id;
359 	shmufree(p, shmd);
360 	shmsegs[id % SHMMMNI].shm_lpid = p->p_pid;
	return (0);
361 }
362 
/*
 * Duplicate the parent's shm attachment state into the child at fork
 * time: copy the descriptor array and bump each attached segment's
 * reference count.  ``isvfork'' is accepted but unused here.
 *
 * NOTE(review): the bcopy() assumes p1->p_vmspace->vm_shm is non-NULL;
 * confirm the caller only invokes shmfork() when the parent has a
 * descriptor array.
 */
363 shmfork(p1, p2, isvfork)
364 	struct proc *p1, *p2;
365 	int isvfork;
366 {
367 	register struct shmdesc *shmd;
368 	register int size;
369 
370 	/*
371 	 * Copy parents descriptive information
372 	 */
373 	size = shminfo.shmseg * sizeof(struct shmdesc);
374 	shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
375 	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmd, size);
376 	p2->p_vmspace->vm_shm = (caddr_t)shmd;
377 	/*
378 	 * Increment reference counts
	 * (``size'' is reused as the loop index here).
379 	 */
380 	for (size = 0; size < shminfo.shmseg; size++, shmd++)
381 		if (shmd->shmd_uva)
382 			shmsegs[shmd->shmd_id % SHMMMNI].shm_nattch++;
383 }
384 
/*
 * Process-exit cleanup: detach every remaining shm attachment and
 * release the per-process descriptor array.
 *
 * NOTE(review): assumes p->p_vmspace->vm_shm is non-NULL; confirm the
 * caller checks vm_shm before invoking shmexit().
 */
385 shmexit(p)
386 	struct proc *p;
387 {
388 	register struct shmdesc *shmd;
389 	register int i;
390 
391 	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
392 	for (i = 0; i < shminfo.shmseg; i++, shmd++)
393 		if (shmd->shmd_uva)
394 			shmufree(p, shmd);
395 	free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
396 	p->p_vmspace->vm_shm = NULL;
397 }
398 
/*
 * Validate a shmid.  An id encodes generation and slot as
 * seq * SHMMMNI + slot; it is valid only if the slot is in range,
 * currently allocated, not marked for destruction, and its sequence
 * number matches (so ids from a freed-and-reused slot are rejected).
 * Returns 0 if valid, EINVAL otherwise.
 */
399 shmvalid(id)
400 	register int id;
401 {
402 	register struct shmid_ds *shp;
403 
404 	if (id < 0 || (id % SHMMMNI) >= shminfo.shmmni)
405 		return(EINVAL);
406 	shp = &shmsegs[id % SHMMMNI];
407 	if (shp->shm_perm.seq == (id / SHMMMNI) &&
408 	    (shp->shm_perm.mode & (SHM_ALLOC|SHM_DEST)) == SHM_ALLOC)
409 		return(0);
410 	return(EINVAL);
411 }
412 
413 /*
414  * Free user resources associated with a shared memory segment
 *
 * Unmaps the attachment from the process, clears the descriptor for
 * reuse (callers must not read shmd_id afterwards), and — if this was
 * the last attachment of a segment marked SHM_DEST — destroys the
 * segment itself.
415  */
416 shmufree(p, shmd)
417 	struct proc *p;
418 	struct shmdesc *shmd;
419 {
420 	register struct shmid_ds *shp;
421 
422 	shp = &shmsegs[shmd->shmd_id % SHMMMNI];
	/* Remove the page-rounded mapping from the user address space. */
423 	(void) vm_deallocate(p->p_vmspace->vm_map, shmd->shmd_uva,
424 			     ctob(clrnd(btoc(shp->shm_segsz))));
	/* Mark the descriptor slot free. */
425 	shmd->shmd_id = 0;
426 	shmd->shmd_uva = 0;
427 	shp->shm_dtime = time.tv_sec;
	/* Deferred destruction from IPC_RMID happens on the last detach. */
428 	if (--shp->shm_nattch <= 0 && (shp->shm_perm.mode & SHM_DEST))
429 		shmfree(shp);
430 }
431 
432 /*
433  * Deallocate resources associated with a shared memory segment
 * and make its shmsegs[] slot reusable.  Callers guarantee the
 * segment has no remaining attachments.
434  */
435 shmfree(shp)
436 	register struct shmid_ds *shp;
437 {
439 
440 	if (shp->shm_handle == NULL)
441 		panic("shmfree");
442 	/*
443 	 * Lose our lingering object reference by deallocating space
444 	 * in kernel.  Pager will also be deallocated as a side-effect.
	 *
	 * vm_deallocate() takes a length in bytes, so the page-rounded
	 * click count must be converted with ctob() — exactly as
	 * shmufree() does for the user mapping.  The previous code
	 * passed the raw click count and deallocated too little.
445 	 */
446 	vm_deallocate(shm_map,
447 		      ((struct shmhandle *)shp->shm_handle)->shmh_kva,
448 		      ctob(clrnd(btoc(shp->shm_segsz))));
449 	free((caddr_t)shp->shm_handle, M_SHM);
450 	shp->shm_handle = NULL;
	/* shmtot accounting stays in clicks, matching the shmget() check. */
451 	shmtot -= clrnd(btoc(shp->shm_segsz));
452 	shp->shm_perm.mode = 0;
453 	/*
454 	 * Increment the sequence number to ensure that outstanding
455 	 * shmids for this segment will be invalid in the event that
456 	 * the segment is reallocated.  Note that shmids must be
457 	 * positive as decreed by SVID.
458 	 */
459 	shp->shm_perm.seq++;
460 	if ((int)(shp->shm_perm.seq * SHMMMNI) < 0)
461 		shp->shm_perm.seq = 0;
462 }
463 
464 /*
465  * XXX This routine would be common to all sysV style IPC
466  *     (if the others were implemented).
 *
 * Check the requested access ``mode'' (expressed in the owner bits,
 * e.g. IPC_R|IPC_W) against the segment's permission word for the
 * given credentials.  Root always passes.  Returns 0 on success,
 * EACCES on failure.
467  */
468 ipcaccess(ipc, mode, cred)
469 	register struct ipc_perm *ipc;
470 	int mode;
471 	register struct ucred *cred;
472 {
473 	register int m;
474 
	/* Superuser bypasses all permission checks. */
475 	if (cred->cr_uid == 0)
476 		return(0);
477 	/*
478 	 * Access check is based on only one of owner, group, public.
479 	 * If not owner, then check group.
480 	 * If not a member of the group, then check public access.
481 	 */
	/* Keep only the owner-position bits of the request... */
482 	mode &= 0700;
483 	m = ipc->mode;
	/*
	 * ...then shift the stored permissions so the relevant class
	 * (group after one shift, other after two) lands in the owner
	 * position for the comparison below.
	 */
484 	if (cred->cr_uid != ipc->uid && cred->cr_uid != ipc->cuid) {
485 		m <<= 3;
486 		if (!groupmember(ipc->gid, cred) &&
487 		    !groupmember(ipc->cgid, cred))
488 			m <<= 3;
489 	}
	/* Every requested bit must be granted. */
490 	if ((mode&m) == mode)
491 		return (0);
492 	return (EACCES);
493 }
494 #endif /* SYSVSHM */
495