xref: /original-bsd/sys/kern/sysv_shm.c (revision 7a38d872)
/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department. Originally from University of Wisconsin.
 *
 * %sccs.include.proprietary.c%
 *
 * from: Utah $Hdr: uipc_shm.c 1.11 92/04/23$
 *
 *	@(#)sysv_shm.c	8.5 (Berkeley) 01/12/94
 */

/*
 * System V shared memory routines.
 * TEMPORARY, until mmap is in place;
 * needed now for HP-UX compatibility and X server (yech!).
 */

#ifdef SYSVSHM

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/shm.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_inherit.h>
#include <vm/vm_pager.h>

int	shmat(), shmctl(), shmdt(), shmget();
int	(*shmcalls[])() = { shmat, shmctl, shmdt, shmget };
int	shmtot = 0;

/*
 * Per-process internal structure for managing segments.
 * Each process using shm has an array of shminfo.shmseg of these,
 * hung off vm_shm in its vmspace.
 */
struct	shmdesc {
	vm_offset_t	shmd_uva;
	int		shmd_id;
};

/*
 * Per segment internal structure (shm_handle).
 */
struct	shmhandle {
	vm_offset_t	shmh_kva;
	caddr_t		shmh_id;
};

vm_map_t shm_map;	/* address space for shared memory segments */

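/*
 * One-time initialization, called from system startup code: carve
 * shm_map out of kernel_map as a submap big enough for shminfo.shmall
 * pages of shared memory, clamp shmmni to the compile-time table size
 * SHMMMNI, and mark every segment slot free.
 */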
shminit()
{
	register int i;
	vm_offset_t whocares1, whocares2;

	shm_map = kmem_suballoc(kernel_map, &whocares1, &whocares2,
				shminfo.shmall * NBPG, TRUE);
	if (shminfo.shmmni > SHMMMNI)
		shminfo.shmmni = SHMMMNI;
	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = 0;
		shmsegs[i].shm_perm.seq = 0;
	}
}

/*
 * Entry point for all SHM calls
 */
struct shmsys_args {
	u_int which;
};
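/*
 * shmsys() demultiplexes the four operations through shmcalls[] using
 * the first user argument, ``which''.  The remaining user arguments
 * follow it in the argument block, so &uap[1] is handed on as the
 * sub-call's own argument structure.
 */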
shmsys(p, uap, retval)
	struct proc *p;
	struct shmsys_args *uap;
	int *retval;
{

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return (EINVAL);
	return ((*shmcalls[uap->which])(p, &uap[1], retval));
}

/*
 * Get a shared memory segment
 */
struct shmget_args {
	key_t key;
	int size;
	int shmflg;
};
shmget(p, uap, retval)
	struct proc *p;
	register struct shmget_args *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = p->p_ucred;
	register int i;
	int error, size, rval = 0;
	register struct shmhandle *shmh;

	/* look up the segment with the specified key */
	if (uap->key != IPC_PRIVATE) {
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) &&
			    shmsegs[i].shm_perm.key == uap->key) {
				rval = i;
				break;
			}
	} else
		i = shminfo.shmmni;

	/* create a new shared segment if necessary */
	if (i == shminfo.shmmni) {
		if ((uap->shmflg & IPC_CREAT) == 0)
			return (ENOENT);
		if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
			return (EINVAL);
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) == 0) {
				rval = i;
				break;
			}
		if (i == shminfo.shmmni)
			return (ENOSPC);
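		/*
		 * btoc()/clrnd() are machine-dependent macros: the byte
		 * count is converted to pages (clicks) and rounded to the
		 * machine's click/cluster granularity.  shmtot counts the
		 * pages handed out to all segments and is checked against
		 * the system-wide limit shminfo.shmall.
		 */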
		size = clrnd(btoc(uap->size));
		if (shmtot + size > shminfo.shmall)
			return (ENOMEM);
		shp = &shmsegs[rval];
		/*
		 * We need to do a couple of things to ensure consistency
		 * in case we sleep in malloc().  We mark the segment as
		 * allocated so that other shmgets() will not allocate it.
		 * We mark it as "destroyed" to ensure that shmvalid() is
		 * false, making most operations fail (XXX).  We set the key,
		 * so that other shmget()s will fail.
		 */
		shp->shm_perm.mode = SHM_ALLOC | SHM_DEST;
		shp->shm_perm.key = uap->key;
		shmh = (struct shmhandle *)
			malloc(sizeof(struct shmhandle), M_SHM, M_WAITOK);
		shmh->shmh_kva = 0;
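		/*
		 * The fabricated value below (hence the XXX) serves as the
		 * mapping handle passed to vm_mmap(); every attach of this
		 * segment passes the same handle, which appears to be how
		 * all mappings of the segment come to share one anonymous
		 * object.
		 */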
		shmh->shmh_id = (caddr_t)(0xc0000000|rval);	/* XXX */
		error = vm_mmap(shm_map, &shmh->shmh_kva, ctob(size),
				VM_PROT_ALL, VM_PROT_ALL,
				MAP_ANON, shmh->shmh_id, 0);
		if (error) {
			free((caddr_t)shmh, M_SHM);
			shp->shm_perm.mode = 0;
			return(ENOMEM);
		}
		shp->shm_handle = (void *) shmh;
		shmtot += size;
		shp->shm_perm.cuid = shp->shm_perm.uid = cred->cr_uid;
		shp->shm_perm.cgid = shp->shm_perm.gid = cred->cr_gid;
		shp->shm_perm.mode = SHM_ALLOC | (uap->shmflg & ACCESSPERMS);
		shp->shm_segsz = uap->size;
		shp->shm_cpid = p->p_pid;
		shp->shm_lpid = shp->shm_nattch = 0;
		shp->shm_atime = shp->shm_dtime = 0;
		shp->shm_ctime = time.tv_sec;
	} else {
		shp = &shmsegs[rval];
		/* XXX: probably not the right thing to do */
		if (shp->shm_perm.mode & SHM_DEST)
			return (EBUSY);
		if (error = ipcaccess(&shp->shm_perm, uap->shmflg & ACCESSPERMS,
			    cred))
			return (error);
		if (uap->size && uap->size > shp->shm_segsz)
			return (EINVAL);
		if ((uap->shmflg&IPC_CREAT) && (uap->shmflg&IPC_EXCL))
			return (EEXIST);
	}
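	/*
	 * The returned shmid encodes both the slot index (id % SHMMMNI)
	 * and that slot's sequence number (id / SHMMMNI); shmvalid()
	 * checks the sequence number so that stale ids are rejected once
	 * a slot has been reused.
	 */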
	*retval = shp->shm_perm.seq * SHMMMNI + rval;
	return (0);
}

/*
 * Shared memory control
 */
struct shmctl_args {
	int shmid;
	int cmd;
	caddr_t buf;
};
/* ARGSUSED */
shmctl(p, uap, retval)
	struct proc *p;
	register struct shmctl_args *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = p->p_ucred;
	struct shmid_ds sbuf;
	int error;

	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	switch (uap->cmd) {
	case IPC_STAT:
		if (error = ipcaccess(&shp->shm_perm, IPC_R, cred))
			return (error);
		return (copyout((caddr_t)shp, uap->buf, sizeof(*shp)));

	case IPC_SET:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		if (error = copyin(uap->buf, (caddr_t)&sbuf, sizeof sbuf))
			return (error);
		shp->shm_perm.uid = sbuf.shm_perm.uid;
		shp->shm_perm.gid = sbuf.shm_perm.gid;
		shp->shm_perm.mode = (shp->shm_perm.mode & ~ACCESSPERMS)
			| (sbuf.shm_perm.mode & ACCESSPERMS);
		shp->shm_ctime = time.tv_sec;
		break;

	case IPC_RMID:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		/* set ctime? */
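		/*
		 * Removal is deferred: retiring the key keeps new
		 * shmget()s from finding the segment, and SHM_DEST makes
		 * shmvalid() fail for further operations.  The memory
		 * itself is released only when the last attachment goes
		 * away (immediately, if nothing is attached).
		 */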
		shp->shm_perm.key = IPC_PRIVATE;
		shp->shm_perm.mode |= SHM_DEST;
		if (shp->shm_nattch <= 0)
			shmfree(shp);
		break;

	default:
		return (EINVAL);
	}
	return (0);
}

/*
 * Attach to shared memory segment.
 */
struct shmat_args {
	int	shmid;
	caddr_t	shmaddr;
	int	shmflg;
};
shmat(p, uap, retval)
	struct proc *p;
	register struct shmat_args *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register int size;
	caddr_t uva;
	int error;
	int flags;
	vm_prot_t prot;
	struct shmdesc *shmd;

	/*
	 * Allocate descriptors now (before validity check)
	 * in case malloc() blocks.
	 */
	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	size = shminfo.shmseg * sizeof(struct shmdesc);
	if (shmd == NULL) {
		shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
		bzero((caddr_t)shmd, size);
		p->p_vmspace->vm_shm = (caddr_t)shmd;
	}
	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	if (shp->shm_handle == NULL)
		panic("shmat NULL handle");
	if (error = ipcaccess(&shp->shm_perm,
	    (uap->shmflg&SHM_RDONLY) ? IPC_R : IPC_R|IPC_W, p->p_ucred))
		return (error);
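	/*
	 * A caller-supplied address must be SHMLBA-aligned; with SHM_RND
	 * it is rounded down to the previous SHMLBA boundary, otherwise a
	 * misaligned address is an error.
	 */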
	uva = uap->shmaddr;
	if (uva && ((int)uva & (SHMLBA-1))) {
		if (uap->shmflg & SHM_RND)
			uva = (caddr_t) ((int)uva & ~(SHMLBA-1));
		else
			return (EINVAL);
	}
	/*
	 * Make sure user doesn't use more than their fair share
	 */
	for (size = 0; size < shminfo.shmseg; size++) {
		if (shmd->shmd_uva == 0)
			break;
		shmd++;
	}
	if (size >= shminfo.shmseg)
		return (EMFILE);
	size = ctob(clrnd(btoc(shp->shm_segsz)));
	prot = VM_PROT_READ;
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON|MAP_SHARED;
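	/*
	 * If the caller named an address it is used exactly (MAP_FIXED);
	 * otherwise fall back to a hard-coded hint of 0x1000000 (the XXX)
	 * and leave it to vm_mmap() to find room, treating that address
	 * as a hint.
	 */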
	if (uva)
		flags |= MAP_FIXED;
	else
		uva = (caddr_t)0x1000000;	/* XXX */
	error = vm_mmap(&p->p_vmspace->vm_map, (vm_offset_t *)&uva,
			(vm_size_t)size, prot, VM_PROT_ALL, flags,
			((struct shmhandle *)shp->shm_handle)->shmh_id, 0);
	if (error)
		return(error);
	shmd->shmd_uva = (vm_offset_t)uva;
	shmd->shmd_id = uap->shmid;
	/*
	 * Fill in the remaining fields
	 */
	shp->shm_lpid = p->p_pid;
	shp->shm_atime = time.tv_sec;
	shp->shm_nattch++;
	*retval = (int) uva;
	return (0);
}

/*
 * Detach from shared memory segment.
 */
struct shmdt_args {
	caddr_t	shmaddr;
};
/* ARGSUSED */
shmdt(p, uap, retval)
	struct proc *p;
	struct shmdt_args *uap;
	int *retval;
{
	register struct shmdesc *shmd;
	register int i;

	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmd++)
		if (shmd->shmd_uva &&
		    shmd->shmd_uva == (vm_offset_t)uap->shmaddr)
			break;
	if (i == shminfo.shmseg)
		return (EINVAL);
	/* record the caller's pid before shmufree() clears shmd_id */
	shmsegs[shmd->shmd_id % SHMMMNI].shm_lpid = p->p_pid;
	shmufree(p, shmd);
	return (0);
}

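/*
 * Fork hook: give the child its own copy of the parent's shmdesc
 * array and bump shm_nattch on every segment the parent has attached.
 * The isvfork argument is accepted but unused here.
 */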
shmfork(p1, p2, isvfork)
	struct proc *p1, *p2;
	int isvfork;
{
	register struct shmdesc *shmd;
	register int size;

	/*
	 * Copy the parent's descriptor array.
	 */
	size = shminfo.shmseg * sizeof(struct shmdesc);
	shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmd, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmd;
	/*
	 * Increment reference counts
	 */
	for (size = 0; size < shminfo.shmseg; size++, shmd++)
		if (shmd->shmd_uva)
			shmsegs[shmd->shmd_id % SHMMMNI].shm_nattch++;
}

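/*
 * Exit hook: detach every segment still attached to the process via
 * shmufree() and release the shmdesc array itself.
 */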
shmexit(p)
	struct proc *p;
{
	register struct shmdesc *shmd;
	register int i;

	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmd++)
		if (shmd->shmd_uva)
			shmufree(p, shmd);
	free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
	p->p_vmspace->vm_shm = NULL;
}

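/*
 * Validate a user-supplied shmid: the low part (id % SHMMMNI) is the
 * table index, the high part (id / SHMMMNI) must match the slot's
 * current sequence number, and the slot must be allocated and not
 * marked for destruction.
 */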
shmvalid(id)
	register int id;
{
	register struct shmid_ds *shp;

	if (id < 0 || (id % SHMMMNI) >= shminfo.shmmni)
		return(EINVAL);
	shp = &shmsegs[id % SHMMMNI];
	if (shp->shm_perm.seq == (id / SHMMMNI) &&
	    (shp->shm_perm.mode & (SHM_ALLOC|SHM_DEST)) == SHM_ALLOC)
		return(0);
	return(EINVAL);
}

/*
 * Free user resources associated with a shared memory segment
 */
shmufree(p, shmd)
	struct proc *p;
	struct shmdesc *shmd;
{
	register struct shmid_ds *shp;

	shp = &shmsegs[shmd->shmd_id % SHMMMNI];
	(void) vm_deallocate(&p->p_vmspace->vm_map, shmd->shmd_uva,
			     ctob(clrnd(btoc(shp->shm_segsz))));
	shmd->shmd_id = 0;
	shmd->shmd_uva = 0;
	shp->shm_dtime = time.tv_sec;
	if (--shp->shm_nattch <= 0 && (shp->shm_perm.mode & SHM_DEST))
		shmfree(shp);
}

/*
 * Deallocate resources associated with a shared memory segment
 */
shmfree(shp)
	register struct shmid_ds *shp;
{

	if (shp->shm_handle == NULL)
		panic("shmfree");
	/*
	 * Lose our lingering object reference by deallocating the space
	 * in the kernel map.  The pager will also be deallocated as a
	 * side effect.
	 */
	vm_deallocate(shm_map,
		      ((struct shmhandle *)shp->shm_handle)->shmh_kva,
		      ctob(clrnd(btoc(shp->shm_segsz))));
	free((caddr_t)shp->shm_handle, M_SHM);
	shp->shm_handle = NULL;
	shmtot -= clrnd(btoc(shp->shm_segsz));
	shp->shm_perm.mode = 0;
	/*
	 * Increment the sequence number to ensure that outstanding
	 * shmids for this segment will be invalid in the event that
	 * the segment is reallocated.  Note that shmids must be
	 * positive as decreed by SVID.
	 */
	shp->shm_perm.seq++;
	if ((int)(shp->shm_perm.seq * SHMMMNI) < 0)
		shp->shm_perm.seq = 0;
}

/*
 * XXX This routine would be common to all sysV style IPC
 *     (if the others were implemented).
 */
ipcaccess(ipc, mode, cred)
	register struct ipc_perm *ipc;
	int mode;
	register struct ucred *cred;
{
	register int m;

	if (cred->cr_uid == 0)
		return(0);
	/*
	 * Access check is based on only one of owner, group, public.
	 * If not owner, then check group.
	 * If not a member of the group, then check public access.
	 */
	mode &= 0700;
	m = ipc->mode;
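	/*
	 * The requested bits are expressed in the owner (0700) position.
	 * Shifting the stored permission word left by 3 lines the group
	 * bits up with that mask, and by 6 the ``other'' bits, so one
	 * comparison serves all three classes.
	 */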
	if (cred->cr_uid != ipc->uid && cred->cr_uid != ipc->cuid) {
		m <<= 3;
		if (!groupmember(ipc->gid, cred) &&
		    !groupmember(ipc->cgid, cred))
			m <<= 3;
	}
	if ((mode&m) == mode)
		return (0);
	return (EACCES);
}
#endif /* SYSVSHM */