/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department. Originally from University of Wisconsin.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: uipc_shm.c 1.11 92/04/23$
 *
 *	@(#)sysv_shm.c	8.1 (Berkeley) 06/10/93
 */

/*
 * System V shared memory routines.
 * TEMPORARY, until mmap is in place;
 * needed now for HP-UX compatibility and X server (yech!).
 */

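/*
 * For reference, the user-level calling sequence served by these
 * routines is the usual System V one (illustrative only, not taken
 * from this file):
 *
 *	id = shmget(key, size, IPC_CREAT|0644);
 *	addr = shmat(id, (char *)0, 0);
 *	...
 *	shmdt(addr);
 *	shmctl(id, IPC_RMID, (struct shmid_ds *)0);
 */
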
#ifdef SYSVSHM

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/shm.h>
#include <sys/malloc.h>
#include <sys/mman.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_inherit.h>
#include <vm/vm_pager.h>

int	shmat(), shmctl(), shmdt(), shmget();
int	(*shmcalls[])() = { shmat, shmctl, shmdt, shmget };
int	shmtot = 0;

/*
 * Per-process internal structure for managing segments.
 * Each process using shm has an array of shminfo.shmseg of these,
 * hung off p_vmspace->vm_shm.
 */
struct	shmdesc {
	vm_offset_t	shmd_uva;
	int		shmd_id;
};

/*
 * Per segment internal structure (shm_handle).
 */
struct	shmhandle {
	vm_offset_t	shmh_kva;
	caddr_t		shmh_id;
};
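
/*
 * shmh_id is the cookie passed to vm_mmap() as the mapping handle,
 * both when the segment is created in shmget() and again on every
 * shmat(); using one cookie per segment is evidently what ties all
 * of the attaches to the same backing object.  The 0xc0000000|index
 * value is an arbitrary tag (hence the XXX where it is assigned).
 */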

vm_map_t shm_map;	/* address space for shared memory segments */

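/*
 * Bootstrap-time initialization: carve a submap for shared memory out
 * of the kernel map, sized for shminfo.shmall pages, clamp shmmni to
 * the static shmsegs[] table size, and mark every segment slot free.
 */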
shminit()
{
	register int i;
	vm_offset_t whocares1, whocares2;

	shm_map = kmem_suballoc(kernel_map, &whocares1, &whocares2,
				shminfo.shmall * NBPG, FALSE);
	if (shminfo.shmmni > SHMMMNI)
		shminfo.shmmni = SHMMMNI;
	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = 0;
		shmsegs[i].shm_perm.seq = 0;
	}
}

/*
 * Entry point for all SHM calls
 */
struct shmsys_args {
	u_int which;
};
shmsys(p, uap, retval)
	struct proc *p;
	struct shmsys_args *uap;
	int *retval;
{

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return (EINVAL);
	return ((*shmcalls[uap->which])(p, &uap[1], retval));
}
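
/*
 * The "which" argument indexes shmcalls[] above, so 0 selects shmat,
 * 1 shmctl, 2 shmdt and 3 shmget; the handler-specific arguments are
 * laid out directly after "which", which is why &uap[1] is passed
 * along.  The user-level shm*() stubs are presumed to marshal their
 * arguments into this form.
 */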

/*
 * Get a shared memory segment
 */
struct shmget_args {
	key_t key;
	int size;
	int shmflg;
};
shmget(p, uap, retval)
	struct proc *p;
	register struct shmget_args *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = p->p_ucred;
	register int i;
	int error, size, rval = 0;
	register struct shmhandle *shmh;

	/* look up the specified shm_id */
	if (uap->key != IPC_PRIVATE) {
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) &&
			    shmsegs[i].shm_perm.key == uap->key) {
				rval = i;
				break;
			}
	} else
		i = shminfo.shmmni;

	/* create a new shared segment if necessary */
	if (i == shminfo.shmmni) {
		if ((uap->shmflg & IPC_CREAT) == 0)
			return (ENOENT);
		if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
			return (EINVAL);
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) == 0) {
				rval = i;
				break;
			}
		if (i == shminfo.shmmni)
			return (ENOSPC);
		size = clrnd(btoc(uap->size));
		if (shmtot + size > shminfo.shmall)
			return (ENOMEM);
		shp = &shmsegs[rval];
		/*
		 * We need to do a couple of things to ensure consistency
		 * in case we sleep in malloc().  We mark the segment as
		 * allocated so that other shmgets() will not allocate it.
		 * We mark it as "destroyed" to ensure that shmvalid() is
		 * false, making most operations fail (XXX).  We set the key
		 * so that other shmget()s on the same key will find it and
		 * fail.
		 */
		shp->shm_perm.mode = SHM_ALLOC | SHM_DEST;
		shp->shm_perm.key = uap->key;
		shmh = (struct shmhandle *)
			malloc(sizeof(struct shmhandle), M_SHM, M_WAITOK);
		shmh->shmh_kva = 0;
		shmh->shmh_id = (caddr_t)(0xc0000000|rval);	/* XXX */
		error = vm_mmap(shm_map, &shmh->shmh_kva, ctob(size),
				VM_PROT_ALL, VM_PROT_ALL,
				MAP_ANON, shmh->shmh_id, 0);
		if (error) {
			free((caddr_t)shmh, M_SHM);
			shp->shm_perm.mode = 0;
			return(ENOMEM);
		}
		shp->shm_handle = (void *) shmh;
		shmtot += size;
		shp->shm_perm.cuid = shp->shm_perm.uid = cred->cr_uid;
		shp->shm_perm.cgid = shp->shm_perm.gid = cred->cr_gid;
		shp->shm_perm.mode = SHM_ALLOC | (uap->shmflg&0777);
		shp->shm_segsz = uap->size;
		shp->shm_cpid = p->p_pid;
		shp->shm_lpid = shp->shm_nattch = 0;
		shp->shm_atime = shp->shm_dtime = 0;
		shp->shm_ctime = time.tv_sec;
	} else {
		shp = &shmsegs[rval];
		/* XXX: probably not the right thing to do */
		if (shp->shm_perm.mode & SHM_DEST)
			return (EBUSY);
		if (error = ipcaccess(&shp->shm_perm, uap->shmflg&0777, cred))
			return (error);
		if (uap->size && uap->size > shp->shm_segsz)
			return (EINVAL);
		if ((uap->shmflg&IPC_CREAT) && (uap->shmflg&IPC_EXCL))
			return (EEXIST);
	}
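	/*
	 * The returned shmid encodes both the slot index (rval) and the
	 * slot's current sequence number; shmvalid() later uses the
	 * sequence part to reject ids that refer to a segment which has
	 * since been freed and reused.
	 */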
	*retval = shp->shm_perm.seq * SHMMMNI + rval;
	return (0);
}

/*
 * Shared memory control
 */
struct shmctl_args {
	int shmid;
	int cmd;
	caddr_t buf;
};
/* ARGSUSED */
shmctl(p, uap, retval)
	struct proc *p;
	register struct shmctl_args *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = p->p_ucred;
	struct shmid_ds sbuf;
	int error;

	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	switch (uap->cmd) {
	case IPC_STAT:
		if (error = ipcaccess(&shp->shm_perm, IPC_R, cred))
			return (error);
		return (copyout((caddr_t)shp, uap->buf, sizeof(*shp)));

	case IPC_SET:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		if (error = copyin(uap->buf, (caddr_t)&sbuf, sizeof sbuf))
			return (error);
		shp->shm_perm.uid = sbuf.shm_perm.uid;
		shp->shm_perm.gid = sbuf.shm_perm.gid;
		shp->shm_perm.mode = (shp->shm_perm.mode & ~0777)
			| (sbuf.shm_perm.mode & 0777);
		shp->shm_ctime = time.tv_sec;
		break;

	case IPC_RMID:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		/* set ctime? */
		shp->shm_perm.key = IPC_PRIVATE;
		shp->shm_perm.mode |= SHM_DEST;
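		/*
		 * Removal is deferred: the segment is torn down here only
		 * if nothing is attached, otherwise shmufree() frees it
		 * when the last attachment goes away.
		 */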
		if (shp->shm_nattch <= 0)
			shmfree(shp);
		break;

	default:
		return (EINVAL);
	}
	return (0);
}

/*
 * Attach to shared memory segment.
 */
struct shmat_args {
	int	shmid;
	caddr_t	shmaddr;
	int	shmflg;
};
shmat(p, uap, retval)
	struct proc *p;
	register struct shmat_args *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register int size;
	caddr_t uva;
	int error;
	int flags;
	vm_prot_t prot;
	struct shmdesc *shmd;

	/*
	 * Allocate descriptors now (before validity check)
	 * in case malloc() blocks.
	 */
	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	size = shminfo.shmseg * sizeof(struct shmdesc);
	if (shmd == NULL) {
		shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
		bzero((caddr_t)shmd, size);
		p->p_vmspace->vm_shm = (caddr_t)shmd;
	}
	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	if (shp->shm_handle == NULL)
		panic("shmat NULL handle");
	if (error = ipcaccess(&shp->shm_perm,
	    (uap->shmflg&SHM_RDONLY) ? IPC_R : IPC_R|IPC_W, p->p_ucred))
		return (error);
	uva = uap->shmaddr;
	if (uva && ((int)uva & (SHMLBA-1))) {
		if (uap->shmflg & SHM_RND)
			uva = (caddr_t) ((int)uva & ~(SHMLBA-1));
		else
			return (EINVAL);
	}
	/*
	 * Make sure user doesn't use more than their fair share
	 */
	for (size = 0; size < shminfo.shmseg; size++) {
		if (shmd->shmd_uva == 0)
			break;
		shmd++;
	}
	if (size >= shminfo.shmseg)
		return (EMFILE);
	size = ctob(clrnd(btoc(shp->shm_segsz)));
	prot = VM_PROT_READ;
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON|MAP_SHARED;
	if (uva)
		flags |= MAP_FIXED;
	else
		uva = (caddr_t)0x1000000;	/* XXX */
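	/*
	 * When no address was supplied, the hard-coded 16MB value above
	 * is only a starting hint; since MAP_FIXED is not set, vm_mmap()
	 * is free to place the mapping at or above it.  The XXX marks
	 * the hint as arbitrary.
	 */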
	error = vm_mmap(&p->p_vmspace->vm_map, (vm_offset_t *)&uva,
			(vm_size_t)size, prot, VM_PROT_ALL, flags,
			((struct shmhandle *)shp->shm_handle)->shmh_id, 0);
	if (error)
		return(error);
	shmd->shmd_uva = (vm_offset_t)uva;
	shmd->shmd_id = uap->shmid;
	/*
	 * Fill in the remaining fields
	 */
	shp->shm_lpid = p->p_pid;
	shp->shm_atime = time.tv_sec;
	shp->shm_nattch++;
	*retval = (int) uva;
	return (0);
}

/*
 * Detach from shared memory segment.
 */
struct shmdt_args {
	caddr_t	shmaddr;
};
/* ARGSUSED */
shmdt(p, uap, retval)
	struct proc *p;
	struct shmdt_args *uap;
	int *retval;
{
	register struct shmdesc *shmd;
	register int i;

	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	if (shmd == NULL)
		return(EINVAL);
	for (i = 0; i < shminfo.shmseg; i++, shmd++)
		if (shmd->shmd_uva &&
		    shmd->shmd_uva == (vm_offset_t)uap->shmaddr)
			break;
	if (i == shminfo.shmseg)
		return(EINVAL);
	/*
	 * Record the detaching pid before shmufree() clears the
	 * descriptor's shmd_id and shmd_uva.
	 */
	shmsegs[shmd->shmd_id % SHMMMNI].shm_lpid = p->p_pid;
	shmufree(p, shmd);
	return (0);
}

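/*
 * Duplicate the parent's attach state at fork time.  The bcopy()
 * below assumes the caller only invokes shmfork() when the parent
 * actually has a descriptor array (vm_shm != NULL).  The isvfork
 * argument is currently unused.
 */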
shmfork(p1, p2, isvfork)
	struct proc *p1, *p2;
	int isvfork;
{
	register struct shmdesc *shmd;
	register int size;

	/*
	 * Copy the parent's descriptor information
	 */
	size = shminfo.shmseg * sizeof(struct shmdesc);
	shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmd, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmd;
	/*
	 * Increment reference counts
	 */
	for (size = 0; size < shminfo.shmseg; size++, shmd++)
		if (shmd->shmd_uva)
			shmsegs[shmd->shmd_id % SHMMMNI].shm_nattch++;
}

shmexit(p)
	struct proc *p;
{
	register struct shmdesc *shmd;
	register int i;

	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmd++)
		if (shmd->shmd_uva)
			shmufree(p, shmd);
	free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
	p->p_vmspace->vm_shm = NULL;
}

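/*
 * A shmid is valid only if it names a slot that is currently
 * allocated, is not marked for destruction, and whose sequence
 * number still matches the one encoded in the id (see shmget()
 * and shmfree()).
 */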
shmvalid(id)
	register int id;
{
	register struct shmid_ds *shp;

	if (id < 0 || (id % SHMMMNI) >= shminfo.shmmni)
		return(EINVAL);
	shp = &shmsegs[id % SHMMMNI];
	if (shp->shm_perm.seq == (id / SHMMMNI) &&
	    (shp->shm_perm.mode & (SHM_ALLOC|SHM_DEST)) == SHM_ALLOC)
		return(0);
	return(EINVAL);
}

/*
 * Free user resources associated with a shared memory segment
 */
shmufree(p, shmd)
	struct proc *p;
	struct shmdesc *shmd;
{
	register struct shmid_ds *shp;

	shp = &shmsegs[shmd->shmd_id % SHMMMNI];
	(void) vm_deallocate(&p->p_vmspace->vm_map, shmd->shmd_uva,
			     ctob(clrnd(btoc(shp->shm_segsz))));
	shmd->shmd_id = 0;
	shmd->shmd_uva = 0;
	shp->shm_dtime = time.tv_sec;
	if (--shp->shm_nattch <= 0 && (shp->shm_perm.mode & SHM_DEST))
		shmfree(shp);
}

/*
 * Deallocate resources associated with a shared memory segment
 */
shmfree(shp)
	register struct shmid_ds *shp;
{

	if (shp->shm_handle == NULL)
		panic("shmfree");
	/*
	 * Lose our lingering object reference by deallocating space
	 * in kernel.  Pager will also be deallocated as a side-effect.
	 */
	vm_deallocate(shm_map,
		      ((struct shmhandle *)shp->shm_handle)->shmh_kva,
		      ctob(clrnd(btoc(shp->shm_segsz))));
	free((caddr_t)shp->shm_handle, M_SHM);
	shp->shm_handle = NULL;
	shmtot -= clrnd(btoc(shp->shm_segsz));
	shp->shm_perm.mode = 0;
	/*
	 * Increment the sequence number to ensure that outstanding
	 * shmids for this segment will be invalid in the event that
	 * the segment is reallocated.  Note that shmids must be
	 * positive as decreed by SVID.
	 */
	shp->shm_perm.seq++;
	if ((int)(shp->shm_perm.seq * SHMMMNI) < 0)
		shp->shm_perm.seq = 0;
}

/*
 * XXX This routine would be common to all sysV style IPC
 *     (if the others were implemented).
 */
ipcaccess(ipc, mode, cred)
	register struct ipc_perm *ipc;
	int mode;
	register struct ucred *cred;
{
	register int m;

	if (cred->cr_uid == 0)
		return(0);
	/*
	 * Access check is based on only one of owner, group, public.
	 * If not owner, then check group.
	 * If not a member of the group, then check public access.
	 */
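	/*
	 * Example: with ipc->mode 0640, a requester who is a group
	 * member but not the owner gets m <<= 3, putting the group
	 * bits (4 = read) in the 0700 position; a request for IPC_R
	 * (0400) then succeeds, while IPC_R|IPC_W (0600) fails with
	 * EACCES.
	 */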
	mode &= 0700;
	m = ipc->mode;
	if (cred->cr_uid != ipc->uid && cred->cr_uid != ipc->cuid) {
		m <<= 3;
		if (!groupmember(ipc->gid, cred) &&
		    !groupmember(ipc->cgid, cred))
			m <<= 3;
	}
	if ((mode&m) == mode)
		return (0);
	return (EACCES);
}
#endif /* SYSVSHM */