/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department. Originally from University of Wisconsin.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: uipc_shm.c 1.9 89/08/14$
 *
 *	@(#)sysv_shm.c	7.17 (Berkeley) 06/04/92
 */

/*
 * System V shared memory routines.
 * TEMPORARY, until mmap is in place;
 * needed now for HP-UX compatibility and X server (yech!).
 */

#ifdef SYSVSHM

#include "param.h"
#include "systm.h"
#include "kernel.h"
#include "proc.h"
#include "shm.h"
#include "malloc.h"
#include "mman.h"
#include "vm/vm.h"
#include "vm/vm_kern.h"
#include "vm/vm_inherit.h"
#include "vm/vm_pager.h"

#ifdef HPUXCOMPAT
#include "hp/hpux/hpux.h"
#endif

int	shmat(), shmctl(), shmdt(), shmget();
int	(*shmcalls[])() = { shmat, shmctl, shmdt, shmget };
int	shmtot = 0;

/*
 * Per-process internal structure for managing attached segments.
 * Each process using shm will have an array of shminfo.shmseg of these.
 */
struct	shmdesc {
	vm_offset_t	shmd_uva;
	int		shmd_id;
};

/*
 * Per segment internal structure (shm_handle).
 */
struct	shmhandle {
	vm_offset_t	shmh_kva;
	caddr_t		shmh_id;
};

vm_map_t shm_map;	/* address space for shared memory segments */

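/*
 * Initialize the shared memory facility: carve an arena of
 * shminfo.shmall pages out of the kernel map, clamp shmmni to the
 * compiled-in limit, and mark every segment slot free.
 */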
shminit()
{
	register int i;
	vm_offset_t whocares1, whocares2;

	shm_map = kmem_suballoc(kernel_map, &whocares1, &whocares2,
				shminfo.shmall * NBPG, FALSE);
	if (shminfo.shmmni > SHMMMNI)
		shminfo.shmmni = SHMMMNI;
	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = 0;
		shmsegs[i].shm_perm.seq = 0;
	}
}

/*
 * Entry point for all SHM calls
 */
shmsys(p, uap, retval)
	struct proc *p;
	struct args {
		u_int which;
	} *uap;
	int *retval;
{

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return (EINVAL);
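	/* The call-specific arguments follow "which" in the syscall argument block. */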
	return ((*shmcalls[uap->which])(p, &uap[1], retval));
}

/*
 * Get a shared memory segment
 */
shmget(p, uap, retval)
	struct proc *p;
	register struct args {
		key_t key;
		int size;
		int shmflg;
	} *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = p->p_ucred;
	register int i;
	int error, size, rval = 0;
	register struct shmhandle *shmh;

	/* look for an existing segment with the specified key */
	if (uap->key != IPC_PRIVATE) {
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) &&
			    shmsegs[i].shm_perm.key == uap->key) {
				rval = i;
				break;
			}
	} else
		i = shminfo.shmmni;

	/* create a new shared segment if necessary */
	if (i == shminfo.shmmni) {
		if ((uap->shmflg & IPC_CREAT) == 0)
			return (ENOENT);
		if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
			return (EINVAL);
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) == 0) {
				rval = i;
				break;
			}
		if (i == shminfo.shmmni)
			return (ENOSPC);
		size = clrnd(btoc(uap->size));
		if (shmtot + size > shminfo.shmall)
			return (ENOMEM);
		shp = &shmsegs[rval];
		/*
		 * We need to do a couple of things to ensure consistency
		 * in case we sleep in malloc().  We mark the segment as
		 * allocated so that other shmgets() will not allocate it.
		 * We mark it as "destroyed" to ensure that shmvalid() is
		 * false, making most operations fail (XXX).  We set the key,
		 * so that other shmget()s will fail.
		 */
		shp->shm_perm.mode = SHM_ALLOC | SHM_DEST;
		shp->shm_perm.key = uap->key;
		shmh = (struct shmhandle *)
			malloc(sizeof(struct shmhandle), M_SHM, M_WAITOK);
		shmh->shmh_kva = 0;
		shmh->shmh_id = (caddr_t)(0xc0000000|rval);	/* XXX */
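		/*
		 * This id doubles as the anonymous-object handle: shmat()
		 * hands the same value to vm_mmap(), so every attach maps
		 * the object created by the call below.
		 */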
		error = vm_mmap(shm_map, &shmh->shmh_kva, ctob(size),
				VM_PROT_ALL, MAP_ANON, shmh->shmh_id, 0);
		if (error) {
			free((caddr_t)shmh, M_SHM);
			shp->shm_perm.mode = 0;
			return(ENOMEM);
		}
		shp->shm_handle = (void *) shmh;
		shmtot += size;
		shp->shm_perm.cuid = shp->shm_perm.uid = cred->cr_uid;
		shp->shm_perm.cgid = shp->shm_perm.gid = cred->cr_gid;
		shp->shm_perm.mode = SHM_ALLOC | (uap->shmflg&0777);
		shp->shm_segsz = uap->size;
		shp->shm_cpid = p->p_pid;
		shp->shm_lpid = shp->shm_nattch = 0;
		shp->shm_atime = shp->shm_dtime = 0;
		shp->shm_ctime = time.tv_sec;
	} else {
		shp = &shmsegs[rval];
		/* XXX: probably not the right thing to do */
		if (shp->shm_perm.mode & SHM_DEST)
			return (EBUSY);
		if (error = ipcaccess(&shp->shm_perm, uap->shmflg&0777, cred))
			return (error);
		if (uap->size && uap->size > shp->shm_segsz)
			return (EINVAL);
		if ((uap->shmflg&IPC_CREAT) && (uap->shmflg&IPC_EXCL))
			return (EEXIST);
	}
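	/* The returned shmid encodes both the slot number and its sequence. */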
	*retval = shp->shm_perm.seq * SHMMMNI + rval;
	return (0);
}

/*
 * Shared memory control
 */
/* ARGSUSED */
shmctl(p, uap, retval)
	struct proc *p;
	register struct args {
		int shmid;
		int cmd;
		caddr_t buf;
	} *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = p->p_ucred;
	struct shmid_ds sbuf;
	int error;

	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	switch (uap->cmd) {
	case IPC_STAT:
		if (error = ipcaccess(&shp->shm_perm, IPC_R, cred))
			return (error);
		return (copyout((caddr_t)shp, uap->buf, sizeof(*shp)));

	case IPC_SET:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		if (error = copyin(uap->buf, (caddr_t)&sbuf, sizeof sbuf))
			return (error);
		shp->shm_perm.uid = sbuf.shm_perm.uid;
		shp->shm_perm.gid = sbuf.shm_perm.gid;
		shp->shm_perm.mode = (shp->shm_perm.mode & ~0777)
			| (sbuf.shm_perm.mode & 0777);
		shp->shm_ctime = time.tv_sec;
		break;

	case IPC_RMID:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		/* set ctime? */
		shp->shm_perm.key = IPC_PRIVATE;
		shp->shm_perm.mode |= SHM_DEST;
		if (shp->shm_nattch <= 0)
			shmfree(shp);
		break;

#ifdef HPUXCOMPAT
	case SHM_LOCK:
	case SHM_UNLOCK:
		/* don't really do anything, but make them think we did */
		if ((p->p_flag & SHPUX) == 0)
			return (EINVAL);
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		break;
#endif

	default:
		return (EINVAL);
	}
	return (0);
}

/*
 * Attach to shared memory segment.
 */
shmat(p, uap, retval)
	struct proc *p;
	register struct args {
		int	shmid;
		caddr_t	shmaddr;
		int	shmflg;
	} *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register int size;
	caddr_t uva;
	int error;
	int flags;
	vm_prot_t prot;
	struct shmdesc *shmd;

	/*
	 * Allocate descriptors now (before validity check)
	 * in case malloc() blocks.
	 */
	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	size = shminfo.shmseg * sizeof(struct shmdesc);
	if (shmd == NULL) {
		shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
		bzero((caddr_t)shmd, size);
		p->p_vmspace->vm_shm = (caddr_t)shmd;
	}
	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	if (shp->shm_handle == NULL)
		panic("shmat NULL handle");
	if (error = ipcaccess(&shp->shm_perm,
	    (uap->shmflg&SHM_RDONLY) ? IPC_R : IPC_R|IPC_W, p->p_ucred))
		return (error);
	uva = uap->shmaddr;
	if (uva && ((int)uva & (SHMLBA-1))) {
		if (uap->shmflg & SHM_RND)
			uva = (caddr_t) ((int)uva & ~(SHMLBA-1));
		else
			return (EINVAL);
	}
	/*
	 * Make sure user doesn't use more than their fair share
	 */
	for (size = 0; size < shminfo.shmseg; size++) {
		if (shmd->shmd_uva == 0)
			break;
		shmd++;
	}
	if (size >= shminfo.shmseg)
		return (EMFILE);
	size = ctob(clrnd(btoc(shp->shm_segsz)));
	prot = VM_PROT_READ;
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON|MAP_SHARED;
	if (uva)
		flags |= MAP_FIXED;
	else
		uva = (caddr_t)0x1000000;	/* XXX */
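	/*
	 * When no address was supplied the value above is only a hint;
	 * without MAP_FIXED, vm_mmap() looks for free space starting there.
	 */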
	error = vm_mmap(&p->p_vmspace->vm_map, (vm_offset_t *)&uva,
	    (vm_size_t)size, prot, flags,
	    ((struct shmhandle *)shp->shm_handle)->shmh_id, 0);
	if (error)
		return(error);
	shmd->shmd_uva = (vm_offset_t)uva;
	shmd->shmd_id = uap->shmid;
	/*
	 * Fill in the remaining fields
	 */
	shp->shm_lpid = p->p_pid;
	shp->shm_atime = time.tv_sec;
	shp->shm_nattch++;
	*retval = (int) uva;
	return (0);
}

/*
 * Detach from shared memory segment.
 */
/* ARGSUSED */
shmdt(p, uap, retval)
	struct proc *p;
	struct args {
		caddr_t	shmaddr;
	} *uap;
	int *retval;
{
	register struct shmdesc *shmd;
	register int i;

	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	if (shmd == NULL)		/* nothing was ever attached */
		return(EINVAL);
	for (i = 0; i < shminfo.shmseg; i++, shmd++)
		if (shmd->shmd_uva &&
		    shmd->shmd_uva == (vm_offset_t)uap->shmaddr)
			break;
	if (i == shminfo.shmseg)
		return(EINVAL);
	/*
	 * Record the detaching pid before shmufree() clears the
	 * descriptor (it zeroes shmd_id).
	 */
	shmsegs[shmd->shmd_id % SHMMMNI].shm_lpid = p->p_pid;
	shmufree(p, shmd);
	return (0);
}

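/*
 * Called at fork: give the child a copy of the parent's attach
 * descriptors and bump the attach count of each attached segment.
 */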
shmfork(p1, p2, isvfork)
	struct proc *p1, *p2;
	int isvfork;
{
	register struct shmdesc *shmd;
	register int size;

	/*
	 * Copy the parent's attach descriptors
	 */
	size = shminfo.shmseg * sizeof(struct shmdesc);
	shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmd, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmd;
	/*
	 * Increment reference counts
	 */
	for (size = 0; size < shminfo.shmseg; size++, shmd++)
		if (shmd->shmd_uva)
			shmsegs[shmd->shmd_id % SHMMMNI].shm_nattch++;
}

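/*
 * Called at process exit: detach any remaining segments and release
 * the descriptor array.
 */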
shmexit(p)
	struct proc *p;
{
	register struct shmdesc *shmd;
	register int i;

	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmd++)
		if (shmd->shmd_uva)
			shmufree(p, shmd);
	free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
	p->p_vmspace->vm_shm = NULL;
}

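/*
 * Check that a shmid names a currently allocated, non-deleted segment
 * and that its sequence number still matches.
 */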
shmvalid(id)
	register int id;
{
	register struct shmid_ds *shp;

	if (id < 0 || (id % SHMMMNI) >= shminfo.shmmni)
		return(EINVAL);
	shp = &shmsegs[id % SHMMMNI];
	if (shp->shm_perm.seq == (id / SHMMMNI) &&
	    (shp->shm_perm.mode & (SHM_ALLOC|SHM_DEST)) == SHM_ALLOC)
		return(0);
	return(EINVAL);
}

/*
 * Free user resources associated with a shared memory segment
 */
shmufree(p, shmd)
	struct proc *p;
	struct shmdesc *shmd;
{
	register struct shmid_ds *shp;

	shp = &shmsegs[shmd->shmd_id % SHMMMNI];
	(void) vm_deallocate(&p->p_vmspace->vm_map, shmd->shmd_uva,
			     ctob(clrnd(btoc(shp->shm_segsz))));
	shmd->shmd_id = 0;
	shmd->shmd_uva = 0;
	shp->shm_dtime = time.tv_sec;
	if (--shp->shm_nattch <= 0 && (shp->shm_perm.mode & SHM_DEST))
		shmfree(shp);
}

/*
 * Deallocate resources associated with a shared memory segment
 */
shmfree(shp)
	register struct shmid_ds *shp;
{

	if (shp->shm_handle == NULL)
		panic("shmfree");
	/*
	 * Lose our lingering object reference by deallocating space
	 * in kernel.  Pager will also be deallocated as a side-effect.
	 */
	vm_deallocate(shm_map,
		      ((struct shmhandle *)shp->shm_handle)->shmh_kva,
		      ctob(clrnd(btoc(shp->shm_segsz))));
	free((caddr_t)shp->shm_handle, M_SHM);
	shp->shm_handle = NULL;
	shmtot -= clrnd(btoc(shp->shm_segsz));
	shp->shm_perm.mode = 0;
	/*
	 * Increment the sequence number to ensure that outstanding
	 * shmids for this segment will be invalid in the event that
	 * the segment is reallocated.  Note that shmids must be
	 * positive as decreed by SVID.
	 */
	shp->shm_perm.seq++;
	if ((int)(shp->shm_perm.seq * SHMMMNI) < 0)
		shp->shm_perm.seq = 0;
}

/*
 * XXX This routine would be common to all sysV style IPC
 *     (if the others were implemented).
 */
ipcaccess(ipc, mode, cred)
	register struct ipc_perm *ipc;
	int mode;
	register struct ucred *cred;
{
	register int m;

	if (cred->cr_uid == 0)
		return(0);
	/*
	 * Access check is based on only one of owner, group, public.
	 * If not owner, then check group.
	 * If not a member of the group, then check public access.
	 */
	mode &= 0700;
	m = ipc->mode;
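	/*
	 * The request is expressed in the owner bits (0700); shift the
	 * group or other bits of "m" up to that position for comparison.
	 */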
	if (cred->cr_uid != ipc->uid && cred->cr_uid != ipc->cuid) {
		m <<= 3;
		if (!groupmember(ipc->gid, cred) &&
		    !groupmember(ipc->cgid, cred))
			m <<= 3;
	}
	if ((mode&m) == mode)
		return (0);
	return (EACCES);
}
#endif /* SYSVSHM */