xref: /original-bsd/sys/kern/sysv_shm.c (revision a6d8c59f)
/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department. Originally from University of Wisconsin.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: uipc_shm.c 1.11 92/04/23$
 *
 *	@(#)sysv_shm.c	7.20 (Berkeley) 10/11/92
 */

/*
 * System V shared memory routines.
 * TEMPORARY, until mmap is in place;
 * needed now for HP-UX compatibility and X server (yech!).
 */
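
/*
 * For reference, a minimal user-level sketch of the interface these
 * routines implement (illustrative only; error handling omitted):
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = (char *) shmat(id, (char *) 0, 0);
 *	p[0] = 'x';
 *	(void) shmdt(p);
 *	(void) shmctl(id, IPC_RMID, (struct shmid_ds *) 0);
 */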

#ifdef SYSVSHM

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/shm.h>
#include <sys/malloc.h>
#include <sys/mman.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_inherit.h>
#include <vm/vm_pager.h>

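/*
 * shmcalls[] is indexed by the "which" argument of shmsys() below.
 * shmtot is the total number of clicks currently allocated to all
 * segments; shmget() checks it against the shminfo.shmall limit.
 */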
int	shmat(), shmctl(), shmdt(), shmget();
int	(*shmcalls[])() = { shmat, shmctl, shmdt, shmget };
int	shmtot = 0;

/*
 * Per process internal structure for managing segments.
 * Each process using shm will have an array of shminfo.shmseg of these.
 */
struct	shmdesc {
	vm_offset_t	shmd_uva;
	int		shmd_id;
};

/*
 * Per segment internal structure (shm_handle).
 */
struct	shmhandle {
	vm_offset_t	shmh_kva;
	caddr_t		shmh_id;
};

vm_map_t shm_map;	/* address space for shared memory segments */

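/*
 * Called during system startup: carve out a kernel submap large enough
 * for shminfo.shmall pages of shared memory and mark every segment
 * slot free.
 */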
shminit()
{
	register int i;
	vm_offset_t whocares1, whocares2;

	shm_map = kmem_suballoc(kernel_map, &whocares1, &whocares2,
				shminfo.shmall * NBPG, FALSE);
	if (shminfo.shmmni > SHMMMNI)
		shminfo.shmmni = SHMMMNI;
	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = 0;
		shmsegs[i].shm_perm.seq = 0;
	}
}

/*
 * Entry point for all SHM calls
 */
struct shmsys_args {
	u_int which;
};
shmsys(p, uap, retval)
	struct proc *p;
	struct shmsys_args *uap;
	int *retval;
{

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return (EINVAL);
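	/*
	 * The selected call's own arguments follow the "which" word in
	 * the argument area, so &uap[1] is the argument structure that
	 * shmat/shmctl/shmdt/shmget expect.
	 */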
	return ((*shmcalls[uap->which])(p, &uap[1], retval));
}

/*
 * Get a shared memory segment
 */
struct shmget_args {
	key_t key;
	int size;
	int shmflg;
};
shmget(p, uap, retval)
	struct proc *p;
	register struct shmget_args *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = p->p_ucred;
	register int i;
	int error, size, rval = 0;
	register struct shmhandle *shmh;

	/* look up the specified shm_id */
	if (uap->key != IPC_PRIVATE) {
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) &&
			    shmsegs[i].shm_perm.key == uap->key) {
				rval = i;
				break;
			}
	} else
		i = shminfo.shmmni;

	/* create a new shared segment if necessary */
	if (i == shminfo.shmmni) {
		if ((uap->shmflg & IPC_CREAT) == 0)
			return (ENOENT);
		if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
			return (EINVAL);
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) == 0) {
				rval = i;
				break;
			}
		if (i == shminfo.shmmni)
			return (ENOSPC);
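		/*
		 * Convert the requested size from bytes to clicks,
		 * rounding up, and check it against the system-wide
		 * shminfo.shmall allocation limit.
		 */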
		size = clrnd(btoc(uap->size));
		if (shmtot + size > shminfo.shmall)
			return (ENOMEM);
		shp = &shmsegs[rval];
		/*
		 * We need to do a couple of things to ensure consistency
		 * in case we sleep in malloc().  We mark the segment as
		 * allocated so that other shmget()s will not allocate it.
		 * We mark it as "destroyed" to ensure that shmvalid() is
		 * false, making most operations fail (XXX).  We set the
		 * key so that other shmget()s will fail.
		 */
		shp->shm_perm.mode = SHM_ALLOC | SHM_DEST;
		shp->shm_perm.key = uap->key;
		shmh = (struct shmhandle *)
			malloc(sizeof(struct shmhandle), M_SHM, M_WAITOK);
		shmh->shmh_kva = 0;
		shmh->shmh_id = (caddr_t)(0xc0000000|rval);	/* XXX */
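		/*
		 * The fabricated handle identifies the segment's backing
		 * object: vm_mmap() is given the same handle here and in
		 * shmat(), so the kernel mapping and every user attach
		 * reference the same pages.
		 */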
		error = vm_mmap(shm_map, &shmh->shmh_kva, ctob(size),
				VM_PROT_ALL, MAP_ANON, shmh->shmh_id, 0);
		if (error) {
			free((caddr_t)shmh, M_SHM);
			shp->shm_perm.mode = 0;
			return(ENOMEM);
		}
		shp->shm_handle = (void *) shmh;
		shmtot += size;
		shp->shm_perm.cuid = shp->shm_perm.uid = cred->cr_uid;
		shp->shm_perm.cgid = shp->shm_perm.gid = cred->cr_gid;
		shp->shm_perm.mode = SHM_ALLOC | (uap->shmflg&0777);
		shp->shm_segsz = uap->size;
		shp->shm_cpid = p->p_pid;
		shp->shm_lpid = shp->shm_nattch = 0;
		shp->shm_atime = shp->shm_dtime = 0;
		shp->shm_ctime = time.tv_sec;
	} else {
		shp = &shmsegs[rval];
		/* XXX: probably not the right thing to do */
		if (shp->shm_perm.mode & SHM_DEST)
			return (EBUSY);
		if (error = ipcaccess(&shp->shm_perm, uap->shmflg&0777, cred))
			return (error);
		if (uap->size && uap->size > shp->shm_segsz)
			return (EINVAL);
		if ((uap->shmflg&IPC_CREAT) && (uap->shmflg&IPC_EXCL))
			return (EEXIST);
	}
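	/*
	 * The identifier returned to the user encodes both the slot
	 * index and the slot's sequence number; shmvalid() checks the
	 * sequence number so that stale ids are rejected after a slot
	 * is recycled.
	 */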
	*retval = shp->shm_perm.seq * SHMMMNI + rval;
	return (0);
}

/*
 * Shared memory control
 */
struct shmctl_args {
	int shmid;
	int cmd;
	caddr_t buf;
};
/* ARGSUSED */
shmctl(p, uap, retval)
	struct proc *p;
	register struct shmctl_args *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = p->p_ucred;
	struct shmid_ds sbuf;
	int error;

	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	switch (uap->cmd) {
	case IPC_STAT:
		if (error = ipcaccess(&shp->shm_perm, IPC_R, cred))
			return (error);
		return (copyout((caddr_t)shp, uap->buf, sizeof(*shp)));

	case IPC_SET:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		if (error = copyin(uap->buf, (caddr_t)&sbuf, sizeof sbuf))
			return (error);
		shp->shm_perm.uid = sbuf.shm_perm.uid;
		shp->shm_perm.gid = sbuf.shm_perm.gid;
		shp->shm_perm.mode = (shp->shm_perm.mode & ~0777)
			| (sbuf.shm_perm.mode & 0777);
		shp->shm_ctime = time.tv_sec;
		break;

	case IPC_RMID:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		/* set ctime? */
		shp->shm_perm.key = IPC_PRIVATE;
		shp->shm_perm.mode |= SHM_DEST;
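		/*
		 * If the segment is still attached somewhere, destruction
		 * is deferred until the last detach (see shmufree()).
		 */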
		if (shp->shm_nattch <= 0)
			shmfree(shp);
		break;

	default:
		return (EINVAL);
	}
	return (0);
}

/*
 * Attach to shared memory segment.
 */
struct shmat_args {
	int	shmid;
	caddr_t	shmaddr;
	int	shmflg;
};
shmat(p, uap, retval)
	struct proc *p;
	register struct shmat_args *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register int size;
	caddr_t uva;
	int error;
	int flags;
	vm_prot_t prot;
	struct shmdesc *shmd;

	/*
	 * Allocate descriptors now (before validity check)
	 * in case malloc() blocks.
	 */
	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	size = shminfo.shmseg * sizeof(struct shmdesc);
	if (shmd == NULL) {
		shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
		bzero((caddr_t)shmd, size);
		p->p_vmspace->vm_shm = (caddr_t)shmd;
	}
	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	if (shp->shm_handle == NULL)
		panic("shmat NULL handle");
	if (error = ipcaccess(&shp->shm_perm,
	    (uap->shmflg&SHM_RDONLY) ? IPC_R : IPC_R|IPC_W, p->p_ucred))
		return (error);
	uva = uap->shmaddr;
	if (uva && ((int)uva & (SHMLBA-1))) {
		if (uap->shmflg & SHM_RND)
			uva = (caddr_t) ((int)uva & ~(SHMLBA-1));
		else
			return (EINVAL);
	}
	/*
	 * Find a free descriptor slot; a process may attach at most
	 * shminfo.shmseg segments at once.
	 */
	for (size = 0; size < shminfo.shmseg; size++) {
		if (shmd->shmd_uva == 0)
			break;
		shmd++;
	}
	if (size >= shminfo.shmseg)
		return (EMFILE);
	size = ctob(clrnd(btoc(shp->shm_segsz)));
	prot = VM_PROT_READ;
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON|MAP_SHARED;
	if (uva)
		flags |= MAP_FIXED;
	else
		uva = (caddr_t)0x1000000;	/* XXX */
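	/*
	 * Map the segment into the process address space using the
	 * handle recorded by shmget(), so that every attach references
	 * the same backing pages.
	 */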
	error = vm_mmap(&p->p_vmspace->vm_map, (vm_offset_t *)&uva,
	    (vm_size_t)size, prot, flags,
	    ((struct shmhandle *)shp->shm_handle)->shmh_id, 0);
	if (error)
		return(error);
	shmd->shmd_uva = (vm_offset_t)uva;
	shmd->shmd_id = uap->shmid;
	/*
	 * Fill in the remaining fields
	 */
	shp->shm_lpid = p->p_pid;
	shp->shm_atime = time.tv_sec;
	shp->shm_nattch++;
	*retval = (int) uva;
	return (0);
}

/*
 * Detach from shared memory segment.
 */
struct shmdt_args {
	caddr_t	shmaddr;
};
/* ARGSUSED */
shmdt(p, uap, retval)
	struct proc *p;
	struct shmdt_args *uap;
	int *retval;
{
	register struct shmdesc *shmd;
	register int i;

	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmd++)
		if (shmd->shmd_uva &&
		    shmd->shmd_uva == (vm_offset_t)uap->shmaddr)
			break;
	if (i == shminfo.shmseg)
		return(EINVAL);
	/* record last pid before shmufree() clears the descriptor */
	shmsegs[shmd->shmd_id % SHMMMNI].shm_lpid = p->p_pid;
	shmufree(p, shmd);
	return (0);
}

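/*
 * Called at fork time: give the child its own copy of the parent's
 * descriptor array and bump the attach count of every segment the
 * parent has attached.
 */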
shmfork(p1, p2, isvfork)
	struct proc *p1, *p2;
	int isvfork;
{
	register struct shmdesc *shmd;
	register int size;

	/*
	 * Copy the parent's descriptor array
	 */
	size = shminfo.shmseg * sizeof(struct shmdesc);
	shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmd, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmd;
	/*
	 * Increment reference counts
	 */
	for (size = 0; size < shminfo.shmseg; size++, shmd++)
		if (shmd->shmd_uva)
			shmsegs[shmd->shmd_id % SHMMMNI].shm_nattch++;
}

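/*
 * Called at process exit: detach all attached segments and release
 * the descriptor array.
 */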
shmexit(p)
	struct proc *p;
{
	register struct shmdesc *shmd;
	register int i;

	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmd++)
		if (shmd->shmd_uva)
			shmufree(p, shmd);
	free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
	p->p_vmspace->vm_shm = NULL;
}

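/*
 * Check that a user-supplied shmid refers to a currently allocated,
 * non-destroyed segment and that its sequence number matches.
 */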
shmvalid(id)
	register int id;
{
	register struct shmid_ds *shp;

	if (id < 0 || (id % SHMMMNI) >= shminfo.shmmni)
		return(EINVAL);
	shp = &shmsegs[id % SHMMMNI];
	if (shp->shm_perm.seq == (id / SHMMMNI) &&
	    (shp->shm_perm.mode & (SHM_ALLOC|SHM_DEST)) == SHM_ALLOC)
		return(0);
	return(EINVAL);
}

/*
 * Free user resources associated with a shared memory segment
 */
shmufree(p, shmd)
	struct proc *p;
	struct shmdesc *shmd;
{
	register struct shmid_ds *shp;

	shp = &shmsegs[shmd->shmd_id % SHMMMNI];
	(void) vm_deallocate(&p->p_vmspace->vm_map, shmd->shmd_uva,
			     ctob(clrnd(btoc(shp->shm_segsz))));
	shmd->shmd_id = 0;
	shmd->shmd_uva = 0;
	shp->shm_dtime = time.tv_sec;
	if (--shp->shm_nattch <= 0 && (shp->shm_perm.mode & SHM_DEST))
		shmfree(shp);
}

/*
 * Deallocate resources associated with a shared memory segment
 */
shmfree(shp)
	register struct shmid_ds *shp;
{

	if (shp->shm_handle == NULL)
		panic("shmfree");
	/*
	 * Lose our lingering object reference by deallocating space
	 * in the kernel.  The pager is deallocated as a side effect.
	 */
	vm_deallocate(shm_map,
		      ((struct shmhandle *)shp->shm_handle)->shmh_kva,
		      ctob(clrnd(btoc(shp->shm_segsz))));
	free((caddr_t)shp->shm_handle, M_SHM);
	shp->shm_handle = NULL;
	shmtot -= clrnd(btoc(shp->shm_segsz));
	shp->shm_perm.mode = 0;
	/*
	 * Increment the sequence number to ensure that outstanding
	 * shmids for this segment will be invalid in the event that
	 * the segment is reallocated.  Note that shmids must be
	 * positive as decreed by SVID.
	 */
	shp->shm_perm.seq++;
	if ((int)(shp->shm_perm.seq * SHMMMNI) < 0)
		shp->shm_perm.seq = 0;
}

/*
 * XXX This routine would be common to all sysV style IPC
 *     (if the others were implemented).
 */
ipcaccess(ipc, mode, cred)
	register struct ipc_perm *ipc;
	int mode;
	register struct ucred *cred;
{
	register int m;

	if (cred->cr_uid == 0)
		return(0);
	/*
	 * Access check is based on only one of owner, group, public.
	 * If not owner, then check group.
	 * If not a member of the group, then check public access.
	 */
	mode &= 0700;
	m = ipc->mode;
	if (cred->cr_uid != ipc->uid && cred->cr_uid != ipc->cuid) {
		m <<= 3;
		if (!groupmember(ipc->gid, cred) &&
		    !groupmember(ipc->cgid, cred))
			m <<= 3;
	}
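	/*
	 * After the shifts above, the permission triple that applies to
	 * this credential occupies the 0700 position, where it can be
	 * compared directly against the (owner-relative) request bits.
	 */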
	if ((mode&m) == mode)
		return (0);
	return (EACCES);
}
#endif /* SYSVSHM */