/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department. Originally from University of Wisconsin.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: uipc_shm.c 1.11 92/04/23$
 *
 *	@(#)sysv_shm.c	7.19 (Berkeley) 07/10/92
 */

/*
 * System V shared memory routines.
 * TEMPORARY, until mmap is in place;
 * needed now for HP-UX compatibility and X server (yech!).
 */

#ifdef SYSVSHM

#include "param.h"
#include "systm.h"
#include "kernel.h"
#include "proc.h"
#include "shm.h"
#include "malloc.h"
#include "mman.h"
#include "vm/vm.h"
#include "vm/vm_kern.h"
#include "vm/vm_inherit.h"
#include "vm/vm_pager.h"

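/*
 * Dispatch table for the shmsys() multiplexer below; shmtot counts
 * the clicks currently allocated to all shared memory segments.
 */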
int	shmat(), shmctl(), shmdt(), shmget();
int	(*shmcalls[])() = { shmat, shmctl, shmdt, shmget };
int	shmtot = 0;

/*
 * Per process internal structure for managing segments.
 * Each process using shm will have an array of shminfo.shmseg of
 * these, hung off its vmspace (vm_shm).
 */
struct	shmdesc {
	vm_offset_t	shmd_uva;
	int		shmd_id;
};

/*
 * Per segment internal structure (shm_handle).
 */
struct	shmhandle {
	vm_offset_t	shmh_kva;
	caddr_t		shmh_id;
};

vm_map_t shm_map;	/* address space for shared memory segments */

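/*
 * Initialize the shared memory facility: allocate a submap of the
 * kernel map large enough for shminfo.shmall pages of segments,
 * clamp shminfo.shmmni to the compiled-in SHMMMNI limit, and mark
 * every segment descriptor free.
 */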
shminit()
{
	register int i;
	vm_offset_t whocares1, whocares2;

	shm_map = kmem_suballoc(kernel_map, &whocares1, &whocares2,
				shminfo.shmall * NBPG, FALSE);
	if (shminfo.shmmni > SHMMMNI)
		shminfo.shmmni = SHMMMNI;
	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = 0;
		shmsegs[i].shm_perm.seq = 0;
	}
}

/*
 * Entry point for all SHM calls
 */
struct shmsys_args {
	u_int which;
};
shmsys(p, uap, retval)
	struct proc *p;
	struct shmsys_args *uap;
	int *retval;
{

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return (EINVAL);
	return ((*shmcalls[uap->which])(p, &uap[1], retval));
}

/*
 * Get a shared memory segment
 */
struct shmget_args {
	key_t key;
	int size;
	int shmflg;
};
shmget(p, uap, retval)
	struct proc *p;
	register struct shmget_args *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = p->p_ucred;
	register int i;
	int error, size, rval = 0;
	register struct shmhandle *shmh;

	/* look up the specified shm_id */
	if (uap->key != IPC_PRIVATE) {
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) &&
			    shmsegs[i].shm_perm.key == uap->key) {
				rval = i;
				break;
			}
	} else
		i = shminfo.shmmni;

	/* create a new shared segment if necessary */
	if (i == shminfo.shmmni) {
		if ((uap->shmflg & IPC_CREAT) == 0)
			return (ENOENT);
		if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
			return (EINVAL);
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) == 0) {
				rval = i;
				break;
			}
		if (i == shminfo.shmmni)
			return (ENOSPC);
		size = clrnd(btoc(uap->size));
		if (shmtot + size > shminfo.shmall)
			return (ENOMEM);
		shp = &shmsegs[rval];
		/*
		 * We need to do a couple of things to ensure consistency
		 * in case we sleep in malloc().  We mark the segment as
		 * allocated so that other shmgets() will not allocate it.
		 * We mark it as "destroyed" to ensure that shmvalid() is
		 * false, making most operations fail (XXX).  We set the key
		 * so that other shmget()s will fail.
		 */
		shp->shm_perm.mode = SHM_ALLOC | SHM_DEST;
		shp->shm_perm.key = uap->key;
		shmh = (struct shmhandle *)
			malloc(sizeof(struct shmhandle), M_SHM, M_WAITOK);
		shmh->shmh_kva = 0;
		shmh->shmh_id = (caddr_t)(0xc0000000|rval);	/* XXX */
		error = vm_mmap(shm_map, &shmh->shmh_kva, ctob(size),
				VM_PROT_ALL, MAP_ANON, shmh->shmh_id, 0);
		if (error) {
			free((caddr_t)shmh, M_SHM);
			shp->shm_perm.mode = 0;
			return(ENOMEM);
		}
		shp->shm_handle = (void *) shmh;
		shmtot += size;
		shp->shm_perm.cuid = shp->shm_perm.uid = cred->cr_uid;
		shp->shm_perm.cgid = shp->shm_perm.gid = cred->cr_gid;
		shp->shm_perm.mode = SHM_ALLOC | (uap->shmflg&0777);
		shp->shm_segsz = uap->size;
		shp->shm_cpid = p->p_pid;
		shp->shm_lpid = shp->shm_nattch = 0;
		shp->shm_atime = shp->shm_dtime = 0;
		shp->shm_ctime = time.tv_sec;
	} else {
		shp = &shmsegs[rval];
		/* XXX: probably not the right thing to do */
		if (shp->shm_perm.mode & SHM_DEST)
			return (EBUSY);
		if (error = ipcaccess(&shp->shm_perm, uap->shmflg&0777, cred))
			return (error);
		if (uap->size && uap->size > shp->shm_segsz)
			return (EINVAL);
		if ((uap->shmflg&IPC_CREAT) && (uap->shmflg&IPC_EXCL))
			return (EEXIST);
	}
	*retval = shp->shm_perm.seq * SHMMMNI + rval;
	return (0);
}

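/*
 * A shared memory identifier encodes both the slot in shmsegs[] and
 * that slot's reuse count:
 *
 *	shmid = shm_perm.seq * SHMMMNI + slot
 *
 * shmvalid() recovers the slot with (shmid % SHMMMNI) and rejects an
 * identifier whose sequence number no longer matches; shmfree() bumps
 * the sequence number so that stale ids for a recycled slot fail
 * validation.
 */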
/*
 * Shared memory control
 */
struct shmctl_args {
	int shmid;
	int cmd;
	caddr_t buf;
};
/* ARGSUSED */
shmctl(p, uap, retval)
	struct proc *p;
	register struct shmctl_args *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = p->p_ucred;
	struct shmid_ds sbuf;
	int error;

	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	switch (uap->cmd) {
	case IPC_STAT:
		if (error = ipcaccess(&shp->shm_perm, IPC_R, cred))
			return (error);
		return (copyout((caddr_t)shp, uap->buf, sizeof(*shp)));

	case IPC_SET:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		if (error = copyin(uap->buf, (caddr_t)&sbuf, sizeof sbuf))
			return (error);
		shp->shm_perm.uid = sbuf.shm_perm.uid;
		shp->shm_perm.gid = sbuf.shm_perm.gid;
		shp->shm_perm.mode = (shp->shm_perm.mode & ~0777)
			| (sbuf.shm_perm.mode & 0777);
		shp->shm_ctime = time.tv_sec;
		break;

	case IPC_RMID:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		/* set ctime? */
		shp->shm_perm.key = IPC_PRIVATE;
		shp->shm_perm.mode |= SHM_DEST;
		if (shp->shm_nattch <= 0)
			shmfree(shp);
		break;

	default:
		return (EINVAL);
	}
	return (0);
}

/*
 * Attach to shared memory segment.
 */
struct shmat_args {
	int	shmid;
	caddr_t	shmaddr;
	int	shmflg;
};
shmat(p, uap, retval)
	struct proc *p;
	register struct shmat_args *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register int size;
	caddr_t uva;
	int error;
	int flags;
	vm_prot_t prot;
	struct shmdesc *shmd;

	/*
	 * Allocate descriptors now (before validity check)
	 * in case malloc() blocks.
	 */
	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	size = shminfo.shmseg * sizeof(struct shmdesc);
	if (shmd == NULL) {
		shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
		bzero((caddr_t)shmd, size);
		p->p_vmspace->vm_shm = (caddr_t)shmd;
	}
	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	if (shp->shm_handle == NULL)
		panic("shmat NULL handle");
	if (error = ipcaccess(&shp->shm_perm,
	    (uap->shmflg&SHM_RDONLY) ? IPC_R : IPC_R|IPC_W, p->p_ucred))
		return (error);
	uva = uap->shmaddr;
	if (uva && ((int)uva & (SHMLBA-1))) {
		if (uap->shmflg & SHM_RND)
			uva = (caddr_t) ((int)uva & ~(SHMLBA-1));
		else
			return (EINVAL);
	}
	/*
	 * Make sure user doesn't use more than their fair share
	 */
	for (size = 0; size < shminfo.shmseg; size++) {
		if (shmd->shmd_uva == 0)
			break;
		shmd++;
	}
	if (size >= shminfo.shmseg)
		return (EMFILE);
	size = ctob(clrnd(btoc(shp->shm_segsz)));
	prot = VM_PROT_READ;
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON|MAP_SHARED;
	if (uva)
		flags |= MAP_FIXED;
	else
		uva = (caddr_t)0x1000000;	/* XXX */
	error = vm_mmap(&p->p_vmspace->vm_map, (vm_offset_t *)&uva,
	    (vm_size_t)size, prot, flags,
	    ((struct shmhandle *)shp->shm_handle)->shmh_id, 0);
	if (error)
		return(error);
	shmd->shmd_uva = (vm_offset_t)uva;
	shmd->shmd_id = uap->shmid;
	/*
	 * Fill in the remaining fields
	 */
	shp->shm_lpid = p->p_pid;
	shp->shm_atime = time.tv_sec;
	shp->shm_nattch++;
	*retval = (int) uva;
	return (0);
}

/*
 * Detach from shared memory segment.
 */
struct shmdt_args {
	caddr_t	shmaddr;
};
/* ARGSUSED */
shmdt(p, uap, retval)
	struct proc *p;
	struct shmdt_args *uap;
	int *retval;
{
	register struct shmdesc *shmd;
	register int i;

	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmd++)
		if (shmd->shmd_uva &&
		    shmd->shmd_uva == (vm_offset_t)uap->shmaddr)
			break;
	if (i == shminfo.shmseg)
		return(EINVAL);
	/*
	 * Record the detaching pid before shmufree() clears shmd_id
	 * out of the descriptor.
	 */
	shmsegs[shmd->shmd_id % SHMMMNI].shm_lpid = p->p_pid;
	shmufree(p, shmd);
	return (0);
}

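/*
 * Duplicate the shm descriptor array at fork time and take an extra
 * reference on every attached segment.  Assumes the parent already
 * has a descriptor array (vm_shm != NULL).
 */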
shmfork(p1, p2, isvfork)
	struct proc *p1, *p2;
	int isvfork;
{
	register struct shmdesc *shmd;
	register int size;

	/*
	 * Copy the parent's descriptive information
	 */
	size = shminfo.shmseg * sizeof(struct shmdesc);
	shmd = (struct shmdesc *)malloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmd, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmd;
	/*
	 * Increment reference counts
	 */
	for (size = 0; size < shminfo.shmseg; size++, shmd++)
		if (shmd->shmd_uva)
			shmsegs[shmd->shmd_id % SHMMMNI].shm_nattch++;
}

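/*
 * Detach any remaining segments and release the descriptor array
 * when a process exits.
 */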
shmexit(p)
	struct proc *p;
{
	register struct shmdesc *shmd;
	register int i;

	shmd = (struct shmdesc *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmd++)
		if (shmd->shmd_uva)
			shmufree(p, shmd);
	free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
	p->p_vmspace->vm_shm = NULL;
}

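/*
 * Check that a shmid names an allocated, non-destroyed segment whose
 * sequence number still matches; returns 0 if so, EINVAL otherwise.
 */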
shmvalid(id)
	register int id;
{
	register struct shmid_ds *shp;

	if (id < 0 || (id % SHMMMNI) >= shminfo.shmmni)
		return(EINVAL);
	shp = &shmsegs[id % SHMMMNI];
	if (shp->shm_perm.seq == (id / SHMMMNI) &&
	    (shp->shm_perm.mode & (SHM_ALLOC|SHM_DEST)) == SHM_ALLOC)
		return(0);
	return(EINVAL);
}

/*
 * Free user resources associated with a shared memory segment
 */
shmufree(p, shmd)
	struct proc *p;
	struct shmdesc *shmd;
{
	register struct shmid_ds *shp;

	shp = &shmsegs[shmd->shmd_id % SHMMMNI];
	(void) vm_deallocate(&p->p_vmspace->vm_map, shmd->shmd_uva,
			     ctob(clrnd(btoc(shp->shm_segsz))));
	shmd->shmd_id = 0;
	shmd->shmd_uva = 0;
	shp->shm_dtime = time.tv_sec;
	if (--shp->shm_nattch <= 0 && (shp->shm_perm.mode & SHM_DEST))
		shmfree(shp);
}

/*
 * Deallocate resources associated with a shared memory segment
 */
shmfree(shp)
	register struct shmid_ds *shp;
{

	if (shp->shm_handle == NULL)
		panic("shmfree");
	/*
	 * Lose our lingering object reference by deallocating space
	 * in kernel.  Pager will also be deallocated as a side-effect.
	 */
	vm_deallocate(shm_map,
		      ((struct shmhandle *)shp->shm_handle)->shmh_kva,
		      ctob(clrnd(btoc(shp->shm_segsz))));
	free((caddr_t)shp->shm_handle, M_SHM);
	shp->shm_handle = NULL;
	shmtot -= clrnd(btoc(shp->shm_segsz));
	shp->shm_perm.mode = 0;
	/*
	 * Increment the sequence number to ensure that outstanding
	 * shmids for this segment will be invalid in the event that
	 * the segment is reallocated.  Note that shmids must be
	 * positive as decreed by SVID.
	 */
	shp->shm_perm.seq++;
	if ((int)(shp->shm_perm.seq * SHMMMNI) < 0)
		shp->shm_perm.seq = 0;
}

/*
 * XXX This routine would be common to all sysV style IPC
 *     (if the others were implemented).
 */
ipcaccess(ipc, mode, cred)
	register struct ipc_perm *ipc;
	int mode;
	register struct ucred *cred;
{
	register int m;

	if (cred->cr_uid == 0)
		return(0);
	/*
	 * Access check is based on only one of owner, group, public.
	 * If not owner, then check group.
	 * If not a member of the group, then check public access.
	 */
	mode &= 0700;
	m = ipc->mode;
	if (cred->cr_uid != ipc->uid && cred->cr_uid != ipc->cuid) {
		m <<= 3;
		if (!groupmember(ipc->gid, cred) &&
		    !groupmember(ipc->cgid, cred))
			m <<= 3;
	}
	if ((mode&m) == mode)
		return (0);
	return (EACCES);
}
#endif /* SYSVSHM */