/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department. Originally from University of Wisconsin.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: uipc_shm.c 1.9 89/08/14$
 *
 *	@(#)sysv_shm.c	7.8 (Berkeley) 06/24/90
 */

/*
 * System V shared memory routines.
 * TEMPORARY, until mmap is in place;
 * needed now for HP-UX compatibility and X server (yech!).
 */

#ifdef SYSVSHM

#include "machine/pte.h"

#include "param.h"
#include "systm.h"
#include "syscontext.h"
#include "kernel.h"
#include "proc.h"
#include "vm.h"
#include "shm.h"
#include "mapmem.h"
#include "malloc.h"

#ifdef HPUXCOMPAT
#include "../hpux/hpux.h"
#endif

int	shmat(), shmctl(), shmdt(), shmget();
int	(*shmcalls[])() = { shmat, shmctl, shmdt, shmget };
int	shmtot = 0;

int	shmfork(), shmexit();
struct	mapmemops shmops = { shmfork, (int (*)())0, shmexit, shmexit };

shminit()
{
	register int i;

	if (shminfo.shmmni > SHMMMNI)
		shminfo.shmmni = SHMMMNI;
	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = 0;
		shmsegs[i].shm_perm.seq = 0;
	}
}

/*
 * Entry point for all SHM calls
 */
shmsys(p, uap, retval)
	struct proc *p;
	struct args {
		u_int which;
	} *uap;
	int *retval;
{

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		RETURN (EINVAL);
	RETURN ((*shmcalls[uap->which])(p, &uap[1], retval));
}
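
/*
 * A quick sketch of how the dispatch above is expected to work, based on
 * the order of shmcalls[]: index 0 is shmat, 1 is shmctl, 2 is shmdt and
 * 3 is shmget.  Assuming the user-level library stubs simply push their
 * arguments after the "which" word (a hypothetical but typical arrangement),
 * a call such as
 *
 *	shmget(key, size, flag)
 *
 * would enter the kernel as shmsys(3, key, size, flag); shmsys() then
 * indexes shmcalls[3] and hands &uap[1] (the argument words following
 * "which") to shmget() as that routine's own args structure.
 */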

/*
 * Get a shared memory segment
 */
shmget(p, uap, retval)
	struct proc *p;
	register struct args {
		key_t key;
		int size;
		int shmflg;
	} *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = u.u_cred;
	register int i;
	int error, size, rval = 0;
	caddr_t kva;

	/* look up the specified shm_id */
	if (uap->key != IPC_PRIVATE) {
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) &&
			    shmsegs[i].shm_perm.key == uap->key) {
				rval = i;
				break;
			}
	} else
		i = shminfo.shmmni;

	/* create a new shared segment if necessary */
	if (i == shminfo.shmmni) {
		if ((uap->shmflg & IPC_CREAT) == 0)
			return (ENOENT);
		if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
			return (EINVAL);
		for (i = 0; i < shminfo.shmmni; i++)
			if ((shmsegs[i].shm_perm.mode & SHM_ALLOC) == 0) {
				rval = i;
				break;
			}
		if (i == shminfo.shmmni)
			return (ENOSPC);
		size = clrnd(btoc(uap->size));
		if (shmtot + size > shminfo.shmall)
			return (ENOMEM);
		shp = &shmsegs[rval];
		/*
		 * We need to do a couple of things to ensure consistency
		 * in case we sleep in malloc().  We mark the segment as
		 * allocated so that other shmget()s will not allocate it.
		 * We mark it as "destroyed" to ensure that shmvalid() is
		 * false, making most operations fail (XXX).  We set the key
		 * so that other shmget()s will fail.
		 */
		shp->shm_perm.mode = SHM_ALLOC | SHM_DEST;
		shp->shm_perm.key = uap->key;
		kva = (caddr_t) malloc((u_long)ctob(size), M_SHM, M_WAITOK);
		if (kva == NULL) {
			shp->shm_perm.mode = 0;
			return (ENOMEM);
		}
		if (!claligned(kva))
			panic("shmget: non-aligned memory");
		bzero(kva, (u_int)ctob(size));
		shmtot += size;
		shp->shm_perm.cuid = shp->shm_perm.uid = cred->cr_uid;
		shp->shm_perm.cgid = shp->shm_perm.gid = cred->cr_gid;
		shp->shm_perm.mode = SHM_ALLOC | (uap->shmflg&0777);
		shp->shm_handle = (void *) kvtopte(kva);
		shp->shm_segsz = uap->size;
		shp->shm_cpid = p->p_pid;
		shp->shm_lpid = shp->shm_nattch = 0;
		shp->shm_atime = shp->shm_dtime = 0;
		shp->shm_ctime = time.tv_sec;
	} else {
		shp = &shmsegs[rval];
		/* XXX: probably not the right thing to do */
		if (shp->shm_perm.mode & SHM_DEST)
			return (EBUSY);
		if (error = ipcaccess(&shp->shm_perm, uap->shmflg&0777, cred))
			return (error);
		if (uap->size && uap->size > shp->shm_segsz)
			return (EINVAL);
		if ((uap->shmflg&IPC_CREAT) && (uap->shmflg&IPC_EXCL))
			return (EEXIST);
	}
	*retval = shp->shm_perm.seq * SHMMMNI + rval;
	return (0);
}
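
/*
 * The shmid handed back above encodes both the slot in shmsegs[] and that
 * slot's sequence number: id = seq * SHMMMNI + index.  The other routines
 * recover the index with (id % SHMMMNI) and the expected sequence with
 * (id / SHMMMNI).  As a worked example with a hypothetical SHMMMNI of 32
 * (the real value comes from shm.h): slot 5 with seq 2 yields id
 * 2*32+5 = 69.  After the segment is freed and the slot reused, seq becomes
 * 3, so a stale id of 69 (whose encoded seq is 69/32 == 2) no longer
 * matches and shmvalid() rejects it with EINVAL.
 */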

/*
 * Shared memory control
 */
/* ARGSUSED */
shmctl(p, uap, retval)
	struct proc *p;
	register struct args {
		int shmid;
		int cmd;
		caddr_t buf;
	} *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register struct ucred *cred = u.u_cred;
	struct shmid_ds sbuf;
	int error;

	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	switch (uap->cmd) {
	case IPC_STAT:
		if (error = ipcaccess(&shp->shm_perm, IPC_R, cred))
			return (error);
		return (copyout((caddr_t)shp, uap->buf, sizeof(*shp)));

	case IPC_SET:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		if (error = copyin(uap->buf, (caddr_t)&sbuf, sizeof sbuf))
			return (error);
		shp->shm_perm.uid = sbuf.shm_perm.uid;
		shp->shm_perm.gid = sbuf.shm_perm.gid;
		shp->shm_perm.mode = (shp->shm_perm.mode & ~0777)
			| (sbuf.shm_perm.mode & 0777);
		shp->shm_ctime = time.tv_sec;
		break;

	case IPC_RMID:
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		/* set ctime? */
		shp->shm_perm.key = IPC_PRIVATE;
		shp->shm_perm.mode |= SHM_DEST;
		if (shp->shm_nattch <= 0)
			shmfree(shp);
		break;

#ifdef HPUXCOMPAT
	case SHM_LOCK:
	case SHM_UNLOCK:
		/* don't really do anything, but make them think we did */
		if ((p->p_flag & SHPUX) == 0)
			return (EINVAL);
		if (cred->cr_uid && cred->cr_uid != shp->shm_perm.uid &&
		    cred->cr_uid != shp->shm_perm.cuid)
			return (EPERM);
		break;
#endif

	default:
		return (EINVAL);
	}
	return (0);
}
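
/*
 * Note that IPC_RMID above is a deferred destroy when the segment is still
 * attached.  For example, if shm_nattch is 2 when IPC_RMID is issued, the
 * segment is only marked SHM_DEST (and its key reset to IPC_PRIVATE so new
 * shmget()s cannot find it); the storage is actually released by shmfree()
 * from within shmufree() once the last detach drops shm_nattch to 0.
 */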

/*
 * Attach to shared memory segment.
 */
shmat(p, uap, retval)
	struct proc *p;
	register struct args {
		int	shmid;
		caddr_t	shmaddr;
		int	shmflg;
	} *uap;
	int *retval;
{
	register struct shmid_ds *shp;
	register int size;
	struct mapmem *mp;
	caddr_t uva;
	int error, prot, shmmapin();

	if (error = shmvalid(uap->shmid))
		return (error);
	shp = &shmsegs[uap->shmid % SHMMMNI];
	if (shp->shm_handle == NULL)
		panic("shmat NULL handle");
	if (error = ipcaccess(&shp->shm_perm,
			(uap->shmflg&SHM_RDONLY) ? IPC_R : IPC_R|IPC_W, u.u_cred))
		return (error);
	uva = uap->shmaddr;
	if (uva && ((int)uva & (SHMLBA-1))) {
		if (uap->shmflg & SHM_RND)
			uva = (caddr_t) ((int)uva & ~(SHMLBA-1));
		else
			return (EINVAL);
	}
	/*
	 * Make sure the user doesn't use more than their fair share
	 */
	size = 0;
	for (mp = u.u_mmap; mp; mp = mp->mm_next)
		if (mp->mm_ops == &shmops)
			size++;
	if (size >= shminfo.shmseg)
		return (EMFILE);
	/*
	 * Allocate a mapped memory region descriptor and
	 * attempt to expand the user page table to allow for the region
	 */
	prot = (uap->shmflg & SHM_RDONLY) ? MM_RO : MM_RW;
#if defined(hp300)
	prot |= MM_CI;
#endif
	size = ctob(clrnd(btoc(shp->shm_segsz)));
	error = mmalloc(p, uap->shmid, &uva, (segsz_t)size, prot, &shmops, &mp);
	if (error)
		return (error);
	if (error = mmmapin(p, mp, shmmapin)) {
		(void) mmfree(p, mp);
		return (error);
	}
	/*
	 * Fill in the remaining fields
	 */
	shp->shm_lpid = p->p_pid;
	shp->shm_atime = time.tv_sec;
	shp->shm_nattch++;
	*retval = (int) uva;
	return (0);
}
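
/*
 * The SHM_RND handling above truncates a requested attach address down to
 * an SHMLBA boundary.  For illustration only, assuming SHMLBA were 0x2000
 * (the real value is machine-dependent): shmat(id, (caddr_t)0x12345,
 * SHM_RND) would attach at 0x12000, since 0x12345 & ~(0x2000-1) == 0x12000,
 * while the same request without SHM_RND fails with EINVAL because the
 * address is not aligned.
 */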

/*
 * Detach from shared memory segment.
 */
/* ARGSUSED */
shmdt(p, uap, retval)
	struct proc *p;
	struct args {
		caddr_t	shmaddr;
	} *uap;
	int *retval;
{
	register struct mapmem *mp;

	for (mp = u.u_mmap; mp; mp = mp->mm_next)
		if (mp->mm_ops == &shmops && mp->mm_uva == uap->shmaddr)
			break;
	if (mp == MMNIL)
		return (EINVAL);
	shmsegs[mp->mm_id % SHMMMNI].shm_lpid = p->p_pid;
	return (shmufree(p, mp));
}

shmmapin(mp, off)
	struct mapmem *mp;
{
	register struct shmid_ds *shp;

	shp = &shmsegs[mp->mm_id % SHMMMNI];
	if (off >= ctob(clrnd(btoc(shp->shm_segsz))))
		return(-1);
	return(((struct pte *)shp->shm_handle)[btop(off)].pg_pfnum);
}

/*
 * Increment attach count on fork
 */
/* ARGSUSED */
shmfork(mp, ischild)
	register struct mapmem *mp;
{
	if (!ischild)
		shmsegs[mp->mm_id % SHMMMNI].shm_nattch++;
}

/*
 * Detach from shared memory segment on exit (or exec)
 */
shmexit(mp)
	struct mapmem *mp;
{
	struct proc *p = u.u_procp;		/* XXX */

	return (shmufree(p, mp));
}

shmvalid(id)
	register int id;
{
	register struct shmid_ds *shp;

	if (id < 0 || (id % SHMMMNI) >= shminfo.shmmni)
		return(EINVAL);
	shp = &shmsegs[id % SHMMMNI];
	if (shp->shm_perm.seq == (id / SHMMMNI) &&
	    (shp->shm_perm.mode & (SHM_ALLOC|SHM_DEST)) == SHM_ALLOC)
		return(0);
	return(EINVAL);
}

/*
 * Free user resources associated with a shared memory segment
 */
shmufree(p, mp)
	struct proc *p;
	struct mapmem *mp;
{
	register struct shmid_ds *shp;
	int error;

	shp = &shmsegs[mp->mm_id % SHMMMNI];
	mmmapout(p, mp);
	error = mmfree(p, mp);
	shp->shm_dtime = time.tv_sec;
	if (--shp->shm_nattch <= 0 && (shp->shm_perm.mode & SHM_DEST))
		shmfree(shp);
	return (error);
}

/*
 * Deallocate resources associated with a shared memory segment
 */
shmfree(shp)
	register struct shmid_ds *shp;
{
	caddr_t kva;

	if (shp->shm_handle == NULL)
		panic("shmfree");
	kva = (caddr_t) ptetokv(shp->shm_handle);
	free(kva, M_SHM);
	shp->shm_handle = NULL;
	shmtot -= clrnd(btoc(shp->shm_segsz));
	shp->shm_perm.mode = 0;
	/*
	 * Increment the sequence number to ensure that outstanding
	 * shmids for this segment will be invalid in the event that
	 * the segment is reallocated.  Note that shmids must be
	 * positive as decreed by SVID.
	 */
	shp->shm_perm.seq++;
	if ((int)(shp->shm_perm.seq * SHMMMNI) < 0)
		shp->shm_perm.seq = 0;
}

/*
 * XXX This routine would be common to all sysV style IPC
 *     (if the others were implemented).
 */
ipcaccess(ipc, mode, cred)
	register struct ipc_perm *ipc;
	int mode;
	register struct ucred *cred;
{
	register int m;

	if (cred->cr_uid == 0)
		return(0);
	/*
	 * Access check is based on only one of owner, group, public.
	 * If not owner, then check group.
	 * If not a member of the group, then check public access.
	 */
	mode &= 0700;
	m = ipc->mode;
	if (cred->cr_uid != ipc->uid && cred->cr_uid != ipc->cuid) {
		m <<= 3;
		if (!groupmember(ipc->gid, cred) &&
		    !groupmember(ipc->cgid, cred))
			m <<= 3;
	}
	if ((mode&m) == mode)
		return (0);
	return (EACCES);
}
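
/*
 * A worked example of the check above, assuming the usual System V encoding
 * in which IPC_R is 0400 and IPC_W is 0200: take a segment with mode 0640
 * and a non-root caller who is in the segment's group (or creator group)
 * but is neither the owner nor the creator.  A read request arrives as
 * mode 0400.  Since the caller is not the owner, m is shifted once
 * (0640 << 3 == 06400), moving the group bits into the owner position; the
 * caller is a group member, so no second shift occurs.  (0400 & 06400) ==
 * 0400, so access is granted.  A caller in neither category gets m shifted
 * twice (064000); the "other" bits are zero, so the routine returns EACCES.
 */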

#endif /* SYSVSHM */