/*	$OpenBSD: sysv_shm.c,v 1.80 2022/08/14 01:58:28 jsg Exp $	*/
/*	$NetBSD: sysv_shm.c,v 1.50 1998/10/21 22:24:29 tron Exp $	*/

/*
 * Copyright (c) 2002 Todd C. Miller <millert@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * Sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F39502-99-1-0512.
 */
/*
 * Copyright (c) 1994 Adam Glass and Charles M. Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles M.
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/pool.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/stat.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

extern struct shminfo shminfo;
struct shmid_ds **shmsegs;	/* linear mapping of shmid -> shmseg */
struct pool shm_pool;
unsigned short *shmseqs;	/* array of shm sequence numbers */

struct shmid_ds *shm_find_segment_by_shmid(int);

/*
 * Provides the following externally accessible functions:
 *
 * shminit(void);		                 initialization
 * shmexit(struct vmspace *)                     cleanup
 * shmfork(struct vmspace *, struct vmspace *)   fork handling
 * shmsys(arg1, arg2, arg3, arg4);         shm{at,ctl,dt,get}(arg2, arg3, arg4)
 *
 * Structures:
 * shmsegs (an array of 'struct shmid_ds *')
 * per proc 'struct shmmap_head' with an array of 'struct shmmap_state'
 */

#define	SHMSEG_REMOVED  	0x0200		/* can't overlap ACCESSPERMS */

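/*
 * shm_last_free caches the most recently freed slot (-1 if unknown),
 * shm_nused counts allocated segments and shm_committed counts the
 * pages of memory committed to them.
 */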
int shm_last_free, shm_nused, shm_committed;

struct shm_handle {
	struct uvm_object *shm_object;
};

struct shmmap_state {
	vaddr_t va;
	int shmid;
};

struct shmmap_head {
	int shmseg;
	struct shmmap_state state[1];
};

int shm_find_segment_by_key(key_t);
void shm_deallocate_segment(struct shmid_ds *);
int shm_delete_mapping(struct vmspace *, struct shmmap_state *);
int shmget_existing(struct proc *, struct sys_shmget_args *,
			 int, int, register_t *);
int shmget_allocate_segment(struct proc *, struct sys_shmget_args *,
				 int, register_t *);

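/*
 * Scan the shmsegs array for a segment whose key matches; return its
 * index, or -1 if the key is not in use.
 */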
int
shm_find_segment_by_key(key_t key)
{
	struct shmid_ds *shmseg;
	int i;

	for (i = 0; i < shminfo.shmmni; i++) {
		shmseg = shmsegs[i];
		if (shmseg != NULL && shmseg->shm_perm.key == key)
			return (i);
	}
	return (-1);
}

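/*
 * Translate a shmid into its shmid_ds.  The index and sequence number
 * encoded in the id must both match; otherwise the id is stale or bogus
 * and NULL is returned.
 */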
struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni ||
	    (shmseg = shmsegs[segnum]) == NULL ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return (NULL);
	return (shmseg);
}

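/*
 * Release the uvm object backing a segment and return the shmid_ds to
 * the pool, updating the global usage counters.
 */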
void
shm_deallocate_segment(struct shmid_ds *shmseg)
{
	struct shm_handle *shm_handle;
	size_t size;

	shm_handle = shmseg->shm_internal;
	size = round_page(shmseg->shm_segsz);
	uao_detach(shm_handle->shm_object);
	pool_put(&shm_pool, shmseg);
	shm_committed -= atop(size);
	shm_nused--;
}

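/*
 * Unmap one attached segment from a vmspace.  The segment itself is
 * torn down here only if this was the last attachment and IPC_RMID has
 * already marked it SHMSEG_REMOVED.
 */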
int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
	struct shmid_ds *shmseg;
	int segnum;
	vaddr_t end;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni ||
	    (shmseg = shmsegs[segnum]) == NULL)
		return (EINVAL);
	end = round_page(shmmap_s->va+shmseg->shm_segsz);
	uvm_unmap(&vm->vm_map, trunc_page(shmmap_s->va), end);
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = gettime();
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
		shmsegs[shm_last_free] = NULL;
	}
	return (0);
}

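/*
 * shmdt(2): detach the mapping whose attach address matches shmaddr.
 */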
int
sys_shmdt(struct proc *p, void *v, register_t *retval)
{
	struct sys_shmdt_args /* {
		syscallarg(const void *) shmaddr;
	} */ *uap = v;
	struct shmmap_head *shmmap_h;
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_h = (struct shmmap_head *)p->p_vmspace->vm_shm;
	if (shmmap_h == NULL)
		return (EINVAL);

	for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
	    i++, shmmap_s++)
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vaddr_t)SCARG(uap, shmaddr))
			break;
	if (i == shmmap_h->shmseg)
		return (EINVAL);
	return (shm_delete_mapping(p->p_vmspace, shmmap_s));
}

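/*
 * shmat(2): map a segment into the calling process, allocating the per
 * process shmmap_head on first use.  A non-NULL shmaddr requests a fixed
 * mapping, optionally rounded down to an SHMLBA boundary with SHM_RND.
 */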
int
sys_shmat(struct proc *p, void *v, register_t *retval)
{
	struct sys_shmat_args /* {
		syscallarg(int) shmid;
		syscallarg(const void *) shmaddr;
		syscallarg(int) shmflg;
	} */ *uap = v;
	int error, i, flags = 0;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shmmap_head *shmmap_h;
	struct shmmap_state *shmmap_s;
	struct shm_handle *shm_handle;
	vaddr_t attach_va;
	vm_prot_t prot;
	vsize_t size;

	shmmap_h = (struct shmmap_head *)p->p_vmspace->vm_shm;
	if (shmmap_h == NULL) {
		size = sizeof(int) +
		    shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_h = malloc(size, M_SHM, M_WAITOK | M_CANFAIL);
		if (shmmap_h == NULL)
			return (ENOMEM);
		shmmap_h->shmseg = shminfo.shmseg;
		for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
		    i++, shmmap_s++)
			shmmap_s->shmid = -1;
		p->p_vmspace->vm_shm = (caddr_t)shmmap_h;
	}
	shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid));
	if (shmseg == NULL)
		return (EINVAL);
	error = ipcperm(cred, &shmseg->shm_perm,
		    (SCARG(uap, shmflg) & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		return (error);
	for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shmmap_h->shmseg)
		return (EMFILE);
	size = round_page(shmseg->shm_segsz);
	prot = PROT_READ;
	if ((SCARG(uap, shmflg) & SHM_RDONLY) == 0)
		prot |= PROT_WRITE;
	if (SCARG(uap, shmaddr)) {
		flags |= UVM_FLAG_FIXED;
		if (SCARG(uap, shmflg) & SHM_RND)
			attach_va =
			    (vaddr_t)SCARG(uap, shmaddr) & ~(SHMLBA-1);
		else if (((vaddr_t)SCARG(uap, shmaddr) & (SHMLBA-1)) == 0)
			attach_va = (vaddr_t)SCARG(uap, shmaddr);
		else
			return (EINVAL);
	} else
		attach_va = 0;
	/*
	 * Since uvm_map() could end up sleeping, grab a reference to prevent
	 * the segment from being deallocated while sleeping.
	 */
	shmseg->shm_nattch++;
	shm_handle = shmseg->shm_internal;
	uao_reference(shm_handle->shm_object);
	error = uvm_map(&p->p_vmspace->vm_map, &attach_va, size,
	    shm_handle->shm_object, 0, 0, UVM_MAPFLAG(prot, prot,
	    MAP_INHERIT_SHARE, MADV_RANDOM, flags));
	if (error) {
		if ((--shmseg->shm_nattch <= 0) &&
		    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(SCARG(uap, shmid));
			shmsegs[shm_last_free] = NULL;
		} else {
			uao_detach(shm_handle->shm_object);
		}
		return (error);
	}

	shmmap_s->va = attach_va;
	shmmap_s->shmid = SCARG(uap, shmid);
	shmseg->shm_lpid = p->p_p->ps_pid;
	shmseg->shm_atime = gettime();
	*retval = attach_va;
	return (0);
}

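/*
 * shmctl(2): IPC_STAT, IPC_SET and IPC_RMID on a segment.  SHM_LOCK and
 * SHM_UNLOCK are not supported.
 */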
int
sys_shmctl(struct proc *p, void *v, register_t *retval)
{
	struct sys_shmctl_args /* {
		syscallarg(int) shmid;
		syscallarg(int) cmd;
		syscallarg(struct shmid_ds *) buf;
	} */ *uap = v;
	int		shmid = SCARG(uap, shmid);
	int		cmd = SCARG(uap, cmd);
	void		*buf = SCARG(uap, buf);
	struct ucred	*cred = p->p_ucred;
	struct shmid_ds	inbuf, *shmseg;
	int		error;

	if (cmd == IPC_SET) {
		error = copyin(buf, &inbuf, sizeof(inbuf));
		if (error)
			return (error);
	}

	shmseg = shm_find_segment_by_shmid(shmid);
	if (shmseg == NULL)
		return (EINVAL);
	switch (cmd) {
	case IPC_STAT:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_R)) != 0)
			return (error);
		error = copyout(shmseg, buf, sizeof(inbuf));
		if (error)
			return (error);
		break;
	case IPC_SET:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return (error);
		shmseg->shm_perm.uid = inbuf.shm_perm.uid;
		shmseg->shm_perm.gid = inbuf.shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (inbuf.shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = gettime();
		break;
	case IPC_RMID:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return (error);
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(shmid);
			shmsegs[shm_last_free] = NULL;
		}
		break;
	case SHM_LOCK:
	case SHM_UNLOCK:
	default:
		return (EINVAL);
	}
	return (0);
}

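/*
 * shmget(2) helper: the key already names a segment.  Check permissions,
 * the requested size and the IPC_CREAT|IPC_EXCL case, then hand back its
 * shmid.
 */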
int
shmget_existing(struct proc *p,
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap,
	int mode, int segnum, register_t *retval)
{
	struct shmid_ds *shmseg;
	struct ucred *cred = p->p_ucred;
	int error;

	shmseg = shmsegs[segnum];	/* We assume the segnum is valid */
	if ((error = ipcperm(cred, &shmseg->shm_perm, mode)) != 0)
		return (error);
	if (SCARG(uap, size) && SCARG(uap, size) > shmseg->shm_segsz)
		return (EINVAL);
	if ((SCARG(uap, shmflg) & (IPC_CREAT | IPC_EXCL)) ==
	    (IPC_CREAT | IPC_EXCL))
		return (EEXIST);
	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return (0);
}

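/*
 * shmget(2) helper: create a new segment.  The shmid_ds and its
 * shm_handle come from shm_pool in a single allocation and the backing
 * store is an anonymous uvm object.  Returns EAGAIN if the pool
 * allocation slept and someone else claimed the key in the meantime.
 */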
int
shmget_allocate_segment(struct proc *p,
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap,
	int mode, register_t *retval)
{
	size_t size;
	key_t key;
	int segnum;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;
	int error = 0;

	if (SCARG(uap, size) < shminfo.shmmin ||
	    SCARG(uap, size) > shminfo.shmmax)
		return (EINVAL);
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return (ENOSPC);
	size = round_page(SCARG(uap, size));
	if (shm_committed + atop(size) > shminfo.shmall)
		return (ENOMEM);
	shm_nused++;
	shm_committed += atop(size);

	/*
	 * If a key has been specified and we had to wait for memory
	 * to be freed up we need to verify that no one has allocated
	 * the key we want in the meantime.  Yes, this is ugly.
	 */
	key = SCARG(uap, key);
	shmseg = pool_get(&shm_pool, key == IPC_PRIVATE ? PR_WAITOK :
	    PR_NOWAIT);
	if (shmseg == NULL) {
		shmseg = pool_get(&shm_pool, PR_WAITOK);
		if (shm_find_segment_by_key(key) != -1) {
			pool_put(&shm_pool, shmseg);
			shm_nused--;
			shm_committed -= atop(size);
			return (EAGAIN);
		}
	}

	/* XXX - hash shmids instead */
	if (shm_last_free < 0) {
		for (segnum = 0; segnum < shminfo.shmmni && shmsegs[segnum];
		    segnum++)
			;
		if (segnum == shminfo.shmmni)
			panic("shmseg free count inconsistent");
	} else {
		segnum = shm_last_free;
		if (++shm_last_free >= shminfo.shmmni || shmsegs[shm_last_free])
			shm_last_free = -1;
	}
	shmsegs[segnum] = shmseg;

	shm_handle = (struct shm_handle *)((caddr_t)shmseg + sizeof(*shmseg));
	shm_handle->shm_object = uao_create(size, 0);

	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (mode & ACCESSPERMS);
	shmseg->shm_perm.seq = shmseqs[segnum] = (shmseqs[segnum] + 1) & 0x7fff;
	shmseg->shm_perm.key = key;
	shmseg->shm_segsz = SCARG(uap, size);
	shmseg->shm_cpid = p->p_p->ps_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = gettime();
	shmseg->shm_internal = shm_handle;

	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return (error);
}

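/*
 * shmget(2): look up an existing segment by key or allocate a new one,
 * retrying the lookup if the allocation path reports EAGAIN.
 */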
int
sys_shmget(struct proc *p, void *v, register_t *retval)
{
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap = v;
	int segnum, mode, error;

	mode = SCARG(uap, shmflg) & ACCESSPERMS;

	if (SCARG(uap, key) != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(SCARG(uap, key));
		if (segnum >= 0)
			return (shmget_existing(p, uap, mode, segnum, retval));
		if ((SCARG(uap, shmflg) & IPC_CREAT) == 0)
			return (ENOENT);
	}
	error = shmget_allocate_segment(p, uap, mode, retval);
	if (error == EAGAIN)
		goto again;
	return (error);
}

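/*
 * Copy the parent's shmmap_head into the child on fork and bump the
 * attach count of every segment it references.
 */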
void
shmfork(struct vmspace *vm1, struct vmspace *vm2)
{
	struct shmmap_head *shmmap_h;
	struct shmmap_state *shmmap_s;
	struct shmid_ds *shmseg;
	size_t size;
	int i;

	if (vm1->vm_shm == NULL) {
		vm2->vm_shm = NULL;
		return;
	}

	shmmap_h = (struct shmmap_head *)vm1->vm_shm;
	size = sizeof(int) + shmmap_h->shmseg * sizeof(struct shmmap_state);
	vm2->vm_shm = malloc(size, M_SHM, M_WAITOK);
	memcpy(vm2->vm_shm, vm1->vm_shm, size);
	for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
	    i++, shmmap_s++) {
		if (shmmap_s->shmid != -1 &&
		    (shmseg = shmsegs[IPCID_TO_IX(shmmap_s->shmid)]) != NULL)
			shmseg->shm_nattch++;
	}
}

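/*
 * Detach all segments still mapped in a dying vmspace and free its
 * shmmap_head.
 */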
void
shmexit(struct vmspace *vm)
{
	struct shmmap_head *shmmap_h;
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	shmmap_h = (struct shmmap_head *)vm->vm_shm;
	if (shmmap_h == NULL)
		return;
	size = sizeof(int) + shmmap_h->shmseg * sizeof(struct shmmap_state);
	for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
	    i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shm_delete_mapping(vm, shmmap_s);
	free(vm->vm_shm, M_SHM, size);
	vm->vm_shm = NULL;
}

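/*
 * Boot-time initialization: set up shm_pool and the shmsegs/shmseqs
 * arrays, and convert shminfo.shmmax from pages to bytes.
 */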
void
shminit(void)
{

	pool_init(&shm_pool,
	    sizeof(struct shmid_ds) + sizeof(struct shm_handle), 0,
	    IPL_NONE, PR_WAITOK, "shmpl", NULL);
	shmsegs = mallocarray(shminfo.shmmni, sizeof(struct shmid_ds *),
	    M_SHM, M_WAITOK|M_ZERO);
	shmseqs = mallocarray(shminfo.shmmni, sizeof(unsigned short),
	    M_SHM, M_WAITOK|M_ZERO);

	shminfo.shmmax *= PAGE_SIZE;	/* actually in pages */
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}

/* Expand shmsegs and shmseqs arrays */
void
shm_reallocate(int val)
{
	struct shmid_ds **newsegs;
	unsigned short *newseqs;

	newsegs = mallocarray(val, sizeof(struct shmid_ds *),
	    M_SHM, M_WAITOK | M_ZERO);
	memcpy(newsegs, shmsegs,
	    shminfo.shmmni * sizeof(struct shmid_ds *));
	free(shmsegs, M_SHM,
	    shminfo.shmmni * sizeof(struct shmid_ds *));
	shmsegs = newsegs;
	newseqs = mallocarray(val, sizeof(unsigned short), M_SHM,
	    M_WAITOK | M_ZERO);
	memcpy(newseqs, shmseqs,
	    shminfo.shmmni * sizeof(unsigned short));
	free(shmseqs, M_SHM, shminfo.shmmni * sizeof(unsigned short));
	shmseqs = newseqs;
	shminfo.shmmni = val;
}

/*
 * Userland access to struct shminfo.
 */
int
sysctl_sysvshm(int *name, u_int namelen, void *oldp, size_t *oldlenp,
	void *newp, size_t newlen)
{
	int error, val;

	if (namelen != 1)
		return (ENOTDIR);	/* leaf-only */

	switch (name[0]) {
	case KERN_SHMINFO_SHMMAX:
		if ((error = sysctl_int_bounded(oldp, oldlenp, newp, newlen,
		    &shminfo.shmmax, 0, INT_MAX)) || newp == NULL)
			return (error);

		/* If new shmmax > shmall, crank shmall */
		if (atop(round_page(shminfo.shmmax)) > shminfo.shmall)
			shminfo.shmall = atop(round_page(shminfo.shmmax));
		return (0);
	case KERN_SHMINFO_SHMMIN:
		return (sysctl_int_bounded(oldp, oldlenp, newp, newlen,
		    &shminfo.shmmin, 1, INT_MAX));
	case KERN_SHMINFO_SHMMNI:
		val = shminfo.shmmni;
		/* can't decrease shmmni */
		error = sysctl_int_bounded(oldp, oldlenp, newp, newlen,
		    &val, val, 0xffff);
		/* returns success and skips reallocation if val is unchanged */
		if (error || val == shminfo.shmmni)
			return (error);
		shm_reallocate(val);
		return (0);
	case KERN_SHMINFO_SHMSEG:
		return (sysctl_int_bounded(oldp, oldlenp, newp, newlen,
		    &shminfo.shmseg, 1, INT_MAX));
	case KERN_SHMINFO_SHMALL:
		/* can't decrease shmall */
		return (sysctl_int_bounded(oldp, oldlenp, newp, newlen,
		    &shminfo.shmall, shminfo.shmall, INT_MAX));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}
620