/*	$OpenBSD: sysv_shm.c,v 1.69 2016/09/15 02:00:16 dlg Exp $	*/
/*	$NetBSD: sysv_shm.c,v 1.50 1998/10/21 22:24:29 tron Exp $	*/

/*
 * Copyright (c) 2002 Todd C. Miller <Todd.Miller@courtesan.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * Sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F39502-99-1-0512.
 */
/*
 * Copyright (c) 1994 Adam Glass and Charles M. Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles M.
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/time.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/pool.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/stat.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

extern struct shminfo shminfo;
struct shmid_ds **shmsegs;	/* linear mapping of shmid -> shmseg */
struct pool shm_pool;
unsigned short *shmseqs;	/* array of shm sequence numbers */

struct shmid_ds *shm_find_segment_by_shmid(int);

/*
 * Provides the following externally accessible functions:
 *
 * shminit(void);				  initialization
 * shmexit(struct vmspace *)			  cleanup
 * shmfork(struct vmspace *, struct vmspace *)	  fork handling
 * sys_shm{at,ctl,dt,get}(struct proc *, void *, register_t *)	  syscalls
 *
 * Structures:
 * shmsegs (an array of 'struct shmid_ds *')
 * per proc 'struct shmmap_head' with an array of 'struct shmmap_state'
 */
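
/*
 * For orientation, the classic userland sequence these syscalls
 * serve looks roughly like this (a sketch only; error checking
 * omitted):
 *
 *	int id = shmget(key, len, IPC_CREAT | 0600);
 *	void *p = shmat(id, NULL, 0);
 *	...
 *	shmdt(p);
 *	shmctl(id, IPC_RMID, NULL);
 */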

#define	SHMSEG_REMOVED	0x0200		/* can't overlap ACCESSPERMS */

int shm_last_free, shm_nused, shm_committed;

struct shm_handle {
	struct uvm_object *shm_object;
};

struct shmmap_state {
	vaddr_t va;
	int shmid;
};

struct shmmap_head {
	int shmseg;
	struct shmmap_state state[1];
};

int shm_find_segment_by_key(key_t);
void shm_deallocate_segment(struct shmid_ds *);
int shm_delete_mapping(struct vmspace *, struct shmmap_state *);
int shmget_existing(struct proc *, struct sys_shmget_args *,
			 int, int, register_t *);
int shmget_allocate_segment(struct proc *, struct sys_shmget_args *,
				 int, register_t *);

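/*
 * Look up a segment by its IPC key.  Returns the index into shmsegs,
 * or -1 if no segment with that key exists.
 */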
int
shm_find_segment_by_key(key_t key)
{
	struct shmid_ds *shmseg;
	int i;

	for (i = 0; i < shminfo.shmmni; i++) {
		shmseg = shmsegs[i];
		if (shmseg != NULL && shmseg->shm_perm.key == key)
			return (i);
	}
	return (-1);
}

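/*
 * Validate a shmid and return the segment it names, or NULL if the
 * index is out of range, the slot is empty, or the sequence number
 * no longer matches.
 */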
struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni ||
	    (shmseg = shmsegs[segnum]) == NULL ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return (NULL);
	return (shmseg);
}

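/*
 * Release a segment's resources: drop the reference on its anonymous
 * UVM object, return the shmid_ds to the pool, and credit the global
 * usage counters.
 */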
void
shm_deallocate_segment(struct shmid_ds *shmseg)
{
	struct shm_handle *shm_handle;
	size_t size;

	shm_handle = shmseg->shm_internal;
	size = round_page(shmseg->shm_segsz);
	uao_detach(shm_handle->shm_object);
	pool_put(&shm_pool, shmseg);
	shm_committed -= atop(size);
	shm_nused--;
}

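/*
 * Detach one mapping from a process's address space.  If this was the
 * last attachment and the segment was marked SHMSEG_REMOVED, the
 * segment itself is deallocated.
 */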
int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
	struct shmid_ds *shmseg;
	int segnum;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni ||
	    (shmseg = shmsegs[segnum]) == NULL)
		return (EINVAL);
	size = round_page(shmseg->shm_segsz);
	uvm_deallocate(&vm->vm_map, shmmap_s->va, size);
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time_second;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
		shmsegs[shm_last_free] = NULL;
	}
	return (0);
}

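/*
 * shmdt(2): detach the shared memory segment mapped at shmaddr.
 */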
int
sys_shmdt(struct proc *p, void *v, register_t *retval)
{
	struct sys_shmdt_args /* {
		syscallarg(const void *) shmaddr;
	} */ *uap = v;
	struct shmmap_head *shmmap_h;
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_h = (struct shmmap_head *)p->p_vmspace->vm_shm;
	if (shmmap_h == NULL)
		return (EINVAL);

	for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
	    i++, shmmap_s++)
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vaddr_t)SCARG(uap, shmaddr))
			break;
	if (i == shmmap_h->shmseg)
		return (EINVAL);
	return (shm_delete_mapping(p->p_vmspace, shmmap_s));
}

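/*
 * shmat(2): map a shared memory segment into the calling process,
 * allocating the per-process attachment table on first use.
 */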
int
sys_shmat(struct proc *p, void *v, register_t *retval)
{
	struct sys_shmat_args /* {
		syscallarg(int) shmid;
		syscallarg(const void *) shmaddr;
		syscallarg(int) shmflg;
	} */ *uap = v;
	int error, i, flags = 0;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shmmap_head *shmmap_h;
	struct shmmap_state *shmmap_s;
	struct shm_handle *shm_handle;
	vaddr_t attach_va;
	vm_prot_t prot;
	vsize_t size;

	shmmap_h = (struct shmmap_head *)p->p_vmspace->vm_shm;
	if (shmmap_h == NULL) {
		size = sizeof(int) +
		    shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_h = malloc(size, M_SHM, M_WAITOK);
		shmmap_h->shmseg = shminfo.shmseg;
		for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
		    i++, shmmap_s++)
			shmmap_s->shmid = -1;
		p->p_vmspace->vm_shm = (caddr_t)shmmap_h;
	}
	shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid));
	if (shmseg == NULL)
		return (EINVAL);
	error = ipcperm(cred, &shmseg->shm_perm,
		    (SCARG(uap, shmflg) & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		return (error);
	for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shmmap_h->shmseg)
		return (EMFILE);
	size = round_page(shmseg->shm_segsz);
	prot = PROT_READ;
	if ((SCARG(uap, shmflg) & SHM_RDONLY) == 0)
		prot |= PROT_WRITE;
	if (SCARG(uap, shmaddr)) {
		flags |= UVM_FLAG_FIXED;
		if (SCARG(uap, shmflg) & SHM_RND)
			attach_va =
			    (vaddr_t)SCARG(uap, shmaddr) & ~(SHMLBA-1);
		else if (((vaddr_t)SCARG(uap, shmaddr) & (SHMLBA-1)) == 0)
			attach_va = (vaddr_t)SCARG(uap, shmaddr);
		else
			return (EINVAL);
	} else
		attach_va = 0;
	shm_handle = shmseg->shm_internal;
	uao_reference(shm_handle->shm_object);
	error = uvm_map(&p->p_vmspace->vm_map, &attach_va, size,
	    shm_handle->shm_object, 0, 0, UVM_MAPFLAG(prot, prot,
	    MAP_INHERIT_SHARE, MADV_RANDOM, flags));
	if (error) {
		uao_detach(shm_handle->shm_object);
		return (error);
	}

	shmmap_s->va = attach_va;
	shmmap_s->shmid = SCARG(uap, shmid);
	shmseg->shm_lpid = p->p_p->ps_pid;
	shmseg->shm_atime = time_second;
	shmseg->shm_nattch++;
	*retval = attach_va;
	return (0);
}

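/*
 * shmctl(2): front end for shmctl1(), using the kernel's own
 * copyin/copyout to transfer the shmid_ds.
 */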
int
sys_shmctl(struct proc *p, void *v, register_t *retval)
{
	struct sys_shmctl_args /* {
		syscallarg(int) shmid;
		syscallarg(int) cmd;
		syscallarg(struct shmid_ds *) buf;
	} */ *uap = v;

	return (shmctl1(p, SCARG(uap, shmid), SCARG(uap, cmd),
	    (caddr_t)SCARG(uap, buf), copyin, copyout));
}

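/*
 * Implement IPC_STAT, IPC_SET and IPC_RMID; SHM_LOCK and SHM_UNLOCK
 * are not supported.  The copy functions are passed in so callers
 * (e.g. compat code) can substitute their own.
 */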
int
shmctl1(struct proc *p, int shmid, int cmd, caddr_t buf,
    int (*ds_copyin)(const void *, void *, size_t),
    int (*ds_copyout)(const void *, void *, size_t))
{
	struct ucred *cred = p->p_ucred;
	struct shmid_ds inbuf, *shmseg;
	int error;

	shmseg = shm_find_segment_by_shmid(shmid);
	if (shmseg == NULL)
		return (EINVAL);
	switch (cmd) {
	case IPC_STAT:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_R)) != 0)
			return (error);
		error = ds_copyout(shmseg, buf, sizeof(inbuf));
		if (error)
			return (error);
		break;
	case IPC_SET:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return (error);
		error = ds_copyin(buf, &inbuf, sizeof(inbuf));
		if (error)
			return (error);
		shmseg->shm_perm.uid = inbuf.shm_perm.uid;
		shmseg->shm_perm.gid = inbuf.shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (inbuf.shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time_second;
		break;
	case IPC_RMID:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return (error);
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(shmid);
			shmsegs[shm_last_free] = NULL;
		}
		break;
	case SHM_LOCK:
	case SHM_UNLOCK:
	default:
		return (EINVAL);
	}
	return (0);
}

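/*
 * shmget(2) helper for an existing segment: check permissions, the
 * requested size, and IPC_CREAT|IPC_EXCL semantics, then return the
 * segment's shmid.
 */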
int
shmget_existing(struct proc *p,
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap,
	int mode, int segnum, register_t *retval)
{
	struct shmid_ds *shmseg;
	struct ucred *cred = p->p_ucred;
	int error;

	shmseg = shmsegs[segnum];	/* We assume the segnum is valid */
	if ((error = ipcperm(cred, &shmseg->shm_perm, mode)) != 0)
		return (error);
	if (SCARG(uap, size) && SCARG(uap, size) > shmseg->shm_segsz)
		return (EINVAL);
	if ((SCARG(uap, shmflg) & (IPC_CREAT | IPC_EXCL)) ==
	    (IPC_CREAT | IPC_EXCL))
		return (EEXIST);
	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return (0);
}

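/*
 * shmget(2) helper: allocate a new segment, charge it against the
 * system limits, and back it with an anonymous UVM object.
 */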
int
shmget_allocate_segment(struct proc *p,
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap,
	int mode, register_t *retval)
{
	size_t size;
	key_t key;
	int segnum;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;
	int error = 0;

	if (SCARG(uap, size) < shminfo.shmmin ||
	    SCARG(uap, size) > shminfo.shmmax)
		return (EINVAL);
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return (ENOSPC);
	size = round_page(SCARG(uap, size));
	if (shm_committed + atop(size) > shminfo.shmall)
		return (ENOMEM);
	shm_nused++;
	shm_committed += atop(size);

	/*
	 * If a key has been specified and we had to wait for memory
	 * to be freed up we need to verify that no one has allocated
	 * the key we want in the meantime.  Yes, this is ugly.
	 */
	key = SCARG(uap, key);
	shmseg = pool_get(&shm_pool, key == IPC_PRIVATE ? PR_WAITOK :
	    PR_NOWAIT);
	if (shmseg == NULL) {
		shmseg = pool_get(&shm_pool, PR_WAITOK);
		if (shm_find_segment_by_key(key) != -1) {
			pool_put(&shm_pool, shmseg);
			shm_nused--;
			shm_committed -= atop(size);
			return (EAGAIN);
		}
	}

	/* XXX - hash shmids instead */
	if (shm_last_free < 0) {
		for (segnum = 0; segnum < shminfo.shmmni && shmsegs[segnum];
		    segnum++)
			;
		if (segnum == shminfo.shmmni)
			panic("shmseg free count inconsistent");
	} else {
		segnum = shm_last_free;
		if (++shm_last_free >= shminfo.shmmni || shmsegs[shm_last_free])
			shm_last_free = -1;
	}
	shmsegs[segnum] = shmseg;

	shm_handle = (struct shm_handle *)((caddr_t)shmseg + sizeof(*shmseg));
	shm_handle->shm_object = uao_create(size, 0);

	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (mode & ACCESSPERMS);
	shmseg->shm_perm.seq = shmseqs[segnum] = (shmseqs[segnum] + 1) & 0x7fff;
	shmseg->shm_perm.key = key;
	shmseg->shm_segsz = SCARG(uap, size);
	shmseg->shm_cpid = p->p_p->ps_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time_second;
	shmseg->shm_internal = shm_handle;

	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return (error);
}

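/*
 * shmget(2): return the shmid of the segment matching key, creating
 * one if requested and none exists.
 */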
int
sys_shmget(struct proc *p, void *v, register_t *retval)
{
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap = v;
	int segnum, mode, error;

	mode = SCARG(uap, shmflg) & ACCESSPERMS;

	if (SCARG(uap, key) != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(SCARG(uap, key));
		if (segnum >= 0)
			return (shmget_existing(p, uap, mode, segnum, retval));
		if ((SCARG(uap, shmflg) & IPC_CREAT) == 0)
			return (ENOENT);
	}
	error = shmget_allocate_segment(p, uap, mode, retval);
	if (error == EAGAIN)
		goto again;
	return (error);
}

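/*
 * Fork handling: give the child a copy of the parent's attachment
 * table and bump the attach count of every mapped segment.
 */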
void
shmfork(struct vmspace *vm1, struct vmspace *vm2)
{
	struct shmmap_head *shmmap_h;
	struct shmmap_state *shmmap_s;
	struct shmid_ds *shmseg;
	size_t size;
	int i;

	if (vm1->vm_shm == NULL) {
		vm2->vm_shm = NULL;
		return;
	}

	shmmap_h = (struct shmmap_head *)vm1->vm_shm;
	size = sizeof(int) + shmmap_h->shmseg * sizeof(struct shmmap_state);
	vm2->vm_shm = malloc(size, M_SHM, M_WAITOK);
	memcpy(vm2->vm_shm, vm1->vm_shm, size);
	for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
	    i++, shmmap_s++) {
		if (shmmap_s->shmid != -1 &&
		    (shmseg = shmsegs[IPCID_TO_IX(shmmap_s->shmid)]) != NULL)
			shmseg->shm_nattch++;
	}
}

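/*
 * Tear down all remaining mappings when a vmspace goes away and free
 * the per-process attachment table.
 */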
void
shmexit(struct vmspace *vm)
{
	struct shmmap_head *shmmap_h;
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_h = (struct shmmap_head *)vm->vm_shm;
	if (shmmap_h == NULL)
		return;
	for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
	    i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shm_delete_mapping(vm, shmmap_s);
	free(vm->vm_shm, M_SHM, 0);
	vm->vm_shm = NULL;
}

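/*
 * Initialize the shmid_ds pool and allocate the shmsegs/shmseqs
 * arrays sized to the compiled-in shminfo limits.
 */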
void
shminit(void)
{

	pool_init(&shm_pool,
	    sizeof(struct shmid_ds) + sizeof(struct shm_handle), 0,
	    IPL_NONE, PR_WAITOK, "shmpl", NULL);
	shmsegs = mallocarray(shminfo.shmmni, sizeof(struct shmid_ds *),
	    M_SHM, M_WAITOK|M_ZERO);
	shmseqs = mallocarray(shminfo.shmmni, sizeof(unsigned short),
	    M_SHM, M_WAITOK|M_ZERO);

	shminfo.shmmax *= PAGE_SIZE;	/* default is in pages, make bytes */
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}

/*
 * Userland access to struct shminfo.
 */
int
sysctl_sysvshm(int *name, u_int namelen, void *oldp, size_t *oldlenp,
	void *newp, size_t newlen)
{
	int error, val;
	struct shmid_ds **newsegs;
	unsigned short *newseqs;

	if (namelen != 2) {
		switch (name[0]) {
		case KERN_SHMINFO_SHMMAX:
		case KERN_SHMINFO_SHMMIN:
		case KERN_SHMINFO_SHMMNI:
		case KERN_SHMINFO_SHMSEG:
		case KERN_SHMINFO_SHMALL:
			break;
		default:
			return (ENOTDIR);	/* overloaded */
		}
	}

	switch (name[0]) {
	case KERN_SHMINFO_SHMMAX:
		if ((error = sysctl_int(oldp, oldlenp, newp, newlen,
		    &shminfo.shmmax)) || newp == NULL)
			return (error);

		/* If new shmmax > shmall, crank shmall */
		if (atop(round_page(shminfo.shmmax)) > shminfo.shmall)
			shminfo.shmall = atop(round_page(shminfo.shmmax));
		return (0);
	case KERN_SHMINFO_SHMMIN:
		val = shminfo.shmmin;
		if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &val)) ||
		    val == shminfo.shmmin)
			return (error);
		if (val <= 0)
			return (EINVAL);	/* shmmin must be >= 1 */
		shminfo.shmmin = val;
		return (0);
	case KERN_SHMINFO_SHMMNI:
		val = shminfo.shmmni;
		if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &val)) ||
		    val == shminfo.shmmni)
			return (error);

		if (val < shminfo.shmmni || val > 0xffff)
			return (EINVAL);

		/* Expand shmsegs and shmseqs arrays */
		newsegs = mallocarray(val, sizeof(struct shmid_ds *),
		    M_SHM, M_WAITOK|M_ZERO);
		memcpy(newsegs, shmsegs,
		    shminfo.shmmni * sizeof(struct shmid_ds *));
		free(shmsegs, M_SHM,
		    shminfo.shmmni * sizeof(struct shmid_ds *));
		shmsegs = newsegs;
		newseqs = mallocarray(val, sizeof(unsigned short), M_SHM,
		    M_WAITOK|M_ZERO);
		memcpy(newseqs, shmseqs,
		    shminfo.shmmni * sizeof(unsigned short));
		free(shmseqs, M_SHM, shminfo.shmmni * sizeof(unsigned short));
		shmseqs = newseqs;
		shminfo.shmmni = val;
		return (0);
	case KERN_SHMINFO_SHMSEG:
		val = shminfo.shmseg;
		if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &val)) ||
		    val == shminfo.shmseg)
			return (error);
		if (val <= 0)
			return (EINVAL);	/* shmseg must be >= 1 */
		shminfo.shmseg = val;
		return (0);
	case KERN_SHMINFO_SHMALL:
		val = shminfo.shmall;
		if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &val)) ||
		    val == shminfo.shmall)
			return (error);
		if (val < shminfo.shmall)
			return (EINVAL);	/* can't decrease shmall */
		shminfo.shmall = val;
		return (0);
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}