/*	$OpenBSD: sysv_shm.c,v 1.56 2014/03/18 06:59:00 guenther Exp $	*/
/*	$NetBSD: sysv_shm.c,v 1.50 1998/10/21 22:24:29 tron Exp $	*/

/*
 * Copyright (c) 2002 Todd C. Miller <Todd.Miller@courtesan.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * Sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F39502-99-1-0512.
 */
/*
 * Copyright (c) 1994 Adam Glass and Charles M. Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles M.
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/time.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/pool.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/stat.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

extern struct shminfo shminfo;
struct shmid_ds **shmsegs;	/* linear mapping of shmid -> shmseg */
struct pool shm_pool;
unsigned short *shmseqs;	/* array of shm sequence numbers */

struct shmid_ds *shm_find_segment_by_shmid(int);

/*
 * Provides the following externally accessible functions:
 *
 * shminit(void);                                initialization
 * shmexit(struct vmspace *)                     cleanup
 * shmfork(struct vmspace *, struct vmspace *)   fork handling
 * sys_shm{at,ctl,dt,get}()                      syscall entry points
 *
 * Structures:
 * shmsegs (an array of 'struct shmid_ds *')
 * per proc 'struct shmmap_head' with an array of 'struct shmmap_state'
 */

#define	SHMSEG_REMOVED	0x0200		/* can't overlap ACCESSPERMS */

int shm_last_free, shm_nused, shm_committed;

struct shm_handle {
	struct uvm_object *shm_object;
};

struct shmmap_state {
	vaddr_t va;
	int shmid;
};

struct shmmap_head {
	int shmseg;
	struct shmmap_state state[1];
};

int shm_find_segment_by_key(key_t);
void shm_deallocate_segment(struct shmid_ds *);
int shm_delete_mapping(struct vmspace *, struct shmmap_state *);
int shmget_existing(struct proc *, struct sys_shmget_args *,
			 int, int, register_t *);
int shmget_allocate_segment(struct proc *, struct sys_shmget_args *,
				 int, register_t *);

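/*
 * Look up a segment by its IPC key.  Returns the index into shmsegs[],
 * or -1 if no segment currently has that key.
 */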
int
shm_find_segment_by_key(key_t key)
{
	struct shmid_ds *shmseg;
	int i;

	for (i = 0; i < shminfo.shmmni; i++) {
		shmseg = shmsegs[i];
		if (shmseg != NULL && shmseg->shm_perm.key == key)
			return (i);
	}
	return (-1);
}

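/*
 * Validate a shmid and map it to its segment: the index must be in
 * range, the slot in use, and the sequence number must match, so stale
 * ids referring to recycled slots are rejected.
 */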
struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni ||
	    (shmseg = shmsegs[segnum]) == NULL ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return (NULL);
	return (shmseg);
}

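/*
 * Free a segment's backing anonymous object and its shmid_ds, and
 * credit the global page and segment counters.  The caller clears the
 * shmsegs[] slot.
 */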
void
shm_deallocate_segment(struct shmid_ds *shmseg)
{
	struct shm_handle *shm_handle;
	size_t size;

	shm_handle = shmseg->shm_internal;
	size = round_page(shmseg->shm_segsz);
	uao_detach(shm_handle->shm_object);
	pool_put(&shm_pool, shmseg);
	shm_committed -= atop(size);
	shm_nused--;
}

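/*
 * Remove one attachment from an address space.  When the last
 * attachment of a segment already marked SHMSEG_REMOVED goes away,
 * the segment itself is deallocated.
 */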
int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
	struct shmid_ds *shmseg;
	int segnum;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni ||
	    (shmseg = shmsegs[segnum]) == NULL)
		return (EINVAL);
	size = round_page(shmseg->shm_segsz);
	uvm_deallocate(&vm->vm_map, shmmap_s->va, size);
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time_second;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
		shmsegs[shm_last_free] = NULL;
	}
	return (0);
}

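/*
 * shmdt(2): find the attachment whose address matches shmaddr and
 * tear it down.
 */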
int
sys_shmdt(struct proc *p, void *v, register_t *retval)
{
	struct sys_shmdt_args /* {
		syscallarg(const void *) shmaddr;
	} */ *uap = v;
	struct shmmap_head *shmmap_h;
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_h = (struct shmmap_head *)p->p_vmspace->vm_shm;
	if (shmmap_h == NULL)
		return (EINVAL);

	for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
	    i++, shmmap_s++)
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vaddr_t)SCARG(uap, shmaddr))
			break;
	if (i == shmmap_h->shmseg)
		return (EINVAL);
	return (shm_delete_mapping(p->p_vmspace, shmmap_s));
}

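/*
 * shmat(2): map a segment into the caller's address space, creating
 * the per-process attach table on first use.
 */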
int
sys_shmat(struct proc *p, void *v, register_t *retval)
{
	struct sys_shmat_args /* {
		syscallarg(int) shmid;
		syscallarg(const void *) shmaddr;
		syscallarg(int) shmflg;
	} */ *uap = v;
	int error, i, flags;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shmmap_head *shmmap_h;
	struct shmmap_state *shmmap_s;
	struct shm_handle *shm_handle;
	vaddr_t attach_va;
	vm_prot_t prot;
	vsize_t size;

	shmmap_h = (struct shmmap_head *)p->p_vmspace->vm_shm;
	if (shmmap_h == NULL) {
		size = sizeof(int) +
		    shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_h = malloc(size, M_SHM, M_WAITOK);
		shmmap_h->shmseg = shminfo.shmseg;
		for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
		    i++, shmmap_s++)
			shmmap_s->shmid = -1;
		p->p_vmspace->vm_shm = (caddr_t)shmmap_h;
	}
	shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid));
	if (shmseg == NULL)
		return (EINVAL);
	error = ipcperm(cred, &shmseg->shm_perm,
		    (SCARG(uap, shmflg) & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		return (error);
	for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shmmap_h->shmseg)
		return (EMFILE);
	size = round_page(shmseg->shm_segsz);
	prot = VM_PROT_READ;
	if ((SCARG(uap, shmflg) & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (SCARG(uap, shmaddr)) {
		flags |= MAP_FIXED;
		if (SCARG(uap, shmflg) & SHM_RND)
			attach_va =
			    (vaddr_t)SCARG(uap, shmaddr) & ~(SHMLBA-1);
		else if (((vaddr_t)SCARG(uap, shmaddr) & (SHMLBA-1)) == 0)
			attach_va = (vaddr_t)SCARG(uap, shmaddr);
		else
			return (EINVAL);
	} else
		attach_va = 0;
	shm_handle = shmseg->shm_internal;
	uao_reference(shm_handle->shm_object);
	error = uvm_map(&p->p_vmspace->vm_map, &attach_va, size,
	    shm_handle->shm_object, 0, 0, UVM_MAPFLAG(prot, prot,
	    UVM_INH_SHARE, UVM_ADV_RANDOM, 0));
	if (error) {
		uao_detach(shm_handle->shm_object);
		return (error);
	}

	shmmap_s->va = attach_va;
	shmmap_s->shmid = SCARG(uap, shmid);
	shmseg->shm_lpid = p->p_p->ps_pid;
	shmseg->shm_atime = time_second;
	shmseg->shm_nattch++;
	*retval = attach_va;
	return (0);
}

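/*
 * shmctl(2): wrapper around shmctl1() with plain copyin/copyout.
 */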
int
sys_shmctl(struct proc *p, void *v, register_t *retval)
{
	struct sys_shmctl_args /* {
		syscallarg(int) shmid;
		syscallarg(int) cmd;
		syscallarg(struct shmid_ds *) buf;
	} */ *uap = v;

	return (shmctl1(p, SCARG(uap, shmid), SCARG(uap, cmd),
	    (caddr_t)SCARG(uap, buf), copyin, copyout));
}

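/*
 * Guts of shmctl(2).  The ds_copyin/ds_copyout hooks let callers
 * substitute their own transfer functions (e.g. emulation code that
 * must translate the shmid_ds layout).
 */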
int
shmctl1(struct proc *p, int shmid, int cmd, caddr_t buf,
    int (*ds_copyin)(const void *, void *, size_t),
    int (*ds_copyout)(const void *, void *, size_t))
{
	struct ucred *cred = p->p_ucred;
	struct shmid_ds inbuf, *shmseg;
	int error;

	shmseg = shm_find_segment_by_shmid(shmid);
	if (shmseg == NULL)
		return (EINVAL);
	switch (cmd) {
	case IPC_STAT:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_R)) != 0)
			return (error);
		error = ds_copyout(shmseg, buf, sizeof(inbuf));
		if (error)
			return (error);
		break;
	case IPC_SET:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return (error);
		error = ds_copyin(buf, &inbuf, sizeof(inbuf));
		if (error)
			return (error);
		shmseg->shm_perm.uid = inbuf.shm_perm.uid;
		shmseg->shm_perm.gid = inbuf.shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (inbuf.shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time_second;
		break;
	case IPC_RMID:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return (error);
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(shmid);
			shmsegs[shm_last_free] = NULL;
		}
		break;
	case SHM_LOCK:
	case SHM_UNLOCK:
	default:
		return (EINVAL);
	}
	return (0);
}

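/*
 * shmget(2) helper for keys that already exist: check permissions and
 * the requested size, and honor IPC_CREAT|IPC_EXCL.
 */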
int
shmget_existing(struct proc *p,
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap,
	int mode, int segnum, register_t *retval)
{
	struct shmid_ds *shmseg;
	struct ucred *cred = p->p_ucred;
	int error;

	shmseg = shmsegs[segnum];	/* We assume the segnum is valid */
	if ((error = ipcperm(cred, &shmseg->shm_perm, mode)) != 0)
		return (error);
	if (SCARG(uap, size) && SCARG(uap, size) > shmseg->shm_segsz)
		return (EINVAL);
	if ((SCARG(uap, shmflg) & (IPC_CREAT | IPC_EXCL)) ==
	    (IPC_CREAT | IPC_EXCL))
		return (EEXIST);
	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return (0);
}

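/*
 * shmget(2) helper that creates a new segment: reserve the counters,
 * pick a free slot, create the backing anonymous object and fill in
 * the shmid_ds.
 */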
int
shmget_allocate_segment(struct proc *p,
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap,
	int mode, register_t *retval)
{
	size_t size;
	key_t key;
	int segnum;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;
	int error = 0;

	if (SCARG(uap, size) < shminfo.shmmin ||
	    SCARG(uap, size) > shminfo.shmmax)
		return (EINVAL);
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return (ENOSPC);
	size = round_page(SCARG(uap, size));
	if (shm_committed + atop(size) > shminfo.shmall)
		return (ENOMEM);
	shm_nused++;
	shm_committed += atop(size);

	/*
	 * If a key has been specified and we had to wait for memory
	 * to be freed up we need to verify that no one has allocated
	 * the key we want in the meantime.  Yes, this is ugly.
	 */
	key = SCARG(uap, key);
	shmseg = pool_get(&shm_pool, key == IPC_PRIVATE ? PR_WAITOK :
	    PR_NOWAIT);
	if (shmseg == NULL) {
		shmseg = pool_get(&shm_pool, PR_WAITOK);
		if (shm_find_segment_by_key(key) != -1) {
			pool_put(&shm_pool, shmseg);
			shm_nused--;
			shm_committed -= atop(size);
			return (EAGAIN);
		}
	}

	/* XXX - hash shmids instead */
	if (shm_last_free < 0) {
		for (segnum = 0; segnum < shminfo.shmmni && shmsegs[segnum];
		    segnum++)
			;
		if (segnum == shminfo.shmmni)
			panic("shmseg free count inconsistent");
	} else {
		segnum = shm_last_free;
		if (++shm_last_free >= shminfo.shmmni || shmsegs[shm_last_free])
			shm_last_free = -1;
	}
	shmsegs[segnum] = shmseg;

	shm_handle = (struct shm_handle *)((caddr_t)shmseg + sizeof(*shmseg));
	shm_handle->shm_object = uao_create(size, 0);

	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (mode & ACCESSPERMS);
	shmseg->shm_perm.seq = shmseqs[segnum] = (shmseqs[segnum] + 1) & 0x7fff;
	shmseg->shm_perm.key = key;
	shmseg->shm_segsz = SCARG(uap, size);
	shmseg->shm_cpid = p->p_p->ps_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time_second;
	shmseg->shm_internal = shm_handle;

	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return (error);
}

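/*
 * shmget(2): return the id of an existing segment matching the key,
 * or allocate a new one; EAGAIN from the allocator means we slept and
 * must recheck the key.
 */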
int
sys_shmget(struct proc *p, void *v, register_t *retval)
{
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap = v;
	int segnum, mode, error;

	mode = SCARG(uap, shmflg) & ACCESSPERMS;

	if (SCARG(uap, key) != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(SCARG(uap, key));
		if (segnum >= 0)
			return (shmget_existing(p, uap, mode, segnum, retval));
		if ((SCARG(uap, shmflg) & IPC_CREAT) == 0)
			return (ENOENT);
	}
	error = shmget_allocate_segment(p, uap, mode, retval);
	if (error == EAGAIN)
		goto again;
	return (error);
}

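/*
 * Illustrative userland sketch (not part of this file) of the syscall
 * sequence the functions above implement; error checks omitted for
 * brevity:
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);
 *	p[0] = 1;			// touch the shared page
 *	shmdt(p);
 *	shmctl(id, IPC_RMID, NULL);	// mark for removal
 */

/*
 * Inherit the parent's attach table across fork(2) and bump the
 * attach count on every segment it references.
 */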
void
shmfork(struct vmspace *vm1, struct vmspace *vm2)
{
	struct shmmap_head *shmmap_h;
	struct shmmap_state *shmmap_s;
	struct shmid_ds *shmseg;
	size_t size;
	int i;

	if (vm1->vm_shm == NULL) {
		vm2->vm_shm = NULL;
		return;
	}

	shmmap_h = (struct shmmap_head *)vm1->vm_shm;
	size = sizeof(int) + shmmap_h->shmseg * sizeof(struct shmmap_state);
	vm2->vm_shm = malloc(size, M_SHM, M_WAITOK);
	bcopy(vm1->vm_shm, vm2->vm_shm, size);
	for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
	    i++, shmmap_s++) {
		if (shmmap_s->shmid != -1 &&
		    (shmseg = shmsegs[IPCID_TO_IX(shmmap_s->shmid)]) != NULL)
			shmseg->shm_nattch++;
	}
}

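/*
 * Drop all attachments and free the attach table when an address
 * space is torn down.
 */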
void
shmexit(struct vmspace *vm)
{
	struct shmmap_head *shmmap_h;
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_h = (struct shmmap_head *)vm->vm_shm;
	if (shmmap_h == NULL)
		return;
	for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
	    i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shm_delete_mapping(vm, shmmap_s);
	free(vm->vm_shm, M_SHM);
	vm->vm_shm = NULL;
}

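/*
 * Called at boot: set up the segment pool and the shmsegs/shmseqs
 * arrays sized from shminfo, and convert shmmax from pages to bytes.
 */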
void
shminit(void)
{

	pool_init(&shm_pool, sizeof(struct shmid_ds) +
	    sizeof(struct shm_handle), 0, 0, 0, "shmpl",
	    &pool_allocator_nointr);
	shmsegs = malloc(shminfo.shmmni * sizeof(struct shmid_ds *),
	    M_SHM, M_WAITOK|M_ZERO);
	shmseqs = malloc(shminfo.shmmni * sizeof(unsigned short),
	    M_SHM, M_WAITOK|M_ZERO);

	shminfo.shmmax *= PAGE_SIZE;	/* actually in pages */
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}

/*
 * Userland access to struct shminfo.
 */
int
sysctl_sysvshm(int *name, u_int namelen, void *oldp, size_t *oldlenp,
	void *newp, size_t newlen)
{
	int error, val;
	struct shmid_ds **newsegs;
	unsigned short *newseqs;

	if (namelen != 2) {
		switch (name[0]) {
		case KERN_SHMINFO_SHMMAX:
		case KERN_SHMINFO_SHMMIN:
		case KERN_SHMINFO_SHMMNI:
		case KERN_SHMINFO_SHMSEG:
		case KERN_SHMINFO_SHMALL:
			break;
		default:
			return (ENOTDIR);	/* overloaded */
		}
	}

	switch (name[0]) {
	case KERN_SHMINFO_SHMMAX:
		if ((error = sysctl_int(oldp, oldlenp, newp, newlen,
		    &shminfo.shmmax)) || newp == NULL)
			return (error);

		/* If new shmmax > shmall, crank shmall */
		if (atop(round_page(shminfo.shmmax)) > shminfo.shmall)
			shminfo.shmall = atop(round_page(shminfo.shmmax));
		return (0);
	case KERN_SHMINFO_SHMMIN:
		val = shminfo.shmmin;
		if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &val)) ||
		    val == shminfo.shmmin)
			return (error);
		if (val <= 0)
			return (EINVAL);	/* shmmin must be >= 1 */
		shminfo.shmmin = val;
		return (0);
	case KERN_SHMINFO_SHMMNI:
		val = shminfo.shmmni;
		if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &val)) ||
		    val == shminfo.shmmni)
			return (error);

		if (val < shminfo.shmmni || val > 0xffff)
			return (EINVAL);

		/* Expand shmsegs and shmseqs arrays */
		newsegs = malloc(val * sizeof(struct shmid_ds *),
		    M_SHM, M_WAITOK|M_ZERO);
		bcopy(shmsegs, newsegs,
		    shminfo.shmmni * sizeof(struct shmid_ds *));
		free(shmsegs, M_SHM);
		shmsegs = newsegs;
		newseqs = malloc(val * sizeof(unsigned short), M_SHM,
		    M_WAITOK|M_ZERO);
		bcopy(shmseqs, newseqs,
		    shminfo.shmmni * sizeof(unsigned short));
		free(shmseqs, M_SHM);
		shmseqs = newseqs;
		shminfo.shmmni = val;
		return (0);
	case KERN_SHMINFO_SHMSEG:
		val = shminfo.shmseg;
		if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &val)) ||
		    val == shminfo.shmseg)
			return (error);
		if (val <= 0)
			return (EINVAL);	/* shmseg must be >= 1 */
		shminfo.shmseg = val;
		return (0);
	case KERN_SHMINFO_SHMALL:
		val = shminfo.shmall;
		if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &val)) ||
		    val == shminfo.shmall)
			return (error);
		if (val < shminfo.shmall)
			return (EINVAL);	/* can't decrease shmall */
		shminfo.shmall = val;
		return (0);
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}