/*	$OpenBSD: sysv_shm.c,v 1.81 2024/11/05 15:34:30 mpi Exp $	*/
/*	$NetBSD: sysv_shm.c,v 1.50 1998/10/21 22:24:29 tron Exp $	*/

/*
 * Copyright (c) 2002 Todd C. Miller <millert@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * Sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F39502-99-1-0512.
 */
/*
 * Copyright (c) 1994 Adam Glass and Charles M. Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles M.
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/pool.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/stat.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

extern struct shminfo shminfo;
struct shmid_ds **shmsegs;	/* linear mapping of shmid -> shmseg */
struct pool shm_pool;
unsigned short *shmseqs;	/* array of shm sequence numbers */

struct shmid_ds *shm_find_segment_by_shmid(int);

/*
 * Provides the following externally accessible functions:
 *
 * shminit(void);		                 initialization
 * shmexit(struct vmspace *)                     cleanup
 * shmfork(struct vmspace *, struct vmspace *)   fork handling
 * sys_shm{at,ctl,dt,get}()                      shm{at,ctl,dt,get}(2) syscalls
 *
 * Structures:
 * shmsegs (an array of 'struct shmid_ds *')
 * per proc 'struct shmmap_head' with an array of 'struct shmmap_state'
 */
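
/*
 * Illustrative userland usage of the syscalls implemented below (a
 * sketch only, not part of this file; error handling omitted):
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);
 *	p[0] = 'x';
 *	shmdt(p);
 *	shmctl(id, IPC_RMID, NULL);
 */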

#define	SHMSEG_REMOVED  	0x0200		/* can't overlap ACCESSPERMS */

int shm_last_free, shm_nused, shm_committed;

struct shm_handle {
	struct uvm_object *shm_object;
};

struct shmmap_state {
	vaddr_t va;
	int shmid;
};

struct shmmap_head {
	int shmseg;
	struct shmmap_state state[1];
};

int shm_find_segment_by_key(key_t);
void shm_deallocate_segment(struct shmid_ds *);
int shm_delete_mapping(struct vmspace *, struct shmmap_state *);
int shmget_existing(struct proc *, struct sys_shmget_args *,
			 int, int, register_t *);
int shmget_allocate_segment(struct proc *, struct sys_shmget_args *,
				 int, register_t *);

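/*
 * Return the index into shmsegs of the segment matching `key', or -1
 * if none: a linear scan, adequate for typical shmmni values.
 */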
int
shm_find_segment_by_key(key_t key)
{
	struct shmid_ds *shmseg;
	int i;

	for (i = 0; i < shminfo.shmmni; i++) {
		shmseg = shmsegs[i];
		if (shmseg != NULL && shmseg->shm_perm.key == key)
			return (i);
	}
	return (-1);
}

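/*
 * Look up a segment by shmid.  A shmid encodes both an index into
 * shmsegs (IPCID_TO_IX) and a sequence number (IPCID_TO_SEQ); a stale
 * id whose sequence no longer matches the slot is rejected.
 */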
struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni ||
	    (shmseg = shmsegs[segnum]) == NULL ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return (NULL);
	return (shmseg);
}

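/*
 * Release a segment's backing uvm_object and its shmid_ds, and update
 * the global accounting; the caller clears the shmsegs[] slot.
 */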
void
shm_deallocate_segment(struct shmid_ds *shmseg)
{
	struct shm_handle *shm_handle;
	size_t size;

	shm_handle = shmseg->shm_internal;
	size = round_page(shmseg->shm_segsz);
	uao_detach(shm_handle->shm_object);
	pool_put(&shm_pool, shmseg);
	shm_committed -= atop(size);
	shm_nused--;
}

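/*
 * Drop one attachment: unmap the segment from the address space and,
 * if this was the last attachment of a segment already marked
 * SHMSEG_REMOVED (IPC_RMID), deallocate it.
 */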
int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
	struct shmid_ds *shmseg;
	int segnum, deallocate = 0;
	vaddr_t end;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni ||
	    (shmseg = shmsegs[segnum]) == NULL)
		return (EINVAL);
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		deallocate = 1;
		shm_last_free = segnum;
		shmsegs[shm_last_free] = NULL;
	}
	end = round_page(shmmap_s->va+shmseg->shm_segsz);
	uvm_unmap(&vm->vm_map, trunc_page(shmmap_s->va), end);
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = gettime();
	if (deallocate)
		shm_deallocate_segment(shmseg);
	return (0);
}

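/*
 * shmdt(2): detach the mapping whose attach address matches shmaddr.
 */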
int
sys_shmdt(struct proc *p, void *v, register_t *retval)
{
	struct sys_shmdt_args /* {
		syscallarg(const void *) shmaddr;
	} */ *uap = v;
	struct shmmap_head *shmmap_h;
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_h = (struct shmmap_head *)p->p_vmspace->vm_shm;
	if (shmmap_h == NULL)
		return (EINVAL);

	for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
	    i++, shmmap_s++)
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vaddr_t)SCARG(uap, shmaddr))
			break;
	if (i == shmmap_h->shmseg)
		return (EINVAL);
	return (shm_delete_mapping(p->p_vmspace, shmmap_s));
}

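/*
 * shmat(2): map a segment into the calling process, allocating the
 * per-process shmmap_head on first use.  With SHM_RND the requested
 * address is rounded down to a SHMLBA boundary; otherwise it must
 * already be SHMLBA-aligned.
 */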
int
sys_shmat(struct proc *p, void *v, register_t *retval)
{
	struct sys_shmat_args /* {
		syscallarg(int) shmid;
		syscallarg(const void *) shmaddr;
		syscallarg(int) shmflg;
	} */ *uap = v;
	int error, i, flags = 0;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shmmap_head *shmmap_h;
	struct shmmap_state *shmmap_s;
	struct shm_handle *shm_handle;
	vaddr_t attach_va;
	vm_prot_t prot;
	vsize_t size;

	shmmap_h = (struct shmmap_head *)p->p_vmspace->vm_shm;
	if (shmmap_h == NULL) {
		size = sizeof(int) +
		    shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_h = malloc(size, M_SHM, M_WAITOK | M_CANFAIL);
		if (shmmap_h == NULL)
			return (ENOMEM);
		shmmap_h->shmseg = shminfo.shmseg;
		for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
		    i++, shmmap_s++)
			shmmap_s->shmid = -1;
		p->p_vmspace->vm_shm = (caddr_t)shmmap_h;
	}
	shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid));
	if (shmseg == NULL)
		return (EINVAL);
	error = ipcperm(cred, &shmseg->shm_perm,
		    (SCARG(uap, shmflg) & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		return (error);
	for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shmmap_h->shmseg)
		return (EMFILE);
	size = round_page(shmseg->shm_segsz);
	prot = PROT_READ;
	if ((SCARG(uap, shmflg) & SHM_RDONLY) == 0)
		prot |= PROT_WRITE;
	if (SCARG(uap, shmaddr)) {
		flags |= UVM_FLAG_FIXED;
		if (SCARG(uap, shmflg) & SHM_RND)
			attach_va =
			    (vaddr_t)SCARG(uap, shmaddr) & ~(SHMLBA-1);
		else if (((vaddr_t)SCARG(uap, shmaddr) & (SHMLBA-1)) == 0)
			attach_va = (vaddr_t)SCARG(uap, shmaddr);
		else
			return (EINVAL);
	} else
		attach_va = 0;
	/*
	 * Since uvm_map() could end up sleeping, grab a reference to prevent
	 * the segment from being deallocated while sleeping.
	 */
	shmseg->shm_nattch++;
	shm_handle = shmseg->shm_internal;
	uao_reference(shm_handle->shm_object);
	error = uvm_map(&p->p_vmspace->vm_map, &attach_va, size,
	    shm_handle->shm_object, 0, 0, UVM_MAPFLAG(prot, prot,
	    MAP_INHERIT_SHARE, MADV_RANDOM, flags));
	if (error) {
		if ((--shmseg->shm_nattch <= 0) &&
		    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(SCARG(uap, shmid));
			shmsegs[shm_last_free] = NULL;
		} else {
			uao_detach(shm_handle->shm_object);
		}
		return (error);
	}

	shmmap_s->va = attach_va;
	shmmap_s->shmid = SCARG(uap, shmid);
	shmseg->shm_lpid = p->p_p->ps_pid;
	shmseg->shm_atime = gettime();
	*retval = attach_va;
	return (0);
}

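/*
 * shmctl(2): IPC_STAT copies the shmid_ds out, IPC_SET updates
 * ownership and permission bits, and IPC_RMID marks the segment
 * removed so it is deallocated once the last attachment is gone.
 * SHM_LOCK and SHM_UNLOCK are not supported.
 */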
int
sys_shmctl(struct proc *p, void *v, register_t *retval)
{
	struct sys_shmctl_args /* {
		syscallarg(int) shmid;
		syscallarg(int) cmd;
		syscallarg(struct shmid_ds *) buf;
	} */ *uap = v;
	int		shmid = SCARG(uap, shmid);
	int		cmd = SCARG(uap, cmd);
	void		*buf = SCARG(uap, buf);
	struct ucred	*cred = p->p_ucred;
	struct shmid_ds	inbuf, *shmseg;
	int		error;

	if (cmd == IPC_SET) {
		error = copyin(buf, &inbuf, sizeof(inbuf));
		if (error)
			return (error);
	}

	shmseg = shm_find_segment_by_shmid(shmid);
	if (shmseg == NULL)
		return (EINVAL);
	switch (cmd) {
	case IPC_STAT:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_R)) != 0)
			return (error);
		error = copyout(shmseg, buf, sizeof(inbuf));
		if (error)
			return (error);
		break;
	case IPC_SET:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return (error);
		shmseg->shm_perm.uid = inbuf.shm_perm.uid;
		shmseg->shm_perm.gid = inbuf.shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (inbuf.shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = gettime();
		break;
	case IPC_RMID:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return (error);
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(shmid);
			shmsegs[shm_last_free] = NULL;
		}
		break;
	case SHM_LOCK:
	case SHM_UNLOCK:
	default:
		return (EINVAL);
	}
	return (0);
}

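/*
 * shmget(2) helper for a key that already exists: check permissions
 * and size, and honour IPC_CREAT|IPC_EXCL by failing with EEXIST.
 */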
int
shmget_existing(struct proc *p,
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap,
	int mode, int segnum, register_t *retval)
{
	struct shmid_ds *shmseg;
	struct ucred *cred = p->p_ucred;
	int error;

	shmseg = shmsegs[segnum];	/* We assume the segnum is valid */
	if ((error = ipcperm(cred, &shmseg->shm_perm, mode)) != 0)
		return (error);
	if (SCARG(uap, size) && SCARG(uap, size) > shmseg->shm_segsz)
		return (EINVAL);
	if ((SCARG(uap, shmflg) & (IPC_CREAT | IPC_EXCL)) ==
	    (IPC_CREAT | IPC_EXCL))
		return (EEXIST);
	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return (0);
}

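/*
 * shmget(2) helper that creates a new segment: reserve the
 * accounting, pick a free slot in shmsegs, create the anonymous uvm
 * object and initialize the shmid_ds.  Returns EAGAIN if we slept in
 * pool_get() and the key was taken in the meantime, in which case
 * sys_shmget() retries the lookup.
 */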
int
shmget_allocate_segment(struct proc *p,
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap,
	int mode, register_t *retval)
{
	size_t size;
	key_t key;
	int segnum;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;
	int error = 0;

	if (SCARG(uap, size) < shminfo.shmmin ||
	    SCARG(uap, size) > shminfo.shmmax)
		return (EINVAL);
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return (ENOSPC);
	size = round_page(SCARG(uap, size));
	if (shm_committed + atop(size) > shminfo.shmall)
		return (ENOMEM);
	shm_nused++;
	shm_committed += atop(size);

	/*
	 * If a key has been specified and we had to wait for memory
	 * to be freed up we need to verify that no one has allocated
	 * the key we want in the meantime.  Yes, this is ugly.
	 */
	key = SCARG(uap, key);
	shmseg = pool_get(&shm_pool, key == IPC_PRIVATE ? PR_WAITOK :
	    PR_NOWAIT);
	if (shmseg == NULL) {
		shmseg = pool_get(&shm_pool, PR_WAITOK);
		if (shm_find_segment_by_key(key) != -1) {
			pool_put(&shm_pool, shmseg);
			shm_nused--;
			shm_committed -= atop(size);
			return (EAGAIN);
		}
	}

	/* XXX - hash shmids instead */
	if (shm_last_free < 0) {
		for (segnum = 0; segnum < shminfo.shmmni && shmsegs[segnum];
		    segnum++)
			;
		if (segnum == shminfo.shmmni)
			panic("shmseg free count inconsistent");
	} else {
		segnum = shm_last_free;
		if (++shm_last_free >= shminfo.shmmni || shmsegs[shm_last_free])
			shm_last_free = -1;
	}
	shmsegs[segnum] = shmseg;

	shm_handle = (struct shm_handle *)((caddr_t)shmseg + sizeof(*shmseg));
	shm_handle->shm_object = uao_create(size, 0);

	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (mode & ACCESSPERMS);
	shmseg->shm_perm.seq = shmseqs[segnum] = (shmseqs[segnum] + 1) & 0x7fff;
	shmseg->shm_perm.key = key;
	shmseg->shm_segsz = SCARG(uap, size);
	shmseg->shm_cpid = p->p_p->ps_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = gettime();
	shmseg->shm_internal = shm_handle;

	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return (error);
}

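/*
 * shmget(2): look up the key (unless IPC_PRIVATE) and either return
 * the existing segment or allocate a new one.
 */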
int
sys_shmget(struct proc *p, void *v, register_t *retval)
{
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap = v;
	int segnum, mode, error;

	mode = SCARG(uap, shmflg) & ACCESSPERMS;

	if (SCARG(uap, key) != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(SCARG(uap, key));
		if (segnum >= 0)
			return (shmget_existing(p, uap, mode, segnum, retval));
		if ((SCARG(uap, shmflg) & IPC_CREAT) == 0)
			return (ENOENT);
	}
	error = shmget_allocate_segment(p, uap, mode, retval);
	if (error == EAGAIN)
		goto again;
	return (error);
}

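/*
 * fork handling: the child inherits a copy of the parent's shm
 * mappings, so bump shm_nattch on every attached segment.
 */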
void
shmfork(struct vmspace *vm1, struct vmspace *vm2)
{
	struct shmmap_head *shmmap_h;
	struct shmmap_state *shmmap_s;
	struct shmid_ds *shmseg;
	size_t size;
	int i;

	if (vm1->vm_shm == NULL) {
		vm2->vm_shm = NULL;
		return;
	}

	shmmap_h = (struct shmmap_head *)vm1->vm_shm;
	size = sizeof(int) + shmmap_h->shmseg * sizeof(struct shmmap_state);
	vm2->vm_shm = malloc(size, M_SHM, M_WAITOK);
	memcpy(vm2->vm_shm, vm1->vm_shm, size);
	for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
	    i++, shmmap_s++) {
		if (shmmap_s->shmid != -1 &&
		    (shmseg = shmsegs[IPCID_TO_IX(shmmap_s->shmid)]) != NULL)
			shmseg->shm_nattch++;
	}
}

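/*
 * exit/exec cleanup: detach every remaining mapping and free the
 * per-process shmmap_head.
 */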
void
shmexit(struct vmspace *vm)
{
	struct shmmap_head *shmmap_h;
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	shmmap_h = (struct shmmap_head *)vm->vm_shm;
	if (shmmap_h == NULL)
		return;
	size = sizeof(int) + shmmap_h->shmseg * sizeof(struct shmmap_state);
	for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
	    i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shm_delete_mapping(vm, shmmap_s);
	free(vm->vm_shm, M_SHM, size);
	vm->vm_shm = NULL;
}

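/*
 * Called once at startup: set up the shmid_ds pool and the
 * shmsegs/shmseqs arrays sized by shminfo.shmmni, and convert
 * shminfo.shmmax from pages to bytes.
 */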
void
shminit(void)
{

	pool_init(&shm_pool,
	    sizeof(struct shmid_ds) + sizeof(struct shm_handle), 0,
	    IPL_NONE, PR_WAITOK, "shmpl", NULL);
	shmsegs = mallocarray(shminfo.shmmni, sizeof(struct shmid_ds *),
	    M_SHM, M_WAITOK|M_ZERO);
	shmseqs = mallocarray(shminfo.shmmni, sizeof(unsigned short),
	    M_SHM, M_WAITOK|M_ZERO);

	shminfo.shmmax *= PAGE_SIZE;	/* actually in pages */
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}

/* Expand shmsegs and shmseqs arrays */
void
shm_reallocate(int val)
{
	struct shmid_ds **newsegs;
	unsigned short *newseqs;

	newsegs = mallocarray(val, sizeof(struct shmid_ds *),
	    M_SHM, M_WAITOK | M_ZERO);
	memcpy(newsegs, shmsegs,
	    shminfo.shmmni * sizeof(struct shmid_ds *));
	free(shmsegs, M_SHM,
	    shminfo.shmmni * sizeof(struct shmid_ds *));
	shmsegs = newsegs;
	newseqs = mallocarray(val, sizeof(unsigned short), M_SHM,
	    M_WAITOK | M_ZERO);
	memcpy(newseqs, shmseqs,
	    shminfo.shmmni * sizeof(unsigned short));
	free(shmseqs, M_SHM, shminfo.shmmni * sizeof(unsigned short));
	shmseqs = newseqs;
	shminfo.shmmni = val;
}

/*
 * Userland access to struct shminfo.
 */
int
sysctl_sysvshm(int *name, u_int namelen, void *oldp, size_t *oldlenp,
	void *newp, size_t newlen)
{
	int error, val;

	if (namelen != 1)
		return (ENOTDIR);	/* leaf-only */

	switch (name[0]) {
	case KERN_SHMINFO_SHMMAX:
		if ((error = sysctl_int_bounded(oldp, oldlenp, newp, newlen,
		    &shminfo.shmmax, 0, INT_MAX)) || newp == NULL)
			return (error);

		/* If new shmmax > shmall, crank shmall */
		if (atop(round_page(shminfo.shmmax)) > shminfo.shmall)
			shminfo.shmall = atop(round_page(shminfo.shmmax));
		return (0);
	case KERN_SHMINFO_SHMMIN:
		return (sysctl_int_bounded(oldp, oldlenp, newp, newlen,
		    &shminfo.shmmin, 1, INT_MAX));
	case KERN_SHMINFO_SHMMNI:
		val = shminfo.shmmni;
		/* can't decrease shmmni */
		error = sysctl_int_bounded(oldp, oldlenp, newp, newlen,
		    &val, val, 0xffff);
		/* returns success and skips reallocation if val is unchanged */
		if (error || val == shminfo.shmmni)
			return (error);
		shm_reallocate(val);
		return (0);
	case KERN_SHMINFO_SHMSEG:
		return (sysctl_int_bounded(oldp, oldlenp, newp, newlen,
		    &shminfo.shmseg, 1, INT_MAX));
	case KERN_SHMINFO_SHMALL:
		/* can't decrease shmall */
		return (sysctl_int_bounded(oldp, oldlenp, newp, newlen,
		    &shminfo.shmall, shminfo.shmall, INT_MAX));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}