xref: /dragonfly/sys/kern/sysv_shm.c (revision 6ab64ab6)
/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_sysvipc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysent.h>
#include <sys/jail.h>

#include <sys/mplock2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

static int shmget_allocate_segment (struct proc *p, struct shmget_args *uap, int mode);
static int shmget_existing (struct proc *p, struct shmget_args *uap, int mode, int segnum);

#define	SHMSEG_FREE     	0x0200
#define	SHMSEG_REMOVED  	0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED		0x1000

static int shm_last_free, shm_committed, shmalloced;
int shm_nused;
static struct shmid_ds	*shmsegs;

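/*
 * Per-segment backing store: a single VM object, allocated from the
 * phys pager or the swap pager depending on shm_use_phys.
 */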
struct shm_handle {
	/* vm_offset_t kva; */
	vm_object_t shm_object;
};

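/*
 * One slot in a process's attach table (vmspace->vm_shm); shmid is -1
 * when the slot is unused.
 */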
struct shmmap_state {
	vm_offset_t va;
	int shmid;
};

static void shm_deallocate_segment (struct shmid_ds *);
static int shm_find_segment_by_key (key_t);
static struct shmid_ds *shm_find_segment_by_shmid (int);
static int shm_delete_mapping (struct vmspace *vm, struct shmmap_state *);
static void shmrealloc (void);
static void shminit (void *);

/*
 * Tuneable values
 */
#ifndef SHMMIN
#define	SHMMIN	1
#endif
#ifndef SHMMNI
#define	SHMMNI	512
#endif
#ifndef SHMSEG
#define	SHMSEG	1024
#endif

struct	shminfo shminfo = {
	0,		/* shmmax, set from shmall at boot (see shminit()) */
	SHMMIN,		/* shmmin, minimum segment size */
	SHMMNI,		/* shmmni, maximum number of identifiers */
	SHMSEG,		/* shmseg, maximum segments per process */
	0		/* shmall, maximum pages, defaults at boot to
			 * 2/3 of main memory (see shminit()) */
};

static int shm_allow_removed;
static int shm_use_phys = 1;

TUNABLE_LONG("kern.ipc.shmmin", &shminfo.shmmin);
TUNABLE_LONG("kern.ipc.shmmni", &shminfo.shmmni);
TUNABLE_LONG("kern.ipc.shmseg", &shminfo.shmseg);
TUNABLE_LONG("kern.ipc.shmmaxpgs", &shminfo.shmall);
TUNABLE_INT("kern.ipc.shm_use_phys", &shm_use_phys);

SYSCTL_LONG(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0,
    "Max shared memory segment size");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0,
    "Min shared memory segment size");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RD, &shminfo.shmmni, 0,
    "Max number of shared memory identifiers");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RW, &shminfo.shmseg, 0,
    "Max shared memory segments per process");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0,
    "Max pages of shared memory");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW, &shm_use_phys, 0,
    "Use phys pager allocation instead of swap pager allocation");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_allow_removed, CTLFLAG_RW,
    &shm_allow_removed, 0,
    "Enable/Disable attachment to attached segments marked for removal");
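
/*
 * Illustrative only: the tunables above can be preset from the boot
 * loader, e.g. in /boot/loader.conf (values are examples, not
 * recommendations):
 *
 *	kern.ipc.shmseg=1024
 *	kern.ipc.shm_use_phys=1
 *
 * The CTLFLAG_RW entries may also be changed at runtime, e.g.
 *
 *	sysctl kern.ipc.shmmax=1073741824
 */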

static int
shm_find_segment_by_key(key_t key)
{
	int i;

	for (i = 0; i < shmalloced; i++) {
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return i;
	}
	return -1;
}

static struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shmalloced)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
	    (!shm_allow_removed &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED) != 0) ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid)) {
		return NULL;
	}
	return shmseg;
}

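/*
 * Release a segment's backing VM object and handle and return the
 * slot to the free pool, updating the global accounting.
 */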
static void
shm_deallocate_segment(struct shmid_ds *shmseg)
{
	struct shm_handle *shm_handle;
	size_t size;

	shm_handle = shmseg->shm_internal;
	vm_object_deallocate(shm_handle->shm_object);
	kfree((caddr_t)shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	size = round_page(shmseg->shm_segsz);
	shm_committed -= btoc(size);
	shm_nused--;
	shmseg->shm_perm.mode = SHMSEG_FREE;
}

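/*
 * Unmap one attach entry from a vmspace.  The segment itself is
 * deallocated when the last reference is dropped and it has been
 * marked for removal.
 */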
static int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
	struct shmid_ds *shmseg;
	int segnum, result;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = round_page(shmseg->shm_segsz);
	result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
	if (result != KERN_SUCCESS)
		return EINVAL;
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time_second;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}

/*
 * shmdt(2): detach the shared memory segment mapped at the given
 * address from the calling process.
 *
 * MPALMOSTSAFE
 */
int
sys_shmdt(struct shmdt_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct shmmap_state *shmmap_s;
	long i;
	int error;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	get_mplock();
	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		error = EINVAL;
		goto done;
	}
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr)
			break;
	}
	if (i == shminfo.shmseg)
		error = EINVAL;
	else
		error = shm_delete_mapping(p->p_vmspace, shmmap_s);
done:
	rel_mplock();
	return (error);
}

/*
 * shmat(2): attach a shared memory segment to the calling process's
 * address space.
 *
 * MPALMOSTSAFE
 */
int
sys_shmat(struct shmat_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int error, flags;
	long i;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	struct shm_handle *shm_handle;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;
	vm_size_t align;
	int rv;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	get_mplock();
again:
	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = kmalloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		if (p->p_vmspace->vm_shm != NULL) {
			kfree(shmmap_s, M_SHM);
			goto again;
		}
		p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done;
	}
	error = ipcperm(p, &shmseg->shm_perm,
			(uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		goto done;
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg) {
		error = EMFILE;
		goto done;
	}
	size = round_page(shmseg->shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
	prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
	prot = VM_PROT_READ;
#endif
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (uap->shmaddr) {
		flags |= MAP_FIXED;
		if (uap->shmflg & SHM_RND) {
			attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);
		} else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0) {
			attach_va = (vm_offset_t)uap->shmaddr;
		} else {
			error = EINVAL;
			goto done;
		}
	} else {
		/*
		 * This is just a hint to vm_map_find() about where to put it.
		 */
		attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr +
				       maxtsiz + maxdsiz);
	}

	/*
	 * Handle alignment.  For large memory maps it is possible
	 * that the MMU can optimize the page table so align anything
	 * that is a multiple of SEG_SIZE to SEG_SIZE.
	 */
	if ((flags & MAP_FIXED) == 0 && (size & SEG_MASK) == 0)
		align = SEG_SIZE;
	else
		align = PAGE_SIZE;

	shm_handle = shmseg->shm_internal;
	vm_object_hold(shm_handle->shm_object);
	vm_object_chain_wait(shm_handle->shm_object, 0);
	vm_object_reference_locked(shm_handle->shm_object);
	rv = vm_map_find(&p->p_vmspace->vm_map,
			 shm_handle->shm_object, NULL,
			 0, &attach_va, size,
			 align,
			 ((flags & MAP_FIXED) ? 0 : 1),
			 VM_MAPTYPE_NORMAL,
			 prot, prot, 0);
	vm_object_drop(shm_handle->shm_object);
	if (rv != KERN_SUCCESS) {
		vm_object_deallocate(shm_handle->shm_object);
		error = ENOMEM;
		goto done;
	}
	vm_map_inherit(&p->p_vmspace->vm_map,
		       attach_va, attach_va + size, VM_INHERIT_SHARE);

	KKASSERT(shmmap_s->shmid == -1);
	shmmap_s->va = attach_va;
	shmmap_s->shmid = uap->shmid;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time_second;
	shmseg->shm_nattch++;
	uap->sysmsg_resultp = (void *)attach_va;
	error = 0;
done:
	rel_mplock();
	return error;
}
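
/*
 * Illustrative only: typical userland use of the calls implemented in
 * this file (error handling omitted):
 *
 *	int id = shmget(IPC_PRIVATE, 65536, IPC_CREAT | 0600);
 *	void *p = shmat(id, NULL, 0);
 *	...use the memory at p...
 *	shmdt(p);
 *	shmctl(id, IPC_RMID, NULL);
 */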

/*
 * shmctl(2): shared memory control operations (IPC_STAT, IPC_SET,
 * IPC_RMID).
 *
 * MPALMOSTSAFE
 */
int
sys_shmctl(struct shmctl_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int error;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	get_mplock();
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done;
	}

	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(p, &shmseg->shm_perm, IPC_R);
		if (error == 0)
			error = copyout(shmseg, uap->buf, sizeof(inbuf));
		break;
	case IPC_SET:
		error = ipcperm(p, &shmseg->shm_perm, IPC_M);
		if (error == 0)
			error = copyin(uap->buf, &inbuf, sizeof(inbuf));
		if (error == 0) {
			shmseg->shm_perm.uid = inbuf.shm_perm.uid;
			shmseg->shm_perm.gid = inbuf.shm_perm.gid;
			shmseg->shm_perm.mode =
			    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
			    (inbuf.shm_perm.mode & ACCESSPERMS);
			shmseg->shm_ctime = time_second;
		}
		break;
	case IPC_RMID:
		error = ipcperm(p, &shmseg->shm_perm, IPC_M);
		if (error == 0) {
			shmseg->shm_perm.key = IPC_PRIVATE;
			shmseg->shm_perm.mode |= SHMSEG_REMOVED;
			if (shmseg->shm_nattch <= 0) {
				shm_deallocate_segment(shmseg);
				shm_last_free = IPCID_TO_IX(uap->shmid);
			}
		}
		break;
#if 0
	case SHM_LOCK:
	case SHM_UNLOCK:
#endif
	default:
		error = EINVAL;
		break;
	}
done:
	rel_mplock();
	return error;
}

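/*
 * shmget() found an existing segment for the key: wait out any
 * in-progress creation, then check permissions, flags, and size.
 */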
static int
shmget_existing(struct proc *p, struct shmget_args *uap, int mode, int segnum)
{
	struct shmid_ds *shmseg;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep((caddr_t)shmseg, PCATCH, "shmget", 0);
		if (error)
			return error;
		return EAGAIN;
	}
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	error = ipcperm(p, &shmseg->shm_perm, mode);
	if (error)
		return error;
	if (uap->size && uap->size > shmseg->shm_segsz)
		return EINVAL;
	uap->sysmsg_result = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}

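/*
 * Allocate a brand new segment for shmget().  This path can sleep, so
 * the slot is marked ALLOCATED|REMOVED while it is being set up and a
 * concurrent shmget() on the same key waits in shmget_existing().
 */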
static int
shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode)
{
	int i, segnum, shmid;
	size_t size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;

	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return ENOSPC;
	size = round_page(uap->size);
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	if (shm_last_free < 0) {
		shmrealloc();	/* maybe expand the shmsegs[] array */
		for (i = 0; i < shmalloced; i++) {
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		}
		if (i == shmalloced)
			return ENOSPC;
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in kmalloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = uap->key;
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shm_handle = kmalloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	/*
	 * Make sure the backing pager (VM object) is allocated before it
	 * is needed.
	 */
	if (shm_use_phys) {
		shm_handle->shm_object =
		   phys_pager_alloc(NULL, size, VM_PROT_DEFAULT, 0);
	} else {
		shm_handle->shm_object =
		   swap_pager_alloc(NULL, size, VM_PROT_DEFAULT, 0);
	}
	vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);

	shmseg->shm_internal = shm_handle;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = uap->size;
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time_second;
	shm_committed += btoc(size);
	shm_nused++;

	/*
	 * If a physical mapping is desired and we have a ton of free pages
	 * we pre-allocate the pages here in order to avoid on-the-fly
	 * allocation later.  This has a big effect on database warm-up
	 * times since DFly supports concurrent page faults coming from the
	 * same VM object for pages which already exist.
	 *
	 * This can hang the kernel for a while so only do it if shm_use_phys
	 * is set to 2 or higher.
	 */
	if (shm_use_phys > 1) {
		vm_pindex_t pi, pmax;
		vm_page_t m;

		pmax = round_page(shmseg->shm_segsz) >> PAGE_SHIFT;
		vm_object_hold(shm_handle->shm_object);
		if (pmax > vmstats.v_free_count)
			pmax = vmstats.v_free_count;
		for (pi = 0; pi < pmax; ++pi) {
			m = vm_page_grab(shm_handle->shm_object, pi,
					 VM_ALLOC_SYSTEM | VM_ALLOC_NULL_OK |
					 VM_ALLOC_ZERO);
			if (m == NULL)
				break;
			vm_pager_get_page(shm_handle->shm_object, &m, 1);
			vm_page_activate(m);
			vm_page_wakeup(m);
			lwkt_yield();
		}
		vm_object_drop(shm_handle->shm_object);
	}

	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	uap->sysmsg_result = shmid;
	return 0;
}

/*
 * shmget(2): return the segment identifier for a key, optionally
 * creating a new segment.
 *
 * MPALMOSTSAFE
 */
int
sys_shmget(struct shmget_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int segnum, mode, error;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	mode = uap->shmflg & ACCESSPERMS;
	get_mplock();

	if (uap->key != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(uap->key);
		if (segnum >= 0) {
			error = shmget_existing(p, uap, mode, segnum);
			if (error == EAGAIN)
				goto again;
			goto done;
		}
		if ((uap->shmflg & IPC_CREAT) == 0) {
			error = ENOENT;
			goto done;
		}
	}
	error = shmget_allocate_segment(p, uap, mode);
done:
	rel_mplock();
	return (error);
}

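/*
 * Called from fork: give the child a copy of the parent's attach
 * table and account for the new references on each attached segment.
 */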
void
shmfork(struct proc *p1, struct proc *p2)
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	get_mplock();
	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = kmalloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
	}
	rel_mplock();
}

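/*
 * Called when a vmspace is torn down: detach any remaining segments
 * and free the attach table.
 */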
void
shmexit(struct vmspace *vm)
{
	struct shmmap_state *base, *shm;
	int i;

	if ((base = (struct shmmap_state *)vm->vm_shm) != NULL) {
		vm->vm_shm = NULL;
		get_mplock();
		for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
			if (shm->shmid != -1)
				shm_delete_mapping(vm, shm);
		}
		kfree(base, M_SHM);
		rel_mplock();
	}
}

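/*
 * Expand the shmsegs[] array up to shminfo.shmmni entries, preserving
 * existing segments.  A no-op if the array is already at full size.
 */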
static void
shmrealloc(void)
{
	int i;
	struct shmid_ds *newsegs;

	if (shmalloced >= shminfo.shmmni)
		return;

	newsegs = kmalloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
	for (i = 0; i < shmalloced; i++)
		bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
	for (; i < shminfo.shmmni; i++) {
		/* Initialize the newly added slots, not the old array. */
		newsegs[i].shm_perm.mode = SHMSEG_FREE;
		newsegs[i].shm_perm.seq = 0;
	}
	kfree(shmsegs, M_SHM);
	shmsegs = newsegs;
	shmalloced = shminfo.shmmni;
}

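/*
 * Boot-time initialization of the SysV shared memory subsystem, run
 * via the SYSINIT() below.
 */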
static void
shminit(void *dummy)
{
	int i;

	/*
	 * If not overridden by a tunable, set the maximum shm to
	 * 2/3 of main memory.
	 */
	if (shminfo.shmall == 0)
		shminfo.shmall = (size_t)vmstats.v_page_count * 2 / 3;

	shminfo.shmmax = shminfo.shmall * PAGE_SIZE;
	shmalloced = shminfo.shmmni;
	shmsegs = kmalloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
	for (i = 0; i < shmalloced; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm.seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}
SYSINIT(sysv_shm, SI_SUB_SYSV_SHM, SI_ORDER_FIRST, shminit, NULL);