xref: /dragonfly/sys/kern/sysv_shm.c (revision 65cc0652)
/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_sysvipc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysent.h>
#include <sys/jail.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

static int shmget_allocate_segment (struct proc *p, struct shmget_args *uap, int mode);
static int shmget_existing (struct proc *p, struct shmget_args *uap, int mode, int segnum);

/*
 * Segment state flags, kept in shm_perm.mode above the ACCESSPERMS bits.
 */
#define	SHMSEG_FREE     	0x0200
#define	SHMSEG_REMOVED  	0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED		0x1000

static int shm_last_free, shm_committed, shmalloced;
int shm_nused;
static struct shmid_ds	*shmsegs;
static struct lwkt_token shm_token = LWKT_TOKEN_INITIALIZER(shm_token);

/*
 * One VM object backs each allocated segment.
 */
struct shm_handle {
	/* vm_offset_t kva; */
	vm_object_t shm_object;
};

/*
 * Per-process attach state, one entry per possible attach
 * (shminfo.shmseg entries), hung off vmspace->vm_shm.
 */
struct shmmap_state {
	vm_offset_t va;
	int shmid;
};

static void shm_deallocate_segment (struct shmid_ds *);
static int shm_find_segment_by_key (key_t);
static struct shmid_ds *shm_find_segment_by_shmid (int);
static int shm_delete_mapping (struct vmspace *vm, struct shmmap_state *);
static void shmrealloc (void);
static void shminit (void *);

/*
 * Tunable values
 */
#ifndef SHMMIN
#define	SHMMIN	1
#endif
#ifndef SHMMNI
#define	SHMMNI	512
#endif
#ifndef SHMSEG
#define	SHMSEG	1024
#endif

struct	shminfo shminfo = {
	0,		/* shmmax - derived from shmall in shminit() */
	SHMMIN,		/* shmmin - minimum segment size */
	SHMMNI,		/* shmmni - maximum number of segments */
	SHMSEG,		/* shmseg - maximum attaches per process */
	0		/* shmall - set in shminit() if not tuned */
};

/*
 * allow-removed    Allow a shared memory segment to be attached by its shmid
 *		    even after it has been deleted, as long as it is still
 *		    referenced by someone.  This is a trick used by Chrome
 *		    and other applications to avoid leaving shm segments
 *		    hanging around after the application is killed or
 *		    seg-faults unexpectedly.
 *
 * use-phys	    Shared memory segments are to use physical memory by
 *		    default, which allows the kernel to optimize (remove)
 *		    pv_entry management structures for the related PTEs and
 *		    prevents paging.  This has distinctly different and
 *		    usually desirable characteristics versus mmap()ing
 *		    anonymous memory.  A value of 2 or higher additionally
 *		    pre-allocates the backing pages at shmget() time.
 */
static int shm_allow_removed = 1;
static int shm_use_phys = 1;

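/*
 * For illustration, a sketch of /boot/loader.conf settings driving the
 * TUNABLE_*() hooks below (the values are examples only, not defaults):
 *
 *	kern.ipc.shmseg="2048"		# attach slots per process
 *	kern.ipc.shmmaxpgs="262144"	# total pages of shm (shmall)
 *	kern.ipc.shm_use_phys="2"	# phys pager + pre-faulted pages
 */
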
TUNABLE_LONG("kern.ipc.shmmin", &shminfo.shmmin);
TUNABLE_LONG("kern.ipc.shmmni", &shminfo.shmmni);
TUNABLE_LONG("kern.ipc.shmseg", &shminfo.shmseg);
TUNABLE_LONG("kern.ipc.shmmaxpgs", &shminfo.shmall);
TUNABLE_INT("kern.ipc.shm_use_phys", &shm_use_phys);

SYSCTL_LONG(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0,
    "Max shared memory segment size");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0,
    "Min shared memory segment size");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RD, &shminfo.shmmni, 0,
    "Max number of shared memory identifiers");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RW, &shminfo.shmseg, 0,
    "Max shared memory segments per process");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0,
    "Max pages of shared memory");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW, &shm_use_phys, 0,
    "Use phys pager allocation instead of swap pager allocation");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_allow_removed, CTLFLAG_RW,
    &shm_allow_removed, 0,
    "Enable/Disable attaching to still-attached segments marked for removal");

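/*
 * Find an allocated segment by key.  Returns an index into shmsegs[],
 * or -1 if there is no match.  Caller must hold shm_token.
 */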
static int
shm_find_segment_by_key(key_t key)
{
	int i;

	for (i = 0; i < shmalloced; i++) {
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return i;
	}
	return -1;
}

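/*
 * Convert a shmid into its shmid_ds.  Returns NULL if the id is out of
 * range, unallocated, stale (sequence mismatch), or marked removed while
 * shm_allow_removed is disabled.
 */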
static struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shmalloced)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
	    (!shm_allow_removed &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED) != 0) ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid)) {
		return NULL;
	}
	return shmseg;
}

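/*
 * Release a segment's backing VM object and accounting and mark its
 * slot free.  Called with shm_token held, once no attaches remain.
 */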
static void
shm_deallocate_segment(struct shmid_ds *shmseg)
{
	struct shm_handle *shm_handle;
	size_t size;

	shm_handle = shmseg->shm_internal;
	vm_object_deallocate(shm_handle->shm_object);
	kfree((caddr_t)shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	size = round_page(shmseg->shm_segsz);
	shm_committed -= btoc(size);
	shm_nused--;
	shmseg->shm_perm.mode = SHMSEG_FREE;
}

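/*
 * Detach one mapping from a vmspace: unmap the region, invalidate the
 * shmmap_state slot, and destroy the segment itself when the last
 * attach of a removed segment goes away.
 */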
static int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
	struct shmid_ds *shmseg;
	int segnum, result;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = round_page(shmseg->shm_segsz);
	result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
	if (result != KERN_SUCCESS)
		return EINVAL;
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time_second;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}

/*
 * MPALMOSTSAFE
 */
int
sys_shmdt(struct shmdt_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct shmmap_state *shmmap_s;
	long i;
	int error;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	lwkt_gettoken(&shm_token);
	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		error = EINVAL;
		goto done;
	}
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr)
			break;
	}
	if (i == shminfo.shmseg)
		error = EINVAL;
	else
		error = shm_delete_mapping(p->p_vmspace, shmmap_s);
done:
	lwkt_reltoken(&shm_token);

	return (error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_shmat(struct shmat_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int error, flags;
	long i;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	struct shm_handle *shm_handle;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;
	vm_size_t align;
	int rv;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	lwkt_gettoken(&shm_token);
again:
	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = kmalloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		if (p->p_vmspace->vm_shm != NULL) {
			/*
			 * kmalloc() may have blocked, allowing another
			 * thread to install an array first.  Retry.
			 */
			kfree(shmmap_s, M_SHM);
			goto again;
		}
		p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done;
	}
	error = ipcperm(p, &shmseg->shm_perm,
			(uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		goto done;
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg) {
		error = EMFILE;
		goto done;
	}
	size = round_page(shmseg->shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
	prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
	prot = VM_PROT_READ;
#endif
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (uap->shmaddr) {
		flags |= MAP_FIXED;
		if (uap->shmflg & SHM_RND) {
			attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);
		} else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0) {
			attach_va = (vm_offset_t)uap->shmaddr;
		} else {
			error = EINVAL;
			goto done;
		}
	} else {
		/*
		 * This is just a hint to vm_map_find() about where to put it.
		 */
		attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr +
				       maxtsiz + maxdsiz);
	}

	/*
	 * Handle alignment.  For large memory maps it is possible
	 * that the MMU can optimize the page table so align anything
	 * that is a multiple of SEG_SIZE to SEG_SIZE.
	 */
	if ((flags & MAP_FIXED) == 0 && (size & SEG_MASK) == 0)
		align = SEG_SIZE;
	else
		align = PAGE_SIZE;

	shm_handle = shmseg->shm_internal;
	vm_object_hold(shm_handle->shm_object);
	vm_object_chain_wait(shm_handle->shm_object, 0);
	vm_object_reference_locked(shm_handle->shm_object);
	rv = vm_map_find(&p->p_vmspace->vm_map,
			 shm_handle->shm_object, NULL,
			 0, &attach_va, size,
			 align,
			 ((flags & MAP_FIXED) ? 0 : 1),
			 VM_MAPTYPE_NORMAL, VM_SUBSYS_SHMEM,
			 prot, prot, 0);
	vm_object_drop(shm_handle->shm_object);
	if (rv != KERN_SUCCESS) {
		vm_object_deallocate(shm_handle->shm_object);
		error = ENOMEM;
		goto done;
	}
	vm_map_inherit(&p->p_vmspace->vm_map,
		       attach_va, attach_va + size, VM_INHERIT_SHARE);

	KKASSERT(shmmap_s->shmid == -1);
	shmmap_s->va = attach_va;
	shmmap_s->shmid = uap->shmid;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time_second;
	shmseg->shm_nattch++;
	uap->sysmsg_resultp = (void *)attach_va;
	error = 0;
done:
	lwkt_reltoken(&shm_token);

	return error;
}

/*
 * MPALMOSTSAFE
 */
int
sys_shmctl(struct shmctl_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int error;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	lwkt_gettoken(&shm_token);
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done;
	}

	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(p, &shmseg->shm_perm, IPC_R);
		if (error == 0)
			error = copyout(shmseg, uap->buf, sizeof(inbuf));
		break;
	case IPC_SET:
		error = ipcperm(p, &shmseg->shm_perm, IPC_M);
		if (error == 0)
			error = copyin(uap->buf, &inbuf, sizeof(inbuf));
		if (error == 0) {
			shmseg->shm_perm.uid = inbuf.shm_perm.uid;
			shmseg->shm_perm.gid = inbuf.shm_perm.gid;
			shmseg->shm_perm.mode =
			    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
			    (inbuf.shm_perm.mode & ACCESSPERMS);
			shmseg->shm_ctime = time_second;
		}
		break;
	case IPC_RMID:
		error = ipcperm(p, &shmseg->shm_perm, IPC_M);
		if (error == 0) {
			shmseg->shm_perm.key = IPC_PRIVATE;
			shmseg->shm_perm.mode |= SHMSEG_REMOVED;
			if (shmseg->shm_nattch <= 0) {
				shm_deallocate_segment(shmseg);
				shm_last_free = IPCID_TO_IX(uap->shmid);
			}
		}
		break;
#if 0
	case SHM_LOCK:
	case SHM_UNLOCK:
#endif
	default:
		error = EINVAL;
		break;
	}
done:
	lwkt_reltoken(&shm_token);

	return error;
}

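/*
 * shmget() helper for an existing key: wait out a segment that is
 * still being constructed, then validate IPC_CREAT|IPC_EXCL,
 * permissions, and the requested size.
 */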
static int
shmget_existing(struct proc *p, struct shmget_args *uap, int mode, int segnum)
{
	struct shmid_ds *shmseg;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep((caddr_t)shmseg, PCATCH, "shmget", 0);
		if (error)
			return error;
		return EAGAIN;
	}
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	error = ipcperm(p, &shmseg->shm_perm, mode);
	if (error)
		return error;
	if (uap->size && uap->size > shmseg->shm_segsz)
		return EINVAL;
	uap->sysmsg_result = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}

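/*
 * shmget() helper that creates a new segment: reserve a shmsegs[] slot,
 * allocate backing store from the phys or swap pager, and optionally
 * pre-fault the pages (shm_use_phys > 1).
 */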
static int
shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode)
{
	int i, segnum, shmid;
	size_t size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;

	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return ENOSPC;
	size = round_page(uap->size);
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	if (shm_last_free < 0) {
		shmrealloc();	/* maybe expand the shmsegs[] array */
		for (i = 0; i < shmalloced; i++) {
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		}
		if (i == shmalloced)
			return ENOSPC;
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in kmalloc(), mark the segment present but
	 * deleted so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = uap->key;
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shm_handle = kmalloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	/*
	 * Make sure we have allocated backing store (a pager-backed
	 * VM object) before we need it.
	 */
	if (shm_use_phys) {
		shm_handle->shm_object =
		   phys_pager_alloc(NULL, size, VM_PROT_DEFAULT, 0);
	} else {
		shm_handle->shm_object =
		   swap_pager_alloc(NULL, size, VM_PROT_DEFAULT, 0);
	}
	vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);

	shmseg->shm_internal = shm_handle;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = uap->size;
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time_second;
	shm_committed += btoc(size);
	shm_nused++;

	/*
	 * If a physical mapping is desired and we have a ton of free pages
	 * we pre-allocate the pages here in order to avoid on-the-fly
	 * allocation later.  This has a big effect on database warm-up
	 * times since DFly supports concurrent page faults coming from the
	 * same VM object for pages which already exist.
	 *
	 * This can hang the kernel for a while so only do it if shm_use_phys
	 * is set to 2 or higher.
	 */
	if (shm_use_phys > 1) {
		vm_pindex_t pi, pmax;
		vm_page_t m;

		pmax = round_page(shmseg->shm_segsz) >> PAGE_SHIFT;
		vm_object_hold(shm_handle->shm_object);
		if (pmax > vmstats.v_free_count)
			pmax = vmstats.v_free_count;
		for (pi = 0; pi < pmax; ++pi) {
			m = vm_page_grab(shm_handle->shm_object, pi,
					 VM_ALLOC_SYSTEM | VM_ALLOC_NULL_OK |
					 VM_ALLOC_ZERO);
			if (m == NULL)
				break;
			vm_pager_get_page(shm_handle->shm_object, &m, 1);
			vm_page_activate(m);
			vm_page_wakeup(m);
			lwkt_yield();
		}
		vm_object_drop(shm_handle->shm_object);
	}

	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	uap->sysmsg_result = shmid;
	return 0;
}

/*
 * MPALMOSTSAFE
 */
int
sys_shmget(struct shmget_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int segnum, mode, error;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	mode = uap->shmflg & ACCESSPERMS;

	lwkt_gettoken(&shm_token);

	if (uap->key != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(uap->key);
		if (segnum >= 0) {
			error = shmget_existing(p, uap, mode, segnum);
			if (error == EAGAIN)
				goto again;
			goto done;
		}
		if ((uap->shmflg & IPC_CREAT) == 0) {
			error = ENOENT;
			goto done;
		}
	}
	error = shmget_allocate_segment(p, uap, mode);
done:
	lwkt_reltoken(&shm_token);

	return (error);
}

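/*
 * Called from fork: give the child a copy of the parent's attach
 * table and bump shm_nattch for every live mapping.
 */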
void
shmfork(struct proc *p1, struct proc *p2)
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	lwkt_gettoken(&shm_token);
	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = kmalloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
	}
	lwkt_reltoken(&shm_token);
}

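/*
 * Called when a vmspace is torn down: detach any remaining segments
 * and free the attach table.
 */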
void
shmexit(struct vmspace *vm)
{
	struct shmmap_state *base, *shm;
	int i;

	if ((base = (struct shmmap_state *)vm->vm_shm) != NULL) {
		vm->vm_shm = NULL;
		lwkt_gettoken(&shm_token);
		for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
			if (shm->shmid != -1)
				shm_delete_mapping(vm, shm);
		}
		kfree(base, M_SHM);
		lwkt_reltoken(&shm_token);
	}
}

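/*
 * Grow shmsegs[] to the current shminfo.shmmni: copy the existing
 * entries across and mark the new tail slots free.
 */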
static void
shmrealloc(void)
{
	int i;
	struct shmid_ds *newsegs;

	if (shmalloced >= shminfo.shmmni)
		return;

	newsegs = kmalloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
	for (i = 0; i < shmalloced; i++)
		bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
	for (; i < shminfo.shmmni; i++) {
		/*
		 * Initialize the new slots in the new array; the original
		 * code wrote to shmsegs[i] here, indexing past the end of
		 * the old, smaller array and leaving these uninitialized.
		 */
		newsegs[i].shm_perm.mode = SHMSEG_FREE;
		newsegs[i].shm_perm.seq = 0;
	}
	kfree(shmsegs, M_SHM);
	shmsegs = newsegs;
	shmalloced = shminfo.shmmni;
}

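/*
 * System startup: size shmsegs[] and derive the shmall/shmmax
 * defaults from available memory.
 */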
static void
shminit(void *dummy)
{
	int i;

	/*
	 * If not overridden by a tunable, set the maximum shm to
	 * 2/3 of main memory.
	 */
	if (shminfo.shmall == 0)
		shminfo.shmall = (size_t)vmstats.v_page_count * 2 / 3;

	shminfo.shmmax = shminfo.shmall * PAGE_SIZE;
	shmalloced = shminfo.shmmni;
	shmsegs = kmalloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
	for (i = 0; i < shmalloced; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm.seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}
SYSINIT(sysv_shm, SI_SUB_SYSV_SHM, SI_ORDER_FIRST, shminit, NULL);