/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_sysvipc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysent.h>
#include <sys/jail.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

static int shmget_allocate_segment(struct proc *p, struct shmget_args *uap,
				   int mode);
static int shmget_existing(struct proc *p, struct shmget_args *uap,
			   int mode, int segnum);

#define SHMSEG_FREE		0x0200
#define SHMSEG_REMOVED		0x0400
#define SHMSEG_ALLOCATED	0x0800
#define SHMSEG_WANTED		0x1000

static int shm_last_free, shm_committed, shmalloced;
int shm_nused;
static struct shmid_ds *shmsegs;
static struct lwkt_token shm_token = LWKT_TOKEN_INITIALIZER(shm_token);

struct shm_handle {
	/* vm_offset_t kva; */
	vm_object_t shm_object;
};

struct shmmap_state {
	vm_offset_t va;
	int shmid;
	int reserved;
};

static void shm_deallocate_segment(struct shmid_ds *);
static int shm_find_segment_by_key(key_t);
static struct shmid_ds *shm_find_segment_by_shmid(int);
static int shm_delete_mapping(struct vmspace *vm, struct shmmap_state *);
static void shmrealloc(void);
static void shminit(void *);

/*
 * Tuneable values
 */
#ifndef SHMMIN
#define SHMMIN	1
#endif
#ifndef SHMMNI
#define SHMMNI	512
#endif
#ifndef SHMSEG
#define SHMSEG	1024
#endif

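/*
 * Illustrative only: these limits can also be raised at boot time via
 * the loader tunables declared below, e.g. in /boot/loader.conf (the
 * values here are examples, not recommendations):
 *
 *	kern.ipc.shmseg=2048
 *	kern.ipc.shm_use_phys=2
 */
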
struct shminfo shminfo = {
	0,		/* shmmax - computed in shminit() */
	SHMMIN,
	SHMMNI,
	SHMSEG,
	0		/* shmall - defaulted in shminit() */
};

/*
 * allow-removed	Allow a shared memory segment to be attached by its
 *			shmid even after it has been deleted, as long as it
 *			is still being referenced by someone.  This is a
 *			trick used by chrome and other applications to avoid
 *			leaving shm segments hanging around after the
 *			application is killed or seg-faults unexpectedly.
 *
 * use-phys		Shared memory segments use physical memory by
 *			default, which may allow the kernel to better
 *			optimize the pmap and reduce overhead.  The pages
 *			are effectively wired.
 */
static int shm_allow_removed = 1;
static int shm_use_phys = 1;

TUNABLE_LONG("kern.ipc.shmmin", &shminfo.shmmin);
TUNABLE_LONG("kern.ipc.shmmni", &shminfo.shmmni);
TUNABLE_LONG("kern.ipc.shmseg", &shminfo.shmseg);
TUNABLE_LONG("kern.ipc.shmmaxpgs", &shminfo.shmall);
TUNABLE_INT("kern.ipc.shm_use_phys", &shm_use_phys);

SYSCTL_LONG(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0,
    "Max shared memory segment size");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0,
    "Min shared memory segment size");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RD, &shminfo.shmmni, 0,
    "Max number of shared memory identifiers");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RW, &shminfo.shmseg, 0,
    "Max shared memory segments per process");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0,
    "Max pages of shared memory");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW, &shm_use_phys, 0,
    "Use phys pager allocation instead of swap pager allocation");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_allow_removed, CTLFLAG_RW,
    &shm_allow_removed, 0,
    "Enable/Disable attachment to attached segments marked for removal");

static int
shm_find_segment_by_key(key_t key)
{
	int i;

	for (i = 0; i < shmalloced; i++) {
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return i;
	}
	return -1;
}

static struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shmalloced)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
	    (!shm_allow_removed &&
	     (shmseg->shm_perm.mode & SHMSEG_REMOVED) != 0) ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid)) {
		return NULL;
	}
	return shmseg;
}

static void
shm_deallocate_segment(struct shmid_ds *shmseg)
{
	struct shm_handle *shm_handle;
	size_t size;

	shm_handle = shmseg->shm_internal;
	vm_object_deallocate(shm_handle->shm_object);
	kfree((caddr_t)shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	size = round_page(shmseg->shm_segsz);
	shm_committed -= btoc(size);
	shm_nused--;
	shmseg->shm_perm.mode = SHMSEG_FREE;
}

static int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
	struct shmid_ds *shmseg;
	int segnum, result;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = round_page(shmseg->shm_segsz);
	result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
	if (result != KERN_SUCCESS)
		return EINVAL;
	shmmap_s->shmid = -1;
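	/*
	 * Stamp the detach time; if the segment was already marked for
	 * removal and this was the last attachment, reap it now.
	 */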
	shmseg->shm_dtime = time_second;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}

/*
 * MPALMOSTSAFE
 */
int
sys_shmdt(struct shmdt_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct shmmap_state *shmmap_s;
	struct prison *pr = p->p_ucred->cr_prison;
	long i;
	int error;

	if (pr && !pr->pr_sysvipc_allowed)
		return (ENOSYS);

	lwkt_gettoken(&shm_token);
	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		error = EINVAL;
		goto done;
	}
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr)
			break;
	}
	if (i == shminfo.shmseg)
		error = EINVAL;
	else
		error = shm_delete_mapping(p->p_vmspace, shmmap_s);
done:
	lwkt_reltoken(&shm_token);

	return (error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_shmat(struct shmat_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct prison *pr = p->p_ucred->cr_prison;
	int error, flags;
	long i;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	struct shm_handle *shm_handle;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;
	vm_size_t align;
	int rv;

	if (pr && !pr->pr_sysvipc_allowed)
		return (ENOSYS);

	lwkt_gettoken(&shm_token);
again:
	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = kmalloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++) {
			shmmap_s[i].shmid = -1;
			shmmap_s[i].reserved = 0;
		}
		if (p->p_vmspace->vm_shm != NULL) {
			kfree(shmmap_s, M_SHM);
			goto again;
		}
		p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done;
	}
	error = ipcperm(p, &shmseg->shm_perm,
			(uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		goto done;

	/*
	 * Find a free element and mark it reserved.  This closes races
	 * with concurrent allocations that can occur when the token is
	 * temporarily released by blocking operations.  The shmmap_s
	 * reservation is cleared upon completion or error.
	 */
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1 && shmmap_s->reserved == 0) {
			shmmap_s->reserved = 1;
			break;
		}
		shmmap_s++;
	}
	if (i >= shminfo.shmseg) {
		error = EMFILE;
		goto done;
	}
	size = round_page(shmseg->shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
	prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
	prot = VM_PROT_READ;
#endif
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (uap->shmaddr) {
		flags |= MAP_FIXED;
		if (uap->shmflg & SHM_RND) {
			attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA - 1);
		} else if (((vm_offset_t)uap->shmaddr & (SHMLBA - 1)) == 0) {
			attach_va = (vm_offset_t)uap->shmaddr;
		} else {
			error = EINVAL;
			shmmap_s->reserved = 0;
			goto done;
		}
	} else {
		/*
		 * This is just a hint to vm_map_find() about where to
		 * put it.
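		 * The hint lands above the largest possible data segment
		 * (vm_taddr + maxtsiz + maxdsiz, rounded up to a page),
		 * so no fixed address is needed.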
345 */ 346 attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr + 347 maxtsiz + maxdsiz); 348 } 349 350 /* 351 * Handle alignment. For large memory maps it is possible 352 * that the MMU can optimize the page table so align anything 353 * that is a multiple of SEG_SIZE to SEG_SIZE. 354 */ 355 if ((flags & MAP_FIXED) == 0 && (size & SEG_MASK) == 0) 356 align = SEG_SIZE; 357 else 358 align = PAGE_SIZE; 359 360 shm_handle = shmseg->shm_internal; 361 vm_object_hold(shm_handle->shm_object); 362 vm_object_reference_locked(shm_handle->shm_object); 363 rv = vm_map_find(&p->p_vmspace->vm_map, 364 shm_handle->shm_object, NULL, 365 0, &attach_va, size, 366 align, 367 ((flags & MAP_FIXED) ? 0 : 1), 368 VM_MAPTYPE_NORMAL, VM_SUBSYS_SHMEM, 369 prot, prot, 0); 370 vm_object_drop(shm_handle->shm_object); 371 if (rv != KERN_SUCCESS) { 372 vm_object_deallocate(shm_handle->shm_object); 373 shmmap_s->reserved = 0; 374 error = ENOMEM; 375 goto done; 376 } 377 vm_map_inherit(&p->p_vmspace->vm_map, 378 attach_va, attach_va + size, VM_INHERIT_SHARE); 379 380 KKASSERT(shmmap_s->shmid == -1); 381 shmmap_s->va = attach_va; 382 shmmap_s->shmid = uap->shmid; 383 shmmap_s->reserved = 0; 384 shmseg->shm_lpid = p->p_pid; 385 shmseg->shm_atime = time_second; 386 shmseg->shm_nattch++; 387 uap->sysmsg_resultp = (void *)attach_va; 388 error = 0; 389 done: 390 lwkt_reltoken(&shm_token); 391 392 return error; 393 } 394 395 /* 396 * MPALMOSTSAFE 397 */ 398 int 399 sys_shmctl(struct shmctl_args *uap) 400 { 401 struct thread *td = curthread; 402 struct proc *p = td->td_proc; 403 struct prison *pr = p->p_ucred->cr_prison; 404 int error; 405 struct shmid_ds inbuf; 406 struct shmid_ds *shmseg; 407 408 if (pr && !pr->pr_sysvipc_allowed) 409 return (ENOSYS); 410 411 lwkt_gettoken(&shm_token); 412 shmseg = shm_find_segment_by_shmid(uap->shmid); 413 if (shmseg == NULL) { 414 error = EINVAL; 415 goto done; 416 } 417 418 switch (uap->cmd) { 419 case IPC_STAT: 420 error = ipcperm(p, &shmseg->shm_perm, IPC_R); 421 if (error == 0) 422 error = copyout(shmseg, uap->buf, sizeof(inbuf)); 423 break; 424 case IPC_SET: 425 error = ipcperm(p, &shmseg->shm_perm, IPC_M); 426 if (error == 0) 427 error = copyin(uap->buf, &inbuf, sizeof(inbuf)); 428 if (error == 0) { 429 shmseg->shm_perm.uid = inbuf.shm_perm.uid; 430 shmseg->shm_perm.gid = inbuf.shm_perm.gid; 431 shmseg->shm_perm.mode = 432 (shmseg->shm_perm.mode & ~ACCESSPERMS) | 433 (inbuf.shm_perm.mode & ACCESSPERMS); 434 shmseg->shm_ctime = time_second; 435 } 436 break; 437 case IPC_RMID: 438 error = ipcperm(p, &shmseg->shm_perm, IPC_M); 439 if (error == 0) { 440 shmseg->shm_perm.key = IPC_PRIVATE; 441 shmseg->shm_perm.mode |= SHMSEG_REMOVED; 442 if (shmseg->shm_nattch <= 0) { 443 shm_deallocate_segment(shmseg); 444 shm_last_free = IPCID_TO_IX(uap->shmid); 445 } 446 } 447 break; 448 #if 0 449 case SHM_LOCK: 450 case SHM_UNLOCK: 451 #endif 452 default: 453 error = EINVAL; 454 break; 455 } 456 done: 457 lwkt_reltoken(&shm_token); 458 459 return error; 460 } 461 462 static int 463 shmget_existing(struct proc *p, struct shmget_args *uap, int mode, int segnum) 464 { 465 struct shmid_ds *shmseg; 466 int error; 467 468 shmseg = &shmsegs[segnum]; 469 if (shmseg->shm_perm.mode & SHMSEG_REMOVED) { 470 /* 471 * This segment is in the process of being allocated. Wait 472 * until it's done, and look the key up again (in case the 473 * allocation failed or it was freed). 
474 */ 475 shmseg->shm_perm.mode |= SHMSEG_WANTED; 476 error = tsleep((caddr_t)shmseg, PCATCH, "shmget", 0); 477 if (error) 478 return error; 479 return EAGAIN; 480 } 481 if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL)) 482 return EEXIST; 483 error = ipcperm(p, &shmseg->shm_perm, mode); 484 if (error) 485 return error; 486 if (uap->size && uap->size > shmseg->shm_segsz) 487 return EINVAL; 488 uap->sysmsg_result = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm); 489 return 0; 490 } 491 492 static int 493 shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode) 494 { 495 int i, segnum, shmid; 496 size_t size; 497 struct ucred *cred = p->p_ucred; 498 struct shmid_ds *shmseg; 499 struct shm_handle *shm_handle; 500 501 if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax) 502 return EINVAL; 503 if (shm_nused >= shminfo.shmmni) /* any shmids left? */ 504 return ENOSPC; 505 size = round_page(uap->size); 506 if (shm_committed + btoc(size) > shminfo.shmall) 507 return ENOMEM; 508 if (shm_last_free < 0) { 509 shmrealloc(); /* maybe expand the shmsegs[] array */ 510 for (i = 0; i < shmalloced; i++) { 511 if (shmsegs[i].shm_perm.mode & SHMSEG_FREE) 512 break; 513 } 514 if (i == shmalloced) 515 return ENOSPC; 516 segnum = i; 517 } else { 518 segnum = shm_last_free; 519 shm_last_free = -1; 520 } 521 shmseg = &shmsegs[segnum]; 522 /* 523 * In case we sleep in malloc(), mark the segment present but deleted 524 * so that noone else tries to create the same key. 525 */ 526 shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED; 527 shmseg->shm_perm.key = uap->key; 528 shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff; 529 shm_handle = kmalloc(sizeof(struct shm_handle), M_SHM, M_WAITOK); 530 shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm); 531 532 /* 533 * We make sure that we have allocated a pager before we need 534 * to. 535 */ 536 if (shm_use_phys) { 537 shm_handle->shm_object = 538 phys_pager_alloc(NULL, size, VM_PROT_DEFAULT, 0); 539 } else { 540 shm_handle->shm_object = 541 swap_pager_alloc(NULL, size, VM_PROT_DEFAULT, 0); 542 } 543 vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING); 544 vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT); 545 546 shmseg->shm_internal = shm_handle; 547 shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid; 548 shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid; 549 shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) | 550 (mode & ACCESSPERMS) | SHMSEG_ALLOCATED; 551 shmseg->shm_segsz = uap->size; 552 shmseg->shm_cpid = p->p_pid; 553 shmseg->shm_lpid = shmseg->shm_nattch = 0; 554 shmseg->shm_atime = shmseg->shm_dtime = 0; 555 shmseg->shm_ctime = time_second; 556 shm_committed += btoc(size); 557 shm_nused++; 558 559 /* 560 * If a physical mapping is desired and we have a ton of free pages 561 * we pre-allocate the pages here in order to avoid on-the-fly 562 * allocation later. This has a big effect on database warm-up 563 * times since DFly supports concurrent page faults coming from the 564 * same VM object for pages which already exist. 565 * 566 * This can hang the kernel for a while so only do it if shm_use_phys 567 * is set to 2 or higher. 
568 */ 569 if (shm_use_phys > 1) { 570 vm_pindex_t pi, pmax; 571 vm_page_t m; 572 573 pmax = round_page(shmseg->shm_segsz) >> PAGE_SHIFT; 574 vm_object_hold(shm_handle->shm_object); 575 if (pmax > vmstats.v_free_count) 576 pmax = vmstats.v_free_count; 577 for (pi = 0; pi < pmax; ++pi) { 578 m = vm_page_grab(shm_handle->shm_object, pi, 579 VM_ALLOC_SYSTEM | VM_ALLOC_NULL_OK | 580 VM_ALLOC_ZERO); 581 if (m == NULL) 582 break; 583 vm_pager_get_page(shm_handle->shm_object, &m, 1); 584 vm_page_activate(m); 585 vm_page_wakeup(m); 586 lwkt_yield(); 587 } 588 vm_object_drop(shm_handle->shm_object); 589 } 590 591 if (shmseg->shm_perm.mode & SHMSEG_WANTED) { 592 /* 593 * Somebody else wanted this key while we were asleep. Wake 594 * them up now. 595 */ 596 shmseg->shm_perm.mode &= ~SHMSEG_WANTED; 597 wakeup((caddr_t)shmseg); 598 } 599 uap->sysmsg_result = shmid; 600 return 0; 601 } 602 603 /* 604 * MPALMOSTSAFE 605 */ 606 int 607 sys_shmget(struct shmget_args *uap) 608 { 609 struct thread *td = curthread; 610 struct proc *p = td->td_proc; 611 struct prison *pr = p->p_ucred->cr_prison; 612 int segnum, mode, error; 613 614 if (pr && !pr->pr_sysvipc_allowed) 615 return (ENOSYS); 616 617 mode = uap->shmflg & ACCESSPERMS; 618 619 lwkt_gettoken(&shm_token); 620 621 if (uap->key != IPC_PRIVATE) { 622 again: 623 segnum = shm_find_segment_by_key(uap->key); 624 if (segnum >= 0) { 625 error = shmget_existing(p, uap, mode, segnum); 626 if (error == EAGAIN) 627 goto again; 628 goto done; 629 } 630 if ((uap->shmflg & IPC_CREAT) == 0) { 631 error = ENOENT; 632 goto done; 633 } 634 } 635 error = shmget_allocate_segment(p, uap, mode); 636 done: 637 lwkt_reltoken(&shm_token); 638 639 return (error); 640 } 641 642 void 643 shmfork(struct proc *p1, struct proc *p2) 644 { 645 struct shmmap_state *shmmap_s; 646 size_t size; 647 int i; 648 649 lwkt_gettoken(&shm_token); 650 size = shminfo.shmseg * sizeof(struct shmmap_state); 651 shmmap_s = kmalloc(size, M_SHM, M_WAITOK); 652 bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size); 653 p2->p_vmspace->vm_shm = (caddr_t)shmmap_s; 654 for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) { 655 if (shmmap_s->shmid != -1) 656 shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++; 657 } 658 lwkt_reltoken(&shm_token); 659 } 660 661 void 662 shmexit(struct vmspace *vm) 663 { 664 struct shmmap_state *base, *shm; 665 int i; 666 667 if ((base = (struct shmmap_state *)vm->vm_shm) != NULL) { 668 vm->vm_shm = NULL; 669 lwkt_gettoken(&shm_token); 670 for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) { 671 if (shm->shmid != -1) 672 shm_delete_mapping(vm, shm); 673 } 674 kfree(base, M_SHM); 675 lwkt_reltoken(&shm_token); 676 } 677 } 678 679 static void 680 shmrealloc(void) 681 { 682 int i; 683 struct shmid_ds *newsegs; 684 685 if (shmalloced >= shminfo.shmmni) 686 return; 687 688 newsegs = kmalloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK); 689 for (i = 0; i < shmalloced; i++) 690 bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0])); 691 for (; i < shminfo.shmmni; i++) { 692 shmsegs[i].shm_perm.mode = SHMSEG_FREE; 693 shmsegs[i].shm_perm.seq = 0; 694 } 695 kfree(shmsegs, M_SHM); 696 shmsegs = newsegs; 697 shmalloced = shminfo.shmmni; 698 } 699 700 static void 701 shminit(void *dummy) 702 { 703 int i; 704 705 /* 706 * If not overridden by a tunable set the maximum shm to 707 * 2/3 of main memory. 
708 */ 709 if (shminfo.shmall == 0) 710 shminfo.shmall = (size_t)vmstats.v_page_count * 2 / 3; 711 712 shminfo.shmmax = shminfo.shmall * PAGE_SIZE; 713 shmalloced = shminfo.shmmni; 714 shmsegs = kmalloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK); 715 for (i = 0; i < shmalloced; i++) { 716 shmsegs[i].shm_perm.mode = SHMSEG_FREE; 717 shmsegs[i].shm_perm.seq = 0; 718 } 719 shm_last_free = 0; 720 shm_nused = 0; 721 shm_committed = 0; 722 } 723 SYSINIT(sysv_shm, SI_SUB_SYSV_SHM, SI_ORDER_FIRST, shminit, NULL); 724