/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_sysvipc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysent.h>
#include <sys/jail.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

static int shmget_allocate_segment(struct proc *p, struct shmget_args *uap,
				   int mode);
static int shmget_existing(struct proc *p, struct shmget_args *uap,
			   int mode, int segnum);

#define SHMSEG_FREE		0x0200
#define SHMSEG_REMOVED		0x0400
#define SHMSEG_ALLOCATED	0x0800
#define SHMSEG_WANTED		0x1000

static int shm_last_free, shm_committed, shmalloced;
int shm_nused;
static struct shmid_ds *shmsegs;
static struct lwkt_token shm_token = LWKT_TOKEN_INITIALIZER(shm_token);

struct shm_handle {
	/* vm_offset_t kva; */
	vm_object_t shm_object;
};

struct shmmap_state {
	vm_offset_t va;
	int shmid;
};

static void shm_deallocate_segment(struct shmid_ds *);
static int shm_find_segment_by_key(key_t);
static struct shmid_ds *shm_find_segment_by_shmid(int);
static int shm_delete_mapping(struct vmspace *vm, struct shmmap_state *);
static void shmrealloc(void);
static void shminit(void *);

/*
 * Tuneable values
 */
#ifndef SHMMIN
#define SHMMIN	1
#endif
#ifndef SHMMNI
#define SHMMNI	512
#endif
#ifndef SHMSEG
#define SHMSEG	1024
#endif
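/*
 * shmmax and shmall are left 0 in the initializer below; shminit()
 * computes them at boot (shmall defaults to 2/3 of main memory unless
 * the kern.ipc.shmmaxpgs tunable overrides it, and shmmax is derived
 * from shmall).  Both remain adjustable via sysctl afterwards.
 */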
struct shminfo shminfo = {
	0,		/* shmmax */
	SHMMIN,		/* shmmin */
	SHMMNI,		/* shmmni */
	SHMSEG,		/* shmseg */
	0		/* shmall */
};

static int shm_allow_removed;
static int shm_use_phys = 1;

TUNABLE_LONG("kern.ipc.shmmin", &shminfo.shmmin);
TUNABLE_LONG("kern.ipc.shmmni", &shminfo.shmmni);
TUNABLE_LONG("kern.ipc.shmseg", &shminfo.shmseg);
TUNABLE_LONG("kern.ipc.shmmaxpgs", &shminfo.shmall);
TUNABLE_INT("kern.ipc.shm_use_phys", &shm_use_phys);

SYSCTL_LONG(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0,
    "Max shared memory segment size");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0,
    "Min shared memory segment size");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RD, &shminfo.shmmni, 0,
    "Max number of shared memory identifiers");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RW, &shminfo.shmseg, 0,
    "Max shared memory segments per process");
SYSCTL_LONG(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0,
    "Max pages of shared memory");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW, &shm_use_phys, 0,
    "Use phys pager allocation instead of swap pager allocation");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_allow_removed, CTLFLAG_RW,
    &shm_allow_removed, 0,
    "Enable/Disable attachment to attached segments marked for removal");
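/*
 * The shmid handed out to userland encodes both the slot index in
 * shmsegs[] and a per-slot sequence number (bumped on every reuse in
 * shmget_allocate_segment()).  IPCID_TO_IX() and IPCID_TO_SEQ() split
 * the id back apart, which lets shm_find_segment_by_shmid() reject
 * stale ids whose slot has since been recycled.
 */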
static int
shm_find_segment_by_key(key_t key)
{
	int i;

	for (i = 0; i < shmalloced; i++) {
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return i;
	}
	return -1;
}

static struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shmalloced)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & SHMSEG_ALLOCATED) == 0 ||
	    (!shm_allow_removed &&
	     (shmseg->shm_perm.mode & SHMSEG_REMOVED) != 0) ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid)) {
		return NULL;
	}
	return shmseg;
}

static void
shm_deallocate_segment(struct shmid_ds *shmseg)
{
	struct shm_handle *shm_handle;
	size_t size;

	shm_handle = shmseg->shm_internal;
	vm_object_deallocate(shm_handle->shm_object);
	kfree((caddr_t)shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	size = round_page(shmseg->shm_segsz);
	shm_committed -= btoc(size);
	shm_nused--;
	shmseg->shm_perm.mode = SHMSEG_FREE;
}

static int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
	struct shmid_ds *shmseg;
	int segnum, result;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = round_page(shmseg->shm_segsz);
	result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
	if (result != KERN_SUCCESS)
		return EINVAL;
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time_second;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}

/*
 * MPALMOSTSAFE
 */
int
sys_shmdt(struct shmdt_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct shmmap_state *shmmap_s;
	long i;
	int error;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	lwkt_gettoken(&shm_token);
	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		error = EINVAL;
		goto done;
	}
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr)
			break;
	}
	if (i == shminfo.shmseg)
		error = EINVAL;
	else
		error = shm_delete_mapping(p->p_vmspace, shmmap_s);
done:
	lwkt_reltoken(&shm_token);

	return (error);
}
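/*
 * Attach address handling in sys_shmat() below: with SHM_RND the
 * requested address is rounded *down* to a SHMLBA boundary (e.g. a
 * shmaddr of SHMLBA + 100 attaches at SHMLBA); without SHM_RND an
 * unaligned shmaddr is rejected with EINVAL.  A NULL shmaddr leaves
 * placement to the kernel, which passes an address above the text +
 * data reservation to vm_map_find() purely as a hint.
 */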
/*
 * MPALMOSTSAFE
 */
int
sys_shmat(struct shmat_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int error, flags;
	long i;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	struct shm_handle *shm_handle;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;
	vm_size_t align;
	int rv;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	lwkt_gettoken(&shm_token);
again:
	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = kmalloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		if (p->p_vmspace->vm_shm != NULL) {
			kfree(shmmap_s, M_SHM);
			goto again;
		}
		p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done;
	}
	error = ipcperm(p, &shmseg->shm_perm,
			(uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		goto done;
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg) {
		error = EMFILE;
		goto done;
	}
	size = round_page(shmseg->shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
	prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
	prot = VM_PROT_READ;
#endif
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (uap->shmaddr) {
		flags |= MAP_FIXED;
		if (uap->shmflg & SHM_RND) {
			attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);
		} else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0) {
			attach_va = (vm_offset_t)uap->shmaddr;
		} else {
			error = EINVAL;
			goto done;
		}
	} else {
		/*
		 * This is just a hint to vm_map_find() about where to put it.
		 */
		attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr +
				       maxtsiz + maxdsiz);
	}

	/*
	 * Handle alignment.  For large memory maps it is possible
	 * that the MMU can optimize the page table so align anything
	 * that is a multiple of SEG_SIZE to SEG_SIZE.
	 */
	if ((flags & MAP_FIXED) == 0 && (size & SEG_MASK) == 0)
		align = SEG_SIZE;
	else
		align = PAGE_SIZE;

	shm_handle = shmseg->shm_internal;
	vm_object_hold(shm_handle->shm_object);
	vm_object_chain_wait(shm_handle->shm_object, 0);
	vm_object_reference_locked(shm_handle->shm_object);
	rv = vm_map_find(&p->p_vmspace->vm_map,
			 shm_handle->shm_object, NULL,
			 0, &attach_va, size,
			 align,
			 ((flags & MAP_FIXED) ? 0 : 1),
			 VM_MAPTYPE_NORMAL, VM_SUBSYS_SHMEM,
			 prot, prot, 0);
	vm_object_drop(shm_handle->shm_object);
	if (rv != KERN_SUCCESS) {
		vm_object_deallocate(shm_handle->shm_object);
		error = ENOMEM;
		goto done;
	}
	vm_map_inherit(&p->p_vmspace->vm_map,
		       attach_va, attach_va + size, VM_INHERIT_SHARE);

	KKASSERT(shmmap_s->shmid == -1);
	shmmap_s->va = attach_va;
	shmmap_s->shmid = uap->shmid;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time_second;
	shmseg->shm_nattch++;
	uap->sysmsg_resultp = (void *)attach_va;
	error = 0;
done:
	lwkt_reltoken(&shm_token);

	return error;
}

/*
 * MPALMOSTSAFE
 */
int
sys_shmctl(struct shmctl_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int error;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	lwkt_gettoken(&shm_token);
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done;
	}

	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(p, &shmseg->shm_perm, IPC_R);
		if (error == 0)
			error = copyout(shmseg, uap->buf, sizeof(inbuf));
		break;
	case IPC_SET:
		error = ipcperm(p, &shmseg->shm_perm, IPC_M);
		if (error == 0)
			error = copyin(uap->buf, &inbuf, sizeof(inbuf));
		if (error == 0) {
			shmseg->shm_perm.uid = inbuf.shm_perm.uid;
			shmseg->shm_perm.gid = inbuf.shm_perm.gid;
			shmseg->shm_perm.mode =
			    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
			    (inbuf.shm_perm.mode & ACCESSPERMS);
			shmseg->shm_ctime = time_second;
		}
		break;
	case IPC_RMID:
		error = ipcperm(p, &shmseg->shm_perm, IPC_M);
		if (error == 0) {
			shmseg->shm_perm.key = IPC_PRIVATE;
			shmseg->shm_perm.mode |= SHMSEG_REMOVED;
			if (shmseg->shm_nattch <= 0) {
				shm_deallocate_segment(shmseg);
				shm_last_free = IPCID_TO_IX(uap->shmid);
			}
		}
		break;
#if 0
	case SHM_LOCK:
	case SHM_UNLOCK:
#endif
	default:
		error = EINVAL;
		break;
	}
done:
	lwkt_reltoken(&shm_token);

	return error;
}

static int
shmget_existing(struct proc *p, struct shmget_args *uap, int mode, int segnum)
{
	struct shmid_ds *shmseg;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep((caddr_t)shmseg, PCATCH, "shmget", 0);
		if (error)
			return error;
		return EAGAIN;
	}
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	error = ipcperm(p, &shmseg->shm_perm, mode);
	if (error)
		return error;
	if (uap->size && uap->size > shmseg->shm_segsz)
		return EINVAL;
	uap->sysmsg_result = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}
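/*
 * Creation handshake: while a segment is being set up its slot is
 * marked SHMSEG_ALLOCATED | SHMSEG_REMOVED, so a concurrent shmget()
 * on the same key finds it but cannot use it.  shmget_existing() then
 * sets SHMSEG_WANTED and sleeps; once initialization is complete the
 * allocator below clears SHMSEG_WANTED and wakes the sleepers, who
 * retry their key lookup.
 */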
static int
shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode)
{
	int i, segnum, shmid;
	size_t size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;

	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni)	/* any shmids left? */
		return ENOSPC;
	size = round_page(uap->size);
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	if (shm_last_free < 0) {
		shmrealloc();	/* maybe expand the shmsegs[] array */
		for (i = 0; i < shmalloced; i++) {
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		}
		if (i == shmalloced)
			return ENOSPC;
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in kmalloc(), mark the segment present but
	 * deleted so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = uap->key;
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shm_handle = kmalloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	/*
	 * We make sure that we have allocated a pager before we need
	 * to.
	 */
	if (shm_use_phys) {
		shm_handle->shm_object =
		    phys_pager_alloc(NULL, size, VM_PROT_DEFAULT, 0);
	} else {
		shm_handle->shm_object =
		    swap_pager_alloc(NULL, size, VM_PROT_DEFAULT, 0);
	}
	vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);

	shmseg->shm_internal = shm_handle;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = uap->size;
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time_second;
	shm_committed += btoc(size);
	shm_nused++;

	/*
	 * If a physical mapping is desired and we have a ton of free pages
	 * we pre-allocate the pages here in order to avoid on-the-fly
	 * allocation later.  This has a big effect on database warm-up
	 * times since DFly supports concurrent page faults coming from the
	 * same VM object for pages which already exist.
	 *
	 * This can hang the kernel for a while so only do it if shm_use_phys
	 * is set to 2 or higher.
	 */
	if (shm_use_phys > 1) {
		vm_pindex_t pi, pmax;
		vm_page_t m;

		pmax = round_page(shmseg->shm_segsz) >> PAGE_SHIFT;
		vm_object_hold(shm_handle->shm_object);
		if (pmax > vmstats.v_free_count)
			pmax = vmstats.v_free_count;
		for (pi = 0; pi < pmax; ++pi) {
			m = vm_page_grab(shm_handle->shm_object, pi,
					 VM_ALLOC_SYSTEM | VM_ALLOC_NULL_OK |
					 VM_ALLOC_ZERO);
			if (m == NULL)
				break;
			vm_pager_get_page(shm_handle->shm_object, &m, 1);
			vm_page_activate(m);
			vm_page_wakeup(m);
			lwkt_yield();
		}
		vm_object_drop(shm_handle->shm_object);
	}

	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	uap->sysmsg_result = shmid;
	return 0;
}
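/*
 * shmget(2) entry point: IPC_PRIVATE always allocates a fresh
 * segment; any other key is looked up and either attached to an
 * existing segment or, with IPC_CREAT, newly created.  EAGAIN from
 * shmget_existing() means we slept on a half-constructed segment and
 * must repeat the lookup.
 */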
/*
 * MPALMOSTSAFE
 */
int
sys_shmget(struct shmget_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int segnum, mode, error;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	mode = uap->shmflg & ACCESSPERMS;

	lwkt_gettoken(&shm_token);

	if (uap->key != IPC_PRIVATE) {
again:
		segnum = shm_find_segment_by_key(uap->key);
		if (segnum >= 0) {
			error = shmget_existing(p, uap, mode, segnum);
			if (error == EAGAIN)
				goto again;
			goto done;
		}
		if ((uap->shmflg & IPC_CREAT) == 0) {
			error = ENOENT;
			goto done;
		}
	}
	error = shmget_allocate_segment(p, uap, mode);
done:
	lwkt_reltoken(&shm_token);

	return (error);
}

void
shmfork(struct proc *p1, struct proc *p2)
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	lwkt_gettoken(&shm_token);
	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = kmalloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
	}
	lwkt_reltoken(&shm_token);
}

void
shmexit(struct vmspace *vm)
{
	struct shmmap_state *base, *shm;
	int i;

	if ((base = (struct shmmap_state *)vm->vm_shm) != NULL) {
		vm->vm_shm = NULL;
		lwkt_gettoken(&shm_token);
		for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
			if (shm->shmid != -1)
				shm_delete_mapping(vm, shm);
		}
		kfree(base, M_SHM);
		lwkt_reltoken(&shm_token);
	}
}

static void
shmrealloc(void)
{
	int i;
	struct shmid_ds *newsegs;

	if (shmalloced >= shminfo.shmmni)
		return;

	newsegs = kmalloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
	for (i = 0; i < shmalloced; i++)
		bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
	for (; i < shminfo.shmmni; i++) {
		/* initialize the new slots, not the old (smaller) array */
		newsegs[i].shm_perm.mode = SHMSEG_FREE;
		newsegs[i].shm_perm.seq = 0;
	}
	kfree(shmsegs, M_SHM);
	shmsegs = newsegs;
	shmalloced = shminfo.shmmni;
}

static void
shminit(void *dummy)
{
	int i;

	/*
	 * If not overridden by a tunable, set the maximum shm to
	 * 2/3 of main memory.
	 */
	if (shminfo.shmall == 0)
		shminfo.shmall = (size_t)vmstats.v_page_count * 2 / 3;

	shminfo.shmmax = shminfo.shmall * PAGE_SIZE;
	shmalloced = shminfo.shmmni;
	shmsegs = kmalloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
	for (i = 0; i < shmalloced; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm.seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}
SYSINIT(sysv_shm, SI_SUB_SYSV_SHM, SI_ORDER_FIRST, shminit, NULL);
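/*
 * For reference, a minimal userland sequence exercising the syscalls
 * above (illustrative sketch only, error handling omitted):
 *
 *	int id = shmget(IPC_PRIVATE, 65536, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);	    enters sys_shmat()
 *	p[0] = 1;			    visible to all attachers
 *	shmdt(p);			    enters sys_shmdt()
 *	shmctl(id, IPC_RMID, NULL);	    segment freed once
 *					    shm_nattch reaches 0
 */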