/* $FreeBSD: src/sys/kern/sysv_shm.c,v 1.45.2.6 2002/10/22 20:45:03 fjoe Exp $ */
/* $DragonFly: src/sys/kern/sysv_shm.c,v 1.2 2003/06/17 04:28:41 dillon Exp $ */
/* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */

/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_compat.h"
#include "opt_sysvipc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysent.h>
#include <sys/jail.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

struct oshmctl_args;
static int oshmctl __P((struct proc *p, struct oshmctl_args *uap));

static int shmget_allocate_segment __P((struct proc *p, struct shmget_args *uap, int mode));
static int shmget_existing __P((struct proc *p, struct shmget_args *uap, int mode, int segnum));

/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *shmcalls[] = {
	(sy_call_t *)shmat, (sy_call_t *)oshmctl,
	(sy_call_t *)shmdt, (sy_call_t *)shmget,
	(sy_call_t *)shmctl
};

#define	SHMSEG_FREE		0x0200
#define	SHMSEG_REMOVED		0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED		0x1000

static int shm_last_free, shm_nused, shm_committed, shmalloced;
static struct shmid_ds *shmsegs;

struct shm_handle {
	/* vm_offset_t kva; */
	vm_object_t shm_object;
};

struct shmmap_state {
	vm_offset_t va;
	int shmid;
};

static void shm_deallocate_segment __P((struct shmid_ds *));
static int shm_find_segment_by_key __P((key_t));
static struct shmid_ds *shm_find_segment_by_shmid __P((int));
static int shm_delete_mapping __P((struct proc *, struct shmmap_state *));
static void shmrealloc __P((void));
static void shminit __P((void *));

/*
 * Tuneable values
 */
#ifndef SHMMAXPGS
#define	SHMMAXPGS	8192	/* note: sysv shared memory is swap backed */
#endif
#ifndef SHMMAX
#define	SHMMAX	(SHMMAXPGS*PAGE_SIZE)
#endif
#ifndef SHMMIN
#define	SHMMIN	1
#endif
#ifndef SHMMNI
#define	SHMMNI	192
#endif
#ifndef SHMSEG
#define	SHMSEG	128
#endif
#ifndef SHMALL
#define	SHMALL	(SHMMAXPGS)
#endif

struct shminfo shminfo = {
	SHMMAX,
	SHMMIN,
	SHMMNI,
	SHMSEG,
	SHMALL
};

static int shm_use_phys;

TUNABLE_INT("kern.ipc.shmmin", &shminfo.shmmin);
TUNABLE_INT("kern.ipc.shmmni", &shminfo.shmmni);
TUNABLE_INT("kern.ipc.shmseg", &shminfo.shmseg);
TUNABLE_INT("kern.ipc.shmmaxpgs", &shminfo.shmall);
TUNABLE_INT("kern.ipc.shm_use_phys", &shm_use_phys);

SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RD, &shminfo.shmmni, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RW, &shminfo.shmseg, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW, &shm_use_phys, 0, "");

static int
shm_find_segment_by_key(key)
	key_t key;
{
	int i;

	for (i = 0; i < shmalloced; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return i;
	return -1;
}

static struct shmid_ds *
shm_find_segment_by_shmid(shmid)
	int shmid;
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shmalloced)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
	    != SHMSEG_ALLOCATED ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return NULL;
	return shmseg;
}

static void
shm_deallocate_segment(shmseg)
	struct shmid_ds *shmseg;
{
	struct shm_handle *shm_handle;
	size_t size;

	shm_handle = shmseg->shm_internal;
	vm_object_deallocate(shm_handle->shm_object);
	free((caddr_t)shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	size = round_page(shmseg->shm_segsz);
	shm_committed -= btoc(size);
	shm_nused--;
	shmseg->shm_perm.mode = SHMSEG_FREE;
}

static int
shm_delete_mapping(p, shmmap_s)
	struct proc *p;
	struct shmmap_state *shmmap_s;
{
	struct shmid_ds *shmseg;
	int segnum, result;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = round_page(shmseg->shm_segsz);
	result = vm_map_remove(&p->p_vmspace->vm_map, shmmap_s->va, shmmap_s->va + size);
	if (result != KERN_SUCCESS)
		return EINVAL;
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time_second;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}

#ifndef _SYS_SYSPROTO_H_
struct shmdt_args {
	void *shmaddr;
};
#endif

int
shmdt(p, uap)
	struct proc *p;
	struct shmdt_args *uap;
{
	struct shmmap_state *shmmap_s;
	int i;

	if (!jail_sysvipc_allowed && p->p_prison != NULL)
		return (ENOSYS);

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL)
		return EINVAL;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr)
			break;
	if (i == shminfo.shmseg)
		return EINVAL;
	return shm_delete_mapping(p, shmmap_s);
}

#ifndef _SYS_SYSPROTO_H_
struct shmat_args {
	int shmid;
	void *shmaddr;
	int shmflg;
};
#endif

int
shmat(p, uap)
	struct proc *p;
	struct shmat_args *uap;
{
	int error, i, flags;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	struct shm_handle *shm_handle;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;
	int rv;

	if (!jail_sysvipc_allowed && p->p_prison != NULL)
		return (ENOSYS);

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = malloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	error = ipcperm(p, &shmseg->shm_perm,
	    (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		return error;
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg)
		return EMFILE;
	size = round_page(shmseg->shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
	prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
	prot = VM_PROT_READ;
#endif
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (uap->shmaddr) {
		flags |= MAP_FIXED;
		if (uap->shmflg & SHM_RND)
			attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);
		else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0)
			attach_va = (vm_offset_t)uap->shmaddr;
		else
			return EINVAL;
	} else {
		/* This is just a hint to vm_map_find() about where to put it. */
		attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr + maxtsiz + maxdsiz);
	}

	shm_handle = shmseg->shm_internal;
	vm_object_reference(shm_handle->shm_object);
	rv = vm_map_find(&p->p_vmspace->vm_map, shm_handle->shm_object,
		0, &attach_va, size, (flags & MAP_FIXED)?0:1, prot, prot, 0);
	if (rv != KERN_SUCCESS) {
		return ENOMEM;
	}
	vm_map_inherit(&p->p_vmspace->vm_map,
		attach_va, attach_va + size, VM_INHERIT_SHARE);

	shmmap_s->va = attach_va;
	shmmap_s->shmid = uap->shmid;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time_second;
	shmseg->shm_nattch++;
	p->p_retval[0] = attach_va;
	return 0;
}

struct oshmid_ds {
	struct ipc_perm shm_perm;	/* operation perms */
	int	shm_segsz;		/* size of segment (bytes) */
	ushort	shm_cpid;		/* pid, creator */
	ushort	shm_lpid;		/* pid, last operation */
	short	shm_nattch;		/* no. of current attaches */
	time_t	shm_atime;		/* last attach time */
	time_t	shm_dtime;		/* last detach time */
	time_t	shm_ctime;		/* last change time */
	void	*shm_handle;		/* internal handle for shm segment */
};

struct oshmctl_args {
	int shmid;
	int cmd;
	struct oshmid_ds *ubuf;
};

static int
oshmctl(p, uap)
	struct proc *p;
	struct oshmctl_args *uap;
{
#ifdef COMPAT_43
	int error;
	struct shmid_ds *shmseg;
	struct oshmid_ds outbuf;

	if (!jail_sysvipc_allowed && p->p_prison != NULL)
		return (ENOSYS);

	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(p, &shmseg->shm_perm, IPC_R);
		if (error)
			return error;
		outbuf.shm_perm = shmseg->shm_perm;
		outbuf.shm_segsz = shmseg->shm_segsz;
		outbuf.shm_cpid = shmseg->shm_cpid;
		outbuf.shm_lpid = shmseg->shm_lpid;
		outbuf.shm_nattch = shmseg->shm_nattch;
		outbuf.shm_atime = shmseg->shm_atime;
		outbuf.shm_dtime = shmseg->shm_dtime;
		outbuf.shm_ctime = shmseg->shm_ctime;
		outbuf.shm_handle = shmseg->shm_internal;
		error = copyout((caddr_t)&outbuf, uap->ubuf, sizeof(outbuf));
		if (error)
			return error;
		break;
	default:
		/* XXX casting to (sy_call_t *) is bogus, as usual. */
		return ((sy_call_t *)shmctl)(p, uap);
	}
	return 0;
#else
	return EINVAL;
#endif
}

#ifndef _SYS_SYSPROTO_H_
struct shmctl_args {
	int shmid;
	int cmd;
	struct shmid_ds *buf;
};
#endif

int
shmctl(p, uap)
	struct proc *p;
	struct shmctl_args *uap;
{
	int error;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;

	if (!jail_sysvipc_allowed && p->p_prison != NULL)
		return (ENOSYS);

	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(p, &shmseg->shm_perm, IPC_R);
		if (error)
			return error;
		error = copyout((caddr_t)shmseg, uap->buf, sizeof(inbuf));
		if (error)
			return error;
		break;
	case IPC_SET:
		error = ipcperm(p, &shmseg->shm_perm, IPC_M);
		if (error)
			return error;
		error = copyin(uap->buf, (caddr_t)&inbuf, sizeof(inbuf));
		if (error)
			return error;
		shmseg->shm_perm.uid = inbuf.shm_perm.uid;
		shmseg->shm_perm.gid = inbuf.shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (inbuf.shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time_second;
		break;
	case IPC_RMID:
		error = ipcperm(p, &shmseg->shm_perm, IPC_M);
		if (error)
			return error;
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(uap->shmid);
		}
		break;
#if 0
	case SHM_LOCK:
	case SHM_UNLOCK:
#endif
	default:
		return EINVAL;
	}
	return 0;
}

#ifndef _SYS_SYSPROTO_H_
struct shmget_args {
	key_t key;
	size_t size;
	int shmflg;
};
#endif

static int
shmget_existing(p, uap, mode, segnum)
	struct proc *p;
	struct shmget_args *uap;
	int mode;
	int segnum;
{
	struct shmid_ds *shmseg;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
		if (error)
			return error;
		return EAGAIN;
	}
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	error = ipcperm(p, &shmseg->shm_perm, mode);
	if (error)
		return error;
	if (uap->size && uap->size > shmseg->shm_segsz)
		return EINVAL;
	p->p_retval[0] = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}

static int
shmget_allocate_segment(p, uap, mode)
	struct proc *p;
	struct shmget_args *uap;
	int mode;
{
	int i, segnum, shmid, size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;

	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return ENOSPC;
	size = round_page(uap->size);
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	if (shm_last_free < 0) {
		shmrealloc();	/* maybe expand the shmsegs[] array */
		for (i = 0; i < shmalloced; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shmalloced)
			return ENOSPC;
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = uap->key;
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shm_handle = (struct shm_handle *)
	    malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	/*
	 * We make sure that we have allocated a pager before we need
	 * to.
	 */
	if (shm_use_phys) {
		shm_handle->shm_object =
		    vm_pager_allocate(OBJT_PHYS, 0, size, VM_PROT_DEFAULT, 0);
	} else {
		shm_handle->shm_object =
		    vm_pager_allocate(OBJT_SWAP, 0, size, VM_PROT_DEFAULT, 0);
	}
	vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);

	shmseg->shm_internal = shm_handle;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = uap->size;
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time_second;
	shm_committed += btoc(size);
	shm_nused++;
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	p->p_retval[0] = shmid;
	return 0;
}

int
shmget(p, uap)
	struct proc *p;
	struct shmget_args *uap;
{
	int segnum, mode, error;

	if (!jail_sysvipc_allowed && p->p_prison != NULL)
		return (ENOSYS);

	mode = uap->shmflg & ACCESSPERMS;
	if (uap->key != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(uap->key);
		if (segnum >= 0) {
			error = shmget_existing(p, uap, mode, segnum);
			if (error == EAGAIN)
				goto again;
			return error;
		}
		if ((uap->shmflg & IPC_CREAT) == 0)
			return ENOENT;
	}
	return shmget_allocate_segment(p, uap, mode);
}

int
shmsys(p, uap)
	struct proc *p;
	/* XXX actually varargs. */
	struct shmsys_args /* {
		u_int	which;
		int	a2;
		int	a3;
		int	a4;
	} */ *uap;
{

	if (!jail_sysvipc_allowed && p->p_prison != NULL)
		return (ENOSYS);

	if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return EINVAL;
	return ((*shmcalls[uap->which])(p, &uap->a2));
}

void
shmfork(p1, p2)
	struct proc *p1, *p2;
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = malloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}

void
shmexit(p)
	struct proc *p;
{
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shm_delete_mapping(p, shmmap_s);
	free((caddr_t)p->p_vmspace->vm_shm, M_SHM);
	p->p_vmspace->vm_shm = NULL;
}

static void
shmrealloc(void)
{
	int i;
	struct shmid_ds *newsegs;

	if (shmalloced >= shminfo.shmmni)
		return;

	newsegs = malloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
	if (newsegs == NULL)
		return;
	for (i = 0; i < shmalloced; i++)
		bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
	for (; i < shminfo.shmmni; i++) {
		/* mark the newly added entries free, not the old array */
		newsegs[i].shm_perm.mode = SHMSEG_FREE;
		newsegs[i].shm_perm.seq = 0;
	}
	free(shmsegs, M_SHM);
	shmsegs = newsegs;
	shmalloced = shminfo.shmmni;
}

static void
shminit(dummy)
	void *dummy;
{
	int i;

	shminfo.shmmax = shminfo.shmall * PAGE_SIZE;
	shmalloced = shminfo.shmmni;
	shmsegs = malloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
	if (shmsegs == NULL)
		panic("cannot allocate initial memory for sysvshm");
	for (i = 0; i < shmalloced; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm.seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}
SYSINIT(sysv_shm, SI_SUB_SYSV_SHM, SI_ORDER_FIRST, shminit, NULL);
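
/*
 * For reference: the entry points above back the standard SysV shared
 * memory API exposed to userland.  A minimal usage sketch follows
 * (illustrative only; the IPC_PRIVATE key, 4096-byte size and 0600 mode
 * are arbitrary choices, and error checking is omitted):
 *
 *	#include <sys/types.h>
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);	-- attach at a kernel-chosen address
 *	p[0] = 1;			-- use the mapping
 *	shmdt(p);			-- detach; drops shm_nattch
 *	shmctl(id, IPC_RMID, NULL);	-- segment freed once shm_nattch is 0
 */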