/* $FreeBSD: src/sys/kern/sysv_shm.c,v 1.45.2.6 2002/10/22 20:45:03 fjoe Exp $ */
/* $DragonFly: src/sys/kern/sysv_shm.c,v 1.21 2008/01/06 16:55:51 swildner Exp $ */
/* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */

/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_compat.h"
#include "opt_sysvipc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysent.h>
#include <sys/jail.h>

#include <sys/mplock2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

struct oshmctl_args;
static int sys_oshmctl (struct proc *p, struct oshmctl_args *uap);

static int shmget_allocate_segment (struct proc *p, struct shmget_args *uap, int mode);
static int shmget_existing (struct proc *p, struct shmget_args *uap, int mode, int segnum);

/* XXX casting to (sy_call_t *) is bogus, as usual. */
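/*
 * shmcalls[] is the dispatch table used by sys_shmsys() below: the old
 * shmsys(which, a2, ...) entry point indexes this array with its 'which'
 * argument, so the order (shmat, oshmctl, shmdt, shmget, shmctl) encodes
 * the historic call numbers 0-4.
 */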
static sy_call_t *shmcalls[] = {
	(sy_call_t *)sys_shmat, (sy_call_t *)sys_oshmctl,
	(sy_call_t *)sys_shmdt, (sy_call_t *)sys_shmget,
	(sy_call_t *)sys_shmctl
};

#define	SHMSEG_FREE		0x0200
#define	SHMSEG_REMOVED		0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED		0x1000

static int shm_last_free, shm_nused, shm_committed, shmalloced;
static struct shmid_ds *shmsegs;

struct shm_handle {
	/* vm_offset_t kva; */
	vm_object_t shm_object;
};

struct shmmap_state {
	vm_offset_t va;
	int shmid;
};

static void shm_deallocate_segment (struct shmid_ds *);
static int shm_find_segment_by_key (key_t);
static struct shmid_ds *shm_find_segment_by_shmid (int);
static int shm_delete_mapping (struct vmspace *vm, struct shmmap_state *);
static void shmrealloc (void);
static void shminit (void *);

/*
 * Tuneable values
 */
#ifndef SHMMAXPGS
#define	SHMMAXPGS	8192	/* note: sysv shared memory is swap backed */
#endif
#ifndef SHMMAX
#define	SHMMAX	(SHMMAXPGS*PAGE_SIZE)
#endif
#ifndef SHMMIN
#define	SHMMIN	1
#endif
#ifndef SHMMNI
#define	SHMMNI	192
#endif
#ifndef SHMSEG
#define	SHMSEG	128
#endif
#ifndef SHMALL
#define	SHMALL	(SHMMAXPGS)
#endif

struct shminfo shminfo = {
	SHMMAX,
	SHMMIN,
	SHMMNI,
	SHMSEG,
	SHMALL
};

static int shm_use_phys;

TUNABLE_INT("kern.ipc.shmmin", &shminfo.shmmin);
TUNABLE_INT("kern.ipc.shmmni", &shminfo.shmmni);
TUNABLE_INT("kern.ipc.shmseg", &shminfo.shmseg);
TUNABLE_INT("kern.ipc.shmmaxpgs", &shminfo.shmall);
TUNABLE_INT("kern.ipc.shm_use_phys", &shm_use_phys);

SYSCTL_INT(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RD, &shminfo.shmmni, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RW, &shminfo.shmseg, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW, &shm_use_phys, 0, "");

static int
shm_find_segment_by_key(key_t key)
{
	int i;

	for (i = 0; i < shmalloced; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return i;
	return -1;
}

static struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shmalloced)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
	    != SHMSEG_ALLOCATED ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return NULL;
	return shmseg;
}

static void
shm_deallocate_segment(struct shmid_ds *shmseg)
{
	struct shm_handle *shm_handle;
	size_t size;

	shm_handle = shmseg->shm_internal;
	vm_object_deallocate(shm_handle->shm_object);
	kfree((caddr_t)shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	size = round_page(shmseg->shm_segsz);
	shm_committed -= btoc(size);
	shm_nused--;
	shmseg->shm_perm.mode = SHMSEG_FREE;
}

static int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
	struct shmid_ds *shmseg;
	int segnum, result;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = round_page(shmseg->shm_segsz);
	result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
	if (result != KERN_SUCCESS)
		return EINVAL;
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time_second;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}

/*
 * MPALMOSTSAFE
 */
int
sys_shmdt(struct shmdt_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct shmmap_state *shmmap_s;
	int i;
	int error;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	get_mplock();
	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		error = EINVAL;
		goto done;
	}
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr)
			break;
	}
	if (i == shminfo.shmseg)
		error = EINVAL;
	else
		error = shm_delete_mapping(p->p_vmspace, shmmap_s);
done:
	rel_mplock();
	return (error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_shmat(struct shmat_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int error, i, flags;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	struct shm_handle *shm_handle;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;
	int rv;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	get_mplock();
again:
	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = kmalloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		if (p->p_vmspace->vm_shm != NULL) {
			kfree(shmmap_s, M_SHM);
			goto again;
		}
		p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done;
	}
	error = ipcperm(p, &shmseg->shm_perm,
			(uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		goto done;
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg) {
		error = EMFILE;
		goto done;
	}
	size = round_page(shmseg->shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
	prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
	prot = VM_PROT_READ;
#endif
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (uap->shmaddr) {
		flags |= MAP_FIXED;
		if (uap->shmflg & SHM_RND) {
			attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);
		} else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0) {
			attach_va = (vm_offset_t)uap->shmaddr;
		} else {
			error = EINVAL;
			goto done;
		}
	} else {
		/*
		 * This is just a hint to vm_map_find() about where to put it.
		 */
		attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr + maxtsiz + maxdsiz);
	}

	shm_handle = shmseg->shm_internal;
	vm_object_reference(shm_handle->shm_object);
	rv = vm_map_find(&p->p_vmspace->vm_map,
			 shm_handle->shm_object, 0,
			 &attach_va, size,
			 ((flags & MAP_FIXED) ? 0 : 1),
			 VM_MAPTYPE_NORMAL,
			 prot, prot,
			 0);
	if (rv != KERN_SUCCESS) {
		vm_object_deallocate(shm_handle->shm_object);
		error = ENOMEM;
		goto done;
	}
	vm_map_inherit(&p->p_vmspace->vm_map,
		       attach_va, attach_va + size, VM_INHERIT_SHARE);

	KKASSERT(shmmap_s->shmid == -1);
	shmmap_s->va = attach_va;
	shmmap_s->shmid = uap->shmid;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time_second;
	shmseg->shm_nattch++;
	uap->sysmsg_result = attach_va;
	error = 0;
done:
	rel_mplock();
	return error;
}

struct oshmid_ds {
	struct ipc_perm shm_perm;	/* operation perms */
	int	shm_segsz;		/* size of segment (bytes) */
	ushort	shm_cpid;		/* pid, creator */
	ushort	shm_lpid;		/* pid, last operation */
	short	shm_nattch;		/* no. of current attaches */
	time_t	shm_atime;		/* last attach time */
	time_t	shm_dtime;		/* last detach time */
	time_t	shm_ctime;		/* last change time */
	void	*shm_handle;		/* internal handle for shm segment */
};

struct oshmctl_args {
	struct sysmsg sysmsg;
	int shmid;
	int cmd;
	struct oshmid_ds *ubuf;
};

/*
 * MPALMOSTSAFE
 */
static int
sys_oshmctl(struct proc *p, struct oshmctl_args *uap)
{
#ifdef COMPAT_43
	struct thread *td = curthread;
	struct shmid_ds *shmseg;
	struct oshmid_ds outbuf;
	int error;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	get_mplock();
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done;
	}

	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(p, &shmseg->shm_perm, IPC_R);
		if (error)
			break;
		outbuf.shm_perm = shmseg->shm_perm;
		outbuf.shm_segsz = shmseg->shm_segsz;
		outbuf.shm_cpid = shmseg->shm_cpid;
		outbuf.shm_lpid = shmseg->shm_lpid;
		outbuf.shm_nattch = shmseg->shm_nattch;
		outbuf.shm_atime = shmseg->shm_atime;
		outbuf.shm_dtime = shmseg->shm_dtime;
		outbuf.shm_ctime = shmseg->shm_ctime;
		outbuf.shm_handle = shmseg->shm_internal;
		error = copyout((caddr_t)&outbuf, uap->ubuf, sizeof(outbuf));
		break;
	default:
		/* XXX casting to (sy_call_t *) is bogus, as usual. */
		error = sys_shmctl((struct shmctl_args *)uap);
	}
done:
	rel_mplock();
	return error;
#else
	return EINVAL;
#endif
}

/*
 * MPALMOSTSAFE
 */
int
sys_shmctl(struct shmctl_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int error;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	get_mplock();
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done;
	}

	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(p, &shmseg->shm_perm, IPC_R);
		if (error == 0)
			error = copyout(shmseg, uap->buf, sizeof(inbuf));
		break;
	case IPC_SET:
		error = ipcperm(p, &shmseg->shm_perm, IPC_M);
		if (error == 0)
			error = copyin(uap->buf, &inbuf, sizeof(inbuf));
		if (error == 0) {
			shmseg->shm_perm.uid = inbuf.shm_perm.uid;
			shmseg->shm_perm.gid = inbuf.shm_perm.gid;
			shmseg->shm_perm.mode =
			    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
			    (inbuf.shm_perm.mode & ACCESSPERMS);
			shmseg->shm_ctime = time_second;
		}
		break;
	case IPC_RMID:
		error = ipcperm(p, &shmseg->shm_perm, IPC_M);
		if (error == 0) {
			shmseg->shm_perm.key = IPC_PRIVATE;
			shmseg->shm_perm.mode |= SHMSEG_REMOVED;
			if (shmseg->shm_nattch <= 0) {
				shm_deallocate_segment(shmseg);
				shm_last_free = IPCID_TO_IX(uap->shmid);
			}
		}
		break;
#if 0
	case SHM_LOCK:
	case SHM_UNLOCK:
#endif
	default:
		error = EINVAL;
		break;
	}
done:
	rel_mplock();
	return error;
}

static int
shmget_existing(struct proc *p, struct shmget_args *uap, int mode, int segnum)
{
	struct shmid_ds *shmseg;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep((caddr_t)shmseg, PCATCH, "shmget", 0);
		if (error)
			return error;
		return EAGAIN;
	}
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	error = ipcperm(p, &shmseg->shm_perm, mode);
	if (error)
		return error;
	if (uap->size && uap->size > shmseg->shm_segsz)
		return EINVAL;
	uap->sysmsg_result = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}

static int
shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode)
{
	int i, segnum, shmid, size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;

	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni)	/* any shmids left? */
		return ENOSPC;
	size = round_page(uap->size);
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	if (shm_last_free < 0) {
		shmrealloc();	/* maybe expand the shmsegs[] array */
		for (i = 0; i < shmalloced; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shmalloced)
			return ENOSPC;
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = uap->key;
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shm_handle = kmalloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	/*
	 * We make sure that we have allocated a pager before we need
	 * to.
	 */
	if (shm_use_phys) {
		shm_handle->shm_object =
		    phys_pager_alloc(NULL, size, VM_PROT_DEFAULT, 0);
	} else {
		shm_handle->shm_object =
		    swap_pager_alloc(NULL, size, VM_PROT_DEFAULT, 0);
	}
	vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);

	shmseg->shm_internal = shm_handle;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = uap->size;
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time_second;
	shm_committed += btoc(size);
	shm_nused++;
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	uap->sysmsg_result = shmid;
	return 0;
}

/*
 * MPALMOSTSAFE
 */
int
sys_shmget(struct shmget_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int segnum, mode, error;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	mode = uap->shmflg & ACCESSPERMS;
	get_mplock();

	if (uap->key != IPC_PRIVATE) {
again:
		segnum = shm_find_segment_by_key(uap->key);
		if (segnum >= 0) {
			error = shmget_existing(p, uap, mode, segnum);
			if (error == EAGAIN)
				goto again;
			goto done;
		}
		if ((uap->shmflg & IPC_CREAT) == 0) {
			error = ENOENT;
			goto done;
		}
	}
	error = shmget_allocate_segment(p, uap, mode);
done:
	rel_mplock();
	return (error);
}

/*
 * shmsys_args(int which, int a2, ...) (VARARGS)
 *
 * MPALMOSTSAFE
 */
int
sys_shmsys(struct shmsys_args *uap)
{
	struct thread *td = curthread;
	unsigned int which = (unsigned int)uap->which;
	int error;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	if (which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return EINVAL;
	get_mplock();
	bcopy(&uap->a2, &uap->which,
	      sizeof(struct shmsys_args) - offsetof(struct shmsys_args, a2));
	error = ((*shmcalls[which])(uap));
	rel_mplock();

	return(error);
}

void
shmfork(struct proc *p1, struct proc *p2)
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = kmalloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}

void
shmexit(struct vmspace *vm)
{
	struct shmmap_state *base, *shm;
	int i;

	if ((base = (struct shmmap_state *)vm->vm_shm) != NULL) {
		vm->vm_shm = NULL;
		for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
			if (shm->shmid != -1)
				shm_delete_mapping(vm, shm);
		}
		kfree(base, M_SHM);
	}
}

static void
shmrealloc(void)
{
	int i;
	struct shmid_ds *newsegs;

	if (shmalloced >= shminfo.shmmni)
		return;

	newsegs = kmalloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
	for (i = 0; i < shmalloced; i++)
		bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
	/* initialize the slots added beyond the old array size */
	for (; i < shminfo.shmmni; i++) {
		newsegs[i].shm_perm.mode = SHMSEG_FREE;
		newsegs[i].shm_perm.seq = 0;
	}
	kfree(shmsegs, M_SHM);
	shmsegs = newsegs;
	shmalloced = shminfo.shmmni;
}

static void
shminit(void *dummy)
{
	int i;

	shminfo.shmmax = shminfo.shmall * PAGE_SIZE;
	shmalloced = shminfo.shmmni;
	shmsegs = kmalloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
	for (i = 0; i < shmalloced; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm.seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}
SYSINIT(sysv_shm, SI_SUB_SYSV_SHM, SI_ORDER_FIRST, shminit, NULL);