/* $FreeBSD: src/sys/kern/sysv_shm.c,v 1.45.2.6 2002/10/22 20:45:03 fjoe Exp $ */
/* $DragonFly: src/sys/kern/sysv_shm.c,v 1.21 2008/01/06 16:55:51 swildner Exp $ */
/* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */

/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_compat.h"
#include "opt_sysvipc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysent.h>
#include <sys/jail.h>

#include <sys/mplock2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

static MALLOC_DEFINE(M_SHM, "shm", "SVID compatible shared memory segments");

struct oshmctl_args;
static int sys_oshmctl (struct proc *p, struct oshmctl_args *uap);

static int shmget_allocate_segment (struct proc *p, struct shmget_args *uap, int mode);
static int shmget_existing (struct proc *p, struct shmget_args *uap, int mode, int segnum);

/* XXX casting to (sy_call_t *) is bogus, as usual. */
static sy_call_t *shmcalls[] = {
	(sy_call_t *)sys_shmat, (sy_call_t *)sys_oshmctl,
	(sy_call_t *)sys_shmdt, (sy_call_t *)sys_shmget,
	(sy_call_t *)sys_shmctl
};

#define	SHMSEG_FREE		0x0200
#define	SHMSEG_REMOVED		0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED		0x1000

static int shm_last_free, shm_committed, shmalloced;
int shm_nused;
static struct shmid_ds *shmsegs;

struct shm_handle {
	/* vm_offset_t kva; */
	vm_object_t shm_object;
};

struct shmmap_state {
	vm_offset_t va;
	int shmid;
};

static void shm_deallocate_segment (struct shmid_ds *);
static int shm_find_segment_by_key (key_t);
static struct shmid_ds *shm_find_segment_by_shmid (int);
static int shm_delete_mapping (struct vmspace *vm, struct shmmap_state *);
static void shmrealloc (void);
static void shminit (void *);

/*
 * Tuneable values
 */
#ifndef SHMMAXPGS
#define	SHMMAXPGS	8192	/* note: sysv shared memory is swap backed */
#endif
#ifndef SHMMAX
#define	SHMMAX	(SHMMAXPGS*PAGE_SIZE)
#endif
#ifndef SHMMIN
#define	SHMMIN	1
#endif
#ifndef SHMMNI
#define	SHMMNI	192
#endif
#ifndef SHMSEG
#define	SHMSEG	128
#endif
#ifndef SHMALL
#define	SHMALL	(SHMMAXPGS)
#endif

struct shminfo shminfo = {
	SHMMAX,
	SHMMIN,
	SHMMNI,
	SHMSEG,
	SHMALL
};

static int shm_use_phys;

TUNABLE_INT("kern.ipc.shmmin", &shminfo.shmmin);
TUNABLE_INT("kern.ipc.shmmni", &shminfo.shmmni);
TUNABLE_INT("kern.ipc.shmseg", &shminfo.shmseg);
TUNABLE_INT("kern.ipc.shmmaxpgs", &shminfo.shmall);
TUNABLE_INT("kern.ipc.shm_use_phys", &shm_use_phys);

SYSCTL_INT(_kern_ipc, OID_AUTO, shmmax, CTLFLAG_RW, &shminfo.shmmax, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmin, CTLFLAG_RW, &shminfo.shmmin, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmmni, CTLFLAG_RD, &shminfo.shmmni, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmseg, CTLFLAG_RW, &shminfo.shmseg, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shmall, CTLFLAG_RW, &shminfo.shmall, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW, &shm_use_phys, 0, "");

static int
shm_find_segment_by_key(key_t key)
{
	int i;

	for (i = 0; i < shmalloced; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return i;
	return -1;
}

static struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shmalloced)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
	    != SHMSEG_ALLOCATED ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return NULL;
	return shmseg;
}

static void
shm_deallocate_segment(struct shmid_ds *shmseg)
{
	struct shm_handle *shm_handle;
	size_t size;

	shm_handle = shmseg->shm_internal;
	vm_object_deallocate(shm_handle->shm_object);
	kfree((caddr_t)shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	size = round_page(shmseg->shm_segsz);
	shm_committed -= btoc(size);
	shm_nused--;
	shmseg->shm_perm.mode = SHMSEG_FREE;
}

static int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
	struct shmid_ds *shmseg;
	int segnum, result;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = round_page(shmseg->shm_segsz);
	result = vm_map_remove(&vm->vm_map, shmmap_s->va, shmmap_s->va + size);
	if (result != KERN_SUCCESS)
		return EINVAL;
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time_second;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}

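/*
 * The syscall handlers below implement the usual System V shared memory
 * API.  As an illustrative reference only (userland code, not part of this
 * file), a typical sequence that exercises them is:
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	void *p = shmat(id, NULL, 0);
 *	...use the memory...
 *	shmdt(p);
 *	shmctl(id, IPC_RMID, NULL);
 *
 * shmget() reserves a segment, shmat() maps it into the caller's address
 * space, shmdt() unmaps it, and IPC_RMID marks the segment for destruction
 * once its last attach is gone.
 */
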
/*
 * MPALMOSTSAFE
 */
int
sys_shmdt(struct shmdt_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct shmmap_state *shmmap_s;
	int i;
	int error;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	get_mplock();
	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		error = EINVAL;
		goto done;
	}
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr)
			break;
	}
	if (i == shminfo.shmseg)
		error = EINVAL;
	else
		error = shm_delete_mapping(p->p_vmspace, shmmap_s);
done:
	rel_mplock();
	return (error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_shmat(struct shmat_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int error, i, flags;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	struct shm_handle *shm_handle;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;
	int rv;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	get_mplock();
again:
	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = kmalloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		if (p->p_vmspace->vm_shm != NULL) {
			kfree(shmmap_s, M_SHM);
			goto again;
		}
		p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done;
	}
	error = ipcperm(p, &shmseg->shm_perm,
			(uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		goto done;
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg) {
		error = EMFILE;
		goto done;
	}
	size = round_page(shmseg->shm_segsz);
#ifdef VM_PROT_READ_IS_EXEC
	prot = VM_PROT_READ | VM_PROT_EXECUTE;
#else
	prot = VM_PROT_READ;
#endif
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (uap->shmaddr) {
		flags |= MAP_FIXED;
		if (uap->shmflg & SHM_RND) {
			attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);
		} else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0) {
			attach_va = (vm_offset_t)uap->shmaddr;
		} else {
			error = EINVAL;
			goto done;
		}
	} else {
		/*
		 * This is just a hint to vm_map_find() about where to put it.
		 */
		attach_va = round_page((vm_offset_t)p->p_vmspace->vm_taddr + maxtsiz + maxdsiz);
	}

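	/*
	 * Map the segment's backing VM object (created in
	 * shmget_allocate_segment()) into the process at attach_va, then
	 * mark the range VM_INHERIT_SHARE so a fork()ed child shares the
	 * same pages rather than getting a copy.
	 */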
	shm_handle = shmseg->shm_internal;
	vm_object_reference(shm_handle->shm_object);
	rv = vm_map_find(&p->p_vmspace->vm_map,
			 shm_handle->shm_object, 0,
			 &attach_va,
			 size, PAGE_SIZE,
			 ((flags & MAP_FIXED) ? 0 : 1),
			 VM_MAPTYPE_NORMAL,
			 prot, prot,
			 0);
	if (rv != KERN_SUCCESS) {
		vm_object_deallocate(shm_handle->shm_object);
		error = ENOMEM;
		goto done;
	}
	vm_map_inherit(&p->p_vmspace->vm_map,
		       attach_va, attach_va + size, VM_INHERIT_SHARE);

	KKASSERT(shmmap_s->shmid == -1);
	shmmap_s->va = attach_va;
	shmmap_s->shmid = uap->shmid;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time_second;
	shmseg->shm_nattch++;
	uap->sysmsg_resultp = (void *)attach_va;
	error = 0;
done:
	rel_mplock();
	return error;
}

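/*
 * Old-style shmid_ds layout and argument block used only by the COMPAT_43
 * (4.3BSD-compatibility) sys_oshmctl() entry point below.
 */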
struct oshmid_ds {
	struct ipc_perm shm_perm;	/* operation perms */
	int	shm_segsz;		/* size of segment (bytes) */
	ushort	shm_cpid;		/* pid, creator */
	ushort	shm_lpid;		/* pid, last operation */
	short	shm_nattch;		/* no. of current attaches */
	time_t	shm_atime;		/* last attach time */
	time_t	shm_dtime;		/* last detach time */
	time_t	shm_ctime;		/* last change time */
	void	*shm_handle;		/* internal handle for shm segment */
};

struct oshmctl_args {
	struct sysmsg sysmsg;
	int shmid;
	int cmd;
	struct oshmid_ds *ubuf;
};

/*
 * MPALMOSTSAFE
 */
static int
sys_oshmctl(struct proc *p, struct oshmctl_args *uap)
{
#ifdef COMPAT_43
	struct thread *td = curthread;
	struct shmid_ds *shmseg;
	struct oshmid_ds outbuf;
	int error;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	get_mplock();
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done;
	}

	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(p, &shmseg->shm_perm, IPC_R);
		if (error)
			break;
		outbuf.shm_perm = shmseg->shm_perm;
		outbuf.shm_segsz = shmseg->shm_segsz;
		outbuf.shm_cpid = shmseg->shm_cpid;
		outbuf.shm_lpid = shmseg->shm_lpid;
		outbuf.shm_nattch = shmseg->shm_nattch;
		outbuf.shm_atime = shmseg->shm_atime;
		outbuf.shm_dtime = shmseg->shm_dtime;
		outbuf.shm_ctime = shmseg->shm_ctime;
		outbuf.shm_handle = shmseg->shm_internal;
		error = copyout((caddr_t)&outbuf, uap->ubuf, sizeof(outbuf));
		break;
	default:
		/* XXX casting to (sy_call_t *) is bogus, as usual. */
		error = sys_shmctl((struct shmctl_args *)uap);
	}
done:
	rel_mplock();
	return error;
#else
	return EINVAL;
#endif
}

/*
 * MPALMOSTSAFE
 */
int
sys_shmctl(struct shmctl_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int error;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	get_mplock();
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL) {
		error = EINVAL;
		goto done;
	}

	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(p, &shmseg->shm_perm, IPC_R);
		if (error == 0)
			error = copyout(shmseg, uap->buf, sizeof(inbuf));
		break;
	case IPC_SET:
		error = ipcperm(p, &shmseg->shm_perm, IPC_M);
		if (error == 0)
			error = copyin(uap->buf, &inbuf, sizeof(inbuf));
		if (error == 0) {
			shmseg->shm_perm.uid = inbuf.shm_perm.uid;
			shmseg->shm_perm.gid = inbuf.shm_perm.gid;
			shmseg->shm_perm.mode =
			    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
			    (inbuf.shm_perm.mode & ACCESSPERMS);
			shmseg->shm_ctime = time_second;
		}
		break;
	case IPC_RMID:
		error = ipcperm(p, &shmseg->shm_perm, IPC_M);
		if (error == 0) {
			shmseg->shm_perm.key = IPC_PRIVATE;
			shmseg->shm_perm.mode |= SHMSEG_REMOVED;
			if (shmseg->shm_nattch <= 0) {
				shm_deallocate_segment(shmseg);
				shm_last_free = IPCID_TO_IX(uap->shmid);
			}
		}
		break;
#if 0
	case SHM_LOCK:
	case SHM_UNLOCK:
#endif
	default:
		error = EINVAL;
		break;
	}
done:
	rel_mplock();
	return error;
}

static int
shmget_existing(struct proc *p, struct shmget_args *uap, int mode, int segnum)
{
	struct shmid_ds *shmseg;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep((caddr_t)shmseg, PCATCH, "shmget", 0);
		if (error)
			return error;
		return EAGAIN;
	}
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	error = ipcperm(p, &shmseg->shm_perm, mode);
	if (error)
		return error;
	if (uap->size && uap->size > shmseg->shm_segsz)
		return EINVAL;
	uap->sysmsg_result = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}

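/*
 * Note on ids: the shmid returned to userland packs the slot index in
 * shmsegs[] together with the slot's sequence number (IXSEQ_TO_IPCID(),
 * with IPCID_TO_IX()/IPCID_TO_SEQ() as the reverse mappings), so a stale
 * id from a recycled slot is rejected by shm_find_segment_by_shmid().
 */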
static int
shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode)
{
	int i, segnum, shmid, size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;

	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return ENOSPC;
	size = round_page(uap->size);
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	if (shm_last_free < 0) {
		shmrealloc();	/* maybe expand the shmsegs[] array */
		for (i = 0; i < shmalloced; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shmalloced)
			return ENOSPC;
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = uap->key;
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shm_handle = kmalloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	/*
	 * We make sure that we have allocated a pager before we need
	 * to.
	 */
	if (shm_use_phys) {
		shm_handle->shm_object =
		    phys_pager_alloc(NULL, size, VM_PROT_DEFAULT, 0);
	} else {
		shm_handle->shm_object =
		    swap_pager_alloc(NULL, size, VM_PROT_DEFAULT, 0);
	}
	vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);

	shmseg->shm_internal = shm_handle;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = uap->size;
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time_second;
	shm_committed += btoc(size);
	shm_nused++;
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	uap->sysmsg_result = shmid;
	return 0;
}

/*
 * MPALMOSTSAFE
 */
int
sys_shmget(struct shmget_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int segnum, mode, error;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	mode = uap->shmflg & ACCESSPERMS;
	get_mplock();

	if (uap->key != IPC_PRIVATE) {
again:
		segnum = shm_find_segment_by_key(uap->key);
		if (segnum >= 0) {
			error = shmget_existing(p, uap, mode, segnum);
			if (error == EAGAIN)
				goto again;
			goto done;
		}
		if ((uap->shmflg & IPC_CREAT) == 0) {
			error = ENOENT;
			goto done;
		}
	}
	error = shmget_allocate_segment(p, uap, mode);
done:
	rel_mplock();
	return (error);
}

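/*
 * sys_shmsys() is the historical single entry point: uap->which indexes
 * shmcalls[] above (0 = shmat, 1 = oshmctl, 2 = shmdt, 3 = shmget,
 * 4 = shmctl) and the remaining arguments are shifted down over the
 * argument block before the selected handler runs.  For example, a call
 * such as shmsys(3, key, size, shmflg) ends up in sys_shmget(key, size,
 * shmflg).
 */
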
/*
 * shmsys_args(int which, int a2, ...) (VARARGS)
 *
 * MPALMOSTSAFE
 */
int
sys_shmsys(struct shmsys_args *uap)
{
	struct thread *td = curthread;
	unsigned int which = (unsigned int)uap->which;
	int error;

	if (!jail_sysvipc_allowed && td->td_ucred->cr_prison != NULL)
		return (ENOSYS);

	if (which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
		return EINVAL;
	get_mplock();
	bcopy(&uap->a2, &uap->which,
	      sizeof(struct shmsys_args) - offsetof(struct shmsys_args, a2));
	error = ((*shmcalls[which])(uap));
	rel_mplock();

	return(error);
}

void
shmfork(struct proc *p1, struct proc *p2)
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = kmalloc(size, M_SHM, M_WAITOK);
	bcopy((caddr_t)p1->p_vmspace->vm_shm, (caddr_t)shmmap_s, size);
	p2->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}

void
shmexit(struct vmspace *vm)
{
	struct shmmap_state *base, *shm;
	int i;

	if ((base = (struct shmmap_state *)vm->vm_shm) != NULL) {
		vm->vm_shm = NULL;
		for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
			if (shm->shmid != -1)
				shm_delete_mapping(vm, shm);
		}
		kfree(base, M_SHM);
	}
}

static void
shmrealloc(void)
{
	int i;
	struct shmid_ds *newsegs;

	if (shmalloced >= shminfo.shmmni)
		return;

	newsegs = kmalloc(shminfo.shmmni * sizeof(*newsegs), M_SHM, M_WAITOK);
	for (i = 0; i < shmalloced; i++)
		bcopy(&shmsegs[i], &newsegs[i], sizeof(newsegs[0]));
	for (; i < shminfo.shmmni; i++) {
		newsegs[i].shm_perm.mode = SHMSEG_FREE;
		newsegs[i].shm_perm.seq = 0;
	}
	kfree(shmsegs, M_SHM);
	shmsegs = newsegs;
	shmalloced = shminfo.shmmni;
}

static void
shminit(void *dummy)
{
	int i;

	shminfo.shmmax = shminfo.shmall * PAGE_SIZE;
	shmalloced = shminfo.shmmni;
	shmsegs = kmalloc(shmalloced * sizeof(shmsegs[0]), M_SHM, M_WAITOK);
	for (i = 0; i < shmalloced; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm.seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}
SYSINIT(sysv_shm, SI_SUB_SYSV_SHM, SI_ORDER_FIRST, shminit, NULL);