/*	$OpenBSD: sysv_shm.c,v 1.23 2001/11/28 13:47:39 art Exp $	*/
/*	$NetBSD: sysv_shm.c,v 1.50 1998/10/21 22:24:29 tron Exp $	*/

/*
 * Copyright (c) 1994 Adam Glass and Charles M. Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles M.
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/time.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/systm.h>
#include <sys/stat.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

struct shminfo shminfo;
struct shmid_ds *shmsegs;

struct shmid_ds *shm_find_segment_by_shmid __P((int));

/*
 * Provides the following externally accessible functions:
 *
 * shminit(void);					initialization
 * shmexit(struct vmspace *)				cleanup
 * shmfork(struct vmspace *, struct vmspace *)		fork handling
 * shmsys(arg1, arg2, arg3, arg4);	shm{at,ctl,dt,get}(arg2, arg3, arg4)
 *
 * Structures:
 * shmsegs (an array of 'struct shmid_ds')
 * per proc array of 'struct shmmap_state'
 */
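
/*
 * Illustrative sketch only, not part of this file: the userland
 * lifecycle these handlers back, through the standard shmget(2),
 * shmat(2), shmdt(2) and shmctl(2) interfaces.  The key and size
 * below are arbitrary, and error handling is trimmed for brevity.
 *
 *	#include <sys/types.h>
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *	#include <err.h>
 *	#include <string.h>
 *
 *	int
 *	main(void)
 *	{
 *		int shmid;
 *		char *p;
 *
 *		shmid = shmget((key_t)0x1234, 4096, IPC_CREAT | 0600);
 *		if (shmid == -1)
 *			err(1, "shmget");
 *		p = shmat(shmid, NULL, 0);	(kernel picks the address)
 *		if (p == (char *)-1)
 *			err(1, "shmat");
 *		strlcpy(p, "hello", 4096);	(visible to other attachers)
 *		if (shmdt(p) == -1)
 *			err(1, "shmdt");
 *		shmctl(shmid, IPC_RMID, NULL);	(destroyed once nattch is 0)
 *		return 0;
 *	}
 */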

#define	SHMSEG_FREE		0x0200
#define	SHMSEG_REMOVED		0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED		0x1000

int shm_last_free, shm_nused, shm_committed;

struct shm_handle {
	struct uvm_object *shm_object;
};

struct shmmap_state {
	vaddr_t va;
	int shmid;
};

int shm_find_segment_by_key __P((key_t));
void shm_deallocate_segment __P((struct shmid_ds *));
int shm_delete_mapping __P((struct vmspace *, struct shmmap_state *));
int shmget_existing __P((struct proc *, struct sys_shmget_args *,
    int, int, register_t *));
int shmget_allocate_segment __P((struct proc *, struct sys_shmget_args *,
    int, register_t *));

/*
 * Look a key up in the global segment array; return the index of the
 * allocated segment using it, or -1 if the key is unused.
 */
int
shm_find_segment_by_key(key)
	key_t key;
{
	int i;

	for (i = 0; i < shminfo.shmmni; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return i;
	return -1;
}

/*
 * Resolve a shmid to its descriptor.  The id encodes both the slot
 * index and a sequence number (see IXSEQ_TO_IPCID), so a stale id
 * referring to a recycled slot is rejected here.
 */
struct shmid_ds *
shm_find_segment_by_shmid(shmid)
	int shmid;
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
	    != SHMSEG_ALLOCATED ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return NULL;
	return shmseg;
}

/*
 * Release a segment's backing anonymous object and return its slot to
 * the free pool; the caller updates shm_last_free as appropriate.
 */
void
shm_deallocate_segment(shmseg)
	struct shmid_ds *shmseg;
{
	struct shm_handle *shm_handle;
	size_t size;

	shm_handle = shmseg->shm_internal;
	size = round_page(shmseg->shm_segsz);
	uao_detach(shm_handle->shm_object);
	free((caddr_t)shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	shm_committed -= btoc(size);
	shmseg->shm_perm.mode = SHMSEG_FREE;
	shm_nused--;
}

/*
 * Unmap one attachment from an address space, and destroy the segment
 * if it was marked removed and this was its last attachment.
 */
int
shm_delete_mapping(vm, shmmap_s)
	struct vmspace *vm;
	struct shmmap_state *shmmap_s;
{
	struct shmid_ds *shmseg;
	int segnum;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = round_page(shmseg->shm_segsz);
	uvm_deallocate(&vm->vm_map, shmmap_s->va, size);
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time.tv_sec;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}

/*
 * shmdt(2): find the attachment whose base address matches shmaddr
 * and delete it.
 */
int
sys_shmdt(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_shmdt_args /* {
		syscallarg(const void *) shmaddr;
	} */ *uap = v;
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL)
		return EINVAL;

	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vaddr_t)SCARG(uap, shmaddr))
			break;
	if (i == shminfo.shmseg)
		return EINVAL;
	return shm_delete_mapping(p->p_vmspace, shmmap_s);
}
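
/*
 * shmat(2): attach a segment to the calling process.  The per-process
 * shmmap_state table is allocated on first use; the attach address is
 * either chosen by the kernel or taken from shmaddr (rounded down to
 * an SHMLBA boundary with SHM_RND), and the segment's anonymous object
 * is entered into the process map, read-only if SHM_RDONLY was given.
 */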
int
sys_shmat(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_shmat_args /* {
		syscallarg(int) shmid;
		syscallarg(const void *) shmaddr;
		syscallarg(int) shmflg;
	} */ *uap = v;
	int error, i, flags;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	struct shm_handle *shm_handle;
	vaddr_t attach_va;
	vm_prot_t prot;
	vsize_t size;

	shmmap_s = (struct shmmap_state *)p->p_vmspace->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = malloc(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		p->p_vmspace->vm_shm = (caddr_t)shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid));
	if (shmseg == NULL)
		return EINVAL;
	error = ipcperm(cred, &shmseg->shm_perm,
	    (SCARG(uap, shmflg) & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		return error;
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg)
		return EMFILE;
	size = round_page(shmseg->shm_segsz);
	prot = VM_PROT_READ;
	if ((SCARG(uap, shmflg) & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	/* XXX: flags is a leftover; the uvm_map() call below ignores it. */
	flags = MAP_ANON | MAP_SHARED;
	if (SCARG(uap, shmaddr)) {
		flags |= MAP_FIXED;
		if (SCARG(uap, shmflg) & SHM_RND)
			attach_va =
			    (vaddr_t)SCARG(uap, shmaddr) & ~(SHMLBA-1);
		else if (((vaddr_t)SCARG(uap, shmaddr) & (SHMLBA-1)) == 0)
			attach_va = (vaddr_t)SCARG(uap, shmaddr);
		else
			return EINVAL;
	} else {
		/* This is just a hint to vm_mmap() about where to put it. */
		attach_va = round_page((vaddr_t)p->p_vmspace->vm_taddr +
		    MAXTSIZ + MAXDSIZ);
	}
	shm_handle = shmseg->shm_internal;
	uao_reference(shm_handle->shm_object);
	error = uvm_map(&p->p_vmspace->vm_map, &attach_va, size,
	    shm_handle->shm_object, 0, 0, UVM_MAPFLAG(prot, prot,
	    UVM_INH_SHARE, UVM_ADV_RANDOM, 0));
	if (error) {
		/* Drop the object reference taken above; the mapping failed. */
		uao_detach(shm_handle->shm_object);
		return error;
	}

	shmmap_s->va = attach_va;
	shmmap_s->shmid = SCARG(uap, shmid);
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time.tv_sec;
	shmseg->shm_nattch++;
	*retval = attach_va;
	return 0;
}
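
/*
 * shmctl(2): IPC_STAT copies the descriptor out to userland, IPC_SET
 * updates the owner and the permission bits, and IPC_RMID marks the
 * segment removed so it is destroyed once the last attachment is gone
 * (immediately, if nothing is attached).  SHM_LOCK and SHM_UNLOCK are
 * not supported.
 */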
int
sys_shmctl(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_shmctl_args /* {
		syscallarg(int) shmid;
		syscallarg(int) cmd;
		syscallarg(struct shmid_ds *) buf;
	} */ *uap = v;
	int error;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;

	shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid));
	if (shmseg == NULL)
		return EINVAL;
	switch (SCARG(uap, cmd)) {
	case IPC_STAT:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_R)) != 0)
			return error;
		error = copyout((caddr_t)shmseg, SCARG(uap, buf),
		    sizeof(inbuf));
		if (error)
			return error;
		break;
	case IPC_SET:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return error;
		error = copyin(SCARG(uap, buf), (caddr_t)&inbuf,
		    sizeof(inbuf));
		if (error)
			return error;
		shmseg->shm_perm.uid = inbuf.shm_perm.uid;
		shmseg->shm_perm.gid = inbuf.shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (inbuf.shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time.tv_sec;
		break;
	case IPC_RMID:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return error;
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(SCARG(uap, shmid));
		}
		break;
	case SHM_LOCK:
	case SHM_UNLOCK:
	default:
		return EINVAL;
	}
	return 0;
}

int
shmget_existing(p, uap, mode, segnum, retval)
	struct proc *p;
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap;
	int mode;
	int segnum;
	register_t *retval;
{
	struct shmid_ds *shmseg;
	struct ucred *cred = p->p_ucred;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
		if (error)
			return error;
		return EAGAIN;
	}
	if ((error = ipcperm(cred, &shmseg->shm_perm, mode)) != 0)
		return error;
	if (SCARG(uap, size) && SCARG(uap, size) > shmseg->shm_segsz)
		return EINVAL;
	if ((SCARG(uap, shmflg) & (IPC_CREAT | IPC_EXCL)) ==
	    (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}
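
/*
 * Create a new segment: check the size against the shminfo limits,
 * find a free slot (shm_last_free is a one-entry cache), and back the
 * segment with an anonymous uvm object.  The slot is marked
 * SHMSEG_ALLOCATED|SHMSEG_REMOVED while malloc() may sleep, so a
 * concurrent shmget(2) on the same key blocks in shmget_existing()
 * instead of reusing the key; such waiters are woken at the end.
 */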
int
shmget_allocate_segment(p, uap, mode, retval)
	struct proc *p;
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap;
	int mode;
	register_t *retval;
{
	int i, segnum, shmid, size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;
	int error = 0;

	if (SCARG(uap, size) < shminfo.shmmin ||
	    SCARG(uap, size) > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return ENOSPC;
	size = round_page(SCARG(uap, size));
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	if (shm_last_free < 0) {
		for (i = 0; i < shminfo.shmmni; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shminfo.shmmni)
			panic("shmseg free count inconsistent");
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that no one else tries to create the same key.
	 */
	shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
	shmseg->shm_perm.key = SCARG(uap, key);
	shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff;
	shm_handle = (struct shm_handle *)
	    malloc(sizeof(struct shm_handle), M_SHM, M_WAITOK);
	shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);

	shm_handle->shm_object = uao_create(size, 0);

	shmseg->shm_internal = shm_handle;
	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) |
	    (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
	shmseg->shm_segsz = SCARG(uap, size);
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time.tv_sec;
	shm_committed += btoc(size);
	shm_nused++;

	*retval = shmid;
	if (shmseg->shm_perm.mode & SHMSEG_WANTED) {
		/*
		 * Somebody else wanted this key while we were asleep.  Wake
		 * them up now.
		 */
		shmseg->shm_perm.mode &= ~SHMSEG_WANTED;
		wakeup((caddr_t)shmseg);
	}
	return error;
}
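
/*
 * shmget(2): IPC_PRIVATE always gets a fresh segment.  Otherwise the
 * key is looked up, and an existing segment is returned (subject to
 * permission and size checks) or a new one is created when IPC_CREAT
 * is set.  EAGAIN from shmget_existing() means we slept on a segment
 * that was still being set up and must redo the lookup.
 */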
int
sys_shmget(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap = v;
	int segnum, mode, error;

	mode = SCARG(uap, shmflg) & ACCESSPERMS;
	if (SCARG(uap, key) != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(SCARG(uap, key));
		if (segnum >= 0) {
			error = shmget_existing(p, uap, mode, segnum, retval);
			if (error == EAGAIN)
				goto again;
			return error;
		}
		if ((SCARG(uap, shmflg) & IPC_CREAT) == 0)
			return ENOENT;
	}
	return shmget_allocate_segment(p, uap, mode, retval);
}

/*
 * Duplicate the parent's attachment table at fork time and bump the
 * attach count of every segment referenced by it.
 */
void
shmfork(vm1, vm2)
	struct vmspace *vm1, *vm2;
{
	struct shmmap_state *shmmap_s;
	size_t size;
	int i;

	if (vm1->vm_shm == NULL) {
		vm2->vm_shm = NULL;
		return;
	}

	size = shminfo.shmseg * sizeof(struct shmmap_state);
	shmmap_s = malloc(size, M_SHM, M_WAITOK);
	bcopy(vm1->vm_shm, shmmap_s, size);
	vm2->vm_shm = (caddr_t)shmmap_s;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++;
}

/*
 * Detach everything still attached when an address space is torn down.
 */
void
shmexit(vm)
	struct vmspace *vm;
{
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)vm->vm_shm;
	if (shmmap_s == NULL)
		return;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shm_delete_mapping(vm, shmmap_s);
	free(vm->vm_shm, M_SHM);
	vm->vm_shm = NULL;
}

/*
 * Called at boot: scale shminfo.shmmax from pages to bytes and mark
 * every segment slot free.
 */
void
shminit()
{
	int i;

	shminfo.shmmax *= PAGE_SIZE;

	for (i = 0; i < shminfo.shmmni; i++) {
		shmsegs[i].shm_perm.mode = SHMSEG_FREE;
		shmsegs[i].shm_perm.seq = 0;
	}
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}

/*
 * Convert a shmid_ds to the old binary layout for compatibility
 * syscalls.
 */
void
shmid_n2o(n, o)
	struct shmid_ds *n;
	struct oshmid_ds *o;
{
	o->shm_segsz = n->shm_segsz;
	o->shm_lpid = n->shm_lpid;
	o->shm_cpid = n->shm_cpid;
	o->shm_nattch = n->shm_nattch;
	o->shm_atime = n->shm_atime;
	o->shm_dtime = n->shm_dtime;
	o->shm_ctime = n->shm_ctime;
	o->shm_internal = n->shm_internal;
	ipc_n2o(&n->shm_perm, &o->shm_perm);
}