/* This file contains the procedures that manipulate file descriptors.
 *
 * The entry points into this file are
 *   get_fd:           look for a free file descriptor and a free filp slot
 *   get_filp:         look up the filp entry for a given file descriptor
 *   find_filp:        find a filp slot that points to a given vnode
 *   invalidate_filp:  invalidate a filp and associated fd's; only let close()
 *                     happen on it
 *   do_copyfd:        copy a file descriptor from or to another endpoint
 */

#include <sys/select.h>
#include <minix/callnr.h>
#include <minix/u64.h>
#include <assert.h>
#include <sys/stat.h>
#include "fs.h"
#include "file.h"
#include "vnode.h"

#if LOCK_DEBUG
/*===========================================================================*
 *                         check_filp_locks_by_me                            *
 *===========================================================================*/
void check_filp_locks_by_me(void)
{
/* Check whether this thread still has filp locks held */
  struct filp *f;
  int r;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
      r = mutex_trylock(&f->filp_lock);
      if (r == -EDEADLK)
          panic("Thread %d still holds filp lock on filp %p call_nr=%d\n",
                mthread_self(), f, job_call_nr);
      else if (r == 0) {
          /* We just obtained the lock, release it */
          mutex_unlock(&f->filp_lock);
      }
  }
}
#endif

/*===========================================================================*
 *                            check_filp_locks                               *
 *===========================================================================*/
void check_filp_locks(void)
{
/* Check that no thread holds a lock on any filp; panic if one does. */
  struct filp *f;
  int r, count = 0;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
      r = mutex_trylock(&f->filp_lock);
      if (r == -EBUSY) {
          /* Mutex is still locked */
          count++;
      } else if (r == 0) {
          /* We just obtained a lock, don't want it */
          mutex_unlock(&f->filp_lock);
      } else
          panic("filp_lock weird state");
  }
  if (count) panic("locked filps");
#if 0
  else printf("check_filp_locks OK\n");
#endif
}

/*===========================================================================*
 *                               init_filps                                  *
 *===========================================================================*/
void init_filps(void)
{
/* Initialize filps */
  struct filp *f;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
      if (mutex_init(&f->filp_lock, NULL) != 0)
          panic("Failed to initialize filp mutex");
  }
}

/*===========================================================================*
 *                                 get_fd                                    *
 *===========================================================================*/
int get_fd(struct fproc *rfp, int start, mode_t bits, int *k, struct filp **fpt)
{
/* Look for a free file descriptor and a free filp slot.  Fill in the mode word
 * in the latter, but don't claim either one yet, since the open() or creat()
 * may yet fail.
 */
  register struct filp *f;
  register int i;

  /* Search the fproc fp_filp table for a free file descriptor. */
  for (i = start; i < OPEN_MAX; i++) {
      if (rfp->fp_filp[i] == NULL) {
          /* A file descriptor has been located. */
          *k = i;
          break;
      }
  }

  /* Check to see if a file descriptor has been found. */
  if (i >= OPEN_MAX) return(EMFILE);

  /* If we don't care about a filp, return now. */
  if (fpt == NULL) return(OK);

  /* Now that a file descriptor has been found, look for a free filp slot. */
  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
      assert(f->filp_count >= 0);
      if (f->filp_count == 0 && mutex_trylock(&f->filp_lock) == 0) {
          f->filp_mode = bits;
          f->filp_pos = 0;
          f->filp_selectors = 0;
          f->filp_select_ops = 0;
          f->filp_pipe_select_ops = 0;
          f->filp_char_select_dev = NO_DEV;
          f->filp_flags = 0;
          f->filp_select_flags = 0;
          f->filp_softlock = NULL;
          f->filp_ioctl_fp = NULL;
          *fpt = f;
          return(OK);
      }
  }

  /* If control passes here, the filp table must be full.  Report that back. */
  return(ENFILE);
}
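
/* A minimal sketch of how an open(2)-style caller might use get_fd(): the fd
 * and filp slot are only reserved above, so on success the caller must claim
 * both explicitly once the operation can no longer fail.  All variable names
 * below (r, fd, filp, bits, vp) are hypothetical.
 */
#if 0
  int r, fd;
  struct filp *filp;

  if ((r = get_fd(fp, 0, bits, &fd, &filp)) != OK) return(r);
  /* ... do the work that may still fail; on failure, nothing to undo ... */
  fp->fp_filp[fd] = filp;       /* claim the file descriptor */
  filp->filp_count = 1;         /* claim the filp slot */
  filp->filp_vno = vp;          /* vnode obtained elsewhere */
#endif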

/*===========================================================================*
 *                                get_filp                                   *
 *===========================================================================*/
struct filp *get_filp(fild, locktype)
int fild;                       /* file descriptor */
tll_access_t locktype;
{
/* See if 'fild' refers to a valid file descriptor.  If so, return its filp
 * pointer. */

  return get_filp2(fp, fild, locktype);
}

/*===========================================================================*
 *                                get_filp2                                  *
 *===========================================================================*/
struct filp *get_filp2(rfp, fild, locktype)
register struct fproc *rfp;
int fild;                       /* file descriptor */
tll_access_t locktype;
{
/* See if 'fild' refers to a valid file descriptor.  If so, return its filp
 * pointer. */
  struct filp *filp;

  filp = NULL;
  if (fild < 0 || fild >= OPEN_MAX)
      err_code = EBADF;
  else if (locktype != VNODE_OPCL && rfp->fp_filp[fild] != NULL &&
           rfp->fp_filp[fild]->filp_mode == FILP_CLOSED)
      err_code = EIO;  /* disallow all use except close(2) */
  else if ((filp = rfp->fp_filp[fild]) == NULL)
      err_code = EBADF;
  else if (locktype != VNODE_NONE)  /* Only lock the filp if requested */
      lock_filp(filp, locktype);    /* All is fine */

  return(filp);  /* may also be NULL */
}
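
/* A minimal sketch of the usual get_filp() call pattern in a system call
 * handler: on a NULL return the error is in the global err_code, and a
 * successful lookup with a real lock type must be paired with unlock_filp().
 * The handler body shown is hypothetical.
 */
#if 0
  struct filp *f;

  if ((f = get_filp(fd, VNODE_READ)) == NULL) return(err_code);
  /* ... operate on f and f->filp_vno ... */
  unlock_filp(f);
#endif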

/*===========================================================================*
 *                               find_filp                                   *
 *===========================================================================*/
struct filp *find_filp(struct vnode *vp, mode_t bits)
{
/* Find a filp slot that refers to the vnode 'vp' in a way as described
 * by the mode bit 'bits'.  Used for determining whether somebody is still
 * interested in either end of a pipe.  Also used when opening a FIFO to
 * find partners to share a filp field with (to share the file position).
 * Like 'get_fd' it performs its job by linear search through the filp table.
 */
  struct filp *f;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
      if (f->filp_count != 0 && f->filp_vno == vp && (f->filp_mode & bits)) {
          return(f);
      }
  }

  /* If control passes here, the filp wasn't there.  Report that back. */
  return(NULL);
}

/*===========================================================================*
 *                            invalidate_filp                                *
 *===========================================================================*/
void invalidate_filp(struct filp *rfilp)
{
/* Invalidate a filp; from now on, only close(2) is allowed on it. */

  rfilp->filp_mode = FILP_CLOSED;
}

/*===========================================================================*
 *                      invalidate_filp_by_char_major                        *
 *===========================================================================*/
void invalidate_filp_by_char_major(devmajor_t major)
{
/* Invalidate all filps that refer to a character-special file with the given
 * major device number. */
  struct filp *f;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
      if (f->filp_count != 0 && f->filp_vno != NULL) {
          if (major(f->filp_vno->v_sdev) == major &&
              S_ISCHR(f->filp_vno->v_mode)) {
              invalidate_filp(f);
          }
      }
  }
}

/*===========================================================================*
 *                        invalidate_filp_by_endpt                           *
 *===========================================================================*/
void invalidate_filp_by_endpt(endpoint_t proc_e)
{
/* Invalidate all filps whose vnode is managed by the file server with the
 * given endpoint. */
  struct filp *f;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
      if (f->filp_count != 0 && f->filp_vno != NULL) {
          if (f->filp_vno->v_fs_e == proc_e)
              invalidate_filp(f);
      }
  }
}

/*===========================================================================*
 *                               lock_filp                                   *
 *===========================================================================*/
void lock_filp(filp, locktype)
struct filp *filp;
tll_access_t locktype;
{
/* Lock a filp and the vnode it refers to: the vnode first (unless this thread
 * already holds it), then the filp's own mutex. */
  struct worker_thread *org_self;
  struct vnode *vp;

  assert(filp->filp_count > 0);
  vp = filp->filp_vno;
  assert(vp != NULL);

  /* Lock vnode only if we haven't already locked it.  If already locked by
   * us, we're allowed to have one additional 'soft' lock. */
  if (tll_locked_by_me(&vp->v_lock)) {
      assert(filp->filp_softlock == NULL);
      filp->filp_softlock = fp;
  } else {
      /* We have to make an exception for vnodes belonging to pipes.  Even
       * read(2) operations on pipes change the vnode and therefore require
       * exclusive access.
       */
      if (S_ISFIFO(vp->v_mode) && locktype == VNODE_READ)
          locktype = VNODE_WRITE;
      lock_vnode(vp, locktype);
  }

  assert(vp->v_ref_count > 0);   /* vnode still in use? */
  assert(filp->filp_vno == vp);  /* vnode still what we think it is? */

  /* First try to get the filp lock right off the bat. */
  if (mutex_trylock(&filp->filp_lock) != 0) {
      /* Already in use, let's wait for our turn. */
      org_self = worker_suspend();

      if (mutex_lock(&filp->filp_lock) != 0)
          panic("unable to obtain lock on filp");

      worker_resume(org_self);
  }
}
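
/* Locking order, as implemented by lock_filp() above: the vnode lock is
 * always acquired before the filp mutex.  When the filp mutex is contended,
 * the worker thread suspends itself (worker_suspend/worker_resume) around the
 * blocking mutex_lock() call.  A 'soft' lock records that this thread already
 * holds the vnode lock through another filp, so unlock_filp() knows not to
 * unlock the vnode a second time.
 */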

/*===========================================================================*
 *                              unlock_filp                                  *
 *===========================================================================*/
void unlock_filp(filp)
struct filp *filp;
{
  /* If this filp holds a soft lock on the vnode, we must be the owner. */
  if (filp->filp_softlock != NULL)
      assert(filp->filp_softlock == fp);

  if (filp->filp_count > 0) {
      /* Only unlock the vnode if the filp is still in use and we do not hold
       * a soft lock on it. */
      if (filp->filp_softlock == NULL) {
          assert(tll_islocked(&(filp->filp_vno->v_lock)));
          unlock_vnode(filp->filp_vno);
      }
  }

  filp->filp_softlock = NULL;
  if (mutex_unlock(&filp->filp_lock) != 0)
      panic("unable to release lock on filp");
}

/*===========================================================================*
 *                              unlock_filps                                 *
 *===========================================================================*/
void unlock_filps(filp1, filp2)
struct filp *filp1;
struct filp *filp2;
{
/* Unlock two filps that are tied to the same vnode.  As a thread can lock a
 * vnode only once, unlocking the vnode twice would result in an error. */

  /* No NULL pointers and not equal */
  assert(filp1);
  assert(filp2);
  assert(filp1 != filp2);

  /* Must be tied to the same vnode and not NULL */
  assert(filp1->filp_vno == filp2->filp_vno);
  assert(filp1->filp_vno != NULL);

  if (filp1->filp_count > 0 && filp2->filp_count > 0) {
      /* Only unlock the vnode if the filps are still in use */
      unlock_vnode(filp1->filp_vno);
  }

  filp1->filp_softlock = NULL;
  filp2->filp_softlock = NULL;
  if (mutex_unlock(&filp2->filp_lock) != 0)
      panic("unable to release filp lock on filp2");
  if (mutex_unlock(&filp1->filp_lock) != 0)
      panic("unable to release filp lock on filp1");
}
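
/* unlock_filps() serves callers such as the pipe(2) path, where two filps
 * (the read and write ends of the pipe) share one vnode that was locked only
 * once; unlocking each filp separately via unlock_filp() would attempt to
 * unlock that vnode twice.
 */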

/*===========================================================================*
 *                              close_filp                                   *
 *===========================================================================*/
void close_filp(f)
struct filp *f;
{
/* Close a file.  Will also unlock the filp when done. */
  int rw;
  dev_t dev;
  struct vnode *vp;

  /* Must be locked */
  assert(mutex_trylock(&f->filp_lock) == -EDEADLK);
  assert(tll_islocked(&f->filp_vno->v_lock));

  vp = f->filp_vno;

  if (f->filp_count - 1 == 0 && f->filp_mode != FILP_CLOSED) {
      /* Check to see if the file is special. */
      if (S_ISCHR(vp->v_mode) || S_ISBLK(vp->v_mode)) {
          dev = vp->v_sdev;
          if (S_ISBLK(vp->v_mode)) {
              lock_bsf();
              if (vp->v_bfs_e == ROOT_FS_E && dev != ROOT_DEV) {
                  /* Invalidate the cache unless the special is mounted.
                   * Be careful not to flush the root file system either.
                   */
                  (void) req_flush(vp->v_bfs_e, dev);
              }
              unlock_bsf();

              (void) bdev_close(dev);  /* Ignore errors */
          } else {
              (void) cdev_close(dev);  /* Ignore errors */
          }

          f->filp_mode = FILP_CLOSED;
      }
  }

  /* If the inode being closed is a pipe, release everyone hanging on it. */
  if (S_ISFIFO(vp->v_mode)) {
      rw = (f->filp_mode & R_BIT ? VFS_WRITE : VFS_READ);
      release(vp, rw, susp_count);
  }

  if (--f->filp_count == 0) {
      if (S_ISFIFO(vp->v_mode)) {
          /* Last reader or writer is going.  Tell PFS about latest
           * pipe size.
           */
          truncate_vnode(vp, vp->v_size);
      }

      unlock_vnode(f->filp_vno);
      put_vnode(f->filp_vno);
      f->filp_vno = NULL;
      f->filp_mode = FILP_CLOSED;
      f->filp_count = 0;
  } else if (f->filp_count < 0) {
      panic("VFS: invalid filp count: %d ino %llx/%llu", f->filp_count,
            vp->v_dev, vp->v_inode_nr);
  } else {
      unlock_vnode(f->filp_vno);
  }

  mutex_unlock(&f->filp_lock);
}
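
/* The do_copyfd() operations below, as implemented by its switch statement:
 *   COPYFD_FROM:   copy a descriptor from the user process to the caller
 *   COPYFD_TO:     copy a descriptor from the caller to the user process
 *   COPYFD_CLOSE:  close a descriptor in the user process, reverting an
 *                  earlier COPYFD_TO
 */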

/*===========================================================================*
 *                               do_copyfd                                   *
 *===========================================================================*/
int do_copyfd(void)
{
/* Copy a file descriptor between processes, or close a remote file
 * descriptor.  This call is used as a back-call by device drivers (UDS, VND),
 * and is expected to be used in response to an IOCTL to such device drivers.
 */
  struct fproc *rfp;
  struct filp *rfilp;
  endpoint_t endpt;
  int r, fd, what, slot;

  /* This should be replaced with an ACL check. */
  if (!super_user) return(EPERM);

  endpt = job_m_in.m_lsys_vfs_copyfd.endpt;
  fd = job_m_in.m_lsys_vfs_copyfd.fd;
  what = job_m_in.m_lsys_vfs_copyfd.what;

  if (isokendpt(endpt, &slot) != OK) return(EINVAL);
  rfp = &fproc[slot];

  /* FIXME: we should now check that the user process is indeed blocked on an
   * IOCTL call, so that we can safely mess with its file descriptors.  We
   * currently do not have the necessary state to verify this, so we assume
   * that the call is always used in the right way.
   */

  /* Depending on the operation, get the file descriptor from the caller or
   * the user process.  Do not lock the filp yet: we first need to make sure
   * that locking it will not result in a deadlock.
   */
  rfilp = get_filp2((what == COPYFD_TO) ? fp : rfp, fd, VNODE_NONE);
  if (rfilp == NULL)
      return(err_code);

  /* If the filp is involved in an IOCTL by the user process, locking the filp
   * here would result in a deadlock.  This would happen if a user process
   * passes in the file descriptor to the device node on which it is
   * performing the IOCTL.  We do not allow manipulation of such device nodes.
   * In practice, this only applies to block-special files (and thus VND),
   * because character-special files (as used by UDS) are unlocked during the
   * IOCTL.
   */
  if (rfilp->filp_ioctl_fp == rfp)
      return(EBADF);

  /* Now we can safely lock the filp, copy or close it, and unlock it again. */
  lock_filp(rfilp, VNODE_READ);

  switch (what) {
  case COPYFD_FROM:
      rfp = fp;

      /* FALLTHROUGH */
  case COPYFD_TO:
      /* Find a free file descriptor slot in the local or remote process. */
      for (fd = 0; fd < OPEN_MAX; fd++)
          if (rfp->fp_filp[fd] == NULL)
              break;

      /* If found, fill the slot and return the slot number. */
      if (fd < OPEN_MAX) {
          rfp->fp_filp[fd] = rfilp;
          rfilp->filp_count++;
          r = fd;
      } else
          r = EMFILE;

      break;

  case COPYFD_CLOSE:
      /* This should be used ONLY to revert a successful copy-to operation,
       * and assumes that the filp is still in use by the caller as well.
       */
      if (rfilp->filp_count > 1) {
          rfilp->filp_count--;
          rfp->fp_filp[fd] = NULL;
          r = OK;
      } else
          r = EBADF;

      break;

  default:
      r = EINVAL;
  }

  unlock_filp(rfilp);

  return(r);
}