/*-
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_readwrite.c	8.11 (Berkeley) 5/8/95
 * $FreeBSD: src/sys/ufs/ufs/ufs_readwrite.c,v 1.65.2.14 2003/04/04 22:21:29 tegge Exp $
 * $DragonFly: src/sys/vfs/ufs/ufs_readwrite.c,v 1.25 2008/04/22 18:46:54 dillon Exp $
 */

#define	BLKSIZE(a, b, c)	blksize(a, b, c)
#define	FS			struct fs
#define	I_FS			i_fs

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <vm/vnode_pager.h>
#include <sys/event.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <vm/vm_page2.h>

#include "opt_directio.h"

#define VN_KNOTE(vp, b) \
	KNOTE((struct klist *)&vp->v_pollinfo.vpi_selinfo.si_note, (b))

#ifdef DIRECTIO
extern int ffs_rawread(struct vnode *vp, struct uio *uio, int *workdone);
#endif

SYSCTL_DECL(_vfs_ffs);
static int getpages_uses_bufcache = 0;
SYSCTL_INT(_vfs_ffs, OID_AUTO, getpages_uses_bufcache, CTLFLAG_RW, &getpages_uses_bufcache, 0, "");

/*
 * Vnode op for reading.
 *
 * ffs_read(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *	    struct ucred *a_cred)
 */
/* ARGSUSED */
int
ffs_read(struct vop_read_args *ap)
{
	struct vnode *vp;
	struct inode *ip;
	struct uio *uio;
	FS *fs;
	struct buf *bp;
	off_t bytesinfile;
	int xfersize, blkoffset;
	int error, orig_resid;
	u_short mode;
	int seqcount;
	int ioflag;

	vp = ap->a_vp;
	seqcount = ap->a_ioflag >> 16;
	ip = VTOI(vp);
	mode = ip->i_mode;
	uio = ap->a_uio;
	ioflag = ap->a_ioflag;
#ifdef DIRECTIO
	if ((ioflag & IO_DIRECT) != 0) {
		int workdone;

		error = ffs_rawread(vp, uio, &workdone);
		if (error || workdone)
			return error;
	}
#endif

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("ffs_read: mode");

	if (vp->v_type == VLNK) {
		if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen)
			panic("ffs_read: short symlink");
	} else if (vp->v_type != VREG && vp->v_type != VDIR)
		panic("ffs_read: type %d", vp->v_type);
#endif
	fs = ip->I_FS;
	if ((uint64_t)uio->uio_offset > fs->fs_maxfilesize)
		return (EFBIG);

	orig_resid = uio->uio_resid;
	if (orig_resid <= 0)
		return (0);

	bytesinfile = ip->i_size - uio->uio_offset;
	if (bytesinfile <= 0) {
		if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0)
			ip->i_flag |= IN_ACCESS;
		return 0;
	}

	/*
	 * Ok so we couldn't do it all in one vm trick...
	 * so cycle around trying smaller bites..
	 */
	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0)
			break;

		error = ffs_blkatoff_ra(vp, uio->uio_offset, NULL,
					&bp, seqcount);
		if (error)
			break;

		/*
		 * If IO_DIRECT then set B_DIRECT for the buffer.  This
		 * will cause us to attempt to release the buffer later on
		 * and will cause the buffer cache to attempt to free the
		 * underlying pages.
		 */
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;

		/*
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 *
		 * XXX b_resid is only valid when an actual I/O has occurred
		 * and may be incorrect if the buffer is B_CACHE or if the
		 * last op on the buffer was a failed write.  This KASSERT
		 * is a precursor to removing it from the UFS code.
		 */
		KASSERT(bp->b_resid == 0, ("bp->b_resid != 0"));

		/*
		 * Calculate how much data we can copy
		 */
		blkoffset = blkoff(fs, uio->uio_offset);
		xfersize = bp->b_bufsize - blkoffset;
		if (xfersize > uio->uio_resid)
			xfersize = uio->uio_resid;
		if (xfersize > bytesinfile)
			xfersize = bytesinfile;
		if (xfersize <= 0) {
			panic("ufs_readwrite: impossible xfersize: %d",
			      xfersize);
		}

		/*
		 * otherwise use the general form
		 */
		error = uiomove((char *)bp->b_data + blkoffset,
				(int)xfersize, uio);

		if (error)
			break;

		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_FIRST(&bp->b_dep) == NULL)) {
			/*
			 * If there are no dependencies, and it's VMIO,
			 * then we don't need the buf, mark it available
			 * for freeing.  The VM has the data.
			 */
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			/*
			 * Otherwise let whoever
			 * made the request take care of
			 * freeing it.  We just queue
			 * it onto another list.
			 */
			bqrelse(bp);
		}
	}

	/*
	 * This can only happen in the case of an error, because the loop
	 * above resets bp to NULL on each iteration and on normal
	 * completion has not set a new value into it, so it must have
	 * come from a 'break' statement.
	 */
	if (bp != NULL) {
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_FIRST(&bp->b_dep) == NULL)) {
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			bqrelse(bp);
		}
	}

	if ((error == 0 || uio->uio_resid != orig_resid) &&
	    (vp->v_mount->mnt_flag & MNT_NOATIME) == 0)
		ip->i_flag |= IN_ACCESS;
	return (error);
}

/*
 * Vnode op for writing.
 *
 * ffs_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *	     struct ucred *a_cred)
 */
int
ffs_write(struct vop_write_args *ap)
{
	struct vnode *vp;
	struct uio *uio;
	struct inode *ip;
	FS *fs;
	struct buf *bp;
	ufs_daddr_t lbn;
	off_t osize;
	int seqcount;
	int blkoffset, error, extended, flags, ioflag, resid, size, xfersize;
	struct thread *td;

	extended = 0;
	seqcount = ap->a_ioflag >> 16;
	ioflag = ap->a_ioflag;
	uio = ap->a_uio;
	vp = ap->a_vp;
	ip = VTOI(vp);

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("ffs_write: mode");
#endif

	switch (vp->v_type) {
	case VREG:
		if (ioflag & IO_APPEND)
			uio->uio_offset = ip->i_size;
		if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size)
			return (EPERM);
		/* FALLTHROUGH */
	case VLNK:
		break;
	case VDIR:
		panic("ffs_write: dir write");
		break;
	default:
		panic("ffs_write: type %p %d (%d,%d)", vp, (int)vp->v_type,
		      (int)uio->uio_offset, (int)uio->uio_resid);
	}

	fs = ip->I_FS;
	if (uio->uio_offset < 0 ||
	    (uint64_t)uio->uio_offset + uio->uio_resid > fs->fs_maxfilesize) {
		return (EFBIG);
	}
	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	td = uio->uio_td;
	if (vp->v_type == VREG && td && td->td_proc &&
	    uio->uio_offset + uio->uio_resid >
	    td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
		return (EFBIG);
	}

	resid = uio->uio_resid;
	osize = ip->i_size;

	/*
	 * NOTE! These B_ flags are actually balloc-only flags, not buffer
	 * flags.  They are similar to the BA_ flags in fbsd.
	 */
	if (seqcount > B_SEQMAX)
		flags = B_SEQMAX << B_SEQSHIFT;
	else
		flags = seqcount << B_SEQSHIFT;
	if ((ioflag & IO_SYNC) && !DOINGASYNC(vp))
		flags |= B_SYNC;

	for (error = 0; uio->uio_resid > 0;) {
		lbn = lblkno(fs, uio->uio_offset);
		blkoffset = blkoff(fs, uio->uio_offset);
		xfersize = fs->fs_bsize - blkoffset;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;

		if (uio->uio_offset + xfersize > ip->i_size)
			vnode_pager_setsize(vp, uio->uio_offset + xfersize);

		/*
		 * We must perform a read-before-write if the transfer
		 * size does not cover the entire buffer, or if doing
		 * a dummy write to flush the buffer.
		 */
		if (xfersize < fs->fs_bsize || uio->uio_segflg == UIO_NOCOPY)
			flags |= B_CLRBUF;
		else
			flags &= ~B_CLRBUF;
		/* XXX is uio->uio_offset the right thing here? */
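		/*
		 * VOP_BALLOC allocates (or locates) the backing filesystem
		 * block and returns it locked in *bp.  B_CLRBUF, set above
		 * when the write does not cover the whole block, asks the
		 * allocator to make the existing buffer contents valid
		 * first so the bytes we do not overwrite are not left as
		 * garbage.
		 */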
		error = VOP_BALLOC(vp, uio->uio_offset, xfersize,
				   ap->a_cred, flags, &bp);
		if (error != 0)
			break;
		/*
		 * If the buffer is not valid and we did not clear garbage
		 * out above, we have to do so here even though the write
		 * covers the entire buffer in order to avoid a mmap()/write
		 * race where another process may see the garbage prior to
		 * the uiomove() for a write replacing it.
		 */
		if ((bp->b_flags & B_CACHE) == 0 && (flags & B_CLRBUF) == 0)
			vfs_bio_clrbuf(bp);
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;
		if ((ioflag & (IO_SYNC|IO_INVAL)) == (IO_SYNC|IO_INVAL))
			bp->b_flags |= B_NOCACHE;

		if (uio->uio_offset + xfersize > ip->i_size) {
			ip->i_size = uio->uio_offset + xfersize;
			extended = 1;
		}

		size = BLKSIZE(fs, ip, lbn) - bp->b_resid;
		if (size < xfersize)
			xfersize = size;

		error =
		    uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio);
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_FIRST(&bp->b_dep) == NULL)) {
			bp->b_flags |= B_RELBUF;
		}

		/*
		 * If IO_SYNC each buffer is written synchronously.  Otherwise
		 * if we have a severe page deficiency write the buffer
		 * asynchronously.  Otherwise try to cluster, and if that
		 * doesn't do it then either do an async write (if O_DIRECT),
		 * or a delayed write (if not).
		 */
		if (ioflag & IO_SYNC) {
			(void)bwrite(bp);
		} else if (vm_page_count_severe() ||
			   buf_dirty_count_severe() ||
			   (ioflag & IO_ASYNC)) {
			bp->b_flags |= B_CLUSTEROK;
			bawrite(bp);
		} else if (xfersize + blkoffset == fs->fs_bsize) {
			if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) {
				bp->b_flags |= B_CLUSTEROK;
				cluster_write(bp, (off_t)ip->i_size, seqcount);
			} else {
				bawrite(bp);
			}
		} else if (ioflag & IO_DIRECT) {
			bp->b_flags |= B_CLUSTEROK;
			bawrite(bp);
		} else {
			bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
		}
		if (error || xfersize == 0)
			break;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	/*
	 * If we successfully wrote any data and we are not the superuser,
	 * we clear the setuid and setgid bits as a precaution against
	 * tampering.
	 */
	if (resid > uio->uio_resid && ap->a_cred && ap->a_cred->cr_uid != 0)
		ip->i_mode &= ~(ISUID | ISGID);
	if (resid > uio->uio_resid)
		VN_KNOTE(vp, NOTE_WRITE | (extended ? NOTE_EXTEND : 0));
	if (error) {
		if (ioflag & IO_UNIT) {
			(void)ffs_truncate(vp, osize, ioflag & IO_SYNC,
					   ap->a_cred);
			uio->uio_offset -= resid - uio->uio_resid;
			uio->uio_resid = resid;
		}
	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC)) {
		error = ffs_update(vp, 1);
	}

	return (error);
}

/*
 * get page routine
 */
int
ffs_getpages(struct vop_getpages_args *ap)
{
	off_t foff, physoffset;
	int i, size, bsize;
	struct vnode *dp, *vp;
	vm_object_t obj;
	vm_pindex_t pindex, firstindex;
	vm_page_t mreq;
	int bbackwards, bforwards;
	int pbackwards, pforwards;
	int firstpage;
	off_t reqoffset;
	off_t doffset;
	int poff;
	int pcount;
	int rtval;
	int pagesperblock;

	/*
	 * If set just use the system standard getpages which issues a
	 * UIO_NOCOPY VOP_READ.
	 */
	if (getpages_uses_bufcache) {
		return vop_stdgetpages(ap);
	}

	pcount = round_page(ap->a_count) / PAGE_SIZE;
	mreq = ap->a_m[ap->a_reqpage];
	firstindex = ap->a_m[0]->pindex;

	/*
	 * if ANY DEV_BSIZE blocks are valid on a large filesystem block,
	 * then the entire page is valid.  Since the page may be mapped,
	 * user programs might reference data beyond the actual end of file
	 * occurring within the page.  We have to zero that data.
	 */
	if (mreq->valid) {
		if (mreq->valid != VM_PAGE_BITS_ALL)
			vm_page_zero_invalid(mreq, TRUE);
		for (i = 0; i < pcount; i++) {
			if (i != ap->a_reqpage) {
				vm_page_free(ap->a_m[i]);
			}
		}
		return VM_PAGER_OK;
	}

	vp = ap->a_vp;
	obj = vp->v_object;
	bsize = vp->v_mount->mnt_stat.f_iosize;
	pindex = mreq->pindex;
	foff = IDX_TO_OFF(pindex) /* + ap->a_offset should be zero */;

	if (bsize < PAGE_SIZE)
		return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
						    ap->a_count,
						    ap->a_reqpage);

	/*
	 * foff is the file offset of the required page
	 * reqoffset is the offset of the logical block that contains the page
	 * poff is the byte offset of the page within the logical block
	 */
	poff = (int)(foff % bsize);
	reqoffset = foff - poff;

	if (VOP_BMAP(vp, reqoffset, &doffset, &bforwards, &bbackwards) ||
	    doffset == NOOFFSET) {
		for (i = 0; i < pcount; i++) {
			if (i != ap->a_reqpage)
				vm_page_free(ap->a_m[i]);
		}
		if (doffset == NOOFFSET) {
			if ((mreq->flags & PG_ZERO) == 0)
				vm_page_zero_fill(mreq);
			vm_page_undirty(mreq);
			mreq->valid = VM_PAGE_BITS_ALL;
			return VM_PAGER_OK;
		} else {
			return VM_PAGER_ERROR;
		}
	}

	physoffset = doffset + poff;
	pagesperblock = bsize / PAGE_SIZE;

	/*
	 * find the first page that is contiguous.
	 *
	 * bforwards and bbackwards are the number of contiguous bytes
	 * available before and after the block offset.  poff is the page
	 * offset, in bytes, relative to the block offset.
	 *
	 * pforwards and pbackwards are the number of contiguous pages
	 * relative to the requested page, non-inclusive of the requested
	 * page (so a pbackwards and pforwards of 0 indicates just the
	 * requested page).
	 */
	firstpage = 0;
	if (ap->a_count) {
		/*
		 * Calculate pbackwards and clean up any requested
		 * pages that are too far back.
		 */
		pbackwards = (poff + bbackwards) >> PAGE_SHIFT;
		if (ap->a_reqpage > pbackwards) {
			firstpage = ap->a_reqpage - pbackwards;
			for (i = 0; i < firstpage; i++)
				vm_page_free(ap->a_m[i]);
		}

		/*
		 * Calculate pforwards
		 */
		pforwards = (bforwards - poff - PAGE_SIZE) >> PAGE_SHIFT;
		if (pforwards < 0)
			pforwards = 0;
		if (pforwards < (pcount - (ap->a_reqpage + 1))) {
			for (i = ap->a_reqpage + pforwards + 1; i < pcount; i++)
				vm_page_free(ap->a_m[i]);
			pcount = ap->a_reqpage + pforwards + 1;
		}

		/*
		 * Adjust pcount to be relative to firstpage.  All pages prior
		 * to firstpage in the array have been cleaned up.
		 */
		pcount -= firstpage;
	}

	/*
	 * calculate the size of the transfer
	 */
	size = pcount * PAGE_SIZE;

	if ((IDX_TO_OFF(ap->a_m[firstpage]->pindex) + size) > vp->v_filesize) {
		size = vp->v_filesize - IDX_TO_OFF(ap->a_m[firstpage]->pindex);
	}

	physoffset -= foff;
	dp = VTOI(ap->a_vp)->i_devvp;
	rtval = VOP_GETPAGES(dp, &ap->a_m[firstpage], size,
			     (ap->a_reqpage - firstpage), physoffset);

	return (rtval);
}