/*-
 * Copyright (c) 1993 The Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)ufs_readwrite.c	8.4 (Berkeley) 01/04/94
 */

/*
 * This file is compiled twice: once for FFS and once (with LFS_READWRITE
 * defined) for LFS.  The macros below rename the shared read/write code
 * and redirect its field accesses to the per-filesystem superblock and
 * inode members, so one implementation serves both filesystems.
 */
#ifdef LFS_READWRITE
#define	BLKSIZE(a, b, c)	blksize(a)	/* LFS blksize() takes only the fs */
#define	FS			struct lfs
#define	I_FS			i_lfs
#define	READ			lfs_read
#define	WRITE			lfs_write
#define	fs_bsize		lfs_bsize
#define	fs_maxfilesize		lfs_maxfilesize
#else
#define	BLKSIZE(a, b, c)	blksize(a, b, c)
#define	FS			struct fs
#define	I_FS			i_fs
#define	READ			ffs_read
#define	WRITE			ffs_write
#endif

/*
 * Vnode op for reading.
 *
 * Copies data from the file at uio->uio_offset into the caller's buffers
 * one filesystem block at a time, going through the buffer cache (bread,
 * breadn read-ahead, or cluster_read).  Stops at end of file or when
 * uio_resid is exhausted, marks the inode for an access-time update, and
 * returns 0 or an errno.
 */
/* ARGSUSED */
READ(ap)
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	register struct vnode *vp;
	register struct inode *ip;
	register struct uio *uio;
	register FS *fs;
	struct buf *bp;
	daddr_t lbn, nextlbn;
	off_t bytesinfile;
	long size, xfersize, blkoffset;
	int nextsize, error;
	u_short mode;

	vp = ap->a_vp;
	ip = VTOI(vp);
	mode = ip->i_mode;
	uio = ap->a_uio;

#ifdef DIAGNOSTIC
	/*
	 * NOTE(review): READ expands to a function identifier (ffs_read or
	 * lfs_read), not a string literal, so %s is handed a function
	 * pointer here.  Presumably a "ffs_read"-style string macro was
	 * intended -- verify against the released sources.
	 */
	if (uio->uio_rw != UIO_READ)
		panic("%s: mode", READ);

	if (vp->v_type == VLNK) {
		/* Short symlinks are stored in the inode itself, not read here. */
		if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen)
			panic("%s: short symlink", READ);
	} else if (vp->v_type != VREG && vp->v_type != VDIR)
		panic("%s: type", READ);
#endif
	fs = ip->I_FS;
	/* Reads beyond the filesystem's maximum file size are an error. */
	if ((u_quad_t)uio->uio_offset > fs->fs_maxfilesize)
		return (EFBIG);

	/*
	 * Main loop: one buffer-cache block per iteration.  bp is reset to
	 * NULL each pass so the post-loop brelse() only fires on a buffer
	 * left held by an early break.
	 */
	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0)
			break;
		lbn = lblkno(fs, uio->uio_offset);
		nextlbn = lbn + 1;
		size = BLKSIZE(fs, ip, lbn);
		blkoffset = blkoff(fs, uio->uio_offset);
		/* Transfer at most: rest of block, rest of request, rest of file. */
		xfersize = fs->fs_bsize - blkoffset;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (bytesinfile < xfersize)
			xfersize = bytesinfile;

#ifdef LFS_READWRITE
		(void)lfs_check(vp, lbn);
		error = cluster_read(vp, ip->i_size, lbn, size, NOCRED, &bp);
#else
		if (lblktosize(fs, nextlbn) > ip->i_size) {
			/* Last block of the file: plain read, no read-ahead. */
			error = bread(vp, lbn, size, NOCRED, &bp);
		} else {
			if (doclusterread) {
				error = cluster_read(vp,
				    ip->i_size, lbn, size, NOCRED, &bp);
			} else if (lbn - 1 == vp->v_lastr) {
				/* Sequential access detected: read ahead one block. */
				nextsize = BLKSIZE(fs, ip, nextlbn);
				error = breadn(vp, lbn,
				    size, &nextlbn, &nextsize, 1, NOCRED, &bp);
			} else {
				error = bread(vp, lbn, size, NOCRED, &bp);
			}
		}
#endif
		if (error)
			break;
		vp->v_lastr = lbn;	/* remember for sequential-read detection */

		/*
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 */
		size -= bp->b_resid;
		if (size < xfersize) {
			if (size == 0)
				break;
			xfersize = size;
		}
		/* Copy out; on fault, break with bp still held (released below). */
		if (error =
		    uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio))
			break;

		/*
		 * For regular files, a fully consumed block (or the final
		 * block at EOF) is unlikely to be reused soon: age it so
		 * the buffer cache recycles it first.
		 */
		if (S_ISREG(mode) && (xfersize + blkoffset == fs->fs_bsize ||
		    uio->uio_offset == ip->i_size))
			bp->b_flags |= B_AGE;
		brelse(bp);
	}
	if (bp != NULL)
		brelse(bp);
	ip->i_flag |= IN_ACCESS;
	return (error);
}

/*
 * Vnode op for writing.
 *
 * Copies data from the caller's buffers into the file at uio->uio_offset,
 * allocating blocks as needed (ffs_balloc/lfs_balloc), extending i_size,
 * and pushing the buffers out synchronously, clustered, or delayed
 * depending on ioflag and block alignment.  Returns 0 or an errno; on
 * error with IO_UNIT set, the partial write is rolled back.
 */
WRITE(ap)
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	register struct vnode *vp;
	register struct uio *uio;
	register struct inode *ip;
	register FS *fs;
	struct buf *bp;
	struct proc *p;
	daddr_t lbn;
	off_t osize;
	int blkoffset, error, flags, ioflag, resid, size, xfersize;

	ioflag = ap->a_ioflag;
	uio = ap->a_uio;
	vp = ap->a_vp;
	ip = VTOI(vp);

#ifdef DIAGNOSTIC
	/*
	 * NOTE(review): as in READ, WRITE expands to a function identifier,
	 * so %s receives a function pointer rather than a string -- verify.
	 */
	if (uio->uio_rw != UIO_WRITE)
		panic("%s: mode", WRITE);
#endif

	switch (vp->v_type) {
	case VREG:
		if (ioflag & IO_APPEND)
			uio->uio_offset = ip->i_size;
		/* Append-only files may only be written at their current end. */
		if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size)
			return (EPERM);
		/* FALLTHROUGH */
	case VLNK:
		break;
	case VDIR:
		/* Directory metadata must always be written synchronously. */
		if ((ioflag & IO_SYNC) == 0)
			panic("%s: nonsync dir write", WRITE);
		break;
	default:
		panic("%s: type", WRITE);
	}

	fs = ip->I_FS;
	if (uio->uio_offset < 0 ||
	    (u_quad_t)uio->uio_offset + uio->uio_resid > fs->fs_maxfilesize)
		return (EFBIG);
	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	p = uio->uio_procp;
	if (vp->v_type == VREG && p &&
	    uio->uio_offset + uio->uio_resid >
	    p->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		/* Exceeding the per-process file-size limit: signal and fail. */
		psignal(p, SIGXFSZ);
		return (EFBIG);
	}

	/* Saved for rollback (osize) and partial-write detection (resid). */
	resid = uio->uio_resid;
	osize = ip->i_size;
	flags = ioflag & IO_SYNC ? B_SYNC : 0;

	for (error = 0; uio->uio_resid > 0;) {
		lbn = lblkno(fs, uio->uio_offset);
		blkoffset = blkoff(fs, uio->uio_offset);
		xfersize = fs->fs_bsize - blkoffset;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
#ifdef LFS_READWRITE
		(void)lfs_check(vp, lbn);
		error = lfs_balloc(vp, xfersize, lbn, &bp);
#else
		/*
		 * A partial-block write must read/zero the rest of the
		 * buffer first (B_CLRBUF); a full-block write will
		 * overwrite it all, so skip that work.
		 */
		if (fs->fs_bsize > xfersize)
			flags |= B_CLRBUF;
		else
			flags &= ~B_CLRBUF;

		error = ffs_balloc(ip,
		    lbn, blkoffset + xfersize, ap->a_cred, &bp, flags);
#endif
		if (error)
			break;
		if (uio->uio_offset + xfersize > ip->i_size) {
			/* Extending the file: grow i_size and tell the VM pager. */
			ip->i_size = uio->uio_offset + xfersize;
			vnode_pager_setsize(vp, (u_long)ip->i_size);
		}
		/* Invalidate any cached pages so readers see the new data. */
		(void)vnode_pager_uncache(vp);

		size = BLKSIZE(fs, ip, lbn) - bp->b_resid;
		if (size < xfersize)
			xfersize = size;

		error =
		    uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio);
#ifdef LFS_READWRITE
		(void)VOP_BWRITE(bp);
#else
		/*
		 * NOTE(review): in the cluster arm below, the bare inner
		 * if/else makes the final "else bdwrite" bind to the outer
		 * "else if" -- which matches the indentation/intent, but
		 * braces would make the binding explicit.
		 */
		if (ioflag & IO_SYNC)
			(void)bwrite(bp);
		else if (xfersize + blkoffset == fs->fs_bsize)
			if (doclusterwrite)
				cluster_write(bp, ip->i_size);
			else {
				/* Full block, unlikely to be rewritten soon. */
				bp->b_flags |= B_AGE;
				bawrite(bp);
			}
		else
			bdwrite(bp);	/* partial block: delay the write */
#endif
		if (error || xfersize == 0)
			break;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	/*
	 * If we successfully wrote any data, and we are not the superuser
	 * we clear the setuid and setgid bits as a precaution against
	 * tampering.
	 */
	if (resid > uio->uio_resid && ap->a_cred && ap->a_cred->cr_uid != 0)
		ip->i_mode &= ~(ISUID | ISGID);
	if (error) {
		if (ioflag & IO_UNIT) {
			/* All-or-nothing write failed: truncate back and rewind uio. */
			(void)VOP_TRUNCATE(vp, osize,
			    ioflag & IO_SYNC, ap->a_cred, uio->uio_procp);
			uio->uio_offset -= resid - uio->uio_resid;
			uio->uio_resid = resid;
		}
	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
		/* Synchronous write that moved data: push the inode update too. */
		error = VOP_UPDATE(vp, &time, &time, 1);
	return (error);
}