/*-
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_readwrite.c	8.11 (Berkeley) 5/8/95
 * $FreeBSD: src/sys/ufs/ufs/ufs_readwrite.c,v 1.65.2.14 2003/04/04 22:21:29 tegge Exp $
 */

#define	BLKSIZE(a, b, c)	blksize(a, b, c)
#define	FS			struct fs
#define	I_FS			i_fs
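/*
 * NOTE: these macros parameterize the code below.  Historically this
 * file was written as a template to be #included by a particular
 * filesystem (e.g. FFS), which supplied its own definitions of FS,
 * I_FS, and BLKSIZE.
 */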

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <vm/vnode_pager.h>
#include <sys/event.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <vm/vm_page2.h>

#include "opt_directio.h"

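/*
 * Post a kqueue event on the vnode's note list (e.g. NOTE_WRITE,
 * NOTE_EXTEND) so that kevent(2) watchers are woken up.
 */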
#define VN_KNOTE(vp, b) \
	KNOTE((struct klist *)&vp->v_pollinfo.vpi_kqinfo.ki_note, (b))

#ifdef DIRECTIO
extern int ffs_rawread(struct vnode *vp, struct uio *uio, int *workdone);
#endif

SYSCTL_DECL(_vfs_ffs);

/*
 * Vnode op for reading.
 *
 * ffs_read(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *	    struct ucred *a_cred)
 */
/* ARGSUSED */
int
ffs_read(struct vop_read_args *ap)
{
	struct vnode *vp;
	struct inode *ip;
	struct uio *uio;
	FS *fs;
	struct buf *bp;
	off_t bytesinfile;
	int xfersize, blkoffset;
	int error, orig_resid;
	u_short mode;
	int seqcount;
	int ioflag;

	vp = ap->a_vp;
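	/*
	 * The high 16 bits of a_ioflag carry the heuristic sequential
	 * access count computed by the upper layers; it is used below
	 * to scale read-ahead.
	 */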
	seqcount = ap->a_ioflag >> 16;
	ip = VTOI(vp);
	mode = ip->i_mode;
	uio = ap->a_uio;
	ioflag = ap->a_ioflag;
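
	/*
	 * For IO_DIRECT, first try the raw read path, which bypasses the
	 * buffer cache entirely.  ffs_rawread() sets *workdone when it
	 * has handled the transfer itself, in which case we are done.
	 */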
#ifdef DIRECTIO
	if ((ioflag & IO_DIRECT) != 0) {
		int workdone;

		error = ffs_rawread(vp, uio, &workdone);
		if (error || workdone)
			return error;
	}
#endif

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("ffs_read: mode");

	if (vp->v_type == VLNK) {
		if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen)
			panic("ffs_read: short symlink");
	} else if (vp->v_type != VREG && vp->v_type != VDIR)
		panic("ffs_read: type %d", vp->v_type);
#endif
	fs = ip->I_FS;
	if ((uint64_t)uio->uio_offset > fs->fs_maxfilesize)
		return (EFBIG);

	orig_resid = uio->uio_resid;
	if (orig_resid <= 0)
		return (0);

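	/*
	 * At or past EOF there is nothing to transfer, but we still mark
	 * the inode for an access-time update unless the filesystem is
	 * mounted noatime.
	 */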
	bytesinfile = ip->i_size - uio->uio_offset;
	if (bytesinfile <= 0) {
		if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0)
			ip->i_flag |= IN_ACCESS;
		return 0;
	}

	/*
	 * We can't do the whole transfer in one shot, so cycle through
	 * the file a buffer at a time.
	 */
	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0)
			break;

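		/*
		 * Look up the buffer underlying the current offset,
		 * issuing read-ahead scaled by the sequential heuristic.
		 */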
		error = ffs_blkatoff_ra(vp, uio->uio_offset, NULL,
					&bp, seqcount);
		if (error)
			break;

		/*
		 * If IO_DIRECT then set B_DIRECT for the buffer.  This
		 * will cause us to attempt to release the buffer later on
		 * and will cause the buffer cache to attempt to free the
		 * underlying pages.
		 */
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;

		/*
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 *
		 * XXX b_resid is only valid when an actual I/O has occurred
		 * and may be incorrect if the buffer is B_CACHE or if the
		 * last op on the buffer was a failed write.  This KASSERT
		 * is a precursor to removing it from the UFS code.
		 */
		KASSERT(bp->b_resid == 0, ("bp->b_resid != 0"));

		/*
		 * Calculate how much data we can copy
		 */
		blkoffset = blkoff(fs, uio->uio_offset);
		xfersize = bp->b_bufsize - blkoffset;
		if (xfersize > uio->uio_resid)
			xfersize = uio->uio_resid;
		if (xfersize > bytesinfile)
			xfersize = bytesinfile;
		if (xfersize <= 0) {
			panic("ufs_readwrite: impossible xfersize: %d",
			      xfersize);
		}

		/*
		 * Copy the data out of the buffer and into the caller's
		 * uio.
		 */
		error = uiomove(bp->b_data + blkoffset, xfersize, uio);

		if (error)
			break;

		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_FIRST(&bp->b_dep) == NULL)) {
			/*
			 * If there are no dependencies, and it's VMIO,
			 * then we don't need the buf, mark it available
			 * for freeing. The VM has the data.
			 */
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			/*
			 * Otherwise let whoever made the request take care
			 * of freeing it.  We just queue it onto another
			 * list.
			 */
			bqrelse(bp);
		}
	}

	/*
	 * bp can only be non-NULL here in the error case: the loop above
	 * resets bp to NULL on each iteration and does not set it again
	 * on normal completion, so a live buffer means we got here via a
	 * 'break' statement.
	 */
	if (bp != NULL) {
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_FIRST(&bp->b_dep) == NULL)) {
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			bqrelse(bp);
		}
	}

	if ((error == 0 || uio->uio_resid != orig_resid) &&
	    (vp->v_mount->mnt_flag & MNT_NOATIME) == 0)
		ip->i_flag |= IN_ACCESS;
	return (error);
}

/*
 * Vnode op for writing.
 *
 * ffs_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *	     struct ucred *a_cred)
 */
int
ffs_write(struct vop_write_args *ap)
{
	struct vnode *vp;
	struct uio *uio;
	struct inode *ip;
	FS *fs;
	struct buf *bp;
	ufs_daddr_t lbn;
	off_t osize;
	off_t nsize;
	int seqcount;
	int blkoffset, error, extended, flags, ioflag, resid, size, xfersize;
	struct thread *td;

	extended = 0;
	seqcount = ap->a_ioflag >> 16;
	ioflag = ap->a_ioflag;
	uio = ap->a_uio;
	vp = ap->a_vp;
	ip = VTOI(vp);

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("ffs_write: mode");
#endif

	switch (vp->v_type) {
	case VREG:
		if (ioflag & IO_APPEND)
			uio->uio_offset = ip->i_size;
		if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size)
			return (EPERM);
		/* FALLTHROUGH */
	case VLNK:
		break;
	case VDIR:
		panic("ffs_write: dir write");
		break;
	default:
		panic("ffs_write: type %p %d (%d,%d)", vp, (int)vp->v_type,
			(int)uio->uio_offset,
			(int)uio->uio_resid
		);
	}

	fs = ip->I_FS;
	if (uio->uio_offset < 0 ||
	    (uint64_t)uio->uio_offset + uio->uio_resid > fs->fs_maxfilesize) {
		return (EFBIG);
	}
	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	td = uio->uio_td;
	if (vp->v_type == VREG && td && td->td_proc &&
	    uio->uio_offset + uio->uio_resid >
	    td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
		return (EFBIG);
	}

	resid = uio->uio_resid;
	osize = ip->i_size;

	/*
	 * NOTE! These B_ flags are actually balloc-only flags, not buffer
	 * flags.  They are similar to the BA_ flags in FreeBSD.
	 */
	if (seqcount > B_SEQMAX)
		flags = B_SEQMAX << B_SEQSHIFT;
	else
		flags = seqcount << B_SEQSHIFT;
	if ((ioflag & IO_SYNC) && !DOINGASYNC(vp))
		flags |= B_SYNC;

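	/*
	 * Main write loop: for each block, allocate or locate the backing
	 * buffer, copy the user data into it, and then push the buffer
	 * out according to the sync/async policy chosen below.
	 */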
	for (error = 0; uio->uio_resid > 0;) {
		lbn = lblkno(fs, uio->uio_offset);
		blkoffset = blkoff(fs, uio->uio_offset);
		xfersize = fs->fs_bsize - blkoffset;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;

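		/*
		 * If this write extends the file, tell the VM/pager layer
		 * the new size up front so its notion of the backing
		 * object stays consistent with the buffer we are about
		 * to allocate.
		 */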
		if (uio->uio_offset + xfersize > ip->i_size) {
			nsize = uio->uio_offset + xfersize;
			nvnode_pager_setsize(vp, nsize,
				blkoffresize(fs, nsize), blkoff(fs, nsize));
		}

#if 0
		/*
		 * If doing a dummy write to flush the buffer for a
		 * putpages we must perform a read-before-write to
		 * fill in any missing spots and clear any invalid
		 * areas.  Otherwise a multi-page buffer may not properly
		 * flush.
		 *
		 * We must clear any invalid areas
		 */
		if (uio->uio_segflg == UIO_NOCOPY) {
			error = ffs_blkatoff(vp, uio->uio_offset, NULL, &bp);
			if (error)
				break;
			bqrelse(bp);
		}
#endif

		/*
		 * We must clear invalid areas.  Request B_CLRBUF when the
		 * write does not cover the whole block (or for a NOCOPY
		 * dummy write) so balloc validates the portions of the
		 * buffer that the uiomove below will not overwrite.
		 */
		if (xfersize < fs->fs_bsize || uio->uio_segflg == UIO_NOCOPY)
			flags |= B_CLRBUF;
		else
			flags &= ~B_CLRBUF;
/* XXX is uio->uio_offset the right thing here? */
		error = VOP_BALLOC(vp, uio->uio_offset, xfersize,
				   ap->a_cred, flags, &bp);
		if (error != 0)
			break;
		/*
		 * If the buffer is not valid and we did not clear garbage
		 * out above, we have to do so here even though the write
		 * covers the entire buffer, in order to avoid a mmap()/write
		 * race where another process may see the garbage prior to
		 * the uiomove() for a write replacing it.
		 */
		if ((bp->b_flags & B_CACHE) == 0 && (flags & B_CLRBUF) == 0)
			vfs_bio_clrbuf(bp);
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;
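		/*
		 * A synchronous write that also invalidates (IO_SYNC with
		 * IO_INVAL) wants the data written and then dropped from
		 * the cache, so mark the buffer B_NOCACHE.
		 */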
		if ((ioflag & (IO_SYNC|IO_INVAL)) == (IO_SYNC|IO_INVAL))
			bp->b_flags |= B_NOCACHE;

		if (uio->uio_offset + xfersize > ip->i_size) {
			ip->i_size = uio->uio_offset + xfersize;
			extended = 1;
		}

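		/*
		 * If the underlying I/O came up short, b_resid reflects
		 * the invalid tail of the buffer; trim xfersize so we do
		 * not copy into it.
		 */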
		size = BLKSIZE(fs, ip, lbn) - bp->b_resid;
		if (size < xfersize)
			xfersize = size;

		error = uiomove(bp->b_data + blkoffset, xfersize, uio);
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_FIRST(&bp->b_dep) == NULL)) {
			bp->b_flags |= B_RELBUF;
		}

		/*
		 * If IO_SYNC, each buffer is written synchronously.
		 * Otherwise, if we have a severe page deficiency, write
		 * the buffer asynchronously.  Otherwise try to cluster,
		 * and if that doesn't do it then either do an async write
		 * (if IO_DIRECT), or a delayed write (if not).
		 */
		if (ioflag & IO_SYNC) {
			(void)bwrite(bp);
		} else if (vm_page_count_severe() ||
			    buf_dirty_count_severe() ||
			    (ioflag & IO_ASYNC)) {
			bp->b_flags |= B_CLUSTEROK;
			bawrite(bp);
		} else if (xfersize + blkoffset == fs->fs_bsize) {
			if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) {
				bp->b_flags |= B_CLUSTEROK;
				cluster_write(bp, (off_t)ip->i_size,
					      fs->fs_bsize, seqcount);
			} else {
				bawrite(bp);
			}
		} else if (ioflag & IO_DIRECT) {
			bp->b_flags |= B_CLUSTEROK;
			bawrite(bp);
		} else {
			bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
		}
		if (error || xfersize == 0)
			break;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	/*
	 * If we successfully wrote any data and we are not the superuser,
	 * we clear the setuid and setgid bits as a precaution against
	 * tampering.
	 */
	if (resid > uio->uio_resid && ap->a_cred && ap->a_cred->cr_uid != 0)
		ip->i_mode &= ~(ISUID | ISGID);
	if (resid > uio->uio_resid)
		VN_KNOTE(vp, NOTE_WRITE | (extended ? NOTE_EXTEND : 0));
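	/*
	 * On error with IO_UNIT we back out the entire partial write:
	 * truncate the file back to its original size and restore the
	 * uio so the caller observes an all-or-nothing result.
	 */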
	if (error) {
		if (ioflag & IO_UNIT) {
			(void)ffs_truncate(vp, osize, ioflag & IO_SYNC,
					   ap->a_cred);
			uio->uio_offset -= resid - uio->uio_resid;
			uio->uio_resid = resid;
		}
	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC)) {
		error = ffs_update(vp, 1);
	}

	return (error);
}