xref: /dragonfly/sys/kern/vfs_vm.c (revision 10cbe914)
/*
 * Copyright (c) 2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implements new VFS/VM coherency functions.  For conforming VFSs
 * we treat the backing VM object slightly differently.  Instead of
 * maintaining a number of pages to exactly fit the size of the file
 * we instead maintain pages to fit the entire contents of the last
 * buffer cache buffer used by the file.
 *
 * For VFSs like NFS and HAMMER which use (generally speaking) fixed-size
 * buffers this greatly reduces the complexity of VFS/VM interactions.
 *
 * Truncations no longer invalidate pages covered by the buffer cache
 * beyond the file EOF which still fit within the file's last buffer.
 * We simply unmap them and do not allow userland to fault them in.
 *
 * The VFS is no longer responsible for zero-filling buffers during a
 * truncation; the last buffer will be automatically zero-filled by
 * nvtruncbuf().
 *
 * This code is intended to (eventually) replace vtruncbuf() and
 * vnode_pager_setsize().
 */
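
/*
 * Illustrative sketch (not taken from any real filesystem) of how a
 * conforming VFS might drive these entry points from its truncate/extend
 * path.  The examplefs name and EXAMPLEFS_BLKSIZE constant are
 * hypothetical; only the nvtruncbuf() and nvextendbuf() calls reflect the
 * interfaces defined below.
 *
 *	static int
 *	examplefs_set_size(struct vnode *vp, off_t olength, off_t nlength)
 *	{
 *		int blksize = EXAMPLEFS_BLKSIZE;   // fixed-size buffers
 *		int error;
 *
 *		if (nlength < olength) {
 *			// Shrink: destroy buffers/pages past nlength and
 *			// let nvtruncbuf() zero-fill the new last buffer.
 *			error = nvtruncbuf(vp, nlength, blksize, -1);
 *		} else {
 *			// Grow: resize the VM object; trivial == 0 asks
 *			// for the old last buffer to be zero-filled.
 *			error = nvextendbuf(vp, olength, nlength,
 *					    blksize, blksize, -1, -1, 0);
 *		}
 *		return (error);
 *	}
 */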

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_zone.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>
#include <sys/mplock2.h>

static int nvtruncbuf_bp_trunc_cmp(struct buf *bp, void *data);
static int nvtruncbuf_bp_trunc(struct buf *bp, void *data);
static int nvtruncbuf_bp_metasync_cmp(struct buf *bp, void *data);
static int nvtruncbuf_bp_metasync(struct buf *bp, void *data);

/*
 * Truncate a file's buffer and pages to a specified length. The
 * byte-granular length of the file is specified along with the block
 * size of the buffer containing that offset.
 *
 * If the last buffer straddles the length, its contents will be zero-filled
 * as appropriate.  All buffers and pages after the last buffer will be
 * destroyed.  The last buffer itself will be destroyed only if the length
 * is exactly aligned with it.
 *
 * UFS typically passes the old block size prior to the actual truncation,
 * then later resizes the block based on the new file size.  NFS uses a
 * fixed block size and doesn't care.  HAMMER uses a block size based on
 * the offset which is fixed for any particular offset.
 *
 * When zero-filling we must bdwrite() to avoid a window of opportunity
 * where the kernel might throw away a clean buffer and the filesystem
 * then attempts to bread() it again before completing (or as part of)
 * the extension.  The filesystem is still responsible for zero-filling
 * any remainder when writing to the media in the strategy function when
 * it is able to do so without the page being mapped.  The page may still
 * be mapped by userland here.
 *
 * When modifying a buffer we must clear any cached raw disk offset.
 * bdwrite() will call BMAP on it again.  Some filesystems, like HAMMER,
 * never overwrite existing data blocks.
 */
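/*
 * For example (numbers are illustrative only): with a fixed 8192-byte
 * block size, truncating to length 10000 gives boff = 10000 % 8192 = 1808,
 * so a caller may simply pass boff = -1 and let the computation happen
 * here:
 *
 *	error = nvtruncbuf(vp, 10000, 8192, -1);
 *
 * Filesystems which deal with fragments (e.g. UFS) must pass an explicit
 * boff >= 0 because the offset within the final (possibly smaller) block
 * cannot be derived from length % blksize alone.
 */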

struct truncbuf_info {
	struct vnode *vp;
	off_t truncloffset;	/* truncation point */
	int clean;		/* clean tree, else dirty tree */
};

int
nvtruncbuf(struct vnode *vp, off_t length, int blksize, int boff)
{
	struct truncbuf_info info;
	off_t truncboffset;
	const char *filename;
	struct buf *bp;
	int count;
	int error;

	/*
	 * Round up to the *next* block, then destroy the buffers in question.
	 * Since we are only removing some of the buffers we must rely on the
	 * scan count to determine whether a loop is necessary.
	 *
	 * Destroy any pages beyond the last buffer.
	 */
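	/*
	 * For instance (illustrative numbers): length = 10000 with
	 * blksize = 8192 gives boff = 1808 and truncloffset =
	 * 10000 + (8192 - 1808) = 16384, so the scans below destroy
	 * every buffer whose loffset is >= 16384.
	 */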
	if (boff < 0)
		boff = (int)(length % blksize);
	if (boff)
		info.truncloffset = length + (blksize - boff);
	else
		info.truncloffset = length;
	info.vp = vp;
	lwkt_gettoken(&vp->v_token);
	do {
		info.clean = 1;
		count = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree,
				nvtruncbuf_bp_trunc_cmp,
				nvtruncbuf_bp_trunc, &info);
		info.clean = 0;
		count += RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
				nvtruncbuf_bp_trunc_cmp,
				nvtruncbuf_bp_trunc, &info);
	} while(count);

	nvnode_pager_setsize(vp, length, blksize, boff);

	/*
	 * Zero-fill the area beyond the file EOF that still fits within
	 * the last buffer.  We must mark the buffer as dirty even though
	 * the modified area is beyond EOF to avoid races where the kernel
	 * might flush the buffer before the filesystem is able to reallocate
	 * the block.
	 *
	 * The VFS is responsible for dealing with the actual truncation.
	 */
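	/*
	 * Continuing the illustrative numbers above: truncboffset =
	 * 10000 - 1808 = 8192, so the buffer at loffset 8192 is read
	 * back, bytes 1808-8191 of it (file offsets 10000-16383) are
	 * zeroed, and the buffer is queued with bdwrite().
	 */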
	if (boff) {
		truncboffset = length - boff;
		error = bread(vp, truncboffset, blksize, &bp);
		if (error == 0) {
			bzero(bp->b_data + boff, blksize - boff);
			if (bp->b_flags & B_DELWRI) {
				if (bp->b_dirtyoff > boff)
					bp->b_dirtyoff = boff;
				if (bp->b_dirtyend > boff)
					bp->b_dirtyend = boff;
			}
			bp->b_bio2.bio_offset = NOOFFSET;
			bdwrite(bp);
		}
	} else {
		error = 0;
	}

	/*
	 * For safety, fsync any remaining metadata if the file is not being
	 * truncated to 0.  Since the metadata does not represent the entire
	 * dirty list we have to rely on the hit count to ensure that we get
	 * all of it.
	 *
	 * This is typically applicable only to UFS.  NFS and HAMMER do
	 * not store indirect blocks in the per-vnode buffer cache.
	 */
	if (length > 0) {
		do {
			count = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
					nvtruncbuf_bp_metasync_cmp,
					nvtruncbuf_bp_metasync, &info);
		} while (count);
	}

	/*
	 * It is possible to have in-progress I/O from buffers that were
	 * not part of the truncation.  This should not happen if we
	 * are truncating to 0-length.
	 */
	bio_track_wait(&vp->v_track_write, 0, 0);

	/*
	 * Debugging only
	 */
	spin_lock(&vp->v_spinlock);
	filename = TAILQ_FIRST(&vp->v_namecache) ?
		   TAILQ_FIRST(&vp->v_namecache)->nc_name : "?";
	spin_unlock(&vp->v_spinlock);

	/*
	 * Make sure no buffers were instantiated while we were trying
	 * to clean out the remaining VM pages.  This could occur due
	 * to busy dirty VM pages being flushed out to disk.
	 */
	do {
		info.clean = 1;
		count = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree,
				nvtruncbuf_bp_trunc_cmp,
				nvtruncbuf_bp_trunc, &info);
		info.clean = 0;
		count += RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
				nvtruncbuf_bp_trunc_cmp,
				nvtruncbuf_bp_trunc, &info);
		if (count) {
			kprintf("Warning: nvtruncbuf():  Had to re-clean %d "
			       "left over buffers in %s\n", count, filename);
		}
	} while(count);

	lwkt_reltoken(&vp->v_token);

	return (error);
}

/*
 * The callback buffer is beyond the new file EOF and must be destroyed.
 * Note that the compare function must conform to RB_SCAN's requirements.
 */
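/*
 * Illustrative only: with truncloffset = 16384, buffers at loffset 16384
 * or 24576 compare as 0 and have nvtruncbuf_bp_trunc() invoked on them,
 * while a buffer at loffset 8192 compares as -1 and is passed over by
 * the scan.
 */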
static
int
nvtruncbuf_bp_trunc_cmp(struct buf *bp, void *data)
{
	struct truncbuf_info *info = data;

	if (bp->b_loffset >= info->truncloffset)
		return(0);
	return(-1);
}

static
int
nvtruncbuf_bp_trunc(struct buf *bp, void *data)
{
	struct truncbuf_info *info = data;

	/*
	 * Do not try to use a buffer we cannot immediately lock,
	 * but sleep anyway to prevent a livelock.  The code will
	 * loop until all buffers can be acted upon.
	 */
	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
		atomic_add_int(&bp->b_refs, 1);
		if (BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL) == 0)
			BUF_UNLOCK(bp);
		atomic_subtract_int(&bp->b_refs, 1);
	} else if ((info->clean && (bp->b_flags & B_DELWRI)) ||
		   (info->clean == 0 && (bp->b_flags & B_DELWRI) == 0) ||
		   bp->b_vp != info->vp ||
		   nvtruncbuf_bp_trunc_cmp(bp, data)) {
		BUF_UNLOCK(bp);
	} else {
		bremfree(bp);
		bp->b_flags |= (B_INVAL | B_RELBUF | B_NOCACHE);
		brelse(bp);
	}
	return(1);
}

/*
 * Fsync all meta-data after truncating a file to a non-zero length.  Only
 * metadata blocks (with a negative loffset) are scanned.
 * Note that the compare function must conform to RB_SCAN's requirements.
 */
static int
nvtruncbuf_bp_metasync_cmp(struct buf *bp, void *data __unused)
{
	if (bp->b_loffset < 0)
		return(0);
	return(1);
}

static int
nvtruncbuf_bp_metasync(struct buf *bp, void *data)
{
	struct truncbuf_info *info = data;

	/*
	 * Do not try to use a buffer we cannot immediately lock,
	 * but sleep anyway to prevent a livelock.  The code will
	 * loop until all buffers can be acted upon.
	 */
	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
		atomic_add_int(&bp->b_refs, 1);
		if (BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL) == 0)
			BUF_UNLOCK(bp);
		atomic_subtract_int(&bp->b_refs, 1);
	} else if ((bp->b_flags & B_DELWRI) == 0 ||
		   bp->b_vp != info->vp ||
		   nvtruncbuf_bp_metasync_cmp(bp, data)) {
		BUF_UNLOCK(bp);
	} else {
		bremfree(bp);
		bawrite(bp);
	}
	return(1);
}

/*
 * Extend a file's buffer and pages to a new, larger size.  The block size
 * at both the old and new length must be passed, but buffer cache operations
 * will only be performed on the old block.  The new nlength/nblksize will
 * be used to properly set the VM object size.
 *
 * To make this explicit we require the old length to be passed even though
 * we can acquire it from vp->v_filesize, which also avoids potential
 * corruption if the filesystem and vp get desynchronized somehow.
 *
 * If the caller intends to immediately write into the newly extended
 * space pass trivial == 1.  If trivial is 0 the original buffer will be
 * zero-filled as necessary to clean out any junk in the extended space.
 *
 * When zero-filling we must bdwrite() to avoid a window of opportunity
 * where the kernel might throw away a clean buffer and the filesystem
 * then attempts to bread() it again before completing (or as part of)
 * the extension.  The filesystem is still responsible for zero-filling
 * any remainder when writing to the media in the strategy function when
 * it is able to do so without the page being mapped.  The page may still
 * be mapped by userland here.
 *
 * When modifying a buffer we must clear any cached raw disk offset.
 * bdwrite() will call BMAP on it again.  Some filesystems, like HAMMER,
 * never overwrite existing data blocks.
 */
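/*
 * Sketch of a caller growing a file that uses fixed 8192-byte buffers
 * (the lengths are arbitrary).  Passing trivial == 0 zero-fills the old
 * last buffer beyond the old EOF:
 *
 *	error = nvextendbuf(vp, 10000, 50000, 8192, 8192, -1, -1, 0);
 *
 * A caller about to overwrite the newly extended space (e.g. an extending
 * write) would pass trivial == 1 and skip the read-modify-write of the
 * old last buffer.
 */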
int
nvextendbuf(struct vnode *vp, off_t olength, off_t nlength,
	    int oblksize, int nblksize, int oboff, int nboff, int trivial)
{
	off_t truncboffset;
	struct buf *bp;
	int error;

	error = 0;
	nvnode_pager_setsize(vp, nlength, nblksize, nboff);
	if (trivial == 0) {
		if (oboff < 0)
			oboff = (int)(olength % oblksize);
		truncboffset = olength - oboff;

		if (oboff) {
			error = bread(vp, truncboffset, oblksize, &bp);
			if (error == 0) {
				bzero(bp->b_data + oboff, oblksize - oboff);
				bp->b_bio2.bio_offset = NOOFFSET;
				bdwrite(bp);
			}
		}
	}
	return (error);
}

/*
 * Set vp->v_filesize and vp->v_object->size, destroy pages beyond
 * the last buffer when truncating.
 *
 * This function does not do any zeroing or invalidating of partially
 * overlapping pages.  Zeroing is the responsibility of nvtruncbuf().
 * However, it does unmap VM pages from the user address space on a
 * page-granular (versus buffer cache granular) basis.
 *
 * If boff is passed as -1 the base offset of the buffer cache buffer is
 * calculated from length and blksize.  Filesystems such as UFS which deal
 * with fragments have to specify a boff >= 0 since the base offset cannot
 * be calculated from length and blksize.
 *
 * For UFS blksize is the 'new' blocksize, used only to determine how large
 * the VM object must become.
 */
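/*
 * Sizing example (illustrative, assumes PAGE_SIZE == 4096): truncating to
 * length 10000 with an 8192-byte last buffer gives boff = 1808 and
 * truncboffset = 8192, so nobjsize = OFF_TO_IDX(8192 + 8192 + PAGE_MASK)
 * = 4.  The object retains pages 0-3 (bytes 0-16383), covering the whole
 * last buffer, and only page 3 (the first page wholly beyond the new EOF)
 * is unmapped from user space.
 */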
void
nvnode_pager_setsize(struct vnode *vp, off_t length, int blksize, int boff)
{
	vm_pindex_t nobjsize;
	vm_pindex_t oobjsize;
	vm_pindex_t pi;
	vm_object_t object;
	vm_page_t m;
	off_t truncboffset;

	/*
	 * Degenerate conditions
	 */
	if ((object = vp->v_object) == NULL)
		return;
	if (length == vp->v_filesize)
		return;

	/*
	 * Calculate the size of the VM object, coverage includes
	 * the buffer straddling EOF.  If EOF is buffer-aligned
	 * we don't bother.
	 *
	 * Buffers do not have to be page-aligned.  Make sure
	 * nobjsize is beyond the last page of the buffer.
	 */
	if (boff < 0)
		boff = (int)(length % blksize);
	truncboffset = length - boff;
	oobjsize = object->size;
	if (boff)
		nobjsize = OFF_TO_IDX(truncboffset + blksize + PAGE_MASK);
	else
		nobjsize = OFF_TO_IDX(truncboffset + PAGE_MASK);
	object->size = nobjsize;

	if (length < vp->v_filesize) {
		/*
		 * File has shrunk, toss any cached pages beyond
		 * the end of the buffer (blksize aligned) for the
		 * new EOF.
		 */
		vp->v_filesize = length;
		if (nobjsize < oobjsize) {
			vm_object_page_remove(object, nobjsize, oobjsize,
					      FALSE);
		}

		/*
		 * Unmap any pages (page aligned) beyond the new EOF.
		 * The pages remain part of the (last) buffer and are not
		 * invalidated.
		 */
		pi = OFF_TO_IDX(length + PAGE_MASK);
		lwkt_gettoken(&vm_token);
		while (pi < nobjsize) {
			do {
				m = vm_page_lookup(object, pi);
			} while (m && vm_page_sleep_busy(m, TRUE, "vsetsz"));
			if (m) {
				vm_page_busy(m);
				vm_page_protect(m, VM_PROT_NONE);
				vm_page_wakeup(m);
			}
			++pi;
		}
		lwkt_reltoken(&vm_token);
	} else {
		/*
		 * File has expanded.
		 */
		vp->v_filesize = length;
	}
}