xref: /dragonfly/sys/kern/vfs_cluster.c (revision 81c11cd3)
/*-
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 * 	Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
 * $FreeBSD: src/sys/kern/vfs_cluster.c,v 1.92.2.9 2001/11/18 07:10:59 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_cluster.c,v 1.40 2008/07/14 03:09:00 dillon Exp $
 */

#include "opt_debug_cluster.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <sys/sysctl.h>
#include <sys/buf2.h>
#include <vm/vm_page2.h>

#include <machine/limits.h>

#if defined(CLUSTERDEBUG)
#include <sys/sysctl.h>
static int	rcluster = 0;
SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0, "");
#endif

static MALLOC_DEFINE(M_SEGMENT, "cluster_save", "cluster_save buffer");

static struct cluster_save *
	cluster_collectbufs (struct vnode *vp, struct buf *last_bp,
			    int blksize);
static struct buf *
	cluster_rbuild (struct vnode *vp, off_t filesize, off_t loffset,
			    off_t doffset, int blksize, int run,
			    struct buf *fbp);
static void cluster_callback (struct bio *);
static void cluster_setram (struct buf *);

static int write_behind = 1;
SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0,
    "Cluster write-behind setting");
static int max_readahead = 2 * 1024 * 1024;
SYSCTL_INT(_vfs, OID_AUTO, max_readahead, CTLFLAG_RW, &max_readahead, 0,
    "Limit in bytes for desired cluster read-ahead");

extern vm_page_t	bogus_page;

extern int cluster_pbuf_freecnt;

/*
 * This replaces bread.
 *
 * filesize	- read-ahead @ blksize will not cross this boundary
 * loffset	- loffset for returned *bpp
 * blksize	- blocksize for returned *bpp and read-ahead bps
 * minreq	- minimum (not a hard minimum) in bytes, typically reflects
 *		  a higher level uio resid.
 * maxreq	- maximum (sequential heuristic) in bytes (highest typically
 *		  ~2MB)
 * bpp		- return buffer (*bpp) for (loffset,blksize)
 */
int
cluster_read(struct vnode *vp, off_t filesize, off_t loffset,
	     int blksize, size_t minreq, size_t maxreq, struct buf **bpp)
{
	struct buf *bp, *rbp, *reqbp;
	off_t origoffset;
	off_t doffset;
	int error;
	int i;
	int maxra;
	int maxrbuild;

	error = 0;

	/*
	 * Calculate the desired read-ahead in blksize'd blocks (maxra).
	 * To do this we calculate maxreq.
	 *
	 * maxreq typically starts out as a sequential heuristic.  If the
	 * high level uio/resid is bigger (minreq), we pop maxreq up to
	 * minreq.  This represents the case where userland is performing
	 * random I/O via big read()'s.
	 *
	 * Then we limit maxreq to max_readahead to ensure it is a reasonable
	 * value.
	 *
	 * Finally we must ensure that (loffset + maxreq) does not cross the
	 * boundary (filesize) for the current blocksize.  If we allowed it
	 * to cross we could end up with buffers past the boundary with the
	 * wrong block size (HAMMER large-data areas use mixed block sizes).
	 * minreq is also absolutely limited to filesize.
	 */
	if (maxreq < minreq)
		maxreq = minreq;
	/* minreq not used beyond this point */

	if (maxreq > max_readahead) {
		maxreq = max_readahead;
		if (maxreq > 16 * 1024 * 1024)
			maxreq = 16 * 1024 * 1024;
	}
	if (maxreq < blksize)
		maxreq = blksize;
	if (loffset + maxreq > filesize) {
		if (loffset > filesize)
			maxreq = 0;
		else
			maxreq = filesize - loffset;
	}

	maxra = (int)(maxreq / blksize);

	/*
	 * Get the requested block.
	 */
	*bpp = reqbp = bp = getblk(vp, loffset, blksize, 0, 0);
	origoffset = loffset;

	/*
	 * Calculate the maximum cluster size for a single I/O, used
	 * by cluster_rbuild().
	 */
	maxrbuild = vmaxiosize(vp) / blksize;

	/*
	 * If it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back-off on prospective read-aheads.
	 */
	if (bp->b_flags & B_CACHE) {
		/*
		 * Not sequential, do not do any read-ahead
		 */
		if (maxra <= 1)
			return 0;

		/*
		 * No read-ahead mark, do not do any read-ahead
		 * yet.
		 */
		if ((bp->b_flags & B_RAM) == 0)
			return 0;

		/*
		 * We hit a read-ahead-mark, figure out how much read-ahead
		 * to do (maxra) and where to start (loffset).
		 *
		 * Shortcut the scan.  Typically the way this works is that
		 * we've built up all the blocks in between except for the
		 * last in previous iterations, so if the second-to-last
		 * block is present we just skip ahead to it.
		 *
		 * This algorithm has O(1) cpu in the steady state no
		 * matter how large maxra is.
		 */
		bp->b_flags &= ~B_RAM;

		if (findblk(vp, loffset + (maxra - 2) * blksize, FINDBLK_TEST))
			i = maxra - 1;
		else
			i = 1;
		while (i < maxra) {
			if (findblk(vp, loffset + i * blksize,
				    FINDBLK_TEST) == NULL) {
				break;
			}
			++i;
		}

		/*
		 * We got everything or everything is in the cache, no
		 * point continuing.
		 */
		if (i >= maxra)
			return 0;
		maxra -= i;
		loffset += i * blksize;
		reqbp = bp = NULL;
	} else {
		__debugvar off_t firstread = bp->b_loffset;
		int nblks;

		/*
		 * Set-up synchronous read for bp.
		 */
		bp->b_cmd = BUF_CMD_READ;
		bp->b_bio1.bio_done = biodone_sync;
		bp->b_bio1.bio_flags |= BIO_SYNC;

		KASSERT(firstread != NOOFFSET,
			("cluster_read: no buffer offset"));

		/*
		 * nblks is our cluster_rbuild request size, limited
		 * primarily by the device.
		 */
		if ((nblks = maxra) > maxrbuild)
			nblks = maxrbuild;

		if (nblks > 1) {
			int burstbytes;

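			/*
			 * Translate the logical offset into a device
			 * offset (doffset) and find out how many bytes
			 * are contiguous on the device (burstbytes),
			 * which bounds the cluster size computed below.
			 */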
			error = VOP_BMAP(vp, loffset, &doffset,
					 &burstbytes, NULL, BUF_CMD_READ);
			if (error)
				goto single_block_read;
			if (nblks > burstbytes / blksize)
				nblks = burstbytes / blksize;
			if (doffset == NOOFFSET)
				goto single_block_read;
			if (nblks <= 1)
				goto single_block_read;

			bp = cluster_rbuild(vp, filesize, loffset,
					    doffset, blksize, nblks, bp);
			loffset += bp->b_bufsize;
			maxra -= bp->b_bufsize / blksize;
		} else {
single_block_read:
			/*
			 * If it isn't in the cache, then get a chunk from
			 * disk if sequential, otherwise just get the block.
			 */
			cluster_setram(bp);
			loffset += blksize;
			--maxra;
		}
	}

	/*
	 * If B_CACHE was not set issue bp.  bp will either be an
	 * asynchronous cluster buf or a synchronous single-buf.
	 * If it is a single buf it will be the same as reqbp.
	 *
	 * NOTE: Once an async cluster buf is issued bp becomes invalid.
	 */
	if (bp) {
#if defined(CLUSTERDEBUG)
		if (rcluster)
			kprintf("S(%012jx,%d,%d)\n",
			    (intmax_t)bp->b_loffset, bp->b_bcount, maxra);
#endif
		if ((bp->b_flags & B_CLUSTER) == 0)
			vfs_busy_pages(vp, bp);
		bp->b_flags &= ~(B_ERROR|B_INVAL);
		vn_strategy(vp, &bp->b_bio1);
		error = 0;
		/* bp invalid now */
	}

	/*
	 * If we have been doing sequential I/O, then do some read-ahead.
	 * The code above us should have positioned us at the next likely
	 * offset.
	 *
	 * Only mess with buffers which we can immediately lock.  HAMMER
	 * will do device-readahead irrespective of what the blocks
	 * represent.
	 */
	while (error == 0 && maxra > 0) {
		int burstbytes;
		int tmp_error;
		int nblks;

		rbp = getblk(vp, loffset, blksize,
			     GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
		if (rbp == NULL)
			goto no_read_ahead;
		if ((rbp->b_flags & B_CACHE)) {
			bqrelse(rbp);
			goto no_read_ahead;
		}

		/*
		 * An error from the read-ahead bmap has nothing to do
		 * with the caller's original request.
		 */
		tmp_error = VOP_BMAP(vp, loffset, &doffset,
				     &burstbytes, NULL, BUF_CMD_READ);
		if (tmp_error || doffset == NOOFFSET) {
			rbp->b_flags |= B_INVAL;
			brelse(rbp);
			rbp = NULL;
			goto no_read_ahead;
		}
		if ((nblks = maxra) > maxrbuild)
			nblks = maxrbuild;
		if (nblks > burstbytes / blksize)
			nblks = burstbytes / blksize;

		/*
		 * rbp: async read
		 */
		rbp->b_cmd = BUF_CMD_READ;
		/*rbp->b_flags |= B_AGE*/;
		cluster_setram(rbp);

		if (nblks > 1) {
			rbp = cluster_rbuild(vp, filesize, loffset,
					     doffset, blksize,
					     nblks, rbp);
		} else {
			rbp->b_bio2.bio_offset = doffset;
		}

#if defined(CLUSTERDEBUG)
		if (rcluster) {
			if (bp) {
				kprintf("A+(%012jx,%d,%jd) "
					"doff=%012jx minr=%zd ra=%d\n",
				    (intmax_t)loffset, rbp->b_bcount,
				    (intmax_t)(loffset - origoffset),
				    (intmax_t)doffset, minreq, maxra);
			} else {
				kprintf("A-(%012jx,%d,%jd) "
					"doff=%012jx minr=%zd ra=%d\n",
				    (intmax_t)rbp->b_loffset, rbp->b_bcount,
				    (intmax_t)(loffset - origoffset),
				    (intmax_t)doffset, minreq, maxra);
			}
		}
#endif
		rbp->b_flags &= ~(B_ERROR|B_INVAL);

		if ((rbp->b_flags & B_CLUSTER) == 0)
			vfs_busy_pages(vp, rbp);
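		/*
		 * The read-ahead buf is issued asynchronously; hand the
		 * buffer lock over to the kernel so the I/O can complete
		 * in another context.
		 */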
		BUF_KERNPROC(rbp);
		loffset += rbp->b_bufsize;
		maxra -= rbp->b_bufsize / blksize;
		vn_strategy(vp, &rbp->b_bio1);
		/* rbp invalid now */
	}

	/*
	 * Wait for our original buffer to complete its I/O.  reqbp will
	 * be NULL if the original buffer was B_CACHE.  We are returning
	 * (*bpp) which is the same as reqbp when reqbp != NULL.
	 */
no_read_ahead:
	if (reqbp) {
		KKASSERT(reqbp->b_bio1.bio_flags & BIO_SYNC);
		error = biowait(&reqbp->b_bio1, "clurd");
	}
	return (error);
}

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 *
 * This function either returns a cluster buf or it returns fbp.  fbp is
 * already expected to be set up as a synchronous or asynchronous request.
 *
 * If a cluster buf is returned it will always be async.
 */
static struct buf *
cluster_rbuild(struct vnode *vp, off_t filesize, off_t loffset, off_t doffset,
	       int blksize, int run, struct buf *fbp)
{
	struct buf *bp, *tbp;
	off_t boffset;
	int i, j;
	int maxiosize = vmaxiosize(vp);

	/*
	 * avoid a division
	 */
	while (loffset + run * blksize > filesize) {
		--run;
	}

	tbp = fbp;
	tbp->b_bio2.bio_offset = doffset;
	if ((tbp->b_flags & B_MALLOC) ||
	    ((tbp->b_flags & B_VMIO) == 0) || (run <= 1)) {
		return tbp;
	}

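	/*
	 * Try to obtain a pbuf with usable KVA to glue the cluster
	 * together.  If none are available just return the single
	 * (already set up) buffer.
	 */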
	bp = trypbuf_kva(&cluster_pbuf_freecnt);
	if (bp == NULL) {
		return tbp;
	}

	/*
	 * We are synthesizing a buffer out of vm_page_t's, but
	 * if the block size is not page aligned then the starting
	 * address may not be either.  Inherit the b_data offset
	 * from the original buffer.
	 */
	bp->b_data = (char *)((vm_offset_t)bp->b_data |
	    ((vm_offset_t)tbp->b_data & PAGE_MASK));
	bp->b_flags |= B_CLUSTER | B_VMIO;
	bp->b_cmd = BUF_CMD_READ;
	bp->b_bio1.bio_done = cluster_callback;		/* default to async */
	bp->b_bio1.bio_caller_info1.cluster_head = NULL;
	bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
	bp->b_loffset = loffset;
	bp->b_bio2.bio_offset = doffset;
	KASSERT(bp->b_loffset != NOOFFSET,
		("cluster_rbuild: no buffer offset"));

	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_xio.xio_npages = 0;

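	/*
	 * Assemble the component buffers.  Iteration 0 uses the passed-in
	 * buffer; subsequent blocks are acquired non-blocking and must be
	 * contiguous both logically and on the device to join the cluster.
	 */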
	for (boffset = doffset, i = 0; i < run; ++i, boffset += blksize) {
		if (i) {
			if ((bp->b_xio.xio_npages * PAGE_SIZE) +
			    round_page(blksize) > maxiosize) {
				break;
			}

			/*
			 * Shortcut some checks and try to avoid buffers that
			 * would block in the lock.  The same checks have to
			 * be made again after we officially get the buffer.
			 */
			tbp = getblk(vp, loffset + i * blksize, blksize,
				     GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
			if (tbp == NULL)
				break;
			for (j = 0; j < tbp->b_xio.xio_npages; j++) {
				if (tbp->b_xio.xio_pages[j]->valid)
					break;
			}
			if (j != tbp->b_xio.xio_npages) {
				bqrelse(tbp);
				break;
			}

			/*
			 * Stop scanning if the buffer is fully valid
			 * (marked B_CACHE), or locked (may be doing a
			 * background write), or if the buffer is not
			 * VMIO backed.  The clustering code can only deal
			 * with VMIO-backed buffers.
			 */
			if ((tbp->b_flags & (B_CACHE|B_LOCKED)) ||
			    (tbp->b_flags & B_VMIO) == 0 ||
			    (LIST_FIRST(&tbp->b_dep) != NULL &&
			     buf_checkread(tbp))
			) {
				bqrelse(tbp);
				break;
			}

			/*
			 * The buffer must be completely invalid in order to
			 * take part in the cluster.  If it is partially valid
			 * then we stop.
			 */
			for (j = 0; j < tbp->b_xio.xio_npages; j++) {
				if (tbp->b_xio.xio_pages[j]->valid)
					break;
			}
			if (j != tbp->b_xio.xio_npages) {
				bqrelse(tbp);
				break;
			}

			/*
			 * Set a read-ahead mark as appropriate
			 */
			if (i == 1 || i == (run - 1))
				cluster_setram(tbp);

			/*
			 * Depress the priority of buffers not explicitly
			 * requested.
			 */
			/* tbp->b_flags |= B_AGE; */

			/*
			 * Set the block number if it isn't set, otherwise
			 * if it is make sure it matches the block number we
			 * expect.
			 */
			if (tbp->b_bio2.bio_offset == NOOFFSET) {
				tbp->b_bio2.bio_offset = boffset;
			} else if (tbp->b_bio2.bio_offset != boffset) {
				brelse(tbp);
				break;
			}
		}

		/*
		 * The passed-in tbp (i == 0) will already be set up for
		 * async or sync operation.  All other tbp's acquired in
		 * our loop are set up for async operation.
		 */
		tbp->b_cmd = BUF_CMD_READ;
		BUF_KERNPROC(tbp);
		cluster_append(&bp->b_bio1, tbp);
		for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
			vm_page_t m;
			m = tbp->b_xio.xio_pages[j];
			vm_page_io_start(m);
			vm_object_pip_add(m->object, 1);
			if ((bp->b_xio.xio_npages == 0) ||
				(bp->b_xio.xio_pages[bp->b_xio.xio_npages-1] != m)) {
				bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
				bp->b_xio.xio_npages++;
			}
			if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
				tbp->b_xio.xio_pages[j] = bogus_page;
		}
		/*
		 * XXX shouldn't this be += blksize for both, like in
		 * cluster_wbuild()?
		 *
		 * Don't inherit tbp->b_bufsize as it may be larger due to
		 * a non-page-aligned size.  Instead just aggregate using
		 * 'blksize'.
		 */
		if (tbp->b_bcount != blksize)
		    kprintf("warning: tbp->b_bcount wrong %d vs %d\n", tbp->b_bcount, blksize);
		if (tbp->b_bufsize != blksize)
		    kprintf("warning: tbp->b_bufsize wrong %d vs %d\n", tbp->b_bufsize, blksize);
		bp->b_bcount += blksize;
		bp->b_bufsize += blksize;
	}

	/*
	 * Fully valid pages in the cluster are already good and do not need
	 * to be re-read from disk.  Replace the page with bogus_page
	 */
	for (j = 0; j < bp->b_xio.xio_npages; j++) {
		if ((bp->b_xio.xio_pages[j]->valid & VM_PAGE_BITS_ALL) ==
		    VM_PAGE_BITS_ALL) {
			bp->b_xio.xio_pages[j] = bogus_page;
		}
	}
	if (bp->b_bufsize > bp->b_kvasize) {
		panic("cluster_rbuild: b_bufsize(%d) > b_kvasize(%d)",
		    bp->b_bufsize, bp->b_kvasize);
	}
	pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
		(vm_page_t *)bp->b_xio.xio_pages, bp->b_xio.xio_npages);
	BUF_KERNPROC(bp);
	return (bp);
}

/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 *
 * The returned bio is &bp->b_bio1
 */
void
cluster_callback(struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	struct buf *tbp;
	int error = 0;

	/*
	 * Must propagate errors to all the components.  A short read (EOF)
	 * is a critical error.
	 */
	if (bp->b_flags & B_ERROR) {
		error = bp->b_error;
	} else if (bp->b_bcount != bp->b_bufsize) {
		panic("cluster_callback: unexpected EOF on cluster %p!", bio);
	}

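	/*
	 * Tear down the temporary KVA mapping that pmap_qenter() set up
	 * when the cluster buf was built.
	 */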
	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_xio.xio_npages);
	/*
	 * Move memory from the large cluster buffer into the component
	 * buffers and mark IO as done on these.  Since the memory map
	 * is the same, no actual copying is required.
	 */
	while ((tbp = bio->bio_caller_info1.cluster_head) != NULL) {
		bio->bio_caller_info1.cluster_head = tbp->b_cluster_next;
		if (error) {
			tbp->b_flags |= B_ERROR | B_IODEBUG;
			tbp->b_error = error;
		} else {
			tbp->b_dirtyoff = tbp->b_dirtyend = 0;
			tbp->b_flags &= ~(B_ERROR|B_INVAL);
			tbp->b_flags |= B_IODEBUG;
			/*
			 * XXX the bdwrite()/bqrelse() issued during
			 * cluster building clears B_RELBUF (see bqrelse()
			 * comment).  If direct I/O was specified, we have
			 * to restore it here to allow the buffer and VM
			 * to be freed.
			 */
			if (tbp->b_flags & B_DIRECT)
				tbp->b_flags |= B_RELBUF;
		}
		biodone(&tbp->b_bio1);
	}
	relpbuf(bp, &cluster_pbuf_freecnt);
}

/*
 *	cluster_wbuild_wb:
 *
 *	Implement modified write build for cluster.
 *
 *		write_behind = 0	write behind disabled
 *		write_behind = 1	write behind normal (default)
 *		write_behind = 2	write behind backed-off
 */
static __inline int
cluster_wbuild_wb(struct vnode *vp, int blksize, off_t start_loffset, int len)
{
	int r = 0;

	switch (write_behind) {
	case 2:
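		/*
		 * Backed-off mode: flush the cluster one window behind
		 * the current position rather than the current one.
		 */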
		if (start_loffset < len)
			break;
		start_loffset -= len;
		/* fall through */
	case 1:
		r = cluster_wbuild(vp, blksize, start_loffset, len);
		/* fall through */
	default:
		break;
	}
	return(r);
}

/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *	1. Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2.	beginning of cluster - begin cluster
 *	3.	middle of a cluster - add to cluster
 *	4.	end of a cluster - asynchronously write cluster
 */
void
cluster_write(struct buf *bp, off_t filesize, int blksize, int seqcount)
{
	struct vnode *vp;
	off_t loffset;
	int maxclen, cursize;
	int async;

	vp = bp->b_vp;
	if (vp->v_type == VREG)
		async = vp->v_mount->mnt_flag & MNT_ASYNC;
	else
		async = 0;
	loffset = bp->b_loffset;
	KASSERT(bp->b_loffset != NOOFFSET,
		("cluster_write: no buffer offset"));

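	/*
	 * Per-vnode clustering state used below: v_cstart is the logical
	 * start of the cluster being collected, v_lastw and v_lasta the
	 * last logical and device offsets written, and v_clen the room
	 * remaining in the cluster.
	 */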
	/* Initialize vnode to beginning of file. */
	if (loffset == 0)
		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

	if (vp->v_clen == 0 || loffset != vp->v_lastw + blksize ||
	    bp->b_bio2.bio_offset == NOOFFSET ||
	    (bp->b_bio2.bio_offset != vp->v_lasta + blksize)) {
		maxclen = vmaxiosize(vp);
		if (vp->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at end of file, the process
			 * seeked to another point in the file since its last
			 * write, or we have reached our maximum cluster size,
			 * then push the previous cluster. Otherwise try
			 * reallocating to make it sequential.
			 *
			 * Change to algorithm: only push previous cluster if
			 * it was sequential from the point of view of the
			 * seqcount heuristic, otherwise leave the buffer
			 * intact so we can potentially optimize the I/O
			 * later on in the buf_daemon or update daemon
			 * flush.
			 */
			cursize = vp->v_lastw - vp->v_cstart + blksize;
			if (bp->b_loffset + blksize != filesize ||
			    loffset != vp->v_lastw + blksize || vp->v_clen <= cursize) {
				if (!async && seqcount > 0) {
					cluster_wbuild_wb(vp, blksize,
						vp->v_cstart, cursize);
				}
			} else {
				struct buf **bpp, **endbp;
				struct cluster_save *buflist;

				buflist = cluster_collectbufs(vp, bp, blksize);
				endbp = &buflist->bs_children
				    [buflist->bs_nchildren - 1];
				if (VOP_REALLOCBLKS(vp, buflist)) {
					/*
					 * Failed, push the previous cluster
					 * if *really* writing sequentially
					 * in the logical file (seqcount > 1),
					 * otherwise delay it in the hopes that
					 * the low level disk driver can
					 * optimize the write ordering.
					 */
					for (bpp = buflist->bs_children;
					     bpp < endbp; bpp++)
						brelse(*bpp);
					kfree(buflist, M_SEGMENT);
					if (seqcount > 1) {
						cluster_wbuild_wb(vp,
						    blksize, vp->v_cstart,
						    cursize);
					}
				} else {
					/*
					 * Succeeded, keep building cluster.
					 */
					for (bpp = buflist->bs_children;
					     bpp <= endbp; bpp++)
						bdwrite(*bpp);
					kfree(buflist, M_SEGMENT);
					vp->v_lastw = loffset;
					vp->v_lasta = bp->b_bio2.bio_offset;
					return;
				}
			}
		}
		/*
		 * Consider beginning a cluster. If at end of file, make
		 * cluster as large as possible, otherwise find size of
		 * existing cluster.
		 */
		if ((vp->v_type == VREG) &&
		    bp->b_loffset + blksize != filesize &&
		    (bp->b_bio2.bio_offset == NOOFFSET) &&
		    (VOP_BMAP(vp, loffset, &bp->b_bio2.bio_offset, &maxclen, NULL, BUF_CMD_WRITE) ||
		     bp->b_bio2.bio_offset == NOOFFSET)) {
			bawrite(bp);
			vp->v_clen = 0;
			vp->v_lasta = bp->b_bio2.bio_offset;
			vp->v_cstart = loffset + blksize;
			vp->v_lastw = loffset;
			return;
		}
		if (maxclen > blksize)
			vp->v_clen = maxclen - blksize;
		else
			vp->v_clen = 0;
		if (!async && vp->v_clen == 0) { /* I/O not contiguous */
			vp->v_cstart = loffset + blksize;
			bawrite(bp);
		} else {	/* Wait for rest of cluster */
			vp->v_cstart = loffset;
			bdwrite(bp);
		}
	} else if (loffset == vp->v_cstart + vp->v_clen) {
		/*
		 * At end of cluster, write it out if seqcount tells us we
		 * are operating sequentially, otherwise let the buf or
		 * update daemon handle it.
		 */
		bdwrite(bp);
		if (seqcount > 1)
			cluster_wbuild_wb(vp, blksize, vp->v_cstart,
					  vp->v_clen + blksize);
		vp->v_clen = 0;
		vp->v_cstart = loffset + blksize;
	} else if (vm_page_count_severe()) {
		/*
		 * We are low on memory, get it going NOW
		 */
		bawrite(bp);
	} else {
		/*
		 * In the middle of a cluster, so just delay the I/O for now.
		 */
		bdwrite(bp);
	}
	vp->v_lastw = loffset;
	vp->v_lasta = bp->b_bio2.bio_offset;
}

/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * Scan the range [start_loffset, start_loffset + bytes) for dirty,
 * clusterable buffers and write them out, combining contiguous runs
 * into single large I/Os when possible.  Returns the number of bytes
 * actually written.
 */
int
cluster_wbuild(struct vnode *vp, int blksize, off_t start_loffset, int bytes)
{
	struct buf *bp, *tbp;
	int i, j;
	int totalwritten = 0;
	int maxiosize = vmaxiosize(vp);

	while (bytes > 0) {
		/*
		 * If the buffer is not delayed-write (i.e. dirty), or it
		 * is delayed-write but either locked or inval, it cannot
		 * partake in the clustered write.
		 */
		tbp = findblk(vp, start_loffset, FINDBLK_NBLOCK);
		if (tbp == NULL ||
		    (tbp->b_flags & (B_LOCKED | B_INVAL | B_DELWRI)) != B_DELWRI ||
		    (LIST_FIRST(&tbp->b_dep) && buf_checkwrite(tbp))) {
			if (tbp)
				BUF_UNLOCK(tbp);
			start_loffset += blksize;
			bytes -= blksize;
			continue;
		}
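		/*
		 * findblk() returned the buffer locked (FINDBLK_NBLOCK);
		 * take it off its queue so we own it exclusively for the
		 * duration of the clustered write.
		 */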
		bremfree(tbp);
		KKASSERT(tbp->b_cmd == BUF_CMD_DONE);

		/*
		 * Extra memory in the buffer, punt on this buffer.
		 * XXX we could handle this in most cases, but we would
		 * have to push the extra memory down to after our max
		 * possible cluster size and then potentially pull it back
		 * up if the cluster was terminated prematurely--too much
		 * hassle.
		 */
		if (((tbp->b_flags & (B_CLUSTEROK|B_MALLOC)) != B_CLUSTEROK) ||
		    (tbp->b_bcount != tbp->b_bufsize) ||
		    (tbp->b_bcount != blksize) ||
		    (bytes == blksize) ||
		    ((bp = getpbuf_kva(&cluster_pbuf_freecnt)) == NULL)) {
			totalwritten += tbp->b_bufsize;
			bawrite(tbp);
			start_loffset += blksize;
			bytes -= blksize;
			continue;
		}

		/*
		 * Set up the pbuf.  Track our append point with b_bcount
		 * and b_bufsize.  b_bufsize is not used by the device but
		 * our caller uses it to loop clusters and we use it to
		 * detect a premature EOF on the block device.
		 */
		bp->b_bcount = 0;
		bp->b_bufsize = 0;
		bp->b_xio.xio_npages = 0;
		bp->b_loffset = tbp->b_loffset;
		bp->b_bio2.bio_offset = tbp->b_bio2.bio_offset;

		/*
		 * We are synthesizing a buffer out of vm_page_t's, but
		 * if the block size is not page aligned then the starting
		 * address may not be either.  Inherit the b_data offset
		 * from the original buffer.
		 */
		bp->b_data = (char *)((vm_offset_t)bp->b_data |
		    ((vm_offset_t)tbp->b_data & PAGE_MASK));
		bp->b_flags &= ~B_ERROR;
		bp->b_flags |= B_CLUSTER | B_BNOCLIP |
			(tbp->b_flags & (B_VMIO | B_NEEDCOMMIT));
		bp->b_bio1.bio_caller_info1.cluster_head = NULL;
		bp->b_bio1.bio_caller_info2.cluster_tail = NULL;

		/*
		 * From this location in the file, scan forward to see
		 * if there are buffers with adjacent data that need to
		 * be written as well.
		 */
		for (i = 0; i < bytes; (i += blksize), (start_loffset += blksize)) {
			if (i != 0) { /* If not the first buffer */
				tbp = findblk(vp, start_loffset,
					      FINDBLK_NBLOCK);
				/*
				 * Buffer not found or could not be locked
				 * non-blocking.
				 */
				if (tbp == NULL)
					break;

				/*
				 * If it IS in core, but has different
				 * characteristics, then don't cluster
				 * with it.
				 */
				if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
				     B_INVAL | B_DELWRI | B_NEEDCOMMIT))
				    != (B_DELWRI | B_CLUSTEROK |
				     (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
				    (tbp->b_flags & B_LOCKED) ||
				    (LIST_FIRST(&tbp->b_dep) &&
				     buf_checkwrite(tbp))
				) {
					BUF_UNLOCK(tbp);
					break;
				}

				/*
				 * Check that the combined cluster
				 * would make sense with regard to pages
				 * and would not be too large
				 */
				if ((tbp->b_bcount != blksize) ||
				  ((bp->b_bio2.bio_offset + i) !=
				    tbp->b_bio2.bio_offset) ||
				  ((tbp->b_xio.xio_npages + bp->b_xio.xio_npages) >
				    (maxiosize / PAGE_SIZE))) {
					BUF_UNLOCK(tbp);
					break;
				}
				/*
				 * Ok, it's passed all the tests,
				 * so remove it from the free list
				 * and mark it busy. We will use it.
				 */
				bremfree(tbp);
				KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
			} /* end of code for non-first buffers only */

			/*
			 * If the IO is via the VM then we do some
			 * special VM hackery (yuck).  Since the buffer's
			 * block size may not be page-aligned it is possible
			 * for a page to be shared between two buffers.  We
			 * have to get rid of the duplication when building
			 * the cluster.
			 */
			if (tbp->b_flags & B_VMIO) {
				vm_page_t m;

				if (i != 0) { /* if not first buffer */
					for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
						m = tbp->b_xio.xio_pages[j];
						if (m->flags & PG_BUSY) {
							bqrelse(tbp);
							goto finishcluster;
						}
					}
				}

				for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
					m = tbp->b_xio.xio_pages[j];
					vm_page_io_start(m);
					vm_object_pip_add(m->object, 1);
					if ((bp->b_xio.xio_npages == 0) ||
					  (bp->b_xio.xio_pages[bp->b_xio.xio_npages - 1] != m)) {
						bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
						bp->b_xio.xio_npages++;
					}
				}
			}
			bp->b_bcount += blksize;
			bp->b_bufsize += blksize;

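			/*
			 * The component buffer now belongs to the cluster:
			 * mark it clean and set it up as an async write
			 * chained off the pbuf.
			 */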
			bundirty(tbp);
			tbp->b_flags &= ~B_ERROR;
			tbp->b_cmd = BUF_CMD_WRITE;
			BUF_KERNPROC(tbp);
			cluster_append(&bp->b_bio1, tbp);

			/*
			 * check for latent dependencies to be handled
			 */
			if (LIST_FIRST(&tbp->b_dep) != NULL)
				buf_start(tbp);
		}
	finishcluster:
		pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
			(vm_page_t *) bp->b_xio.xio_pages, bp->b_xio.xio_npages);
		if (bp->b_bufsize > bp->b_kvasize) {
			panic(
			    "cluster_wbuild: b_bufsize(%d) > b_kvasize(%d)\n",
			    bp->b_bufsize, bp->b_kvasize);
		}
		totalwritten += bp->b_bufsize;
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bufsize;
		bp->b_bio1.bio_done = cluster_callback;
		bp->b_cmd = BUF_CMD_WRITE;

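		/*
		 * Issue the cluster: busy the underlying pages, account
		 * for the in-flight write, and hand the pbuf to the
		 * strategy routine.
		 */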
		vfs_busy_pages(vp, bp);
		bsetrunningbufspace(bp, bp->b_bufsize);
		BUF_KERNPROC(bp);
		vn_strategy(vp, &bp->b_bio1);

		bytes -= i;
	}
	return totalwritten;
}

/*
 * Collect together all the buffers in a cluster, plus add one
 * additional buffer (last_bp) passed in by the caller.
 */
static struct cluster_save *
cluster_collectbufs(struct vnode *vp, struct buf *last_bp, int blksize)
{
	struct cluster_save *buflist;
	struct buf *bp;
	off_t loffset;
	int i, len;

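	/*
	 * Number of delayed-write buffers in the cluster being collected,
	 * covering v_cstart through v_lastw inclusive.
	 */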
	len = (int)(vp->v_lastw - vp->v_cstart + blksize) / blksize;
	buflist = kmalloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
			 M_SEGMENT, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **)(buflist + 1);
	for (loffset = vp->v_cstart, i = 0; i < len; (loffset += blksize), i++) {
		(void) bread(vp, loffset, last_bp->b_bcount, &bp);
		buflist->bs_children[i] = bp;
		if (bp->b_bio2.bio_offset == NOOFFSET) {
			VOP_BMAP(bp->b_vp, bp->b_loffset,
				 &bp->b_bio2.bio_offset,
				 NULL, NULL, BUF_CMD_WRITE);
		}
	}
	buflist->bs_children[i] = bp = last_bp;
	if (bp->b_bio2.bio_offset == NOOFFSET) {
		VOP_BMAP(bp->b_vp, bp->b_loffset, &bp->b_bio2.bio_offset,
			 NULL, NULL, BUF_CMD_WRITE);
	}
	buflist->bs_nchildren = i + 1;
	return (buflist);
}

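/*
 * Append tbp to the singly-linked list of cluster components hung off
 * bio, linked via b_cluster_next.
 */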
void
cluster_append(struct bio *bio, struct buf *tbp)
{
	tbp->b_cluster_next = NULL;
	if (bio->bio_caller_info1.cluster_head == NULL) {
		bio->bio_caller_info1.cluster_head = tbp;
		bio->bio_caller_info2.cluster_tail = tbp;
	} else {
		bio->bio_caller_info2.cluster_tail->b_cluster_next = tbp;
		bio->bio_caller_info2.cluster_tail = tbp;
	}
}

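/*
 * Set the read-ahead mark on a buffer.  The first underlying VM page is
 * flagged as well (PG_RAM), presumably so the read-ahead state can be
 * rediscovered if the buffer itself is recycled.
 */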
static void
cluster_setram(struct buf *bp)
{
	bp->b_flags |= B_RAM;
	if (bp->b_xio.xio_npages)
		vm_page_flag_set(bp->b_xio.xio_pages[0], PG_RAM);
}