xref: /dragonfly/sys/kern/vfs_cluster.c (revision b71f52a9)
1 /*-
2  * Copyright (c) 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * Modifications/enhancements:
5  * 	Copyright (c) 1995 John S. Dyson.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by the University of
18  *	California, Berkeley and its contributors.
19  * 4. Neither the name of the University nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  *
35  *	@(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
36  * $FreeBSD: src/sys/kern/vfs_cluster.c,v 1.92.2.9 2001/11/18 07:10:59 dillon Exp $
37  * $DragonFly: src/sys/kern/vfs_cluster.c,v 1.40 2008/07/14 03:09:00 dillon Exp $
38  */
39 
40 #include "opt_debug_cluster.h"
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/kernel.h>
45 #include <sys/proc.h>
46 #include <sys/buf.h>
47 #include <sys/vnode.h>
48 #include <sys/malloc.h>
49 #include <sys/mount.h>
50 #include <sys/resourcevar.h>
51 #include <sys/vmmeter.h>
52 #include <vm/vm.h>
53 #include <vm/vm_object.h>
54 #include <vm/vm_page.h>
55 #include <sys/sysctl.h>
56 #include <sys/buf2.h>
57 #include <vm/vm_page2.h>
58 
59 #if defined(CLUSTERDEBUG)
60 #include <sys/sysctl.h>
61 static int	rcluster = 0;
62 SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0, "");
63 #endif
64 
65 static MALLOC_DEFINE(M_SEGMENT, "cluster_save", "cluster_save buffer");
66 
67 static struct cluster_save *
68 	cluster_collectbufs (struct vnode *vp, struct buf *last_bp,
69 			    int blksize);
70 static struct buf *
71 	cluster_rbuild (struct vnode *vp, off_t filesize, off_t loffset,
72 			    off_t doffset, int blksize, int run,
73 			    struct buf *fbp);
74 static void cluster_callback (struct bio *);
75 
76 
77 static int write_behind = 1;
78 SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0, "");
79 
80 extern vm_page_t	bogus_page;
81 
82 extern int cluster_pbuf_freecnt;
83 
84 /*
85  * Maximum number of blocks for read-ahead.
86  */
87 #define MAXRA 32
88 
89 /*
90  * This replaces bread, adding clustered read-ahead when the read pattern is sequential.
91  */
92 int
93 cluster_read(struct vnode *vp, off_t filesize, off_t loffset,
94 	     int blksize, int totread, int seqcount, struct buf **bpp)
95 {
96 	struct buf *bp, *rbp, *reqbp;
97 	off_t origoffset;
98 	off_t doffset;
99 	int error;
100 	int i;
101 	int maxra, racluster;
102 
103 	error = 0;
104 
105 	/*
106 	 * Try to limit the amount of read-ahead by a few
107 	 * ad-hoc parameters.  This needs work!!!
108 	 */
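	/*
	 * racluster is the largest read cluster, in blocks, that fits in a
	 * single device I/O.  maxra is the total read-ahead window in
	 * blocks, clamped to MAXRA and to a fraction of the buffer cache.
	 */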
109 	racluster = vmaxiosize(vp) / blksize;
110 	maxra = 2 * racluster + (totread / blksize);
111 	if (maxra > MAXRA)
112 		maxra = MAXRA;
113 	if (maxra > nbuf/8)
114 		maxra = nbuf/8;
115 
116 	/*
117 	 * Get the requested block.
118 	 */
119 	*bpp = reqbp = bp = getblk(vp, loffset, blksize, 0, 0);
120 	origoffset = loffset;
121 
122 	/*
123 	 * if it is in the cache, then check to see if the reads have been
124 	 * sequential.  If they have, then try some read-ahead, otherwise
125 	 * back-off on prospective read-aheads.
126 	 */
127 	if (bp->b_flags & B_CACHE) {
128 		if (!seqcount) {
129 			return 0;
130 		} else if ((bp->b_flags & B_RAM) == 0) {
131 			return 0;
132 		} else {
133 			struct buf *tbp;
134 			bp->b_flags &= ~B_RAM;
135 
136 			/*
137 			 * Set the read-ahead mark only if we can passively lock
138 			 * the buffer.  Note that with these flags the bp could
139 			 * very well exist even though NULL is returned.
140 			 */
141 			for (i = 1; i < maxra; i++) {
142 				tbp = findblk(vp, loffset + i * blksize,
143 					      FINDBLK_NBLOCK);
144 				if (tbp == NULL)
145 					break;
146 				if (((i % racluster) == (racluster - 1)) ||
147 				    (i == (maxra - 1))) {
148 					tbp->b_flags |= B_RAM;
149 				}
150 				BUF_UNLOCK(tbp);
151 			}
152 			if (i >= maxra)
153 				return 0;
154 			loffset += i * blksize;
155 		}
156 		reqbp = bp = NULL;
157 	} else {
158 		off_t firstread = bp->b_loffset;
159 		int nblks;
160 
161 		/*
162 		 * Set up a synchronous read for bp.
163 		 */
164 		bp->b_cmd = BUF_CMD_READ;
165 		bp->b_bio1.bio_done = biodone_sync;
166 		bp->b_bio1.bio_flags |= BIO_SYNC;
167 
168 		KASSERT(firstread != NOOFFSET,
169 			("cluster_read: no buffer offset"));
170 		if (firstread + totread > filesize)
171 			totread = (int)(filesize - firstread);
172 		nblks = totread / blksize;
173 		if (nblks) {
174 			int burstbytes;
175 
176 			if (nblks > racluster)
177 				nblks = racluster;
178 
179 			error = VOP_BMAP(vp, loffset, &doffset,
180 					 &burstbytes, NULL, BUF_CMD_READ);
181 			if (error)
182 				goto single_block_read;
183 			if (doffset == NOOFFSET)
184 				goto single_block_read;
185 			if (burstbytes < blksize * 2)
186 				goto single_block_read;
187 			if (nblks > burstbytes / blksize)
188 				nblks = burstbytes / blksize;
189 
190 			bp = cluster_rbuild(vp, filesize, loffset,
191 					    doffset, blksize, nblks, bp);
192 			loffset += bp->b_bufsize;
193 		} else {
194 single_block_read:
195 			/*
196 			 * if it isn't in the cache, then get a chunk from
197 			 * disk if sequential, otherwise just get the block.
198 			 */
199 			bp->b_flags |= B_RAM;
200 			loffset += blksize;
201 		}
202 	}
203 
204 	/*
205 	 * If B_CACHE was not set, issue bp.  bp will either be an
206 	 * asynchronous cluster buf or a synchronous single-buf.
207 	 * If it is a single buf it will be the same as reqbp.
208 	 *
209 	 * NOTE: Once an async cluster buf is issued bp becomes invalid.
210 	 */
211 	if (bp) {
212 #if defined(CLUSTERDEBUG)
213 		if (rcluster)
214 			kprintf("S(%lld,%d,%d) ",
215 			    bp->b_loffset, bp->b_bcount, seqcount);
216 #endif
217 		if ((bp->b_flags & B_CLUSTER) == 0)
218 			vfs_busy_pages(vp, bp);
219 		bp->b_flags &= ~(B_ERROR|B_INVAL);
220 		vn_strategy(vp, &bp->b_bio1);
221 		error = 0;
222 		/* bp invalid now */
223 	}
224 
225 	/*
226 	 * If we have been doing sequential I/O, then do some read-ahead.
227 	 *
228 	 * Only mess with buffers which we can immediately lock.  HAMMER
229 	 * will do device-readahead irrespective of what the blocks
230 	 * represent.
231 	 */
232 	rbp = NULL;
233 	if (!error &&
234 	    seqcount &&
235 	    loffset < origoffset + seqcount * blksize &&
236 	    loffset + blksize <= filesize
237 	) {
238 		int nblksread;
239 		int ntoread;
240 		int burstbytes;
241 		int tmp_error;
242 
243 		rbp = getblk(vp, loffset, blksize,
244 			     GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
245 		if (rbp == NULL)
246 			goto no_read_ahead;
247 		if ((rbp->b_flags & B_CACHE)) {
248 			bqrelse(rbp);
249 			goto no_read_ahead;
250 		}
251 
252 		/*
253 		 * An error from the read-ahead bmap has nothing to do
254 		 * with the caller's original request.
255 		 */
256 		tmp_error = VOP_BMAP(vp, loffset, &doffset,
257 				     &burstbytes, NULL, BUF_CMD_READ);
258 		if (tmp_error || doffset == NOOFFSET) {
259 			rbp->b_flags |= B_INVAL;
260 			brelse(rbp);
261 			rbp = NULL;
262 			goto no_read_ahead;
263 		}
264 		ntoread = burstbytes / blksize;
265 		nblksread = (totread + blksize - 1) / blksize;
266 		if (seqcount < nblksread)
267 			seqcount = nblksread;
268 		if (ntoread > seqcount)
269 			ntoread = seqcount;
270 
271 		/*
272 		 * rbp: async read
273 		 */
274 		rbp->b_cmd = BUF_CMD_READ;
275 		rbp->b_flags |= B_RAM/* | B_AGE*/;
276 
277 		if (burstbytes) {
278 			rbp = cluster_rbuild(vp, filesize, loffset,
279 					     doffset, blksize,
280 					     ntoread, rbp);
281 		} else {
282 			rbp->b_bio2.bio_offset = doffset;
283 		}
284 #if defined(CLUSTERDEBUG)
285 		if (rcluster) {
286 			if (bp)
287 				kprintf("A+(%lld,%d,%lld,%d) ",
288 				    rbp->b_loffset, rbp->b_bcount,
289 				    rbp->b_loffset - origoffset,
290 				    seqcount);
291 			else
292 				kprintf("A(%lld,%d,%lld,%d) ",
293 				    rbp->b_loffset, rbp->b_bcount,
294 				    rbp->b_loffset - origoffset,
295 				    seqcount);
296 		}
297 #endif
298 		rbp->b_flags &= ~(B_ERROR|B_INVAL);
299 
300 		if ((rbp->b_flags & B_CLUSTER) == 0)
301 			vfs_busy_pages(vp, rbp);
302 		BUF_KERNPROC(rbp);
303 		vn_strategy(vp, &rbp->b_bio1);
304 		/* rbp invalid now */
305 	}
306 
307 	/*
308 	 * Wait for our original buffer to complete its I/O.  reqbp will
309 	 * be NULL if the original buffer was B_CACHE.  We are returning
310 	 * (*bpp) which is the same as reqbp when reqbp != NULL.
311 	 */
312 no_read_ahead:
313 	if (reqbp) {
314 		KKASSERT(reqbp->b_bio1.bio_flags & BIO_SYNC);
315 		error = biowait(&reqbp->b_bio1, "clurd");
316 	}
317 	return (error);
318 }
319 
320 /*
321  * If blocks are contiguous on disk, use this to provide clustered
322  * read ahead.  We will read as many blocks as possible sequentially
323  * and then parcel them up into logical blocks in the buffer hash table.
324  *
325  * This function either returns a cluster buf or it returns fbp.  fbp is
326  * already expected to be set up as a synchronous or asynchronous request.
327  *
328  * If a cluster buf is returned it will always be async.
329  */
330 static struct buf *
331 cluster_rbuild(struct vnode *vp, off_t filesize, off_t loffset, off_t doffset,
332 	       int blksize, int run, struct buf *fbp)
333 {
334 	struct buf *bp, *tbp;
335 	off_t boffset;
336 	int i, j;
337 	int maxiosize = vmaxiosize(vp);
338 
339 	/*
340 	 * avoid a division
341 	 */
342 	while (loffset + run * blksize > filesize) {
343 		--run;
344 	}
345 
346 	tbp = fbp;
347 	tbp->b_bio2.bio_offset = doffset;
348 	if ((tbp->b_flags & B_MALLOC) ||
349 	    ((tbp->b_flags & B_VMIO) == 0) || (run <= 1)) {
350 		return tbp;
351 	}
352 
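	/*
	 * Acquire a pbuf to act as the cluster carrier.  If none are
	 * available, fall back to the single buffer already set up above.
	 */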
353 	bp = trypbuf(&cluster_pbuf_freecnt);
354 	if (bp == NULL) {
355 		return tbp;
356 	}
357 
358 	/*
359 	 * We are synthesizing a buffer out of vm_page_t's, but
360 	 * if the block size is not page aligned then the starting
361 	 * address may not be either.  Inherit the b_data offset
362 	 * from the original buffer.
363 	 */
364 	bp->b_data = (char *)((vm_offset_t)bp->b_data |
365 	    ((vm_offset_t)tbp->b_data & PAGE_MASK));
366 	bp->b_flags |= B_CLUSTER | B_VMIO;
367 	bp->b_cmd = BUF_CMD_READ;
368 	bp->b_bio1.bio_done = cluster_callback;		/* default to async */
369 	bp->b_bio1.bio_caller_info1.cluster_head = NULL;
370 	bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
371 	bp->b_loffset = loffset;
372 	bp->b_bio2.bio_offset = doffset;
373 	KASSERT(bp->b_loffset != NOOFFSET,
374 		("cluster_rbuild: no buffer offset"));
375 
376 	bp->b_bcount = 0;
377 	bp->b_bufsize = 0;
378 	bp->b_xio.xio_npages = 0;
379 
380 	for (boffset = doffset, i = 0; i < run; ++i, boffset += blksize) {
381 		if (i) {
382 			if ((bp->b_xio.xio_npages * PAGE_SIZE) +
383 			    round_page(blksize) > maxiosize) {
384 				break;
385 			}
386 
387 			/*
388 			 * Shortcut some checks and try to avoid buffers that
389 			 * would block in the lock.  The same checks have to
390 			 * be made again after we officially get the buffer.
391 			 */
392 			tbp = getblk(vp, loffset + i * blksize, blksize,
393 				     GETBLK_SZMATCH|GETBLK_NOWAIT, 0);
394 			if (tbp == NULL)
395 				break;
396 			for (j = 0; j < tbp->b_xio.xio_npages; j++) {
397 				if (tbp->b_xio.xio_pages[j]->valid)
398 					break;
399 			}
400 			if (j != tbp->b_xio.xio_npages) {
401 				bqrelse(tbp);
402 				break;
403 			}
404 
405 			/*
406 			 * Stop scanning if the buffer is fuly valid
407 			 * Stop scanning if the buffer is fully valid
408 			 * background write), or if the buffer is not
409 			 * VMIO backed.  The clustering code can only deal
410 			 * with VMIO-backed buffers.
411 			 */
412 			if ((tbp->b_flags & (B_CACHE|B_LOCKED)) ||
413 			    (tbp->b_flags & B_VMIO) == 0 ||
414 			    (LIST_FIRST(&tbp->b_dep) != NULL &&
415 			     buf_checkread(tbp))
416 			) {
417 				bqrelse(tbp);
418 				break;
419 			}
420 
421 			/*
422 			 * The buffer must be completely invalid in order to
423 			 * take part in the cluster.  If it is partially valid
424 			 * then we stop.
425 			 */
426 			for (j = 0; j < tbp->b_xio.xio_npages; j++) {
427 				if (tbp->b_xio.xio_pages[j]->valid)
428 					break;
429 			}
430 			if (j != tbp->b_xio.xio_npages) {
431 				bqrelse(tbp);
432 				break;
433 			}
434 
435 			/*
436 			 * Set a read-ahead mark as appropriate
437 			 */
438 			if (i == 1 || i == (run - 1))
439 				tbp->b_flags |= B_RAM;
440 
441 			/*
442 			 * Depress the priority of buffers not explicitly
443 			 * requested.
444 			 */
445 			/* tbp->b_flags |= B_AGE; */
446 
447 			/*
448 			 * Set the block number if it isn't set, otherwise
449 			 * if it is make sure it matches the block number we
450 			 * expect.
451 			 */
452 			if (tbp->b_bio2.bio_offset == NOOFFSET) {
453 				tbp->b_bio2.bio_offset = boffset;
454 			} else if (tbp->b_bio2.bio_offset != boffset) {
455 				brelse(tbp);
456 				break;
457 			}
458 		}
459 
460 		/*
461 		 * The passed-in tbp (i == 0) will already be set up for
462 		 * async or sync operation.  All other tbp's acquired in
463 		 * our loop are set up for async operation.
464 		 */
465 		tbp->b_cmd = BUF_CMD_READ;
466 		BUF_KERNPROC(tbp);
467 		cluster_append(&bp->b_bio1, tbp);
468 		for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
469 			vm_page_t m;
470 			m = tbp->b_xio.xio_pages[j];
471 			vm_page_io_start(m);
472 			vm_object_pip_add(m->object, 1);
473 			if ((bp->b_xio.xio_npages == 0) ||
474 				(bp->b_xio.xio_pages[bp->b_xio.xio_npages-1] != m)) {
475 				bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
476 				bp->b_xio.xio_npages++;
477 			}
478 			if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
479 				tbp->b_xio.xio_pages[j] = bogus_page;
480 		}
481 		/*
482 		 * XXX shouldn't this be += size for both, like in
483 		 * cluster_wbuild()?
484 		 *
485 		 * Don't inherit tbp->b_bufsize as it may be larger due to
486 		 * a non-page-aligned size.  Instead just aggregate using
487 		 * 'blksize'.
488 		 */
489 		if (tbp->b_bcount != blksize)
490 		    kprintf("warning: tbp->b_bcount wrong %d vs %d\n", tbp->b_bcount, blksize);
491 		if (tbp->b_bufsize != blksize)
492 		    kprintf("warning: tbp->b_bufsize wrong %d vs %d\n", tbp->b_bufsize, blksize);
493 		bp->b_bcount += blksize;
494 		bp->b_bufsize += blksize;
495 	}
496 
497 	/*
498 	 * Fully valid pages in the cluster are already good and do not need
499 	 * to be re-read from disk.  Replace the page with bogus_page
500 	 */
501 	for (j = 0; j < bp->b_xio.xio_npages; j++) {
502 		if ((bp->b_xio.xio_pages[j]->valid & VM_PAGE_BITS_ALL) ==
503 		    VM_PAGE_BITS_ALL) {
504 			bp->b_xio.xio_pages[j] = bogus_page;
505 		}
506 	}
507 	if (bp->b_bufsize > bp->b_kvasize) {
508 		panic("cluster_rbuild: b_bufsize(%d) > b_kvasize(%d)",
509 		    bp->b_bufsize, bp->b_kvasize);
510 	}
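	/*
	 * Map the collected pages into the pbuf's kernel virtual address
	 * space so the device sees a single contiguous buffer.
	 */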
511 	pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
512 		(vm_page_t *)bp->b_xio.xio_pages, bp->b_xio.xio_npages);
513 	BUF_KERNPROC(bp);
514 	return (bp);
515 }
516 
517 /*
518  * Cleanup after a clustered read or write.
519  * This is complicated by the fact that any of the buffers might have
520  * extra memory (if there were no empty buffer headers at allocbuf time)
521  * that we will need to shift around.
522  *
523  * The returned bio is &bp->b_bio1
524  */
525 void
526 cluster_callback(struct bio *bio)
527 {
528 	struct buf *bp = bio->bio_buf;
529 	struct buf *tbp;
530 	int error = 0;
531 
532 	/*
533 	 * Must propagate errors to all the components.  A short read (EOF)
534 	 * is a critical error.
535 	 */
536 	if (bp->b_flags & B_ERROR) {
537 		error = bp->b_error;
538 	} else if (bp->b_bcount != bp->b_bufsize) {
539 		panic("cluster_callback: unexpected EOF on cluster %p!", bio);
540 	}
541 
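	/*
	 * Unmap the cluster's pages from the pbuf's KVA; the component
	 * buffers retain their own mappings of the same pages.
	 */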
542 	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_xio.xio_npages);
543 	/*
544 	 * Move memory from the large cluster buffer into the component
545 	 * buffers and mark IO as done on these.  Since the memory map
546 	 * is the same, no actual copying is required.
547 	 */
548 	while ((tbp = bio->bio_caller_info1.cluster_head) != NULL) {
549 		bio->bio_caller_info1.cluster_head = tbp->b_cluster_next;
550 		if (error) {
551 			tbp->b_flags |= B_ERROR;
552 			tbp->b_error = error;
553 		} else {
554 			tbp->b_dirtyoff = tbp->b_dirtyend = 0;
555 			tbp->b_flags &= ~(B_ERROR|B_INVAL);
556 			/*
557 			 * XXX the bdwrite()/bqrelse() issued during
558 			 * cluster building clears B_RELBUF (see bqrelse()
559 			 * comment).  If direct I/O was specified, we have
560 			 * to restore it here to allow the buffer and VM
561 			 * to be freed.
562 			 */
563 			if (tbp->b_flags & B_DIRECT)
564 				tbp->b_flags |= B_RELBUF;
565 		}
566 		biodone(&tbp->b_bio1);
567 	}
568 	relpbuf(bp, &cluster_pbuf_freecnt);
569 }
570 
571 /*
572  *	cluster_wbuild_wb:
573  *
574  *	Implement modified write build for cluster.
575  *
576  *		write_behind = 0	write behind disabled
577  *		write_behind = 1	write behind normal (default)
578  *		write_behind = 2	write behind backed-off
579  */
580 
581 static __inline int
582 cluster_wbuild_wb(struct vnode *vp, int blksize, off_t start_loffset, int len)
583 {
584 	int r = 0;
585 
586 	switch(write_behind) {
587 	case 2:
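		/*
		 * Backed-off mode: shift the window back by 'len' so the
		 * write-behind trails the current position by one window.
		 */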
588 		if (start_loffset < len)
589 			break;
590 		start_loffset -= len;
591 		/* fall through */
592 	case 1:
593 		r = cluster_wbuild(vp, blksize, start_loffset, len);
594 		/* fall through */
595 	default:
596 		/* fall through */
597 		break;
598 	}
599 	return(r);
600 }
601 
602 /*
603  * Do clustered write for FFS.
604  *
605  * Four cases:
606  *	1. Write is not sequential (write asynchronously)
607  *	Write is sequential:
608  *	2.	beginning of cluster - begin cluster
609  *	3.	middle of a cluster - add to cluster
610  *	4.	end of a cluster - asynchronously write cluster
611  */
612 void
613 cluster_write(struct buf *bp, off_t filesize, int blksize, int seqcount)
614 {
615 	struct vnode *vp;
616 	off_t loffset;
617 	int maxclen, cursize;
618 	int async;
619 
620 	vp = bp->b_vp;
621 	if (vp->v_type == VREG)
622 		async = vp->v_mount->mnt_flag & MNT_ASYNC;
623 	else
624 		async = 0;
625 	loffset = bp->b_loffset;
626 	KASSERT(bp->b_loffset != NOOFFSET,
627 		("cluster_write: no buffer offset"));
628 
629 	/* Initialize vnode to beginning of file. */
630 	if (loffset == 0)
631 		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;
632 
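	/*
	 * The buffer does not extend the current cluster: either no
	 * cluster is in progress, or the block is not logically or
	 * physically contiguous with the last one written (or its disk
	 * offset is not yet known).
	 */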
633 	if (vp->v_clen == 0 || loffset != vp->v_lastw + blksize ||
634 	    bp->b_bio2.bio_offset == NOOFFSET ||
635 	    (bp->b_bio2.bio_offset != vp->v_lasta + blksize)) {
636 		maxclen = vmaxiosize(vp);
637 		if (vp->v_clen != 0) {
638 			/*
639 			 * Next block is not sequential.
640 			 *
641 			 * If we are not writing at the end of the file, the
642 			 * process has seeked to another point in the file since
643 			 * its last write, or we have reached our maximum cluster
644 			 * size, then push the previous cluster.  Otherwise try
645 			 * reallocating to make it sequential.
646 			 *
647 			 * Change to the algorithm: only push the previous
648 			 * cluster if it was sequential from the point of view
649 			 * of the seqcount heuristic, otherwise leave the buffer
650 			 * intact so we can potentially optimize the I/O
651 			 * later on in the buf_daemon or update daemon
652 			 * flush.
653 			 */
654 			cursize = vp->v_lastw - vp->v_cstart + blksize;
655 			if (bp->b_loffset + blksize != filesize ||
656 			    loffset != vp->v_lastw + blksize || vp->v_clen <= cursize) {
657 				if (!async && seqcount > 0) {
658 					cluster_wbuild_wb(vp, blksize,
659 						vp->v_cstart, cursize);
660 				}
661 			} else {
662 				struct buf **bpp, **endbp;
663 				struct cluster_save *buflist;
664 
665 				buflist = cluster_collectbufs(vp, bp, blksize);
666 				endbp = &buflist->bs_children
667 				    [buflist->bs_nchildren - 1];
668 				if (VOP_REALLOCBLKS(vp, buflist)) {
669 					/*
670 					 * Failed, push the previous cluster
671 					 * if *really* writing sequentially
672 					 * in the logical file (seqcount > 1),
673 					 * otherwise delay it in the hopes that
674 					 * the low level disk driver can
675 					 * optimize the write ordering.
676 					 */
677 					for (bpp = buflist->bs_children;
678 					     bpp < endbp; bpp++)
679 						brelse(*bpp);
680 					kfree(buflist, M_SEGMENT);
681 					if (seqcount > 1) {
682 						cluster_wbuild_wb(vp,
683 						    blksize, vp->v_cstart,
684 						    cursize);
685 					}
686 				} else {
687 					/*
688 					 * Succeeded, keep building cluster.
689 					 */
690 					for (bpp = buflist->bs_children;
691 					     bpp <= endbp; bpp++)
692 						bdwrite(*bpp);
693 					kfree(buflist, M_SEGMENT);
694 					vp->v_lastw = loffset;
695 					vp->v_lasta = bp->b_bio2.bio_offset;
696 					return;
697 				}
698 			}
699 		}
700 		/*
701 		 * Consider beginning a cluster. If at end of file, make
702 		 * cluster as large as possible, otherwise find size of
703 		 * existing cluster.
704 		 */
705 		if ((vp->v_type == VREG) &&
706 		    bp->b_loffset + blksize != filesize &&
707 		    (bp->b_bio2.bio_offset == NOOFFSET) &&
708 		    (VOP_BMAP(vp, loffset, &bp->b_bio2.bio_offset, &maxclen, NULL, BUF_CMD_WRITE) ||
709 		     bp->b_bio2.bio_offset == NOOFFSET)) {
710 			bawrite(bp);
711 			vp->v_clen = 0;
712 			vp->v_lasta = bp->b_bio2.bio_offset;
713 			vp->v_cstart = loffset + blksize;
714 			vp->v_lastw = loffset;
715 			return;
716 		}
717 		if (maxclen > blksize)
718 			vp->v_clen = maxclen - blksize;
719 		else
720 			vp->v_clen = 0;
721 		if (!async && vp->v_clen == 0) { /* I/O not contiguous */
722 			vp->v_cstart = loffset + blksize;
723 			bawrite(bp);
724 		} else {	/* Wait for rest of cluster */
725 			vp->v_cstart = loffset;
726 			bdwrite(bp);
727 		}
728 	} else if (loffset == vp->v_cstart + vp->v_clen) {
729 		/*
730 		 * At end of cluster, write it out if seqcount tells us we
731 		 * are operating sequentially, otherwise let the buf or
732 		 * update daemon handle it.
733 		 */
734 		bdwrite(bp);
735 		if (seqcount > 1)
736 			cluster_wbuild_wb(vp, blksize, vp->v_cstart,
737 					  vp->v_clen + blksize);
738 		vp->v_clen = 0;
739 		vp->v_cstart = loffset + blksize;
740 	} else if (vm_page_count_severe()) {
741 		/*
742 		 * We are low on memory, get it going NOW
743 		 */
744 		bawrite(bp);
745 	} else {
746 		/*
747 		 * In the middle of a cluster, so just delay the I/O for now.
748 		 */
749 		bdwrite(bp);
750 	}
751 	vp->v_lastw = loffset;
752 	vp->v_lasta = bp->b_bio2.bio_offset;
753 }
754 
755 
756 /*
757  * This is an awful lot like cluster_rbuild...wish they could be combined.
758  * Scan buffers starting at start_loffset for up to 'bytes' bytes,
759  * collecting contiguous delayed-write buffers into clustered writes.
760  * Returns the total number of buffer bytes queued for write.
761  */
762 int
763 cluster_wbuild(struct vnode *vp, int blksize, off_t start_loffset, int bytes)
764 {
765 	struct buf *bp, *tbp;
766 	int i, j;
767 	int totalwritten = 0;
768 	int maxiosize = vmaxiosize(vp);
769 
770 	while (bytes > 0) {
771 		/*
772 		 * If the buffer is not delayed-write (i.e. dirty), or it
773 		 * is delayed-write but either locked or inval, it cannot
774 		 * partake in the clustered write.
775 		 */
776 		tbp = findblk(vp, start_loffset, FINDBLK_NBLOCK);
777 		if (tbp == NULL ||
778 		    (tbp->b_flags & (B_LOCKED | B_INVAL | B_DELWRI)) != B_DELWRI ||
779 		    (LIST_FIRST(&tbp->b_dep) && buf_checkwrite(tbp))) {
780 			if (tbp)
781 				BUF_UNLOCK(tbp);
782 			start_loffset += blksize;
783 			bytes -= blksize;
784 			continue;
785 		}
786 		bremfree(tbp);
787 		KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
788 
789 		/*
790 		 * Extra memory in the buffer, punt on this buffer.
791 		 * XXX we could handle this in most cases, but we would
792 		 * have to push the extra memory down to after our max
793 		 * possible cluster size and then potentially pull it back
794 		 * up if the cluster was terminated prematurely--too much
795 		 * hassle.
796 		 */
797 		if (((tbp->b_flags & (B_CLUSTEROK|B_MALLOC)) != B_CLUSTEROK) ||
798 		    (tbp->b_bcount != tbp->b_bufsize) ||
799 		    (tbp->b_bcount != blksize) ||
800 		    (bytes == blksize) ||
801 		    ((bp = getpbuf(&cluster_pbuf_freecnt)) == NULL)) {
802 			totalwritten += tbp->b_bufsize;
803 			bawrite(tbp);
804 			start_loffset += blksize;
805 			bytes -= blksize;
806 			continue;
807 		}
808 
809 		/*
810 		 * Set up the pbuf.  Track our append point with b_bcount
811 		 * and b_bufsize.  b_bufsize is not used by the device but
812 		 * our caller uses it to loop clusters and we use it to
813 		 * detect a premature EOF on the block device.
814 		 */
815 		bp->b_bcount = 0;
816 		bp->b_bufsize = 0;
817 		bp->b_xio.xio_npages = 0;
818 		bp->b_loffset = tbp->b_loffset;
819 		bp->b_bio2.bio_offset = tbp->b_bio2.bio_offset;
820 
821 		/*
822 		 * We are synthesizing a buffer out of vm_page_t's, but
823 		 * if the block size is not page aligned then the starting
824 		 * address may not be either.  Inherit the b_data offset
825 		 * from the original buffer.
826 		 */
827 		bp->b_data = (char *)((vm_offset_t)bp->b_data |
828 		    ((vm_offset_t)tbp->b_data & PAGE_MASK));
829 		bp->b_flags &= ~B_ERROR;
830 		bp->b_flags |= B_CLUSTER | B_BNOCLIP |
831 			(tbp->b_flags & (B_VMIO | B_NEEDCOMMIT));
832 		bp->b_bio1.bio_caller_info1.cluster_head = NULL;
833 		bp->b_bio1.bio_caller_info2.cluster_tail = NULL;
834 
835 		/*
836 		 * From this location in the file, scan forward to see
837 		 * if there are buffers with adjacent data that need to
838 		 * be written as well.
839 		 */
840 		for (i = 0; i < bytes; (i += blksize), (start_loffset += blksize)) {
841 			if (i != 0) { /* If not the first buffer */
842 				tbp = findblk(vp, start_loffset,
843 					      FINDBLK_NBLOCK);
844 				/*
845 				 * Buffer not found or could not be locked
846 				 * non-blocking.
847 				 */
848 				if (tbp == NULL)
849 					break;
850 
851 				/*
852 				 * If it IS in core, but has different
853 				 * characteristics, then don't cluster
854 				 * with it.
855 				 */
856 				if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
857 				     B_INVAL | B_DELWRI | B_NEEDCOMMIT))
858 				    != (B_DELWRI | B_CLUSTEROK |
859 				     (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
860 				    (tbp->b_flags & B_LOCKED) ||
861 				    (LIST_FIRST(&tbp->b_dep) &&
862 				     buf_checkwrite(tbp))
863 				) {
864 					BUF_UNLOCK(tbp);
865 					break;
866 				}
867 
868 				/*
869 				 * Check that the combined cluster
870 				 * would make sense with regard to pages
871 				 * and would not be too large
872 				 */
873 				if ((tbp->b_bcount != blksize) ||
874 				  ((bp->b_bio2.bio_offset + i) !=
875 				    tbp->b_bio2.bio_offset) ||
876 				  ((tbp->b_xio.xio_npages + bp->b_xio.xio_npages) >
877 				    (maxiosize / PAGE_SIZE))) {
878 					BUF_UNLOCK(tbp);
879 					break;
880 				}
881 				/*
882 				 * Ok, it's passed all the tests,
883 				 * so remove it from the free list
884 				 * and mark it busy. We will use it.
885 				 */
886 				bremfree(tbp);
887 				KKASSERT(tbp->b_cmd == BUF_CMD_DONE);
888 			} /* end of code for non-first buffers only */
889 
890 			/*
891 			 * If the IO is via the VM then we do some
892 			 * special VM hackery (yuck).  Since the buffer's
893 			 * block size may not be page-aligned it is possible
894 			 * for a page to be shared between two buffers.  We
895 			 * have to get rid of the duplication when building
896 			 * the cluster.
897 			 */
898 			if (tbp->b_flags & B_VMIO) {
899 				vm_page_t m;
900 
901 				if (i != 0) { /* if not first buffer */
902 					for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
903 						m = tbp->b_xio.xio_pages[j];
904 						if (m->flags & PG_BUSY) {
905 							bqrelse(tbp);
906 							goto finishcluster;
907 						}
908 					}
909 				}
910 
911 				for (j = 0; j < tbp->b_xio.xio_npages; ++j) {
912 					m = tbp->b_xio.xio_pages[j];
913 					vm_page_io_start(m);
914 					vm_object_pip_add(m->object, 1);
915 					if ((bp->b_xio.xio_npages == 0) ||
916 					  (bp->b_xio.xio_pages[bp->b_xio.xio_npages - 1] != m)) {
917 						bp->b_xio.xio_pages[bp->b_xio.xio_npages] = m;
918 						bp->b_xio.xio_npages++;
919 					}
920 				}
921 			}
922 			bp->b_bcount += blksize;
923 			bp->b_bufsize += blksize;
924 
925 			bundirty(tbp);
926 			tbp->b_flags &= ~B_ERROR;
927 			tbp->b_cmd = BUF_CMD_WRITE;
928 			BUF_KERNPROC(tbp);
929 			cluster_append(&bp->b_bio1, tbp);
930 
931 			/*
932 			 * check for latent dependencies to be handled
933 			 */
934 			if (LIST_FIRST(&tbp->b_dep) != NULL)
935 				buf_start(tbp);
936 		}
937 	finishcluster:
938 		pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
939 			(vm_page_t *) bp->b_xio.xio_pages, bp->b_xio.xio_npages);
940 		if (bp->b_bufsize > bp->b_kvasize) {
941 			panic(
942 			    "cluster_wbuild: b_bufsize(%d) > b_kvasize(%d)",
943 			    bp->b_bufsize, bp->b_kvasize);
944 		}
945 		totalwritten += bp->b_bufsize;
946 		bp->b_dirtyoff = 0;
947 		bp->b_dirtyend = bp->b_bufsize;
948 		bp->b_bio1.bio_done = cluster_callback;
949 		bp->b_cmd = BUF_CMD_WRITE;
950 
951 		vfs_busy_pages(vp, bp);
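		/*
		 * Charge the cluster against runningbufspace so in-flight
		 * write accounting (and any throttling based on it) sees
		 * this I/O.
		 */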
952 		bp->b_runningbufspace = bp->b_bufsize;
953 		if (bp->b_runningbufspace) {
954 			runningbufspace += bp->b_runningbufspace;
955 			++runningbufcount;
956 		}
957 		BUF_KERNPROC(bp);
958 		vn_strategy(vp, &bp->b_bio1);
959 
960 		bytes -= i;
961 	}
962 	return totalwritten;
963 }
964 
965 /*
966  * Collect all the buffers in the current cluster into a cluster_save
967  * array, plus add one additional buffer (last_bp) passed in by the caller.
968  */
969 static struct cluster_save *
970 cluster_collectbufs(struct vnode *vp, struct buf *last_bp, int blksize)
971 {
972 	struct cluster_save *buflist;
973 	struct buf *bp;
974 	off_t loffset;
975 	int i, len;
976 
977 	len = (int)(vp->v_lastw - vp->v_cstart + blksize) / blksize;
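	/*
	 * Allocate the cluster_save header and the bs_children pointer
	 * array in a single allocation; the array is laid out immediately
	 * after the header.
	 */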
978 	buflist = kmalloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
979 			 M_SEGMENT, M_WAITOK);
980 	buflist->bs_nchildren = 0;
981 	buflist->bs_children = (struct buf **) (buflist + 1);
982 	for (loffset = vp->v_cstart, i = 0; i < len; (loffset += blksize), i++) {
983 		(void) bread(vp, loffset, last_bp->b_bcount, &bp);
984 		buflist->bs_children[i] = bp;
985 		if (bp->b_bio2.bio_offset == NOOFFSET) {
986 			VOP_BMAP(bp->b_vp, bp->b_loffset,
987 				 &bp->b_bio2.bio_offset,
988 				 NULL, NULL, BUF_CMD_WRITE);
989 		}
990 	}
991 	buflist->bs_children[i] = bp = last_bp;
992 	if (bp->b_bio2.bio_offset == NOOFFSET) {
993 		VOP_BMAP(bp->b_vp, bp->b_loffset, &bp->b_bio2.bio_offset,
994 			 NULL, NULL, BUF_CMD_WRITE);
995 	}
996 	buflist->bs_nchildren = i + 1;
997 	return (buflist);
998 }
999 
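/*
 * Append tbp to the singly-linked cluster list hanging off the cluster
 * bio.  The head and tail pointers are kept in the bio's caller_info
 * fields and the links in each buffer's b_cluster_next field.
 */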
1000 void
1001 cluster_append(struct bio *bio, struct buf *tbp)
1002 {
1003 	tbp->b_cluster_next = NULL;
1004 	if (bio->bio_caller_info1.cluster_head == NULL) {
1005 		bio->bio_caller_info1.cluster_head = tbp;
1006 		bio->bio_caller_info2.cluster_tail = tbp;
1007 	} else {
1008 		bio->bio_caller_info2.cluster_tail->b_cluster_next = tbp;
1009 		bio->bio_caller_info2.cluster_tail = tbp;
1010 	}
1011 }
1012 
1013