xref: /freebsd/sys/kern/vfs_cluster.c (revision 81ad6265)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1993
5  *	The Regents of the University of California.  All rights reserved.
6  * Modifications/enhancements:
7  * 	Copyright (c) 1995 John S. Dyson.  All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. Neither the name of the University nor the names of its contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  *
33  *	@(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
34  */
35 
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/proc.h>
43 #include <sys/bio.h>
44 #include <sys/buf.h>
45 #include <sys/vnode.h>
46 #include <sys/malloc.h>
47 #include <sys/mount.h>
48 #include <sys/racct.h>
49 #include <sys/resourcevar.h>
50 #include <sys/rwlock.h>
51 #include <sys/vmmeter.h>
52 #include <vm/vm.h>
53 #include <vm/vm_object.h>
54 #include <vm/vm_page.h>
55 #include <sys/sysctl.h>
56 
57 static MALLOC_DEFINE(M_SEGMENT, "cl_savebuf", "cluster_save buffer");
58 static uma_zone_t cluster_pbuf_zone;
59 
60 static void cluster_init(void *);
61 static struct cluster_save *cluster_collectbufs(struct vnode *vp,
62 	    struct vn_clusterw *vnc, struct buf *last_bp, int gbflags);
63 static struct buf *cluster_rbuild(struct vnode *vp, u_quad_t filesize,
64 	    daddr_t lbn, daddr_t blkno, long size, int run, int gbflags,
65 	    struct buf *fbp);
66 static void cluster_callback(struct buf *);
67 
68 static int write_behind = 1;
69 SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0,
70     "Cluster write-behind; 0: disable, 1: enable, 2: backed off");
71 
72 static int read_max = 64;
73 SYSCTL_INT(_vfs, OID_AUTO, read_max, CTLFLAG_RW, &read_max, 0,
74     "Cluster read-ahead max block count");
75 
76 static int read_min = 1;
77 SYSCTL_INT(_vfs, OID_AUTO, read_min, CTLFLAG_RW, &read_min, 0,
78     "Cluster read min block count");
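/*
 * These knobs live under the "vfs" sysctl tree and can be inspected or
 * tuned at run time, e.g. (illustrative shell commands, not part of this
 * file):
 *
 *	sysctl vfs.read_max		# show the read-ahead limit
 *	sysctl vfs.read_max=128		# raise the read-ahead limit
 *	sysctl vfs.write_behind=0	# disable clustered write-behind
 */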
79 
80 SYSINIT(cluster, SI_SUB_CPU, SI_ORDER_ANY, cluster_init, NULL);
81 
82 static void
83 cluster_init(void *dummy)
84 {
85 
86 	cluster_pbuf_zone = pbuf_zsecond_create("clpbuf", nswbuf / 2);
87 }
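/*
 * Note: cluster_pbuf_zone is a secondary zone of the generic pbuf
 * allocator limited to nswbuf / 2 buffers, so at most that many cluster
 * pbufs can be outstanding at once; cluster_rbuild() and cluster_wbuild()
 * fall back to single-buffer I/O when uma_zalloc(..., M_NOWAIT) fails.
 */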
88 
89 /*
90  * Read data to a buf, including read-ahead if we find this to be beneficial.
91  * cluster_read replaces bread.
92  */
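/*
 * Illustrative sketch (not part of this file): a filesystem read path that
 * has detected sequential access might call cluster_read() in place of
 * bread() roughly as below, where "ip", "lbn", "bsize" and "seqcount" are
 * hypothetical locals of the caller:
 *
 *	if (seqcount > 1)
 *		error = cluster_read(vp, ip->i_size, lbn, bsize, NOCRED,
 *		    uio->uio_resid, seqcount, 0, &bp);
 *	else
 *		error = bread(vp, lbn, bsize, NOCRED, &bp);
 */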
93 int
94 cluster_read(struct vnode *vp, u_quad_t filesize, daddr_t lblkno, long size,
95     struct ucred *cred, long totread, int seqcount, int gbflags,
96     struct buf **bpp)
97 {
98 	struct buf *bp, *rbp, *reqbp;
99 	struct bufobj *bo;
100 	struct thread *td;
101 	daddr_t blkno, origblkno;
102 	int maxra, racluster;
103 	int error, ncontig;
104 	int i;
105 
106 	error = 0;
107 	td = curthread;
108 	bo = &vp->v_bufobj;
109 	if (!unmapped_buf_allowed)
110 		gbflags &= ~GB_UNMAPPED;
111 
112 	/*
113 	 * Try to limit the amount of read-ahead by a few
114 	 * ad-hoc parameters.  This needs work!!!
115 	 */
116 	racluster = vp->v_mount->mnt_iosize_max / size;
117 	maxra = seqcount;
118 	maxra = min(read_max, maxra);
119 	maxra = min(nbuf/8, maxra);
120 	if (((u_quad_t)(lblkno + maxra + 1) * size) > filesize)
121 		maxra = (filesize / size) - lblkno;
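	/*
	 * Worked example (hypothetical numbers): with 16 KB blocks and an
	 * mnt_iosize_max of 128 KB, racluster is 8; a seqcount of 64 with
	 * the default vfs.read_max of 64 allows up to 64 blocks of
	 * read-ahead, further clamped by nbuf / 8 and by the end of file.
	 */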
122 
123 	/*
124 	 * get the requested block
125 	 */
126 	error = getblkx(vp, lblkno, lblkno, size, 0, 0, gbflags, &bp);
127 	if (error != 0) {
128 		*bpp = NULL;
129 		return (error);
130 	}
131 	gbflags &= ~GB_NOSPARSE;
132 	origblkno = lblkno;
133 	*bpp = reqbp = bp;
134 
135 	 * If it is in the cache, then check to see if the reads have been
136 	 * sequential.  If they have, then try some read-ahead; otherwise
137 	 * back off on prospective read-aheads.
138 	 * back-off on prospective read-aheads.
139 	 */
140 	if (bp->b_flags & B_CACHE) {
141 		if (!seqcount) {
142 			return 0;
143 		} else if ((bp->b_flags & B_RAM) == 0) {
144 			return 0;
145 		} else {
146 			bp->b_flags &= ~B_RAM;
147 			BO_RLOCK(bo);
148 			for (i = 1; i < maxra; i++) {
149 				/*
150 				 * Stop if the buffer does not exist or it
151 				 * is invalid (about to go away?)
152 				 */
153 				rbp = gbincore(&vp->v_bufobj, lblkno+i);
154 				if (rbp == NULL || (rbp->b_flags & B_INVAL))
155 					break;
156 
157 				/*
158 				 * Set another read-ahead mark so we know
159 				 * to check again. (If we can lock the
160 				 * buffer without waiting)
161 				 */
162 				if ((((i % racluster) == (racluster - 1)) ||
163 				    (i == (maxra - 1)))
164 				    && (0 == BUF_LOCK(rbp,
165 					LK_EXCLUSIVE | LK_NOWAIT, NULL))) {
166 					rbp->b_flags |= B_RAM;
167 					BUF_UNLOCK(rbp);
168 				}
169 			}
170 			BO_RUNLOCK(bo);
171 			if (i >= maxra) {
172 				return 0;
173 			}
174 			lblkno += i;
175 		}
176 		reqbp = bp = NULL;
177 	/*
178 	 * If it isn't in the cache, then get a chunk from
179 	 * disk if sequential, otherwise just get the block.
180 	 */
181 	} else {
182 		off_t firstread = bp->b_offset;
183 		int nblks;
184 		long minread;
185 
186 		KASSERT(bp->b_offset != NOOFFSET,
187 		    ("cluster_read: no buffer offset"));
188 
189 		ncontig = 0;
190 
191 		/*
192 		 * Adjust totread if needed
193 		 */
194 		minread = read_min * size;
195 		if (minread > totread)
196 			totread = minread;
197 
198 		/*
199 		 * Compute the total number of blocks that we should read
200 		 * synchronously.
201 		 */
202 		if (firstread + totread > filesize)
203 			totread = filesize - firstread;
204 		nblks = howmany(totread, size);
205 		if (nblks > racluster)
206 			nblks = racluster;
207 
208 		/*
209 		 * Now compute the number of contiguous blocks.
210 		 */
211 		if (nblks > 1) {
212 	    		error = VOP_BMAP(vp, lblkno, NULL,
213 				&blkno, &ncontig, NULL);
214 			/*
215 			 * If this failed to map just do the original block.
216 			 */
217 			if (error || blkno == -1)
218 				ncontig = 0;
219 		}
220 
221 		/*
222 		 * If we have contiguous data available, do a cluster read;
223 		 * otherwise just read the requested block.
224 		 */
225 		if (ncontig) {
226 			/* Account for our first block. */
227 			ncontig = min(ncontig + 1, nblks);
228 			if (ncontig < nblks)
229 				nblks = ncontig;
230 			bp = cluster_rbuild(vp, filesize, lblkno,
231 			    blkno, size, nblks, gbflags, bp);
232 			lblkno += (bp->b_bufsize / size);
233 		} else {
234 			bp->b_flags |= B_RAM;
235 			bp->b_iocmd = BIO_READ;
236 			lblkno += 1;
237 		}
238 	}
239 
240 	/*
241 	 * handle the synchronous read so that it is available ASAP.
242 	 */
243 	if (bp) {
244 		if ((bp->b_flags & B_CLUSTER) == 0) {
245 			vfs_busy_pages(bp, 0);
246 		}
247 		bp->b_flags &= ~B_INVAL;
248 		bp->b_ioflags &= ~BIO_ERROR;
249 		if ((bp->b_flags & B_ASYNC) || bp->b_iodone != NULL)
250 			BUF_KERNPROC(bp);
251 		bp->b_iooffset = dbtob(bp->b_blkno);
252 		bstrategy(bp);
253 #ifdef RACCT
254 		if (racct_enable) {
255 			PROC_LOCK(td->td_proc);
256 			racct_add_buf(td->td_proc, bp, 0);
257 			PROC_UNLOCK(td->td_proc);
258 		}
259 #endif /* RACCT */
260 		td->td_ru.ru_inblock++;
261 	}
262 
263 	/*
264 	 * If we have been doing sequential I/O, then do some read-ahead.
265 	 */
266 	while (lblkno < (origblkno + maxra)) {
267 		error = VOP_BMAP(vp, lblkno, NULL, &blkno, &ncontig, NULL);
268 		if (error)
269 			break;
270 
271 		if (blkno == -1)
272 			break;
273 
274 		/*
275 		 * We could throttle ncontig here by maxra but we might as
276 		 * well read the data if it is contiguous.  We're throttled
277 		 * by racluster anyway.
278 		 */
279 		if (ncontig) {
280 			ncontig = min(ncontig + 1, racluster);
281 			rbp = cluster_rbuild(vp, filesize, lblkno, blkno,
282 			    size, ncontig, gbflags, NULL);
283 			lblkno += (rbp->b_bufsize / size);
284 			if (rbp->b_flags & B_DELWRI) {
285 				bqrelse(rbp);
286 				continue;
287 			}
288 		} else {
289 			rbp = getblk(vp, lblkno, size, 0, 0, gbflags);
290 			lblkno += 1;
291 			if (rbp->b_flags & B_DELWRI) {
292 				bqrelse(rbp);
293 				continue;
294 			}
295 			rbp->b_flags |= B_ASYNC | B_RAM;
296 			rbp->b_iocmd = BIO_READ;
297 			rbp->b_blkno = blkno;
298 		}
299 		if (rbp->b_flags & B_CACHE) {
300 			rbp->b_flags &= ~B_ASYNC;
301 			bqrelse(rbp);
302 			continue;
303 		}
304 		if ((rbp->b_flags & B_CLUSTER) == 0) {
305 			vfs_busy_pages(rbp, 0);
306 		}
307 		rbp->b_flags &= ~B_INVAL;
308 		rbp->b_ioflags &= ~BIO_ERROR;
309 		if ((rbp->b_flags & B_ASYNC) || rbp->b_iodone != NULL)
310 			BUF_KERNPROC(rbp);
311 		rbp->b_iooffset = dbtob(rbp->b_blkno);
312 		bstrategy(rbp);
313 #ifdef RACCT
314 		if (racct_enable) {
315 			PROC_LOCK(td->td_proc);
316 			racct_add_buf(td->td_proc, rbp, 0);
317 			PROC_UNLOCK(td->td_proc);
318 		}
319 #endif /* RACCT */
320 		td->td_ru.ru_inblock++;
321 	}
322 
323 	if (reqbp) {
324 		/*
325 		 * Like bread, always brelse() the buffer when
326 		 * returning an error.
327 		 */
328 		error = bufwait(reqbp);
329 		if (error != 0) {
330 			brelse(reqbp);
331 			*bpp = NULL;
332 		}
333 	}
334 	return (error);
335 }
336 
337 /*
338  * If blocks are contiguous on disk, use this to provide clustered
339  * read ahead.  We will read as many blocks as possible sequentially
340  * and then parcel them up into logical blocks in the buffer hash table.
341  */
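/*
 * For illustration: with run == 4 and a 16 KB block size, the pbuf built
 * below describes one 64 KB transfer whose b_pages[] aggregates the pages
 * of the four component buffers; the components are queued on
 * b_cluster.cluster_head and are individually completed by
 * cluster_callback() when the single large I/O finishes.
 */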
342 static struct buf *
343 cluster_rbuild(struct vnode *vp, u_quad_t filesize, daddr_t lbn,
344     daddr_t blkno, long size, int run, int gbflags, struct buf *fbp)
345 {
346 	struct buf *bp, *tbp;
347 	daddr_t bn;
348 	off_t off;
349 	long tinc, tsize;
350 	int i, inc, j, k, toff;
351 
352 	KASSERT(size == vp->v_mount->mnt_stat.f_iosize,
353 	    ("cluster_rbuild: size %ld != f_iosize %jd\n",
354 	    size, (intmax_t)vp->v_mount->mnt_stat.f_iosize));
355 
356 	/*
357 	 * avoid a division
358 	 */
359 	while ((u_quad_t) size * (lbn + run) > filesize) {
360 		--run;
361 	}
362 
363 	if (fbp) {
364 		tbp = fbp;
365 		tbp->b_iocmd = BIO_READ;
366 	} else {
367 		tbp = getblk(vp, lbn, size, 0, 0, gbflags);
368 		if (tbp->b_flags & B_CACHE)
369 			return tbp;
370 		tbp->b_flags |= B_ASYNC | B_RAM;
371 		tbp->b_iocmd = BIO_READ;
372 	}
373 	tbp->b_blkno = blkno;
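	/*
	 * Clustering only works for VMIO-backed buffers: the pbuf allocated
	 * below is assembled from the component buffers' vm pages, so
	 * malloc-backed or non-VMIO buffers, and runs of a single block,
	 * are simply returned unclustered.
	 */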
374 	if ( (tbp->b_flags & B_MALLOC) ||
375 		((tbp->b_flags & B_VMIO) == 0) || (run <= 1) )
376 		return tbp;
377 
378 	bp = uma_zalloc(cluster_pbuf_zone, M_NOWAIT);
379 	if (bp == NULL)
380 		return tbp;
381 	MPASS((bp->b_flags & B_MAXPHYS) != 0);
382 
383 	/*
384 	 * We are synthesizing a buffer out of vm_page_t's, but
385 	 * if the block size is not page aligned then the starting
386 	 * address may not be either.  Inherit the b_data offset
387 	 * from the original buffer.
388 	 */
389 	bp->b_flags = B_ASYNC | B_CLUSTER | B_VMIO;
390 	if ((gbflags & GB_UNMAPPED) != 0) {
391 		bp->b_data = unmapped_buf;
392 	} else {
393 		bp->b_data = (char *)((vm_offset_t)bp->b_data |
394 		    ((vm_offset_t)tbp->b_data & PAGE_MASK));
395 	}
396 	bp->b_iocmd = BIO_READ;
397 	bp->b_iodone = cluster_callback;
398 	bp->b_blkno = blkno;
399 	bp->b_lblkno = lbn;
400 	bp->b_offset = tbp->b_offset;
401 	KASSERT(bp->b_offset != NOOFFSET, ("cluster_rbuild: no buffer offset"));
402 	pbgetvp(vp, bp);
403 
404 	TAILQ_INIT(&bp->b_cluster.cluster_head);
405 
406 	bp->b_bcount = 0;
407 	bp->b_bufsize = 0;
408 	bp->b_npages = 0;
409 
410 	inc = btodb(size);
411 	for (bn = blkno, i = 0; i < run; ++i, bn += inc) {
412 		if (i == 0) {
413 			vm_object_pip_add(tbp->b_bufobj->bo_object,
414 			    tbp->b_npages);
415 			vfs_busy_pages_acquire(tbp);
416 		} else {
417 			if ((bp->b_npages * PAGE_SIZE) +
418 			    round_page(size) > vp->v_mount->mnt_iosize_max) {
419 				break;
420 			}
421 
422 			tbp = getblk(vp, lbn + i, size, 0, 0, GB_LOCK_NOWAIT |
423 			    (gbflags & GB_UNMAPPED));
424 
425 			/* Don't wait around for locked bufs. */
426 			if (tbp == NULL)
427 				break;
428 
429 			/*
430 			 * Stop scanning if the buffer is fully valid
431 			 * (marked B_CACHE), or locked (may be doing a
432 			 * background write), or if the buffer is not
433 			 * VMIO backed.  The clustering code can only deal
434 			 * with VMIO-backed buffers.  The bo lock is not
435 			 * required for the BKGRDINPROG check since it
436 			 * can not be set without the buf lock.
437 			 */
438 			if ((tbp->b_vflags & BV_BKGRDINPROG) ||
439 			    (tbp->b_flags & B_CACHE) ||
440 			    (tbp->b_flags & B_VMIO) == 0) {
441 				bqrelse(tbp);
442 				break;
443 			}
444 
445 			/*
446 			 * The buffer must be completely invalid in order to
447 			 * take part in the cluster.  If it is partially valid
448 			 * then we stop.
449 			 */
450 			off = tbp->b_offset;
451 			tsize = size;
452 			for (j = 0; tsize > 0; j++) {
453 				toff = off & PAGE_MASK;
454 				tinc = tsize;
455 				if (toff + tinc > PAGE_SIZE)
456 					tinc = PAGE_SIZE - toff;
457 				if (vm_page_trysbusy(tbp->b_pages[j]) == 0)
458 					break;
459 				if ((tbp->b_pages[j]->valid &
460 				    vm_page_bits(toff, tinc)) != 0) {
461 					vm_page_sunbusy(tbp->b_pages[j]);
462 					break;
463 				}
464 				vm_object_pip_add(tbp->b_bufobj->bo_object, 1);
465 				off += tinc;
466 				tsize -= tinc;
467 			}
468 			if (tsize > 0) {
469 clean_sbusy:
470 				vm_object_pip_wakeupn(tbp->b_bufobj->bo_object,
471 				    j);
472 				for (k = 0; k < j; k++)
473 					vm_page_sunbusy(tbp->b_pages[k]);
474 				bqrelse(tbp);
475 				break;
476 			}
477 
478 			/*
479 			 * Set a read-ahead mark as appropriate
480 			 */
481 			if ((fbp && (i == 1)) || (i == (run - 1)))
482 				tbp->b_flags |= B_RAM;
483 
484 			/*
485 			 * Set the buffer up for an async read (XXX should
486 			 * we do this only if we do not wind up brelse()ing?).
487 			 * Set the block number if it isn't set; otherwise
488 			 * make sure it matches the block number we
489 			 * expect.
490 			 */
491 			tbp->b_flags |= B_ASYNC;
492 			tbp->b_iocmd = BIO_READ;
493 			if (tbp->b_blkno == tbp->b_lblkno) {
494 				tbp->b_blkno = bn;
495 			} else if (tbp->b_blkno != bn) {
496 				goto clean_sbusy;
497 			}
498 		}
499 		/*
500 		 * XXX fbp from caller may not be B_ASYNC, but we are going
501 		 * to biodone() it in cluster_callback() anyway
502 		 */
503 		BUF_KERNPROC(tbp);
504 		TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
505 			tbp, b_cluster.cluster_entry);
506 		for (j = 0; j < tbp->b_npages; j += 1) {
507 			vm_page_t m;
508 
509 			m = tbp->b_pages[j];
510 			if ((bp->b_npages == 0) ||
511 			    (bp->b_pages[bp->b_npages-1] != m)) {
512 				bp->b_pages[bp->b_npages] = m;
513 				bp->b_npages++;
514 			}
515 			if (vm_page_all_valid(m))
516 				tbp->b_pages[j] = bogus_page;
517 		}
518 
519 		/*
520 		 * Don't inherit tbp->b_bufsize as it may be larger due to
521 		 * a non-page-aligned size.  Instead just aggregate using
522 		 * 'size'.
523 		 */
524 		if (tbp->b_bcount != size)
525 			printf("warning: tbp->b_bcount wrong %ld vs %ld\n", tbp->b_bcount, size);
526 		if (tbp->b_bufsize != size)
527 			printf("warning: tbp->b_bufsize wrong %ld vs %ld\n", tbp->b_bufsize, size);
528 		bp->b_bcount += size;
529 		bp->b_bufsize += size;
530 	}
531 
532 	/*
533 	 * Fully valid pages in the cluster are already good and do not need
534 	 * to be re-read from disk.  Replace such pages with bogus_page.
535 	 */
536 	for (j = 0; j < bp->b_npages; j++) {
537 		if (vm_page_all_valid(bp->b_pages[j]))
538 			bp->b_pages[j] = bogus_page;
539 	}
540 	if (bp->b_bufsize > bp->b_kvasize)
541 		panic("cluster_rbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
542 		    bp->b_bufsize, bp->b_kvasize);
543 
544 	if (buf_mapped(bp)) {
545 		pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
546 		    (vm_page_t *)bp->b_pages, bp->b_npages);
547 	}
548 	return (bp);
549 }
550 
551 /*
552  * Clean up after a clustered read or write.
553  * This is complicated by the fact that any of the buffers might have
554  * extra memory (if there were no empty buffer headers at allocbuf time)
555  * that we will need to shift around.
556  */
557 static void
558 cluster_callback(struct buf *bp)
559 {
560 	struct buf *nbp, *tbp;
561 	int error = 0;
562 
563 	/*
564 	 * Must propagate errors to all the components.
565 	 */
566 	if (bp->b_ioflags & BIO_ERROR)
567 		error = bp->b_error;
568 
569 	if (buf_mapped(bp)) {
570 		pmap_qremove(trunc_page((vm_offset_t) bp->b_data),
571 		    bp->b_npages);
572 	}
573 	/*
574 	 * Move memory from the large cluster buffer into the component
575 	 * buffers and mark IO as done on these.
576 	 */
577 	for (tbp = TAILQ_FIRST(&bp->b_cluster.cluster_head);
578 		tbp; tbp = nbp) {
579 		nbp = TAILQ_NEXT(&tbp->b_cluster, cluster_entry);
580 		if (error) {
581 			tbp->b_ioflags |= BIO_ERROR;
582 			tbp->b_error = error;
583 		} else {
584 			tbp->b_dirtyoff = tbp->b_dirtyend = 0;
585 			tbp->b_flags &= ~B_INVAL;
586 			tbp->b_ioflags &= ~BIO_ERROR;
587 			/*
588 			 * XXX the bdwrite()/bqrelse() issued during
589 			 * cluster building clears B_RELBUF (see bqrelse()
590 			 * comment).  If direct I/O was specified, we have
591 			 * to restore it here to allow the buffer and VM
592 			 * to be freed.
593 			 */
594 			if (tbp->b_flags & B_DIRECT)
595 				tbp->b_flags |= B_RELBUF;
596 		}
597 		bufdone(tbp);
598 	}
599 	pbrelvp(bp);
600 	uma_zfree(cluster_pbuf_zone, bp);
601 }
602 
603 /*
604  *	cluster_wbuild_wb:
605  *
606  *	Implement modified write build for cluster.
607  *
608  *		write_behind = 0	write behind disabled
609  *		write_behind = 1	write behind normal (default)
610  *		write_behind = 2	write behind backed-off
611  */
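/*
 * Example of the backed-off mode (illustrative numbers): with
 * write_behind == 2 and len == 8, a call naming start_lbn 128 instead
 * issues the cluster write for logical blocks 120..127, i.e. the cluster
 * one cluster-length behind the one that just completed.
 */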
612 
613 static __inline int
614 cluster_wbuild_wb(struct vnode *vp, long size, daddr_t start_lbn, int len,
615     int gbflags)
616 {
617 	int r = 0;
618 
619 	switch (write_behind) {
620 	case 2:
621 		if (start_lbn < len)
622 			break;
623 		start_lbn -= len;
624 		/* FALLTHROUGH */
625 	case 1:
626 		r = cluster_wbuild(vp, size, start_lbn, len, gbflags);
627 		/* FALLTHROUGH */
628 	default:
629 		/* FALLTHROUGH */
630 		break;
631 	}
632 	return(r);
633 }
634 
635 /*
636  * Do clustered write for FFS.
637  * Four cases:
638  * Three cases:
639  *	1. Write is not sequential (write asynchronously)
640  *	Write is sequential:
641  *	2.	beginning of cluster - begin cluster
642  *	3.	middle of a cluster - add to cluster
643  *	4.	end of a cluster - asynchronously write cluster
644  */
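/*
 * Illustrative sketch (hypothetical caller state): the filesystem keeps a
 * struct vn_clusterw per vnode, here "ip->i_clusterw", and once bp has
 * been dirtied by a sequential write hands it off roughly as:
 *
 *	cluster_write(vp, &ip->i_clusterw, bp, ip->i_size, seqcount, 0);
 *
 * Non-sequential writers would typically bdwrite() or bawrite() the
 * buffer directly instead.
 */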
645 void
646 cluster_write(struct vnode *vp, struct vn_clusterw *vnc, struct buf *bp,
647     u_quad_t filesize, int seqcount, int gbflags)
648 {
649 	daddr_t lbn, pbn;
650 	int maxclen, cursize;
651 	int lblocksize;
652 	int async;
653 
654 	if (!unmapped_buf_allowed)
655 		gbflags &= ~GB_UNMAPPED;
656 
657 	if (vp->v_type == VREG) {
658 		async = DOINGASYNC(vp);
659 		lblocksize = vp->v_mount->mnt_stat.f_iosize;
660 	} else {
661 		async = 0;
662 		lblocksize = bp->b_bufsize;
663 	}
664 	lbn = bp->b_lblkno;
665 	KASSERT(bp->b_offset != NOOFFSET, ("cluster_write: no buffer offset"));
666 
667 	/* Initialize vnode to beginning of file. */
668 	if (lbn == 0)
669 		vnc->v_lasta = vnc->v_clen = vnc->v_cstart = vnc->v_lastw = 0;
670 
671 	if (vnc->v_clen == 0 || lbn != vnc->v_lastw + 1 ||
672 	    (bp->b_blkno != vnc->v_lasta + btodb(lblocksize))) {
673 		maxclen = vp->v_mount->mnt_iosize_max / lblocksize - 1;
674 		if (vnc->v_clen != 0) {
675 			/*
676 			 * Next block is not sequential.
677 			 *
678 			 * If we are not writing at end of file, the process
679 			 * seeked to another point in the file since its last
680 			 * write, or we have reached our maximum cluster size,
681 			 * then push the previous cluster. Otherwise try
682 			 * reallocating to make it sequential.
683 			 *
684 			 * Change to algorithm: only push previous cluster if
685 			 * it was sequential from the point of view of the
686 			 * seqcount heuristic, otherwise leave the buffer
687 			 * intact so we can potentially optimize the I/O
688 			 * later on in the buf_daemon or update daemon
689 			 * flush.
690 			 */
691 			cursize = vnc->v_lastw - vnc->v_cstart + 1;
692 			if ((u_quad_t)bp->b_offset + lblocksize != filesize ||
693 			    lbn != vnc->v_lastw + 1 || vnc->v_clen <= cursize) {
694 				if (!async && seqcount > 0) {
695 					cluster_wbuild_wb(vp, lblocksize,
696 					    vnc->v_cstart, cursize, gbflags);
697 				}
698 			} else {
699 				struct buf **bpp, **endbp;
700 				struct cluster_save *buflist;
701 
702 				buflist = cluster_collectbufs(vp, vnc, bp,
703 				    gbflags);
704 				if (buflist == NULL) {
705 					/*
706 					 * Cluster build failed so just write
707 					 * it now.
708 					 */
709 					bawrite(bp);
710 					return;
711 				}
712 				endbp = &buflist->bs_children
713 				    [buflist->bs_nchildren - 1];
714 				if (VOP_REALLOCBLKS(vp, buflist)) {
715 					/*
716 					 * Failed, push the previous cluster
717 					 * if *really* writing sequentially
718 					 * in the logical file (seqcount > 1),
719 					 * otherwise delay it in the hopes that
720 					 * the low level disk driver can
721 					 * optimize the write ordering.
722 					 */
723 					for (bpp = buflist->bs_children;
724 					     bpp < endbp; bpp++)
725 						brelse(*bpp);
726 					free(buflist, M_SEGMENT);
727 					if (seqcount > 1) {
728 						cluster_wbuild_wb(vp,
729 						    lblocksize, vnc->v_cstart,
730 						    cursize, gbflags);
731 					}
732 				} else {
733 					/*
734 					 * Succeeded, keep building cluster.
735 					 */
736 					for (bpp = buflist->bs_children;
737 					     bpp <= endbp; bpp++)
738 						bdwrite(*bpp);
739 					free(buflist, M_SEGMENT);
740 					vnc->v_lastw = lbn;
741 					vnc->v_lasta = bp->b_blkno;
742 					return;
743 				}
744 			}
745 		}
746 		/*
747 		 * Consider beginning a cluster. If at end of file, make
748 		 * cluster as large as possible, otherwise find size of
749 		 * existing cluster.
750 		 */
751 		if (vp->v_type == VREG &&
752 		    (u_quad_t) bp->b_offset + lblocksize != filesize &&
753 		    bp->b_blkno == bp->b_lblkno &&
754 		    (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen,
755 		    NULL) != 0 || bp->b_blkno == -1)) {
756 			pbn = bp->b_blkno;
757 			bawrite(bp);
758 			vnc->v_clen = 0;
759 			vnc->v_lasta = pbn;
760 			vnc->v_cstart = lbn + 1;
761 			vnc->v_lastw = lbn;
762 			return;
763 		}
764 		vnc->v_clen = maxclen;
765 		pbn = bp->b_blkno;
766 		if (!async && maxclen == 0) {	/* I/O not contiguous */
767 			vnc->v_cstart = lbn + 1;
768 			bawrite(bp);
769 		} else {	/* Wait for rest of cluster */
770 			vnc->v_cstart = lbn;
771 			bdwrite(bp);
772 		}
773 	} else if (lbn == vnc->v_cstart + vnc->v_clen) {
774 		/*
775 		 * At end of cluster, write it out if seqcount tells us we
776 		 * are operating sequentially, otherwise let the buf or
777 		 * update daemon handle it.
778 		 */
779 		pbn = bp->b_blkno;
780 		bdwrite(bp);
781 		if (seqcount > 1) {
782 			cluster_wbuild_wb(vp, lblocksize, vnc->v_cstart,
783 			    vnc->v_clen + 1, gbflags);
784 		}
785 		vnc->v_clen = 0;
786 		vnc->v_cstart = lbn + 1;
787 	} else if (vm_page_count_severe()) {
788 		/*
789 		 * We are low on memory, get it going NOW
790 		 */
791 		pbn = bp->b_blkno;
792 		bawrite(bp);
793 	} else {
794 		/*
795 		 * In the middle of a cluster, so just delay the I/O for now.
796 		 */
797 		pbn = bp->b_blkno;
798 		bdwrite(bp);
799 	}
800 	vnc->v_lastw = lbn;
801 	vnc->v_lasta = pbn;
802 }
803 
804 /*
805  * This is an awful lot like cluster_rbuild...wish they could be combined.
806  * Scan 'len' logical blocks starting at 'start_lbn' and gather contiguous
807  * delayed-write buffers into larger clustered writes where possible.
808  * The return value is the number of buffer bytes queued for write.
809  */
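/*
 * Illustrative call (hypothetical locals): a caller wanting to push a run
 * of dirty blocks without the write-behind heuristics might use
 *
 *	nwritten = cluster_wbuild(vp, bsize, first_lbn, nblocks, 0);
 *
 * where nwritten is the number of buffer bytes queued for write.
 */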
810 int
811 cluster_wbuild(struct vnode *vp, long size, daddr_t start_lbn, int len,
812     int gbflags)
813 {
814 	struct buf *bp, *tbp;
815 	struct bufobj *bo;
816 	int i, j;
817 	int totalwritten = 0;
818 	int dbsize = btodb(size);
819 
820 	if (!unmapped_buf_allowed)
821 		gbflags &= ~GB_UNMAPPED;
822 
823 	bo = &vp->v_bufobj;
824 	while (len > 0) {
825 		/*
826 		 * If the buffer is not delayed-write (i.e. dirty), or it
827 		 * is delayed-write but either locked or inval, it cannot
828 		 * partake in the clustered write.
829 		 */
830 		BO_LOCK(bo);
831 		if ((tbp = gbincore(&vp->v_bufobj, start_lbn)) == NULL ||
832 		    (tbp->b_vflags & BV_BKGRDINPROG)) {
833 			BO_UNLOCK(bo);
834 			++start_lbn;
835 			--len;
836 			continue;
837 		}
838 		if (BUF_LOCK(tbp,
839 		    LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, BO_LOCKPTR(bo))) {
840 			++start_lbn;
841 			--len;
842 			continue;
843 		}
844 		if ((tbp->b_flags & (B_INVAL | B_DELWRI)) != B_DELWRI) {
845 			BUF_UNLOCK(tbp);
846 			++start_lbn;
847 			--len;
848 			continue;
849 		}
850 		bremfree(tbp);
851 		tbp->b_flags &= ~B_DONE;
852 
853 		/*
854 		 * Extra memory in the buffer, punt on this buffer.
855 		 * XXX we could handle this in most cases, but we would
856 		 * have to push the extra memory down to after our max
857 		 * possible cluster size and then potentially pull it back
858 		 * up if the cluster was terminated prematurely--too much
859 		 * hassle.
860 		 */
861 		if (((tbp->b_flags & (B_CLUSTEROK | B_MALLOC | B_VMIO)) !=
862 		     (B_CLUSTEROK | B_VMIO)) ||
863 		  (tbp->b_bcount != tbp->b_bufsize) ||
864 		  (tbp->b_bcount != size) ||
865 		  (len == 1) ||
866 		  ((bp = uma_zalloc(cluster_pbuf_zone, M_NOWAIT)) == NULL)) {
867 			totalwritten += tbp->b_bufsize;
868 			bawrite(tbp);
869 			++start_lbn;
870 			--len;
871 			continue;
872 		}
873 		MPASS((bp->b_flags & B_MAXPHYS) != 0);
874 
875 		/*
876 		 * We got a pbuf to make the cluster in,
877 		 * so initialise it.
878 		 */
879 		TAILQ_INIT(&bp->b_cluster.cluster_head);
880 		bp->b_bcount = 0;
881 		bp->b_bufsize = 0;
882 		bp->b_npages = 0;
883 		if (tbp->b_wcred != NOCRED)
884 			bp->b_wcred = crhold(tbp->b_wcred);
885 
886 		bp->b_blkno = tbp->b_blkno;
887 		bp->b_lblkno = tbp->b_lblkno;
888 		bp->b_offset = tbp->b_offset;
889 
890 		/*
891 		 * We are synthesizing a buffer out of vm_page_t's, but
892 		 * if the block size is not page aligned then the starting
893 		 * address may not be either.  Inherit the b_data offset
894 		 * from the original buffer.
895 		 */
896 		if ((gbflags & GB_UNMAPPED) == 0 ||
897 		    (tbp->b_flags & B_VMIO) == 0) {
898 			bp->b_data = (char *)((vm_offset_t)bp->b_data |
899 			    ((vm_offset_t)tbp->b_data & PAGE_MASK));
900 		} else {
901 			bp->b_data = unmapped_buf;
902 		}
903 		bp->b_flags |= B_CLUSTER | (tbp->b_flags & (B_VMIO |
904 		    B_NEEDCOMMIT));
905 		bp->b_iodone = cluster_callback;
906 		pbgetvp(vp, bp);
907 		/*
908 		 * From this location in the file, scan forward to see
909 		 * if there are buffers with adjacent data that need to
910 		 * be written as well.
911 		 */
912 		for (i = 0; i < len; ++i, ++start_lbn) {
913 			if (i != 0) { /* If not the first buffer */
914 				/*
915 				 * If the adjacent data is not even in core, it
916 				 * cannot need to be written.
917 				 */
918 				BO_LOCK(bo);
919 				if ((tbp = gbincore(bo, start_lbn)) == NULL ||
920 				    (tbp->b_vflags & BV_BKGRDINPROG)) {
921 					BO_UNLOCK(bo);
922 					break;
923 				}
924 
925 				/*
926 				 * If it IS in core, but has different
927 				 * characteristics, or is locked (which
928 				 * means it could be undergoing a background
929 				 * I/O or be in a weird state), then don't
930 				 * cluster with it.
931 				 */
932 				if (BUF_LOCK(tbp,
933 				    LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK,
934 				    BO_LOCKPTR(bo)))
935 					break;
936 
937 				if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
938 				    B_INVAL | B_DELWRI | B_NEEDCOMMIT))
939 				    != (B_DELWRI | B_CLUSTEROK |
940 				    (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
941 				    tbp->b_wcred != bp->b_wcred) {
942 					BUF_UNLOCK(tbp);
943 					break;
944 				}
945 
946 				/*
947 				 * Check that the combined cluster
948 				 * would make sense with regard to pages
949 				 * and would not be too large
950 				 */
951 				if ((tbp->b_bcount != size) ||
952 				  ((bp->b_blkno + (dbsize * i)) !=
953 				    tbp->b_blkno) ||
954 				  ((tbp->b_npages + bp->b_npages) >
955 				    (vp->v_mount->mnt_iosize_max / PAGE_SIZE))) {
956 					BUF_UNLOCK(tbp);
957 					break;
958 				}
959 
960 				/*
961 				 * Ok, it's passed all the tests,
962 				 * so remove it from the free list
963 				 * and mark it busy. We will use it.
964 				 */
965 				bremfree(tbp);
966 				tbp->b_flags &= ~B_DONE;
967 			} /* end of code for non-first buffers only */
968 			/*
969 			 * If the IO is via the VM then we do some
970 			 * special VM hackery (yuck).  Since the buffer's
971 			 * block size may not be page-aligned it is possible
972 			 * for a page to be shared between two buffers.  We
973 			 * have to get rid of the duplication when building
974 			 * the cluster.
975 			 */
976 			if (tbp->b_flags & B_VMIO) {
977 				vm_page_t m;
978 
979 				if (i == 0) {
980 					vfs_busy_pages_acquire(tbp);
981 				} else { /* if not first buffer */
982 					for (j = 0; j < tbp->b_npages; j += 1) {
983 						m = tbp->b_pages[j];
984 						if (vm_page_trysbusy(m) == 0) {
985 							for (j--; j >= 0; j--)
986 								vm_page_sunbusy(
987 								    tbp->b_pages[j]);
988 							bqrelse(tbp);
989 							goto finishcluster;
990 						}
991 					}
992 				}
993 				vm_object_pip_add(tbp->b_bufobj->bo_object,
994 				    tbp->b_npages);
995 				for (j = 0; j < tbp->b_npages; j += 1) {
996 					m = tbp->b_pages[j];
997 					if ((bp->b_npages == 0) ||
998 					  (bp->b_pages[bp->b_npages - 1] != m)) {
999 						bp->b_pages[bp->b_npages] = m;
1000 						bp->b_npages++;
1001 					}
1002 				}
1003 			}
1004 			bp->b_bcount += size;
1005 			bp->b_bufsize += size;
1006 			/*
1007 			 * If any of the clustered buffers have their
1008 			 * B_BARRIER flag set, transfer that request to
1009 			 * the cluster.
1010 			 */
1011 			bp->b_flags |= (tbp->b_flags & B_BARRIER);
1012 			tbp->b_flags &= ~(B_DONE | B_BARRIER);
1013 			tbp->b_flags |= B_ASYNC;
1014 			tbp->b_ioflags &= ~BIO_ERROR;
1015 			tbp->b_iocmd = BIO_WRITE;
1016 			bundirty(tbp);
1017 			reassignbuf(tbp);		/* put on clean list */
1018 			bufobj_wref(tbp->b_bufobj);
1019 			BUF_KERNPROC(tbp);
1020 			buf_track(tbp, __func__);
1021 			TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
1022 				tbp, b_cluster.cluster_entry);
1023 		}
1024 	finishcluster:
1025 		if (buf_mapped(bp)) {
1026 			pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
1027 			    (vm_page_t *)bp->b_pages, bp->b_npages);
1028 		}
1029 		if (bp->b_bufsize > bp->b_kvasize)
1030 			panic(
1031 			    "cluster_wbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
1032 			    bp->b_bufsize, bp->b_kvasize);
1033 		totalwritten += bp->b_bufsize;
1034 		bp->b_dirtyoff = 0;
1035 		bp->b_dirtyend = bp->b_bufsize;
1036 		bawrite(bp);
1037 
1038 		len -= i;
1039 	}
1040 	return totalwritten;
1041 }
1042 
1043 /*
1044  * Collect all the buffers in a cluster, plus the additional
1045  * buffer passed in as last_bp.
1046  */
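/*
 * Layout note: the cluster_save header and its bs_children pointer array
 * come from a single M_SEGMENT allocation, with bs_children pointing just
 * past the header:
 *
 *	[ struct cluster_save | struct buf *[len + 1] ]
 */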
1047 static struct cluster_save *
1048 cluster_collectbufs(struct vnode *vp, struct vn_clusterw *vnc,
1049     struct buf *last_bp, int gbflags)
1050 {
1051 	struct cluster_save *buflist;
1052 	struct buf *bp;
1053 	daddr_t lbn;
1054 	int i, j, len, error;
1055 
1056 	len = vnc->v_lastw - vnc->v_cstart + 1;
1057 	buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
1058 	    M_SEGMENT, M_WAITOK);
1059 	buflist->bs_nchildren = 0;
1060 	buflist->bs_children = (struct buf **) (buflist + 1);
1061 	for (lbn = vnc->v_cstart, i = 0; i < len; lbn++, i++) {
1062 		error = bread_gb(vp, lbn, last_bp->b_bcount, NOCRED,
1063 		    gbflags, &bp);
1064 		if (error != 0) {
1065 			/*
1066 			 * If read fails, release collected buffers
1067 			 * and return failure.
1068 			 */
1069 			for (j = 0; j < i; j++)
1070 				brelse(buflist->bs_children[j]);
1071 			free(buflist, M_SEGMENT);
1072 			return (NULL);
1073 		}
1074 		buflist->bs_children[i] = bp;
1075 		if (bp->b_blkno == bp->b_lblkno)
1076 			VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno,
1077 				NULL, NULL);
1078 	}
1079 	buflist->bs_children[i] = bp = last_bp;
1080 	if (bp->b_blkno == bp->b_lblkno)
1081 		VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
1082 	buflist->bs_nchildren = i + 1;
1083 	return (buflist);
1084 }
1085 
1086 void
1087 cluster_init_vn(struct vn_clusterw *vnc)
1088 {
1089 	vnc->v_lasta = 0;
1090 	vnc->v_clen = 0;
1091 	vnc->v_cstart = 0;
1092 	vnc->v_lastw = 0;
1093 }
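/*
 * Illustrative use (hypothetical caller): a filesystem resets its
 * per-vnode write-clustering state when it sets up a vnode, e.g.:
 *
 *	cluster_init_vn(&ip->i_clusterw);
 */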
1094