1 /* $NetBSD: vfs_bio.c,v 1.259 2016/02/01 05:05:43 riz Exp $ */
2
3 /*-
4 * Copyright (c) 2007, 2008, 2009 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran, and by Wasabi Systems, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*-
33 * Copyright (c) 1982, 1986, 1989, 1993
34 * The Regents of the University of California. All rights reserved.
35 * (c) UNIX System Laboratories, Inc.
36 * All or some portions of this file are derived from material licensed
37 * to the University of California by American Telephone and Telegraph
38 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
39 * the permission of UNIX System Laboratories, Inc.
40 *
41 * Redistribution and use in source and binary forms, with or without
42 * modification, are permitted provided that the following conditions
43 * are met:
44 * 1. Redistributions of source code must retain the above copyright
45 * notice, this list of conditions and the following disclaimer.
46 * 2. Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in the
48 * documentation and/or other materials provided with the distribution.
49 * 3. Neither the name of the University nor the names of its contributors
50 * may be used to endorse or promote products derived from this software
51 * without specific prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63 * SUCH DAMAGE.
64 *
65 * @(#)vfs_bio.c 8.6 (Berkeley) 1/11/94
66 */
67
68 /*-
69 * Copyright (c) 1994 Christopher G. Demetriou
70 *
71 * Redistribution and use in source and binary forms, with or without
72 * modification, are permitted provided that the following conditions
73 * are met:
74 * 1. Redistributions of source code must retain the above copyright
75 * notice, this list of conditions and the following disclaimer.
76 * 2. Redistributions in binary form must reproduce the above copyright
77 * notice, this list of conditions and the following disclaimer in the
78 * documentation and/or other materials provided with the distribution.
79 * 3. All advertising materials mentioning features or use of this software
80 * must display the following acknowledgement:
81 * This product includes software developed by the University of
82 * California, Berkeley and its contributors.
83 * 4. Neither the name of the University nor the names of its contributors
84 * may be used to endorse or promote products derived from this software
85 * without specific prior written permission.
86 *
87 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
88 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
89 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
90 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
91 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
92 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
93 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
94 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
95 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
96 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
97 * SUCH DAMAGE.
98 *
99 * @(#)vfs_bio.c 8.6 (Berkeley) 1/11/94
100 */
101
102 /*
103 * The buffer cache subsystem.
104 *
105 * Some references:
106 * Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
107 * Leffler, et al.: The Design and Implementation of the 4.3BSD
108 * UNIX Operating System (Addison-Wesley, 1989)
109 *
110 * Locking
111 *
112 * There are three locks:
113 * - bufcache_lock: protects global buffer cache state.
114 * - BC_BUSY: a long term per-buffer lock.
115 * - buf_t::b_objlock: lock on completion (biowait vs biodone).
116 *
117 * For buffers associated with vnodes (the most common case), b_objlock points
118 * to the vnode_t::v_interlock. Otherwise, it points to the generic buffer_lock.
119 *
120 * Lock order:
121 * bufcache_lock ->
122 * buf_t::b_objlock
123 */
124
125 #include <sys/cdefs.h>
126 __KERNEL_RCSID(0, "$NetBSD: vfs_bio.c,v 1.259 2016/02/01 05:05:43 riz Exp $");
127
128 #ifdef _KERNEL_OPT
129 #include "opt_bufcache.h"
130 #include "opt_dtrace.h"
131 #endif
132
133 #include <sys/param.h>
134 #include <sys/systm.h>
135 #include <sys/kernel.h>
136 #include <sys/proc.h>
137 #include <sys/buf.h>
138 #include <sys/vnode.h>
139 #include <sys/mount.h>
140 #include <sys/resourcevar.h>
141 #include <sys/sysctl.h>
142 #include <sys/conf.h>
143 #include <sys/kauth.h>
144 #include <sys/fstrans.h>
145 #include <sys/intr.h>
146 #include <sys/cpu.h>
147 #include <sys/wapbl.h>
148 #include <sys/bitops.h>
149 #include <sys/cprng.h>
150 #include <sys/sdt.h>
151
152 #include <uvm/uvm.h> /* extern struct uvm uvm */
153
154 #include <miscfs/specfs/specdev.h>
155
156 #ifndef BUFPAGES
157 # define BUFPAGES 0
158 #endif
159
160 #ifdef BUFCACHE
161 # if (BUFCACHE < 5) || (BUFCACHE > 95)
162 # error BUFCACHE is not between 5 and 95
163 # endif
164 #else
165 # define BUFCACHE 15
166 #endif
167
168 u_int nbuf; /* desired number of buffer headers */
169 u_int bufpages = BUFPAGES; /* optional hardwired count */
170 u_int bufcache = BUFCACHE; /* max % of RAM to use for buffer cache */
171
172 /* Function prototypes */
173 struct bqueue;
174
175 static void buf_setwm(void);
176 static int buf_trim(void);
177 static void *bufpool_page_alloc(struct pool *, int);
178 static void bufpool_page_free(struct pool *, void *);
179 static buf_t *bio_doread(struct vnode *, daddr_t, int, int);
180 static buf_t *getnewbuf(int, int, int);
181 static int buf_lotsfree(void);
182 static int buf_canrelease(void);
183 static u_long buf_mempoolidx(u_long);
184 static u_long buf_roundsize(u_long);
185 static void *buf_alloc(size_t);
186 static void buf_mrelease(void *, size_t);
187 static void binsheadfree(buf_t *, struct bqueue *);
188 static void binstailfree(buf_t *, struct bqueue *);
189 #ifdef DEBUG
190 static int checkfreelist(buf_t *, struct bqueue *, int);
191 #endif
192 static void biointr(void *);
193 static void biodone2(buf_t *);
194 static void bref(buf_t *);
195 static void brele(buf_t *);
196 static void sysctl_kern_buf_setup(void);
197 static void sysctl_vm_buf_setup(void);
198
199 /*
200 * Definitions for the buffer hash lists.
201 */
202 #define BUFHASH(dvp, lbn) \
203 (&bufhashtbl[(((long)(dvp) >> 8) + (int)(lbn)) & bufhash])
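/*
 * The hash folds the vnode pointer (shifted to drop low alignment bits)
 * together with the logical block number, masked to the table size.
 * Anonymous buffers from geteblk() go on invalhash instead.
 */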
204 LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
205 u_long bufhash;
206 struct bqueue bufqueues[BQUEUES];
207
208 static kcondvar_t needbuffer_cv;
209
210 /*
211 * Buffer queue lock.
212 */
213 kmutex_t bufcache_lock;
214 kmutex_t buffer_lock;
215
216 /* Software ISR for completed transfers. */
217 static void *biodone_sih;
218
219 /* Buffer pool for I/O buffers. */
220 static pool_cache_t buf_cache;
221 static pool_cache_t bufio_cache;
222
223 #define MEMPOOL_INDEX_OFFSET (ilog2(DEV_BSIZE)) /* smallest pool is 512 bytes */
224 #define NMEMPOOLS (ilog2(MAXBSIZE) - MEMPOOL_INDEX_OFFSET + 1)
225 __CTASSERT((1 << (NMEMPOOLS + MEMPOOL_INDEX_OFFSET - 1)) == MAXBSIZE);
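/*
 * For example, with DEV_BSIZE = 512 and MAXBSIZE = 64 kB (typical values;
 * both are machine-dependent), MEMPOOL_INDEX_OFFSET is 9 and NMEMPOOLS is 8,
 * giving power-of-two pools from 512 bytes up to 64 kB.
 */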
226
227 /* Buffer memory pools */
228 static struct pool bmempools[NMEMPOOLS];
229
230 static struct vm_map *buf_map;
231
232 /*
233 * Buffer memory pool allocator.
234 */
235 static void *
236 bufpool_page_alloc(struct pool *pp, int flags)
237 {
238
239 return (void *)uvm_km_alloc(buf_map,
240 MAXBSIZE, MAXBSIZE,
241 ((flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT|UVM_KMF_TRYLOCK)
242 | UVM_KMF_WIRED);
243 }
244
245 static void
246 bufpool_page_free(struct pool *pp, void *v)
247 {
248
249 uvm_km_free(buf_map, (vaddr_t)v, MAXBSIZE, UVM_KMF_WIRED);
250 }
251
252 static struct pool_allocator bufmempool_allocator = {
253 .pa_alloc = bufpool_page_alloc,
254 .pa_free = bufpool_page_free,
255 .pa_pagesz = MAXBSIZE,
256 };
257
258 /* Buffer memory management variables */
259 u_long bufmem_valimit;
260 u_long bufmem_hiwater;
261 u_long bufmem_lowater;
262 u_long bufmem;
263
264 /*
265 * MD code can call this to set a hard limit on the amount
266 * of virtual memory used by the buffer cache.
267 */
268 int
269 buf_setvalimit(vsize_t sz)
270 {
271
272 /* We need to accommodate at least NMEMPOOLS of MAXBSIZE each */
273 if (sz < NMEMPOOLS * MAXBSIZE)
274 return EINVAL;
275
276 bufmem_valimit = sz;
277 return 0;
278 }
279
280 static void
281 buf_setwm(void)
282 {
283
284 bufmem_hiwater = buf_memcalc();
285 /* lowater is approx. 2% of memory (with bufcache = 15) */
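/* (The >> BUFMEM_WMSHIFT below divides by 8; 15% / 8 is about 1.9%.) */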
286 #define BUFMEM_WMSHIFT 3
287 #define BUFMEM_HIWMMIN (64 * 1024 << BUFMEM_WMSHIFT)
288 if (bufmem_hiwater < BUFMEM_HIWMMIN)
289 /* Ensure a reasonable minimum value */
290 bufmem_hiwater = BUFMEM_HIWMMIN;
291 bufmem_lowater = bufmem_hiwater >> BUFMEM_WMSHIFT;
292 }
293
294 #ifdef DEBUG
295 int debug_verify_freelist = 0;
296 static int
297 checkfreelist(buf_t *bp, struct bqueue *dp, int ison)
298 {
299 buf_t *b;
300
301 if (!debug_verify_freelist)
302 return 1;
303
304 TAILQ_FOREACH(b, &dp->bq_queue, b_freelist) {
305 if (b == bp)
306 return ison ? 1 : 0;
307 }
308
309 return ison ? 0 : 1;
310 }
311 #endif
312
313 /*
314 * Insq/Remq for the buffer hash lists.
315 * Call with buffer queue locked.
316 */
317 static void
318 binsheadfree(buf_t *bp, struct bqueue *dp)
319 {
320
321 KASSERT(mutex_owned(&bufcache_lock));
322 KASSERT(bp->b_freelistindex == -1);
323 TAILQ_INSERT_HEAD(&dp->bq_queue, bp, b_freelist);
324 dp->bq_bytes += bp->b_bufsize;
325 bp->b_freelistindex = dp - bufqueues;
326 }
327
328 static void
329 binstailfree(buf_t *bp, struct bqueue *dp)
330 {
331
332 KASSERT(mutex_owned(&bufcache_lock));
333 KASSERTMSG(bp->b_freelistindex == -1, "double free of buffer? "
334 "bp=%p, b_freelistindex=%d\n", bp, bp->b_freelistindex);
335 TAILQ_INSERT_TAIL(&dp->bq_queue, bp, b_freelist);
336 dp->bq_bytes += bp->b_bufsize;
337 bp->b_freelistindex = dp - bufqueues;
338 }
339
340 void
341 bremfree(buf_t *bp)
342 {
343 struct bqueue *dp;
344 int bqidx = bp->b_freelistindex;
345
346 KASSERT(mutex_owned(&bufcache_lock));
347
348 KASSERT(bqidx != -1);
349 dp = &bufqueues[bqidx];
350 KDASSERT(checkfreelist(bp, dp, 1));
351 KASSERT(dp->bq_bytes >= bp->b_bufsize);
352 TAILQ_REMOVE(&dp->bq_queue, bp, b_freelist);
353 dp->bq_bytes -= bp->b_bufsize;
354
355 /* For the sysctl helper. */
356 if (bp == dp->bq_marker)
357 dp->bq_marker = NULL;
358
359 #if defined(DIAGNOSTIC)
360 bp->b_freelistindex = -1;
361 #endif /* defined(DIAGNOSTIC) */
362 }
363
364 /*
365 * Add a reference to a buffer structure that came from buf_cache.
366 */
367 static inline void
368 bref(buf_t *bp)
369 {
370
371 KASSERT(mutex_owned(&bufcache_lock));
372 KASSERT(bp->b_refcnt > 0);
373
374 bp->b_refcnt++;
375 }
376
377 /*
378 * Free an unused buffer structure that came from buf_cache.
379 */
380 static inline void
381 brele(buf_t *bp)
382 {
383
384 KASSERT(mutex_owned(&bufcache_lock));
385 KASSERT(bp->b_refcnt > 0);
386
387 if (bp->b_refcnt-- == 1) {
388 buf_destroy(bp);
389 #ifdef DEBUG
390 memset((char *)bp, 0, sizeof(*bp));
391 #endif
392 pool_cache_put(buf_cache, bp);
393 }
394 }
395
396 /*
397 * Note that for some ports this is used by pmap bootstrap code to
398 * determine the KVA size.
399 */
400 u_long
401 buf_memcalc(void)
402 {
403 u_long n;
404 vsize_t mapsz = 0;
405
406 /*
407 * Determine the upper bound of memory to use for buffers.
408 *
409 * - If bufpages is specified, use that as the number
410 * of pages.
411 *
412 * - Otherwise, use bufcache as the percentage of
413 * physical memory.
414 */
415 if (bufpages != 0) {
416 n = bufpages;
417 } else {
418 if (bufcache < 5) {
419 printf("forcing bufcache %d -> 5", bufcache);
420 bufcache = 5;
421 }
422 if (bufcache > 95) {
423 printf("forcing bufcache %d -> 95", bufcache);
424 bufcache = 95;
425 }
426 if (buf_map != NULL)
427 mapsz = vm_map_max(buf_map) - vm_map_min(buf_map);
428 n = calc_cache_size(mapsz, bufcache,
429 (buf_map != kernel_map) ? 100 : BUFCACHE_VA_MAXPCT)
430 / PAGE_SIZE;
431 }
432
433 n <<= PAGE_SHIFT;
434 if (bufmem_valimit != 0 && n > bufmem_valimit)
435 n = bufmem_valimit;
436
437 return (n);
438 }
439
440 /*
441 * Initialize buffers and hash links for buffers.
442 */
443 void
444 bufinit(void)
445 {
446 struct bqueue *dp;
447 int use_std;
448 u_int i;
449
450 biodone_vfs = biodone;
451
452 mutex_init(&bufcache_lock, MUTEX_DEFAULT, IPL_NONE);
453 mutex_init(&buffer_lock, MUTEX_DEFAULT, IPL_NONE);
454 cv_init(&needbuffer_cv, "needbuf");
455
456 if (bufmem_valimit != 0) {
457 vaddr_t minaddr = 0, maxaddr;
458 buf_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
459 bufmem_valimit, 0, false, 0);
460 if (buf_map == NULL)
461 panic("bufinit: cannot allocate submap");
462 } else
463 buf_map = kernel_map;
464
465 /*
466 * Initialize buffer cache memory parameters.
467 */
468 bufmem = 0;
469 buf_setwm();
470
471 /* On "small" machines use small pool page sizes where possible */
472 use_std = (physmem < atop(16*1024*1024));
473
474 /*
475 * Also use them on systems that can map the pool pages using
476 * a direct-mapped segment.
477 */
478 #ifdef PMAP_MAP_POOLPAGE
479 use_std = 1;
480 #endif
481
482 buf_cache = pool_cache_init(sizeof(buf_t), 0, 0, 0,
483 "bufpl", NULL, IPL_SOFTBIO, NULL, NULL, NULL);
484 bufio_cache = pool_cache_init(sizeof(buf_t), 0, 0, 0,
485 "biopl", NULL, IPL_BIO, NULL, NULL, NULL);
486
487 for (i = 0; i < NMEMPOOLS; i++) {
488 struct pool_allocator *pa;
489 struct pool *pp = &bmempools[i];
490 u_int size = 1 << (i + MEMPOOL_INDEX_OFFSET);
491 char *name = kmem_alloc(8, KM_SLEEP); /* XXX: never freed */
492 if (__predict_false(size >= 1048576))
493 (void)snprintf(name, 8, "buf%um", size / 1048576);
494 else if (__predict_true(size >= 1024))
495 (void)snprintf(name, 8, "buf%uk", size / 1024);
496 else
497 (void)snprintf(name, 8, "buf%ub", size);
498 pa = (size <= PAGE_SIZE && use_std)
499 ? &pool_allocator_nointr
500 : &bufmempool_allocator;
501 pool_init(pp, size, 0, 0, 0, name, pa, IPL_NONE);
502 pool_setlowat(pp, 1);
503 pool_sethiwat(pp, 1);
504 }
505
506 /* Initialize the buffer queues */
507 for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++) {
508 TAILQ_INIT(&dp->bq_queue);
509 dp->bq_bytes = 0;
510 }
511
512 /*
513 * Estimate hash table size based on the amount of memory we
514 * intend to use for the buffer cache. The average buffer
515 * size is dependent on our clients (i.e. filesystems).
516 *
517 * For now, use an empirical 3K per buffer.
518 */
519 nbuf = (bufmem_hiwater / 1024) / 3;
520 bufhashtbl = hashinit(nbuf, HASH_LIST, true, &bufhash);
521
522 sysctl_kern_buf_setup();
523 sysctl_vm_buf_setup();
524 }
525
526 void
527 bufinit2(void)
528 {
529
530 biodone_sih = softint_establish(SOFTINT_BIO | SOFTINT_MPSAFE, biointr,
531 NULL);
532 if (biodone_sih == NULL)
533 panic("bufinit2: can't establish soft interrupt");
534 }
535
536 static int
537 buf_lotsfree(void)
538 {
539 u_long guess;
540
541 /* Always allocate if less than the low water mark. */
542 if (bufmem < bufmem_lowater)
543 return 1;
544
545 /* Never allocate if greater than the high water mark. */
546 if (bufmem > bufmem_hiwater)
547 return 0;
548
549 /* If there's anything on the AGE list, it should be eaten. */
550 if (TAILQ_FIRST(&bufqueues[BQ_AGE].bq_queue) != NULL)
551 return 0;
552
553 /*
554 * The probability of getting a new allocation is inversely
555 * proportional to the current size of the cache above
556 * the low water mark. Divide the total first to avoid overflows
557 * in the product.
558 */
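/*
 * Example: guess is uniform in [0, 15]; if bufmem sits halfway between
 * the low and high water marks, the test below succeeds for guess >= 8,
 * i.e. about half of all allocation attempts.
 */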
559 guess = cprng_fast32() % 16;
560
561 if ((bufmem_hiwater - bufmem_lowater) / 16 * guess >=
562 (bufmem - bufmem_lowater))
563 return 1;
564
565 /* Otherwise don't allocate. */
566 return 0;
567 }
568
569 /*
570 * Return estimate of bytes we think need to be
571 * released to help resolve low memory conditions.
572 *
573 * => called with bufcache_lock held.
574 */
575 static int
576 buf_canrelease(void)
577 {
578 int pagedemand, ninvalid = 0;
579
580 KASSERT(mutex_owned(&bufcache_lock));
581
582 if (bufmem < bufmem_lowater)
583 return 0;
584
585 if (bufmem > bufmem_hiwater)
586 return bufmem - bufmem_hiwater;
587
588 ninvalid += bufqueues[BQ_AGE].bq_bytes;
589
590 pagedemand = uvmexp.freetarg - uvmexp.free;
591 if (pagedemand < 0)
592 return ninvalid;
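/*
 * Release at least the invalid bytes; beyond that, take the smallest of
 * 2*MAXBSIZE, 1/16 of the excess over the low water mark, and the page
 * daemon's current shortfall.
 */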
593 return MAX(ninvalid, MIN(2 * MAXBSIZE,
594 MIN((bufmem - bufmem_lowater) / 16, pagedemand * PAGE_SIZE)));
595 }
596
597 /*
598 * Buffer memory allocation helper functions
599 */
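/*
 * buf_mempoolidx() maps a size to the smallest pool that can hold it,
 * i.e. roughly ilog2(roundup2(size)) - MEMPOOL_INDEX_OFFSET. For example,
 * with DEV_BSIZE = 512, a 4096-byte request yields index 3, and
 * buf_roundsize(3000) returns 4096.
 */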
600 static u_long
601 buf_mempoolidx(u_long size)
602 {
603 u_int n = 0;
604
605 size -= 1;
606 size >>= MEMPOOL_INDEX_OFFSET;
607 while (size) {
608 size >>= 1;
609 n += 1;
610 }
611 if (n >= NMEMPOOLS)
612 panic("buf mem pool index %d", n);
613 return n;
614 }
615
616 static u_long
617 buf_roundsize(u_long size)
618 {
619 /* Round up to nearest power of 2 */
620 return (1 << (buf_mempoolidx(size) + MEMPOOL_INDEX_OFFSET));
621 }
622
623 static void *
624 buf_alloc(size_t size)
625 {
626 u_int n = buf_mempoolidx(size);
627 void *addr;
628
629 while (1) {
630 addr = pool_get(&bmempools[n], PR_NOWAIT);
631 if (addr != NULL)
632 break;
633
634 /* No memory, see if we can free some. If so, try again */
635 mutex_enter(&bufcache_lock);
636 if (buf_drain(1) > 0) {
637 mutex_exit(&bufcache_lock);
638 continue;
639 }
640
641 if (curlwp == uvm.pagedaemon_lwp) {
642 mutex_exit(&bufcache_lock);
643 return NULL;
644 }
645
646 /* Wait for buffers to arrive on the LRU queue */
647 cv_timedwait(&needbuffer_cv, &bufcache_lock, hz / 4);
648 mutex_exit(&bufcache_lock);
649 }
650
651 return addr;
652 }
653
654 static void
655 buf_mrelease(void *addr, size_t size)
656 {
657
658 pool_put(&bmempools[buf_mempoolidx(size)], addr);
659 }
660
661 /*
662 * bread()/breadn() helper.
663 */
664 static buf_t *
665 bio_doread(struct vnode *vp, daddr_t blkno, int size, int async)
666 {
667 buf_t *bp;
668 struct mount *mp;
669
670 bp = getblk(vp, blkno, size, 0, 0);
671
672 /*
673 * getblk() may return NULL if we are the pagedaemon.
674 */
675 if (bp == NULL) {
676 KASSERT(curlwp == uvm.pagedaemon_lwp);
677 return NULL;
678 }
679
680 /*
681 * If the buffer does not have valid data, start a read.
682 * Note that if buffer is BC_INVAL, getblk() won't return it.
683 * Therefore, it's valid if its I/O has completed or been delayed.
684 */
685 if (!ISSET(bp->b_oflags, (BO_DONE | BO_DELWRI))) {
686 /* Start I/O for the buffer. */
687 SET(bp->b_flags, B_READ | async);
688 if (async)
689 BIO_SETPRIO(bp, BPRIO_TIMELIMITED);
690 else
691 BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
692 VOP_STRATEGY(vp, bp);
693
694 /* Pay for the read. */
695 curlwp->l_ru.ru_inblock++;
696 } else if (async)
697 brelse(bp, 0);
698
699 if (vp->v_type == VBLK)
700 mp = spec_node_getmountedfs(vp);
701 else
702 mp = vp->v_mount;
703
704 /*
705 * Collect statistics on synchronous and asynchronous reads.
706 * Reads from block devices are charged to their associated
707 * filesystem (if any).
708 */
709 if (mp != NULL) {
710 if (async == 0)
711 mp->mnt_stat.f_syncreads++;
712 else
713 mp->mnt_stat.f_asyncreads++;
714 }
715
716 return (bp);
717 }
718
719 /*
720 * Read a disk block.
721 * This algorithm is described in Bach (p.54).
722 */
723 int
724 bread(struct vnode *vp, daddr_t blkno, int size, int flags, buf_t **bpp)
725 {
726 buf_t *bp;
727 int error;
728
729 /* Get buffer for block. */
730 bp = *bpp = bio_doread(vp, blkno, size, 0);
731 if (bp == NULL)
732 return ENOMEM;
733
734 /* Wait for the read to complete, and return result. */
735 error = biowait(bp);
736 if (error == 0 && (flags & B_MODIFY) != 0)
737 error = fscow_run(bp, true);
738 if (error) {
739 brelse(bp, 0);
740 *bpp = NULL;
741 }
742
743 return error;
744 }
745
746 /*
747 * Read-ahead multiple disk blocks. The first is sync, the rest async.
748 * Trivial modification to the breada algorithm presented in Bach (p.55).
749 */
750 int
751 breadn(struct vnode *vp, daddr_t blkno, int size, daddr_t *rablks,
752 int *rasizes, int nrablks, int flags, buf_t **bpp)
753 {
754 buf_t *bp;
755 int error, i;
756
757 bp = *bpp = bio_doread(vp, blkno, size, 0);
758 if (bp == NULL)
759 return ENOMEM;
760
761 /*
762 * For each of the read-ahead blocks, start a read, if necessary.
763 */
764 mutex_enter(&bufcache_lock);
765 for (i = 0; i < nrablks; i++) {
766 /* If it's in the cache, just go on to next one. */
767 if (incore(vp, rablks[i]))
768 continue;
769
770 /* Get a buffer for the read-ahead block */
771 mutex_exit(&bufcache_lock);
772 (void) bio_doread(vp, rablks[i], rasizes[i], B_ASYNC);
773 mutex_enter(&bufcache_lock);
774 }
775 mutex_exit(&bufcache_lock);
776
777 /* Otherwise, we had to start a read for it; wait until it's valid. */
778 error = biowait(bp);
779 if (error == 0 && (flags & B_MODIFY) != 0)
780 error = fscow_run(bp, true);
781 if (error) {
782 brelse(bp, 0);
783 *bpp = NULL;
784 }
785
786 return error;
787 }
788
789 /*
790 * Block write. Described in Bach (p.56)
791 */
792 int
793 bwrite(buf_t *bp)
794 {
795 int rv, sync, wasdelayed;
796 struct vnode *vp;
797 struct mount *mp;
798
799 KASSERT(ISSET(bp->b_cflags, BC_BUSY));
800 KASSERT(!cv_has_waiters(&bp->b_done));
801
802 vp = bp->b_vp;
803 if (vp != NULL) {
804 KASSERT(bp->b_objlock == vp->v_interlock);
805 if (vp->v_type == VBLK)
806 mp = spec_node_getmountedfs(vp);
807 else
808 mp = vp->v_mount;
809 } else {
810 mp = NULL;
811 }
812
813 if (mp && mp->mnt_wapbl) {
814 if (bp->b_iodone != mp->mnt_wapbl_op->wo_wapbl_biodone) {
815 bdwrite(bp);
816 return 0;
817 }
818 }
819
820 /*
821 * Remember buffer type, to switch on it later. If the write was
822 * synchronous, but the file system was mounted with MNT_ASYNC,
823 * convert it to a delayed write.
824 * XXX note that this relies on delayed tape writes being converted
825 * to async, not sync writes (which is safe, but ugly).
826 */
827 sync = !ISSET(bp->b_flags, B_ASYNC);
828 if (sync && mp != NULL && ISSET(mp->mnt_flag, MNT_ASYNC)) {
829 bdwrite(bp);
830 return (0);
831 }
832
833 /*
834 * Collect statistics on synchronous and asynchronous writes.
835 * Writes to block devices are charged to their associated
836 * filesystem (if any).
837 */
838 if (mp != NULL) {
839 if (sync)
840 mp->mnt_stat.f_syncwrites++;
841 else
842 mp->mnt_stat.f_asyncwrites++;
843 }
844
845 /*
846 * Pay for the I/O operation and make sure the buf is on the correct
847 * vnode queue.
848 */
849 bp->b_error = 0;
850 wasdelayed = ISSET(bp->b_oflags, BO_DELWRI);
851 CLR(bp->b_flags, B_READ);
852 if (wasdelayed) {
853 mutex_enter(&bufcache_lock);
854 mutex_enter(bp->b_objlock);
855 CLR(bp->b_oflags, BO_DONE | BO_DELWRI);
856 reassignbuf(bp, bp->b_vp);
857 mutex_exit(&bufcache_lock);
858 } else {
859 curlwp->l_ru.ru_oublock++;
860 mutex_enter(bp->b_objlock);
861 CLR(bp->b_oflags, BO_DONE | BO_DELWRI);
862 }
863 if (vp != NULL)
864 vp->v_numoutput++;
865 mutex_exit(bp->b_objlock);
866
867 /* Initiate disk write. */
868 if (sync)
869 BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
870 else
871 BIO_SETPRIO(bp, BPRIO_TIMELIMITED);
872
873 VOP_STRATEGY(vp, bp);
874
875 if (sync) {
876 /* If I/O was synchronous, wait for it to complete. */
877 rv = biowait(bp);
878
879 /* Release the buffer. */
880 brelse(bp, 0);
881
882 return (rv);
883 } else {
884 return (0);
885 }
886 }
887
888 int
889 vn_bwrite(void *v)
890 {
891 struct vop_bwrite_args *ap = v;
892
893 return (bwrite(ap->a_bp));
894 }
895
896 /*
897 * Delayed write.
898 *
899 * The buffer is marked dirty, but is not queued for I/O.
900 * This routine should be used when the buffer is expected
901 * to be modified again soon, typically a small write that
902 * partially fills a buffer.
903 *
904 * NB: magnetic tapes cannot be delayed; they must be
905 * written in the order that the writes are requested.
906 *
907 * Described in Leffler, et al. (pp. 208-213).
908 */
909 void
910 bdwrite(buf_t *bp)
911 {
912
913 KASSERT(bp->b_vp == NULL || bp->b_vp->v_tag != VT_UFS ||
914 bp->b_vp->v_type == VBLK || ISSET(bp->b_flags, B_COWDONE));
915 KASSERT(ISSET(bp->b_cflags, BC_BUSY));
916 KASSERT(!cv_has_waiters(&bp->b_done));
917
918 /* If this is a tape block, write the block now. */
919 if (bdev_type(bp->b_dev) == D_TAPE) {
920 bawrite(bp);
921 return;
922 }
923
924 if (wapbl_vphaswapbl(bp->b_vp)) {
925 struct mount *mp = wapbl_vptomp(bp->b_vp);
926
927 if (bp->b_iodone != mp->mnt_wapbl_op->wo_wapbl_biodone) {
928 WAPBL_ADD_BUF(mp, bp);
929 }
930 }
931
932 /*
933 * If the block hasn't been seen before:
934 * (1) Mark it as having been seen,
935 * (2) Charge for the write,
936 * (3) Make sure it's on its vnode's correct block list.
937 */
938 KASSERT(bp->b_vp == NULL || bp->b_objlock == bp->b_vp->v_interlock);
939
940 if (!ISSET(bp->b_oflags, BO_DELWRI)) {
941 mutex_enter(&bufcache_lock);
942 mutex_enter(bp->b_objlock);
943 SET(bp->b_oflags, BO_DELWRI);
944 curlwp->l_ru.ru_oublock++;
945 reassignbuf(bp, bp->b_vp);
946 mutex_exit(&bufcache_lock);
947 } else {
948 mutex_enter(bp->b_objlock);
949 }
950 /* Otherwise, the "write" is done, so mark and release the buffer. */
951 CLR(bp->b_oflags, BO_DONE);
952 mutex_exit(bp->b_objlock);
953
954 brelse(bp, 0);
955 }
956
957 /*
958 * Asynchronous block write; just an asynchronous bwrite().
959 */
960 void
961 bawrite(buf_t *bp)
962 {
963
964 KASSERT(ISSET(bp->b_cflags, BC_BUSY));
965 KASSERT(bp->b_vp != NULL);
966
967 SET(bp->b_flags, B_ASYNC);
968 VOP_BWRITE(bp->b_vp, bp);
969 }
970
971 /*
972 * Release a buffer on to the free lists.
973 * Described in Bach (p. 46).
974 */
975 void
976 brelsel(buf_t *bp, int set)
977 {
978 struct bqueue *bufq;
979 struct vnode *vp;
980
981 KASSERT(bp != NULL);
982 KASSERT(mutex_owned(&bufcache_lock));
983 KASSERT(!cv_has_waiters(&bp->b_done));
984 KASSERT(bp->b_refcnt > 0);
985
986 SET(bp->b_cflags, set);
987
988 KASSERT(ISSET(bp->b_cflags, BC_BUSY));
989 KASSERT(bp->b_iodone == NULL);
990
991 /* Wake up any processes waiting for any buffer to become free. */
992 cv_signal(&needbuffer_cv);
993
994 /* Wake up any processes waiting for _this_ buffer to become free. */
995 if (ISSET(bp->b_cflags, BC_WANTED))
996 CLR(bp->b_cflags, BC_WANTED|BC_AGE);
997
998 /* If it's clean clear the copy-on-write flag. */
999 if (ISSET(bp->b_flags, B_COWDONE)) {
1000 mutex_enter(bp->b_objlock);
1001 if (!ISSET(bp->b_oflags, BO_DELWRI))
1002 CLR(bp->b_flags, B_COWDONE);
1003 mutex_exit(bp->b_objlock);
1004 }
1005
1006 /*
1007 * Determine which queue the buffer should be on, then put it there.
1008 */
1009
1010 /* If it's locked, don't report an error; try again later. */
1011 if (ISSET(bp->b_flags, B_LOCKED))
1012 bp->b_error = 0;
1013
1014 /* If it's not cacheable, or an error, mark it invalid. */
1015 if (ISSET(bp->b_cflags, BC_NOCACHE) || bp->b_error != 0)
1016 SET(bp->b_cflags, BC_INVAL);
1017
1018 if (ISSET(bp->b_cflags, BC_VFLUSH)) {
1019 /*
1020 * This is a delayed write buffer that was just flushed to
1021 * disk. It is still on the LRU queue. If it's become
1022 * invalid, then we need to move it to a different queue;
1023 * otherwise leave it in its current position.
1024 */
1025 CLR(bp->b_cflags, BC_VFLUSH);
1026 if (!ISSET(bp->b_cflags, BC_INVAL|BC_AGE) &&
1027 !ISSET(bp->b_flags, B_LOCKED) && bp->b_error == 0) {
1028 KDASSERT(checkfreelist(bp, &bufqueues[BQ_LRU], 1));
1029 goto already_queued;
1030 } else {
1031 bremfree(bp);
1032 }
1033 }
1034
1035 KDASSERT(checkfreelist(bp, &bufqueues[BQ_AGE], 0));
1036 KDASSERT(checkfreelist(bp, &bufqueues[BQ_LRU], 0));
1037 KDASSERT(checkfreelist(bp, &bufqueues[BQ_LOCKED], 0));
1038
1039 if ((bp->b_bufsize <= 0) || ISSET(bp->b_cflags, BC_INVAL)) {
1040 /*
1041 * If it's invalid or empty, dissociate it from its vnode
1042 * and put on the head of the appropriate queue.
1043 */
1044 if (ISSET(bp->b_flags, B_LOCKED)) {
1045 if (wapbl_vphaswapbl(vp = bp->b_vp)) {
1046 struct mount *mp = wapbl_vptomp(vp);
1047
1048 KASSERT(bp->b_iodone
1049 != mp->mnt_wapbl_op->wo_wapbl_biodone);
1050 WAPBL_REMOVE_BUF(mp, bp);
1051 }
1052 }
1053
1054 mutex_enter(bp->b_objlock);
1055 CLR(bp->b_oflags, BO_DONE|BO_DELWRI);
1056 if ((vp = bp->b_vp) != NULL) {
1057 KASSERT(bp->b_objlock == vp->v_interlock);
1058 reassignbuf(bp, bp->b_vp);
1059 brelvp(bp);
1060 mutex_exit(vp->v_interlock);
1061 } else {
1062 KASSERT(bp->b_objlock == &buffer_lock);
1063 mutex_exit(bp->b_objlock);
1064 }
1065
1066 if (bp->b_bufsize <= 0)
1067 /* no data */
1068 goto already_queued;
1069 else
1070 /* invalid data */
1071 bufq = &bufqueues[BQ_AGE];
1072 binsheadfree(bp, bufq);
1073 } else {
1074 /*
1075 * It has valid data. Put it on the end of the appropriate
1076 * queue, so that it'll stick around for as long as possible.
1077 * If the buf is AGE, but has dependencies, we must put it on the last
1078 * bufqueue to be scanned, i.e. LRU. This protects against the
1079 * livelock where BQ_AGE only has buffers with dependencies,
1080 * and we thus never get to the dependent buffers in BQ_LRU.
1081 */
1082 if (ISSET(bp->b_flags, B_LOCKED)) {
1083 /* locked in core */
1084 bufq = &bufqueues[BQ_LOCKED];
1085 } else if (!ISSET(bp->b_cflags, BC_AGE)) {
1086 /* valid data */
1087 bufq = &bufqueues[BQ_LRU];
1088 } else {
1089 /* stale but valid data */
1090 bufq = &bufqueues[BQ_AGE];
1091 }
1092 binstailfree(bp, bufq);
1093 }
1094 already_queued:
1095 /* Unlock the buffer. */
1096 CLR(bp->b_cflags, BC_AGE|BC_BUSY|BC_NOCACHE);
1097 CLR(bp->b_flags, B_ASYNC);
1098 cv_broadcast(&bp->b_busy);
1099
1100 if (bp->b_bufsize <= 0)
1101 brele(bp);
1102 }
1103
1104 void
1105 brelse(buf_t *bp, int set)
1106 {
1107
1108 mutex_enter(&bufcache_lock);
1109 brelsel(bp, set);
1110 mutex_exit(&bufcache_lock);
1111 }
1112
1113 /*
1114 * Determine if a block is in the cache.
1115 * Just look in what would be its hash chain. If it's there, return
1116 * a pointer to it, unless it's marked invalid; buffers marked
1117 * invalid are never returned.
1119 */
1120 buf_t *
1121 incore(struct vnode *vp, daddr_t blkno)
1122 {
1123 buf_t *bp;
1124
1125 KASSERT(mutex_owned(&bufcache_lock));
1126
1127 /* Search hash chain */
1128 LIST_FOREACH(bp, BUFHASH(vp, blkno), b_hash) {
1129 if (bp->b_lblkno == blkno && bp->b_vp == vp &&
1130 !ISSET(bp->b_cflags, BC_INVAL)) {
1131 KASSERT(bp->b_objlock == vp->v_interlock);
1132 return (bp);
1133 }
1134 }
1135
1136 return (NULL);
1137 }
1138
1139 /*
1140 * Get a block of requested size that is associated with
1141 * a given vnode and block offset. If it is found in the
1142 * block cache, mark it as having been found, make it busy
1143 * and return it. Otherwise, return an empty block of the
1144 * correct size. It is up to the caller to ensure that the
1145 * cached blocks are of the correct size.
1146 */
1147 buf_t *
1148 getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo)
1149 {
1150 int err, preserve;
1151 buf_t *bp;
1152
1153 mutex_enter(&bufcache_lock);
1154 loop:
1155 bp = incore(vp, blkno);
1156 if (bp != NULL) {
1157 err = bbusy(bp, ((slpflag & PCATCH) != 0), slptimeo, NULL);
1158 if (err != 0) {
1159 if (err == EPASSTHROUGH)
1160 goto loop;
1161 mutex_exit(&bufcache_lock);
1162 return (NULL);
1163 }
1164 KASSERT(!cv_has_waiters(&bp->b_done));
1165 #ifdef DIAGNOSTIC
1166 if (ISSET(bp->b_oflags, BO_DONE|BO_DELWRI) &&
1167 bp->b_bcount < size && vp->v_type != VBLK)
1168 panic("getblk: block size invariant failed");
1169 #endif
1170 bremfree(bp);
1171 preserve = 1;
1172 } else {
1173 if ((bp = getnewbuf(slpflag, slptimeo, 0)) == NULL)
1174 goto loop;
1175
1176 if (incore(vp, blkno) != NULL) {
1177 /* The block has come into memory in the meantime. */
1178 brelsel(bp, 0);
1179 goto loop;
1180 }
1181
1182 LIST_INSERT_HEAD(BUFHASH(vp, blkno), bp, b_hash);
1183 bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = blkno;
1184 mutex_enter(vp->v_interlock);
1185 bgetvp(vp, bp);
1186 mutex_exit(vp->v_interlock);
1187 preserve = 0;
1188 }
1189 mutex_exit(&bufcache_lock);
1190
1191 /*
1192 * LFS can't track total size of B_LOCKED buffer (locked_queue_bytes)
1193 * if we re-size buffers here.
1194 */
1195 if (ISSET(bp->b_flags, B_LOCKED)) {
1196 KASSERT(bp->b_bufsize >= size);
1197 } else {
1198 if (allocbuf(bp, size, preserve)) {
1199 mutex_enter(&bufcache_lock);
1200 LIST_REMOVE(bp, b_hash);
1201 mutex_exit(&bufcache_lock);
1202 brelse(bp, BC_INVAL);
1203 return NULL;
1204 }
1205 }
1206 BIO_SETPRIO(bp, BPRIO_DEFAULT);
1207 return (bp);
1208 }
1209
1210 /*
1211 * Get an empty, disassociated buffer of given size.
1212 */
1213 buf_t *
1214 geteblk(int size)
1215 {
1216 buf_t *bp;
1217 int error __diagused;
1218
1219 mutex_enter(&bufcache_lock);
1220 while ((bp = getnewbuf(0, 0, 0)) == NULL)
1221 ;
1222
1223 SET(bp->b_cflags, BC_INVAL);
1224 LIST_INSERT_HEAD(&invalhash, bp, b_hash);
1225 mutex_exit(&bufcache_lock);
1226 BIO_SETPRIO(bp, BPRIO_DEFAULT);
1227 error = allocbuf(bp, size, 0);
1228 KASSERT(error == 0);
1229 return (bp);
1230 }
1231
1232 /*
1233 * Expand or contract the actual memory allocated to a buffer.
1234 *
1235 * If the buffer shrinks, data is lost, so it's up to the
1236 * caller to have written it out *first*; this routine will not
1237 * start a write. If the buffer grows, it is the caller's
1238 * responsibility to fill out the buffer's additional contents.
1239 */
1240 int
1241 allocbuf(buf_t *bp, int size, int preserve)
1242 {
1243 void *addr;
1244 vsize_t oldsize, desired_size;
1245 int oldcount;
1246 int delta;
1247
1248 desired_size = buf_roundsize(size);
1249 if (desired_size > MAXBSIZE)
1250 printf("allocbuf: buffer larger than MAXBSIZE requested");
1251
1252 oldcount = bp->b_bcount;
1253
1254 bp->b_bcount = size;
1255
1256 oldsize = bp->b_bufsize;
1257 if (oldsize == desired_size) {
1258 /*
1259 * Do not short cut the WAPBL resize, as the buffer length
1260 * could still have changed and this would corrupt the
1261 * tracking of the transaction length.
1262 */
1263 goto out;
1264 }
1265
1266 /*
1267 * If we want a buffer of a different size, re-allocate the
1268 * buffer's memory; copy old content only if needed.
1269 */
1270 addr = buf_alloc(desired_size);
1271 if (addr == NULL)
1272 return ENOMEM;
1273 if (preserve)
1274 memcpy(addr, bp->b_data, MIN(oldsize,desired_size));
1275 if (bp->b_data != NULL)
1276 buf_mrelease(bp->b_data, oldsize);
1277 bp->b_data = addr;
1278 bp->b_bufsize = desired_size;
1279
1280 /*
1281 * Update overall buffer memory counter (protected by bufcache_lock)
1282 */
1283 delta = (long)desired_size - (long)oldsize;
1284
1285 mutex_enter(&bufcache_lock);
1286 if ((bufmem += delta) > bufmem_hiwater) {
1287 /*
1288 * Need to trim overall memory usage.
1289 */
1290 while (buf_canrelease()) {
1291 if (curcpu()->ci_schedstate.spc_flags &
1292 SPCF_SHOULDYIELD) {
1293 mutex_exit(&bufcache_lock);
1294 preempt();
1295 mutex_enter(&bufcache_lock);
1296 }
1297 if (buf_trim() == 0)
1298 break;
1299 }
1300 }
1301 mutex_exit(&bufcache_lock);
1302
1303 out:
1304 if (wapbl_vphaswapbl(bp->b_vp))
1305 WAPBL_RESIZE_BUF(wapbl_vptomp(bp->b_vp), bp, oldsize, oldcount);
1306
1307 return 0;
1308 }
1309
1310 /*
1311 * Find a buffer which is available for use.
1312 * Select something from a free list.
1313 * Preference is to AGE list, then LRU list.
1314 *
1315 * Called with the buffer queues locked.
1316 * Return buffer locked.
1317 */
1318 buf_t *
1319 getnewbuf(int slpflag, int slptimeo, int from_bufq)
1320 {
1321 buf_t *bp;
1322 struct vnode *vp;
1323
1324 start:
1325 KASSERT(mutex_owned(&bufcache_lock));
1326
1327 /*
1328 * Get a new buffer from the pool.
1329 */
1330 if (!from_bufq && buf_lotsfree()) {
1331 mutex_exit(&bufcache_lock);
1332 bp = pool_cache_get(buf_cache, PR_NOWAIT);
1333 if (bp != NULL) {
1334 memset((char *)bp, 0, sizeof(*bp));
1335 buf_init(bp);
1336 SET(bp->b_cflags, BC_BUSY); /* mark buffer busy */
1337 mutex_enter(&bufcache_lock);
1338 #if defined(DIAGNOSTIC)
1339 bp->b_freelistindex = -1;
1340 #endif /* defined(DIAGNOSTIC) */
1341 return (bp);
1342 }
1343 mutex_enter(&bufcache_lock);
1344 }
1345
1346 KASSERT(mutex_owned(&bufcache_lock));
1347 if ((bp = TAILQ_FIRST(&bufqueues[BQ_AGE].bq_queue)) != NULL ||
1348 (bp = TAILQ_FIRST(&bufqueues[BQ_LRU].bq_queue)) != NULL) {
1349 KASSERT(!ISSET(bp->b_cflags, BC_BUSY) || ISSET(bp->b_cflags, BC_VFLUSH));
1350 bremfree(bp);
1351
1352 /* Buffer is no longer on free lists. */
1353 SET(bp->b_cflags, BC_BUSY);
1354 } else {
1355 /*
1356 * XXX: !from_bufq should be removed.
1357 */
1358 if (!from_bufq || curlwp != uvm.pagedaemon_lwp) {
1359 /* wait for a free buffer of any kind */
1360 if ((slpflag & PCATCH) != 0)
1361 (void)cv_timedwait_sig(&needbuffer_cv,
1362 &bufcache_lock, slptimeo);
1363 else
1364 (void)cv_timedwait(&needbuffer_cv,
1365 &bufcache_lock, slptimeo);
1366 }
1367 return (NULL);
1368 }
1369
1370 #ifdef DIAGNOSTIC
1371 if (bp->b_bufsize <= 0)
1372 panic("buffer %p: on queue but empty", bp);
1373 #endif
1374
1375 if (ISSET(bp->b_cflags, BC_VFLUSH)) {
1376 /*
1377 * This is a delayed write buffer being flushed to disk. Make
1378 * sure it gets aged out of the queue when it's finished, and
1379 * leave it off the LRU queue.
1380 */
1381 CLR(bp->b_cflags, BC_VFLUSH);
1382 SET(bp->b_cflags, BC_AGE);
1383 goto start;
1384 }
1385
1386 KASSERT(ISSET(bp->b_cflags, BC_BUSY));
1387 KASSERT(bp->b_refcnt > 0);
1388 KASSERT(!cv_has_waiters(&bp->b_done));
1389
1390 /*
1391 * If buffer was a delayed write, start it and return NULL
1392 * (since we might sleep while starting the write).
1393 */
1394 if (ISSET(bp->b_oflags, BO_DELWRI)) {
1395 /*
1396 * This buffer has gone through the LRU, so make sure it gets
1397 * reused ASAP.
1398 */
1399 SET(bp->b_cflags, BC_AGE);
1400 mutex_exit(&bufcache_lock);
1401 bawrite(bp);
1402 mutex_enter(&bufcache_lock);
1403 return (NULL);
1404 }
1405
1406 vp = bp->b_vp;
1407
1408 /* clear out various other fields */
1409 bp->b_cflags = BC_BUSY;
1410 bp->b_oflags = 0;
1411 bp->b_flags = 0;
1412 bp->b_dev = NODEV;
1413 bp->b_blkno = 0;
1414 bp->b_lblkno = 0;
1415 bp->b_rawblkno = 0;
1416 bp->b_iodone = 0;
1417 bp->b_error = 0;
1418 bp->b_resid = 0;
1419 bp->b_bcount = 0;
1420
1421 LIST_REMOVE(bp, b_hash);
1422
1423 /* Disassociate us from our vnode, if we had one... */
1424 if (vp != NULL) {
1425 mutex_enter(vp->v_interlock);
1426 brelvp(bp);
1427 mutex_exit(vp->v_interlock);
1428 }
1429
1430 return (bp);
1431 }
1432
1433 /*
1434 * Attempt to free an aged buffer off the queues.
1435 * Called with queue lock held.
1436 * Returns the amount of buffer memory freed.
1437 */
1438 static int
1439 buf_trim(void)
1440 {
1441 buf_t *bp;
1442 long size;
1443
1444 KASSERT(mutex_owned(&bufcache_lock));
1445
1446 /* Instruct getnewbuf() to get buffers off the queues */
1447 if ((bp = getnewbuf(PCATCH, 1, 1)) == NULL)
1448 return 0;
1449
1450 KASSERT((bp->b_cflags & BC_WANTED) == 0);
1451 size = bp->b_bufsize;
1452 bufmem -= size;
1453 if (size > 0) {
1454 buf_mrelease(bp->b_data, size);
1455 bp->b_bcount = bp->b_bufsize = 0;
1456 }
1457 /* brelse() will return the buffer to the global buffer pool */
1458 brelsel(bp, 0);
1459 return size;
1460 }
1461
1462 int
1463 buf_drain(int n)
1464 {
1465 int size = 0, sz;
1466
1467 KASSERT(mutex_owned(&bufcache_lock));
1468
1469 while (size < n && bufmem > bufmem_lowater) {
1470 sz = buf_trim();
1471 if (sz <= 0)
1472 break;
1473 size += sz;
1474 }
1475
1476 return size;
1477 }
1478
1479 SDT_PROVIDER_DEFINE(io);
1480
1481 SDT_PROBE_DEFINE1(io, kernel, , wait__start, "struct buf *"/*bp*/);
1482 SDT_PROBE_DEFINE1(io, kernel, , wait__done, "struct buf *"/*bp*/);
1483
1484 /*
1485 * Wait for operations on the buffer to complete.
1486 * When they do, extract and return the I/O's error value.
1487 */
1488 int
1489 biowait(buf_t *bp)
1490 {
1491
1492 KASSERT(ISSET(bp->b_cflags, BC_BUSY));
1493 KASSERT(bp->b_refcnt > 0);
1494
1495 SDT_PROBE1(io, kernel, , wait__start, bp);
1496
1497 mutex_enter(bp->b_objlock);
1498 while (!ISSET(bp->b_oflags, BO_DONE | BO_DELWRI))
1499 cv_wait(&bp->b_done, bp->b_objlock);
1500 mutex_exit(bp->b_objlock);
1501
1502 SDT_PROBE1(io, kernel, , wait__done, bp);
1503
1504 return bp->b_error;
1505 }
1506
1507 /*
1508 * Mark I/O complete on a buffer.
1509 *
1510 * If a callback has been requested, e.g. the pageout
1511 * daemon, do so. Otherwise, awaken waiting processes.
1512 *
1513 * [ Leffler, et al., says on p.247:
1514 * "This routine wakes up the blocked process, frees the buffer
1515 * for an asynchronous write, or, for a request by the pagedaemon
1516 * process, invokes a procedure specified in the buffer structure" ]
1517 *
1518 * In real life, the pagedaemon (or other system processes) wants
1519 * to do async stuff too, and doesn't want the buffer brelse()'d.
1520 * (for swap pager, that puts swap buffers on the free lists (!!!),
1521 * for the vn device, that puts allocated buffers on the free lists!)
1522 */
1523 void
1524 biodone(buf_t *bp)
1525 {
1526 int s;
1527
1528 KASSERT(!ISSET(bp->b_oflags, BO_DONE));
1529
1530 if (cpu_intr_p()) {
1531 /* From interrupt mode: defer to a soft interrupt. */
1532 s = splvm();
1533 TAILQ_INSERT_TAIL(&curcpu()->ci_data.cpu_biodone, bp, b_actq);
1534 softint_schedule(biodone_sih);
1535 splx(s);
1536 } else {
1537 /* Process now - the buffer may be freed soon. */
1538 biodone2(bp);
1539 }
1540 }
1541
1542 SDT_PROBE_DEFINE1(io, kernel, , done, "struct buf *"/*bp*/);
1543
1544 static void
1545 biodone2(buf_t *bp)
1546 {
1547 void (*callout)(buf_t *);
1548
1549 SDT_PROBE1(io, kernel, ,done, bp);
1550
1551 mutex_enter(bp->b_objlock);
1552 /* Note that the transfer is done. */
1553 if (ISSET(bp->b_oflags, BO_DONE))
1554 panic("biodone2 already");
1555 CLR(bp->b_flags, B_COWDONE);
1556 SET(bp->b_oflags, BO_DONE);
1557 BIO_SETPRIO(bp, BPRIO_DEFAULT);
1558
1559 /* Wake up waiting writers. */
1560 if (!ISSET(bp->b_flags, B_READ))
1561 vwakeup(bp);
1562
1563 if ((callout = bp->b_iodone) != NULL) {
1564 /* Note callout done, then call out. */
1565 KASSERT(!cv_has_waiters(&bp->b_done));
1566 KERNEL_LOCK(1, NULL); /* XXXSMP */
1567 bp->b_iodone = NULL;
1568 mutex_exit(bp->b_objlock);
1569 (*callout)(bp);
1570 KERNEL_UNLOCK_ONE(NULL); /* XXXSMP */
1571 } else if (ISSET(bp->b_flags, B_ASYNC)) {
1572 /* If async, release. */
1573 KASSERT(!cv_has_waiters(&bp->b_done));
1574 mutex_exit(bp->b_objlock);
1575 brelse(bp, 0);
1576 } else {
1577 /* Otherwise just wake up waiters in biowait(). */
1578 cv_broadcast(&bp->b_done);
1579 mutex_exit(bp->b_objlock);
1580 }
1581 }
1582
1583 static void
1584 biointr(void *cookie)
1585 {
1586 struct cpu_info *ci;
1587 buf_t *bp;
1588 int s;
1589
1590 ci = curcpu();
1591
1592 while (!TAILQ_EMPTY(&ci->ci_data.cpu_biodone)) {
1593 KASSERT(curcpu() == ci);
1594
1595 s = splvm();
1596 bp = TAILQ_FIRST(&ci->ci_data.cpu_biodone);
1597 TAILQ_REMOVE(&ci->ci_data.cpu_biodone, bp, b_actq);
1598 splx(s);
1599
1600 biodone2(bp);
1601 }
1602 }
1603
1604 /*
1605 * Wait for all buffers to complete I/O
1606 * Return the number of "stuck" buffers.
1607 */
1608 int
1609 buf_syncwait(void)
1610 {
1611 buf_t *bp;
1612 int iter, nbusy, nbusy_prev = 0, ihash;
1613
1614 for (iter = 0; iter < 20;) {
1615 mutex_enter(&bufcache_lock);
1616 nbusy = 0;
1617 for (ihash = 0; ihash < bufhash+1; ihash++) {
1618 LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
1619 if ((bp->b_cflags & (BC_BUSY|BC_INVAL)) == BC_BUSY)
1620 nbusy += ((bp->b_flags & B_READ) == 0);
1621 }
1622 }
1623 mutex_exit(&bufcache_lock);
1624
1625 if (nbusy == 0)
1626 break;
1627 if (nbusy_prev == 0)
1628 nbusy_prev = nbusy;
1629 printf("%d ", nbusy);
1630 kpause("bflush", false, MAX(1, hz / 25 * iter), NULL);
1631 if (nbusy >= nbusy_prev) /* we didn't flush anything */
1632 iter++;
1633 else
1634 nbusy_prev = nbusy;
1635 }
1636
1637 if (nbusy) {
1638 #if defined(DEBUG) || defined(DEBUG_HALT_BUSY)
1639 printf("giving up\nPrinting vnodes for busy buffers\n");
1640 for (ihash = 0; ihash < bufhash+1; ihash++) {
1641 LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
1642 if ((bp->b_cflags & (BC_BUSY|BC_INVAL)) == BC_BUSY &&
1643 (bp->b_flags & B_READ) == 0)
1644 vprint(NULL, bp->b_vp);
1645 }
1646 }
1647 #endif
1648 }
1649
1650 return nbusy;
1651 }
1652
1653 static void
1654 sysctl_fillbuf(buf_t *i, struct buf_sysctl *o)
1655 {
1656
1657 o->b_flags = i->b_flags | i->b_cflags | i->b_oflags;
1658 o->b_error = i->b_error;
1659 o->b_prio = i->b_prio;
1660 o->b_dev = i->b_dev;
1661 o->b_bufsize = i->b_bufsize;
1662 o->b_bcount = i->b_bcount;
1663 o->b_resid = i->b_resid;
1664 o->b_addr = PTRTOUINT64(i->b_data);
1665 o->b_blkno = i->b_blkno;
1666 o->b_rawblkno = i->b_rawblkno;
1667 o->b_iodone = PTRTOUINT64(i->b_iodone);
1668 o->b_proc = PTRTOUINT64(i->b_proc);
1669 o->b_vp = PTRTOUINT64(i->b_vp);
1670 o->b_saveaddr = PTRTOUINT64(i->b_saveaddr);
1671 o->b_lblkno = i->b_lblkno;
1672 }
1673
1674 #define KERN_BUFSLOP 20
1675 static int
1676 sysctl_dobuf(SYSCTLFN_ARGS)
1677 {
1678 buf_t *bp;
1679 struct buf_sysctl bs;
1680 struct bqueue *bq;
1681 char *dp;
1682 u_int i, op, arg;
1683 size_t len, needed, elem_size, out_size;
1684 int error, elem_count, retries;
1685
1686 if (namelen == 1 && name[0] == CTL_QUERY)
1687 return (sysctl_query(SYSCTLFN_CALL(rnode)));
1688
1689 if (namelen != 4)
1690 return (EINVAL);
1691
1692 retries = 100;
1693 retry:
1694 dp = oldp;
1695 len = (oldp != NULL) ? *oldlenp : 0;
1696 op = name[0];
1697 arg = name[1];
1698 elem_size = name[2];
1699 elem_count = name[3];
1700 out_size = MIN(sizeof(bs), elem_size);
1701
1702 /*
1703 * at the moment, these are just "placeholders" to make the
1704 * API for retrieving kern.buf data more extensible in the
1705 * future.
1706 *
1707 * XXX kern.buf currently has "netbsd32" issues. hopefully
1708 * these will be resolved at a later point.
1709 */
1710 if (op != KERN_BUF_ALL || arg != KERN_BUF_ALL ||
1711 elem_size < 1 || elem_count < 0)
1712 return (EINVAL);
1713
1714 error = 0;
1715 needed = 0;
1716 sysctl_unlock();
1717 mutex_enter(&bufcache_lock);
1718 for (i = 0; i < BQUEUES; i++) {
1719 bq = &bufqueues[i];
1720 TAILQ_FOREACH(bp, &bq->bq_queue, b_freelist) {
1721 bq->bq_marker = bp;
1722 if (len >= elem_size && elem_count > 0) {
1723 sysctl_fillbuf(bp, &bs);
1724 mutex_exit(&bufcache_lock);
1725 error = copyout(&bs, dp, out_size);
1726 mutex_enter(&bufcache_lock);
1727 if (error)
1728 break;
1729 if (bq->bq_marker != bp) {
1730 /*
1731 * This sysctl node is only for
1732 * statistics. Retry; if the
1733 * queue keeps changing, then
1734 * bail out.
1735 */
1736 if (retries-- == 0) {
1737 error = EAGAIN;
1738 break;
1739 }
1740 mutex_exit(&bufcache_lock);
1741 sysctl_relock();
1742 goto retry;
1743 }
1744 dp += elem_size;
1745 len -= elem_size;
1746 }
1747 needed += elem_size;
1748 if (elem_count > 0 && elem_count != INT_MAX)
1749 elem_count--;
1750 }
1751 if (error != 0)
1752 break;
1753 }
1754 mutex_exit(&bufcache_lock);
1755 sysctl_relock();
1756
1757 *oldlenp = needed;
1758 if (oldp == NULL)
1759 *oldlenp += KERN_BUFSLOP * sizeof(buf_t);
1760
1761 return (error);
1762 }
1763
1764 static int
1765 sysctl_bufvm_update(SYSCTLFN_ARGS)
1766 {
1767 int error, rv;
1768 struct sysctlnode node;
1769 unsigned int temp_bufcache;
1770 unsigned long temp_water;
1771
1772 /* Take a copy of the supplied node and its data */
1773 node = *rnode;
1774 if (node.sysctl_data == &bufcache) {
1775 node.sysctl_data = &temp_bufcache;
1776 temp_bufcache = *(unsigned int *)rnode->sysctl_data;
1777 } else {
1778 node.sysctl_data = &temp_water;
1779 temp_water = *(unsigned long *)rnode->sysctl_data;
1780 }
1781
1782 /* Update the copy */
1783 error = sysctl_lookup(SYSCTLFN_CALL(&node));
1784 if (error || newp == NULL)
1785 return (error);
1786
1787 if (rnode->sysctl_data == &bufcache) {
1788 if (temp_bufcache > 100)
1789 return (EINVAL);
1790 bufcache = temp_bufcache;
1791 buf_setwm();
1792 } else if (rnode->sysctl_data == &bufmem_lowater) {
1793 if (bufmem_hiwater - temp_water < 16)
1794 return (EINVAL);
1795 bufmem_lowater = temp_water;
1796 } else if (rnode->sysctl_data == &bufmem_hiwater) {
1797 if (temp_water - bufmem_lowater < 16)
1798 return (EINVAL);
1799 bufmem_hiwater = temp_water;
1800 } else
1801 return (EINVAL);
1802
1803 /* Drain until below new high water mark */
1804 sysctl_unlock();
1805 mutex_enter(&bufcache_lock);
1806 while (bufmem > bufmem_hiwater) {
1807 rv = buf_drain((bufmem - bufmem_hiwater) / (2 * 1024));
1808 if (rv <= 0)
1809 break;
1810 }
1811 mutex_exit(&bufcache_lock);
1812 sysctl_relock();
1813
1814 return 0;
1815 }
1816
1817 static struct sysctllog *vfsbio_sysctllog;
1818
1819 static void
1820 sysctl_kern_buf_setup(void)
1821 {
1822
1823 sysctl_createv(&vfsbio_sysctllog, 0, NULL, NULL,
1824 CTLFLAG_PERMANENT,
1825 CTLTYPE_NODE, "buf",
1826 SYSCTL_DESCR("Kernel buffer cache information"),
1827 sysctl_dobuf, 0, NULL, 0,
1828 CTL_KERN, KERN_BUF, CTL_EOL);
1829 }
1830
1831 static void
1832 sysctl_vm_buf_setup(void)
1833 {
1834
1835 sysctl_createv(&vfsbio_sysctllog, 0, NULL, NULL,
1836 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1837 CTLTYPE_INT, "bufcache",
1838 SYSCTL_DESCR("Percentage of physical memory to use for "
1839 "buffer cache"),
1840 sysctl_bufvm_update, 0, &bufcache, 0,
1841 CTL_VM, CTL_CREATE, CTL_EOL);
1842 sysctl_createv(&vfsbio_sysctllog, 0, NULL, NULL,
1843 CTLFLAG_PERMANENT|CTLFLAG_READONLY,
1844 CTLTYPE_LONG, "bufmem",
1845 SYSCTL_DESCR("Amount of kernel memory used by buffer "
1846 "cache"),
1847 NULL, 0, &bufmem, 0,
1848 CTL_VM, CTL_CREATE, CTL_EOL);
1849 sysctl_createv(&vfsbio_sysctllog, 0, NULL, NULL,
1850 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1851 CTLTYPE_LONG, "bufmem_lowater",
1852 SYSCTL_DESCR("Minimum amount of kernel memory to "
1853 "reserve for buffer cache"),
1854 sysctl_bufvm_update, 0, &bufmem_lowater, 0,
1855 CTL_VM, CTL_CREATE, CTL_EOL);
1856 sysctl_createv(&vfsbio_sysctllog, 0, NULL, NULL,
1857 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1858 CTLTYPE_LONG, "bufmem_hiwater",
1859 SYSCTL_DESCR("Maximum amount of kernel memory to use "
1860 "for buffer cache"),
1861 sysctl_bufvm_update, 0, &bufmem_hiwater, 0,
1862 CTL_VM, CTL_CREATE, CTL_EOL);
1863 }
1864
1865 #ifdef DEBUG
1866 /*
1867 * Print out statistics on the current allocation of the buffer pool.
1868 * Can be enabled to print out on every ``sync'' by setting "syncprt"
1869 * in vfs_syscalls.c using sysctl.
1870 */
1871 void
1872 vfs_bufstats(void)
1873 {
1874 int i, j, count;
1875 buf_t *bp;
1876 struct bqueue *dp;
1877 int counts[(MAXBSIZE / PAGE_SIZE) + 1];
1878 static const char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE" };
1879
1880 for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
1881 count = 0;
1882 for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
1883 counts[j] = 0;
1884 TAILQ_FOREACH(bp, &dp->bq_queue, b_freelist) {
1885 counts[bp->b_bufsize/PAGE_SIZE]++;
1886 count++;
1887 }
1888 printf("%s: total-%d", bname[i], count);
1889 for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
1890 if (counts[j] != 0)
1891 printf(", %d-%d", j * PAGE_SIZE, counts[j]);
1892 printf("\n");
1893 }
1894 }
1895 #endif /* DEBUG */
1896
1897 /* ------------------------------ */
1898
1899 buf_t *
1900 getiobuf(struct vnode *vp, bool waitok)
1901 {
1902 buf_t *bp;
1903
1904 bp = pool_cache_get(bufio_cache, (waitok ? PR_WAITOK : PR_NOWAIT));
1905 if (bp == NULL)
1906 return bp;
1907
1908 buf_init(bp);
1909
1910 if ((bp->b_vp = vp) == NULL)
1911 bp->b_objlock = &buffer_lock;
1912 else
1913 bp->b_objlock = vp->v_interlock;
1914
1915 return bp;
1916 }
1917
1918 void
1919 putiobuf(buf_t *bp)
1920 {
1921
1922 buf_destroy(bp);
1923 pool_cache_put(bufio_cache, bp);
1924 }
1925
1926 /*
1927 * nestiobuf_iodone: b_iodone callback for nested buffers.
1928 */
1929
1930 void
1931 nestiobuf_iodone(buf_t *bp)
1932 {
1933 buf_t *mbp = bp->b_private;
1934 int error;
1935 int donebytes;
1936
1937 KASSERT(bp->b_bcount <= bp->b_bufsize);
1938 KASSERT(mbp != bp);
1939
1940 error = bp->b_error;
1941 if (bp->b_error == 0 &&
1942 (bp->b_bcount < bp->b_bufsize || bp->b_resid > 0)) {
1943 /*
1944 * Not all of it got transferred; raise an error. We have no way to
1945 * propagate these conditions to mbp.
1946 */
1947 error = EIO;
1948 }
1949
1950 donebytes = bp->b_bufsize;
1951
1952 putiobuf(bp);
1953 nestiobuf_done(mbp, donebytes, error);
1954 }
1955
1956 /*
1957 * nestiobuf_setup: setup a "nested" buffer.
1958 *
1959 * => 'mbp' is a "master" buffer which is being divided into sub pieces.
1960 * => 'bp' should be a buffer allocated by getiobuf.
1961 * => 'offset' is a byte offset in the master buffer.
1962 * => 'size' is a size in bytes of this nested buffer.
1963 */
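/*
 * A minimal usage sketch (illustrative only, not part of this file): a
 * caller splitting a master buffer into two halves would first arrange
 * mbp->b_resid = mbp->b_bcount, then for each half do roughly
 *
 *	buf_t *nbp = getiobuf(vp, true);
 *	nestiobuf_setup(mbp, nbp, offset, mbp->b_bcount / 2);
 *	VOP_STRATEGY(vp, nbp);
 *
 * Each half completes through nestiobuf_iodone(), which calls
 * nestiobuf_done() to account its bytes against mbp and finally
 * biodone(mbp) once everything is in.
 */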
1964
1965 void
1966 nestiobuf_setup(buf_t *mbp, buf_t *bp, int offset, size_t size)
1967 {
1968 const int b_read = mbp->b_flags & B_READ;
1969 struct vnode *vp = mbp->b_vp;
1970
1971 KASSERT(mbp->b_bcount >= offset + size);
1972 bp->b_vp = vp;
1973 bp->b_dev = mbp->b_dev;
1974 bp->b_objlock = mbp->b_objlock;
1975 bp->b_cflags = BC_BUSY;
1976 bp->b_flags = B_ASYNC | b_read;
1977 bp->b_iodone = nestiobuf_iodone;
1978 bp->b_data = (char *)mbp->b_data + offset;
1979 bp->b_resid = bp->b_bcount = size;
1980 bp->b_bufsize = bp->b_bcount;
1981 bp->b_private = mbp;
1982 BIO_COPYPRIO(bp, mbp);
1983 if (!b_read && vp != NULL) {
1984 mutex_enter(vp->v_interlock);
1985 vp->v_numoutput++;
1986 mutex_exit(vp->v_interlock);
1987 }
1988 }
1989
1990 /*
1991 * nestiobuf_done: propagate completion to the master buffer.
1992 *
1993 * => 'donebytes' specifies how many bytes in the 'mbp' is completed.
1994 * => 'error' is an errno(2) that 'donebytes' has been completed with.
1995 */
1996
1997 void
1998 nestiobuf_done(buf_t *mbp, int donebytes, int error)
1999 {
2000
2001 if (donebytes == 0) {
2002 return;
2003 }
2004 mutex_enter(mbp->b_objlock);
2005 KASSERT(mbp->b_resid >= donebytes);
2006 mbp->b_resid -= donebytes;
2007 if (error)
2008 mbp->b_error = error;
2009 if (mbp->b_resid == 0) {
2010 if (mbp->b_error)
2011 mbp->b_resid = mbp->b_bcount;
2012 mutex_exit(mbp->b_objlock);
2013 biodone(mbp);
2014 } else
2015 mutex_exit(mbp->b_objlock);
2016 }
2017
2018 void
2019 buf_init(buf_t *bp)
2020 {
2021
2022 cv_init(&bp->b_busy, "biolock");
2023 cv_init(&bp->b_done, "biowait");
2024 bp->b_dev = NODEV;
2025 bp->b_error = 0;
2026 bp->b_flags = 0;
2027 bp->b_cflags = 0;
2028 bp->b_oflags = 0;
2029 bp->b_objlock = &buffer_lock;
2030 bp->b_iodone = NULL;
2031 bp->b_refcnt = 1;
2032 bp->b_dev = NODEV;
2033 bp->b_vnbufs.le_next = NOLIST;
2034 BIO_SETPRIO(bp, BPRIO_DEFAULT);
2035 }
2036
2037 void
2038 buf_destroy(buf_t *bp)
2039 {
2040
2041 cv_destroy(&bp->b_done);
2042 cv_destroy(&bp->b_busy);
2043 }
2044
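/*
 * bbusy: acquire the BC_BUSY lock on a buffer.
 *
 * => Called with bufcache_lock held; both it and the optional interlock
 *    may be dropped and re-taken while waiting for the current owner.
 * => Returns 0 once the buffer is marked busy, EPASSTHROUGH if the caller
 *    should re-lookup the buffer after a wait, EDEADLK for the pagedaemon,
 *    or an error from the (possibly interruptible) timed wait.
 */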
2045 int
2046 bbusy(buf_t *bp, bool intr, int timo, kmutex_t *interlock)
2047 {
2048 int error;
2049
2050 KASSERT(mutex_owned(&bufcache_lock));
2051
2052 if ((bp->b_cflags & BC_BUSY) != 0) {
2053 if (curlwp == uvm.pagedaemon_lwp)
2054 return EDEADLK;
2055 bp->b_cflags |= BC_WANTED;
2056 bref(bp);
2057 if (interlock != NULL)
2058 mutex_exit(interlock);
2059 if (intr) {
2060 error = cv_timedwait_sig(&bp->b_busy, &bufcache_lock,
2061 timo);
2062 } else {
2063 error = cv_timedwait(&bp->b_busy, &bufcache_lock,
2064 timo);
2065 }
2066 brele(bp);
2067 if (interlock != NULL)
2068 mutex_enter(interlock);
2069 if (error != 0)
2070 return error;
2071 return EPASSTHROUGH;
2072 }
2073 bp->b_cflags |= BC_BUSY;
2074
2075 return 0;
2076 }
2077