xref: /freebsd/sys/sys/buf.h (revision aa0a1e58)
1 /*-
2  * Copyright (c) 1982, 1986, 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 4. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	@(#)buf.h	8.9 (Berkeley) 3/30/95
35  * $FreeBSD$
36  */
37 
38 #ifndef _SYS_BUF_H_
39 #define	_SYS_BUF_H_
40 
41 #include <sys/bufobj.h>
42 #include <sys/queue.h>
43 #include <sys/lock.h>
44 #include <sys/lockmgr.h>
45 
46 struct bio;
47 struct buf;
48 struct bufobj;
49 struct mount;
50 struct vnode;
51 struct uio;
52 
53 /*
54  * To avoid including <ufs/ffs/softdep.h>
55  */
56 LIST_HEAD(workhead, worklist);
57 /*
58  * These are currently used only by the soft dependency code, hence
59  * are stored once in a global variable. If other subsystems wanted
60  * to use these hooks, a pointer to a set of bio_ops could be added
61  * to each buffer.
62  */
extern struct bio_ops {
	void	(*io_start)(struct buf *);	/* Called as buffer I/O begins; see buf_start(). */
	void	(*io_complete)(struct buf *);	/* Called at buffer I/O completion; see buf_complete(). */
	void	(*io_deallocate)(struct buf *);	/* Called when a buffer is deallocated; see buf_deallocate(). */
	int	(*io_countdeps)(struct buf *, int);	/* Count buffer dependencies; see buf_countdeps(). */
} bioops;
69 
70 struct vm_object;
71 
72 typedef unsigned char b_xflags_t;
73 
74 /*
75  * The buffer header describes an I/O operation in the kernel.
76  *
77  * NOTES:
78  *	b_bufsize, b_bcount.  b_bufsize is the allocation size of the
79  *	buffer, either DEV_BSIZE or PAGE_SIZE aligned.  b_bcount is the
80  *	originally requested buffer size and can serve as a bounds check
81  *	against EOF.  For most, but not all uses, b_bcount == b_bufsize.
82  *
83  *	b_dirtyoff, b_dirtyend.  Buffers support piecemeal, unaligned
84  *	ranges of dirty data that need to be written to backing store.
85  *	The range is typically clipped at b_bcount ( not b_bufsize ).
86  *
87  *	b_resid.  Number of bytes remaining in I/O.  After an I/O operation
88  *	completes, b_resid is usually 0 indicating 100% success.
89  *
90  *	All fields are protected by the buffer lock except those marked:
91  *		V - Protected by owning bufobj lock
92  *		Q - Protected by the buf queue lock
 *		D - Protected by a dependency-implementation-specific lock
94  */
struct buf {
	struct bufobj	*b_bufobj;	/* Buffer object this buffer belongs to. */
	long		b_bcount;	/* Originally requested byte count. */
	void		*b_caller1;	/* Private scratch pointer for the caller. */
	caddr_t		b_data;		/* Address of the buffer's data. */
	int		b_error;	/* Error value (presumably an errno — confirm). */
	uint8_t		b_iocmd;	/* I/O command (BIO_*; see <sys/bio.h>). */
	uint8_t		b_ioflags;	/* I/O flags (e.g. BIO_ERROR). */
	off_t		b_iooffset;	/* Byte offset of the I/O. */
	long		b_resid;	/* Bytes remaining in I/O; usually 0 on success. */
	void	(*b_iodone)(struct buf *);	/* I/O completion callback. */
	daddr_t b_blkno;		/* Underlying physical block number. */
	off_t	b_offset;		/* Offset into file. */
	TAILQ_ENTRY(buf) b_bobufs;	/* (V) Buffer's associated vnode. */
	struct buf	*b_left;	/* (V) splay tree link */
	struct buf	*b_right;	/* (V) splay tree link */
	uint32_t	b_vflags;	/* (V) BV_* flags */
	TAILQ_ENTRY(buf) b_freelist;	/* (Q) Free list position inactive. */
	unsigned short b_qindex;	/* (Q) buffer queue index */
	uint32_t	b_flags;	/* B_* flags. */
	b_xflags_t b_xflags;		/* extra flags (BX_*) */
	struct lock b_lock;		/* Buffer lock */
	long	b_bufsize;		/* Allocated buffer size. */
	long	b_runningbufspace;	/* when I/O is running, pipelining */
	caddr_t	b_kvabase;		/* base kva for buffer */
	int	b_kvasize;		/* size of kva for buffer */
	daddr_t b_lblkno;		/* Logical block number. */
	struct	vnode *b_vp;		/* Device vnode. */
	int	b_dirtyoff;		/* Offset in buffer of dirty region. */
	int	b_dirtyend;		/* Offset of end of dirty region. */
	struct	ucred *b_rcred;		/* Read credentials reference. */
	struct	ucred *b_wcred;		/* Write credentials reference. */
	void	*b_saveaddr;		/* Original b_addr for physio. */
	union	pager_info {
		int	pg_reqpage;	/* Index of requested page (pager use) — confirm. */
	} b_pager;
	union	cluster_info {		/* Linkage for clustered I/O. */
		TAILQ_HEAD(cluster_list_head, buf) cluster_head;
		TAILQ_ENTRY(buf) cluster_entry;
	} b_cluster;
	struct	vm_page *b_pages[btoc(MAXPHYS)];	/* Pages backing b_data. */
	int		b_npages;	/* Number of valid entries in b_pages. */
	struct	workhead b_dep;		/* (D) List of filesystem dependencies. */
	void	*b_fsprivate1;		/* Filesystem-private storage. */
	void	*b_fsprivate2;		/* Filesystem-private storage. */
	void	*b_fsprivate3;		/* Filesystem-private storage. */
	int	b_pin_count;		/* Pin count; see bpin()/bunpin(). */
};
143 
144 #define b_object	b_bufobj->bo_object
145 
146 /*
147  * These flags are kept in b_flags.
148  *
149  * Notes:
150  *
151  *	B_ASYNC		VOP calls on bp's are usually async whether or not
152  *			B_ASYNC is set, but some subsystems, such as NFS, like
153  *			to know what is best for the caller so they can
154  *			optimize the I/O.
155  *
156  *	B_PAGING	Indicates that bp is being used by the paging system or
157  *			some paging system and that the bp is not linked into
158  *			the b_vp's clean/dirty linked lists or ref counts.
159  *			Buffer vp reassignments are illegal in this case.
160  *
161  *	B_CACHE		This may only be set if the buffer is entirely valid.
162  *			The situation where B_DELWRI is set and B_CACHE is
163  *			clear MUST be committed to disk by getblk() so
164  *			B_DELWRI can also be cleared.  See the comments for
165  *			getblk() in kern/vfs_bio.c.  If B_CACHE is clear,
166  *			the caller is expected to clear BIO_ERROR and B_INVAL,
167  *			set BIO_READ, and initiate an I/O.
168  *
169  *			The 'entire buffer' is defined to be the range from
170  *			0 through b_bcount.
171  *
172  *	B_MALLOC	Request that the buffer be allocated from the malloc
173  *			pool, DEV_BSIZE aligned instead of PAGE_SIZE aligned.
174  *
175  *	B_CLUSTEROK	This flag is typically set for B_DELWRI buffers
176  *			by filesystems that allow clustering when the buffer
177  *			is fully dirty and indicates that it may be clustered
178  *			with other adjacent dirty buffers.  Note the clustering
179  *			may not be used with the stage 1 data write under NFS
180  *			but may be used for the commit rpc portion.
181  *
182  *	B_VMIO		Indicates that the buffer is tied into an VM object.
183  *			The buffer's data is always PAGE_SIZE aligned even
184  *			if b_bufsize and b_bcount are not.  ( b_bufsize is
185  *			always at least DEV_BSIZE aligned, though ).
186  *
187  *	B_DIRECT	Hint that we should attempt to completely free
188  *			the pages underlying the buffer.  B_DIRECT is
189  *			sticky until the buffer is released and typically
190  *			only has an effect when B_RELBUF is also set.
191  *
192  */
193 
#define	B_AGE		0x00000001	/* Move to age queue when I/O done. */
#define	B_NEEDCOMMIT	0x00000002	/* Append-write in progress. */
#define	B_ASYNC		0x00000004	/* Start I/O, do not wait. */
#define	B_DIRECT	0x00000008	/* direct I/O flag (pls free vmio) */
#define	B_DEFERRED	0x00000010	/* Skipped over for cleaning */
#define	B_CACHE		0x00000020	/* Bread found us in the cache. */
#define	B_VALIDSUSPWRT	0x00000040	/* Valid write during suspension. */
#define	B_DELWRI	0x00000080	/* Delay I/O until buffer reused. */
#define	B_PERSISTENT	0x00000100	/* Perm. ref'ed while EXT2FS mounted. */
#define	B_DONE		0x00000200	/* I/O completed. */
#define	B_EINTR		0x00000400	/* I/O was interrupted */
#define	B_00000800	0x00000800	/* Available flag. */
#define	B_00001000	0x00001000	/* Available flag. */
#define	B_INVAL		0x00002000	/* Does not contain valid info. */
#define	B_00004000	0x00004000	/* Available flag. */
#define	B_NOCACHE	0x00008000	/* Do not cache block after use. */
#define	B_MALLOC	0x00010000	/* malloced b_data */
#define	B_CLUSTEROK	0x00020000	/* May be clustered with adjacent bufs. */
#define	B_000400000	0x00040000	/* Available flag. XXX name has one zero too many. */
#define	B_000800000	0x00080000	/* Available flag. XXX name has one zero too many. */
#define	B_00100000	0x00100000	/* Available flag. */
#define	B_DIRTY		0x00200000	/* Needs writing later (in EXT2FS). */
#define	B_RELBUF	0x00400000	/* Release VMIO buffer. */
#define	B_00800000	0x00800000	/* Available flag. */
#define	B_NOCOPY	0x01000000	/* Don't copy-on-write this buf. */
#define	B_NEEDSGIANT	0x02000000	/* Buffer's vnode needs giant. */
#define	B_PAGING	0x04000000	/* volatile paging I/O -- bypass VMIO */
#define B_MANAGED	0x08000000	/* Managed by FS. */
#define B_RAM		0x10000000	/* Read ahead mark (flag) */
#define B_VMIO		0x20000000	/* VMIO flag */
#define B_CLUSTER	0x40000000	/* pagein op, so swap() can count it */
#define B_REMFREE	0x80000000	/* Delayed bremfree */

/* Bit-name string for %b-style flag printing; must track the defines above. */
#define PRINT_BUF_FLAGS "\20\40remfree\37cluster\36vmio\35ram\34b27" \
	"\33paging\32b25\31b24\30b23\27relbuf\26dirty\25b20" \
	"\24b19\23b18\22clusterok\21malloc\20nocache\17b14\16inval" \
	"\15b12\14b11\13eintr\12done\11persist\10delwri\7validsuspwrt" \
	"\6cache\5deferred\4direct\3async\2needcommit\1age"
232 
233 /*
234  * These flags are kept in b_xflags.
235  */
236 #define	BX_VNDIRTY	0x00000001	/* On vnode dirty list */
237 #define	BX_VNCLEAN	0x00000002	/* On vnode clean list */
238 #define	BX_BKGRDWRITE	0x00000010	/* Do writes in background */
239 #define BX_BKGRDMARKER	0x00000020	/* Mark buffer for splay tree */
240 #define	BX_ALTDATA	0x00000040	/* Holds extended data */
241 
242 #define	NOOFFSET	(-1LL)		/* No buffer offset calculated yet */
243 
244 /*
245  * These flags are kept in b_vflags.
246  */
247 #define	BV_SCANNED	0x00000001	/* VOP_FSYNC funcs mark written bufs */
248 #define	BV_BKGRDINPROG	0x00000002	/* Background write in progress */
249 #define	BV_BKGRDWAIT	0x00000004	/* Background write waiting */
250 #define	BV_INFREECNT	0x80000000	/* buf is counted in numfreebufs */
251 
252 #ifdef _KERNEL
253 /*
254  * Buffer locking
255  */
256 extern const char *buf_wmesg;		/* Default buffer lock message */
257 #define BUF_WMESG "bufwait"
258 #include <sys/proc.h>			/* XXX for curthread */
259 #include <sys/mutex.h>
260 
261 /*
262  * Initialize a lock.
263  */
264 #define BUF_LOCKINIT(bp)						\
265 	lockinit(&(bp)->b_lock, PRIBIO + 4, buf_wmesg, 0, 0)
/*
 * Get a lock, sleeping non-interruptibly until it becomes available.
 */
270 #define	BUF_LOCK(bp, locktype, interlock)				\
271 	_lockmgr_args(&(bp)->b_lock, (locktype), (interlock),		\
272 	    LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT,		\
273 	    LOCK_FILE, LOCK_LINE)
274 
/*
 * Get a lock, sleeping with the specified interruptibility and timeout.
 */
278 #define	BUF_TIMELOCK(bp, locktype, interlock, wmesg, catch, timo)	\
279 	_lockmgr_args(&(bp)->b_lock, (locktype) | LK_TIMELOCK,		\
280 	    (interlock), (wmesg), (PRIBIO + 4) | (catch), (timo),	\
281 	    LOCK_FILE, LOCK_LINE)
282 
283 /*
284  * Release a lock. Only the acquiring process may free the lock unless
285  * it has been handed off to biodone.
286  */
287 #define	BUF_UNLOCK(bp) do {						\
288 	KASSERT(((bp)->b_flags & B_REMFREE) == 0,			\
289 	    ("BUF_UNLOCK %p while B_REMFREE is still set.", (bp)));	\
290 									\
291 	(void)_lockmgr_args(&(bp)->b_lock, LK_RELEASE, NULL,		\
292 	    LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT,		\
293 	    LOCK_FILE, LOCK_LINE);					\
294 } while (0)
295 
296 /*
297  * Check if a buffer lock is recursed.
298  */
299 #define	BUF_LOCKRECURSED(bp)						\
300 	lockmgr_recursed(&(bp)->b_lock)
301 
302 /*
303  * Check if a buffer lock is currently held.
304  */
305 #define	BUF_ISLOCKED(bp)						\
306 	lockstatus(&(bp)->b_lock)
307 /*
308  * Free a buffer lock.
309  */
310 #define BUF_LOCKFREE(bp) 						\
311 	lockdestroy(&(bp)->b_lock)
312 
313 /*
314  * Buffer lock assertions.
315  */
316 #if defined(INVARIANTS) && defined(INVARIANT_SUPPORT)
317 #define	BUF_ASSERT_LOCKED(bp)						\
318 	_lockmgr_assert(&(bp)->b_lock, KA_LOCKED, LOCK_FILE, LOCK_LINE)
319 #define	BUF_ASSERT_SLOCKED(bp)						\
320 	_lockmgr_assert(&(bp)->b_lock, KA_SLOCKED, LOCK_FILE, LOCK_LINE)
321 #define	BUF_ASSERT_XLOCKED(bp)						\
322 	_lockmgr_assert(&(bp)->b_lock, KA_XLOCKED, LOCK_FILE, LOCK_LINE)
323 #define	BUF_ASSERT_UNLOCKED(bp)						\
324 	_lockmgr_assert(&(bp)->b_lock, KA_UNLOCKED, LOCK_FILE, LOCK_LINE)
325 #define	BUF_ASSERT_HELD(bp)
326 #define	BUF_ASSERT_UNHELD(bp)
327 #else
328 #define	BUF_ASSERT_LOCKED(bp)
329 #define	BUF_ASSERT_SLOCKED(bp)
330 #define	BUF_ASSERT_XLOCKED(bp)
331 #define	BUF_ASSERT_UNLOCKED(bp)
332 #define	BUF_ASSERT_HELD(bp)
333 #define	BUF_ASSERT_UNHELD(bp)
334 #endif
335 
336 #ifdef _SYS_PROC_H_	/* Avoid #include <sys/proc.h> pollution */
337 /*
338  * When initiating asynchronous I/O, change ownership of the lock to the
 * kernel. Once done, the lock may legally be released by biodone. The
340  * original owning process can no longer acquire it recursively, but must
341  * wait until the I/O is completed and the lock has been freed by biodone.
342  */
343 #define	BUF_KERNPROC(bp)						\
344 	_lockmgr_disown(&(bp)->b_lock, LOCK_FILE, LOCK_LINE)
345 #endif
346 
347 /*
348  * Find out if the lock has waiters or not.
349  */
350 #define	BUF_LOCKWAITERS(bp)						\
351 	lockmgr_waiters(&(bp)->b_lock)
352 
353 #endif /* _KERNEL */
354 
/*
 * A queue of buffers.  The pblkno/insert/switch bookkeeping suggests
 * disksort-style sorted insertion; the exact semantics are not visible
 * in this file — confirm against the queue's users.
 */
struct buf_queue_head {
	TAILQ_HEAD(buf_queue, buf) queue;	/* The queued buffers. */
	daddr_t last_pblkno;		/* Physical block of last queued buf. */
	struct	buf *insert_point;	/* Queue insertion hint. */
	struct	buf *switch_point;	/* Queue wrap/switch hint. */
};
361 
/*
 * This structure describes a clustered I/O.  It is stored in the b_saveaddr
 * field of the buffer on which I/O is done.  At I/O completion, cluster
 * callback uses the structure to parcel I/O's to individual buffers, and
 * then frees this structure.
 */
struct cluster_save {
	long	bs_bcount;		/* Saved b_bcount. */
	long	bs_bufsize;		/* Saved b_bufsize. */
	void	*bs_saveaddr;		/* Saved b_addr. */
	int	bs_nchildren;		/* Number of associated buffers. */
	struct buf **bs_children;	/* List of associated buffers. */
};
375 
376 #ifdef _KERNEL
377 
378 static __inline int
379 bwrite(struct buf *bp)
380 {
381 
382 	KASSERT(bp->b_bufobj != NULL, ("bwrite: no bufobj bp=%p", bp));
383 	KASSERT(bp->b_bufobj->bo_ops != NULL, ("bwrite: no bo_ops bp=%p", bp));
384 	KASSERT(bp->b_bufobj->bo_ops->bop_write != NULL,
385 	    ("bwrite: no bop_write bp=%p", bp));
386 	return (BO_WRITE(bp->b_bufobj, bp));
387 }
388 
389 static __inline void
390 bstrategy(struct buf *bp)
391 {
392 
393 	KASSERT(bp->b_bufobj != NULL, ("bstrategy: no bufobj bp=%p", bp));
394 	KASSERT(bp->b_bufobj->bo_ops != NULL,
395 	    ("bstrategy: no bo_ops bp=%p", bp));
396 	KASSERT(bp->b_bufobj->bo_ops->bop_strategy != NULL,
397 	    ("bstrategy: no bop_strategy bp=%p", bp));
398 	BO_STRATEGY(bp->b_bufobj, bp);
399 }
400 
401 static __inline void
402 buf_start(struct buf *bp)
403 {
404 	if (bioops.io_start)
405 		(*bioops.io_start)(bp);
406 }
407 
408 static __inline void
409 buf_complete(struct buf *bp)
410 {
411 	if (bioops.io_complete)
412 		(*bioops.io_complete)(bp);
413 }
414 
415 static __inline void
416 buf_deallocate(struct buf *bp)
417 {
418 	if (bioops.io_deallocate)
419 		(*bioops.io_deallocate)(bp);
420 	BUF_LOCKFREE(bp);
421 }
422 
423 static __inline int
424 buf_countdeps(struct buf *bp, int i)
425 {
426 	if (bioops.io_countdeps)
427 		return ((*bioops.io_countdeps)(bp, i));
428 	else
429 		return (0);
430 }
431 
432 #endif /* _KERNEL */
433 
/*
 * Zero out the buffer's data area and clear the residual count.
 * Wrapped in do { } while (0) so the macro expands to exactly one
 * statement and is safe in unbraced if/else bodies; the original
 * bare-brace form left a stray ';' after the block.
 */
#define	clrbuf(bp) do {							\
	bzero((bp)->b_data, (u_int)(bp)->b_bcount);			\
	(bp)->b_resid = 0;						\
} while (0)
441 
442 /*
443  * Flags for getblk's last parameter.
444  */
445 #define	GB_LOCK_NOWAIT	0x0001		/* Fail if we block on a buf lock. */
446 #define	GB_NOCREAT	0x0002		/* Don't create a buf if not found. */
447 #define	GB_NOWAIT_BD	0x0004		/* Do not wait for bufdaemon */
448 
449 #ifdef _KERNEL
450 extern int	nbuf;			/* The number of buffer headers */
451 extern long	maxswzone;		/* Max KVA for swap structures */
452 extern long	maxbcache;		/* Max KVA for buffer cache */
453 extern long	runningbufspace;
454 extern long	hibufspace;
455 extern int	dirtybufthresh;
456 extern int	bdwriteskip;
457 extern int	dirtybufferflushes;
458 extern int	altbufferflushes;
459 extern int      buf_maxio;              /* nominal maximum I/O for buffer */
460 extern struct	buf *buf;		/* The buffer headers. */
461 extern char	*buffers;		/* The buffer contents. */
462 extern int	bufpages;		/* Number of memory pages in the buffer pool. */
463 extern struct	buf *swbuf;		/* Swap I/O buffer headers. */
464 extern int	nswbuf;			/* Number of swap I/O buffer headers. */
465 extern int	cluster_pbuf_freecnt;	/* Number of pbufs for clusters */
466 extern int	vnode_pbuf_freecnt;	/* Number of pbufs for vnode pager */
467 
468 void	runningbufwakeup(struct buf *);
469 void	waitrunningbufspace(void);
470 caddr_t	kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est);
471 void	bufinit(void);
472 void	bwillwrite(void);
473 int	buf_dirty_count_severe(void);
474 void	bremfree(struct buf *);
475 void	bremfreef(struct buf *);	/* XXX Force bremfree, only for nfs. */
476 int	bread(struct vnode *, daddr_t, int, struct ucred *, struct buf **);
477 void	breada(struct vnode *, daddr_t *, int *, int, struct ucred *);
478 int	breadn(struct vnode *, daddr_t, int, daddr_t *, int *, int,
479 	    struct ucred *, struct buf **);
480 void	bdwrite(struct buf *);
481 void	bawrite(struct buf *);
482 void	bdirty(struct buf *);
483 void	bundirty(struct buf *);
484 void	bufstrategy(struct bufobj *, struct buf *);
485 void	brelse(struct buf *);
486 void	bqrelse(struct buf *);
487 int	vfs_bio_awrite(struct buf *);
488 struct buf *     getpbuf(int *);
489 struct buf *incore(struct bufobj *, daddr_t);
490 struct buf *gbincore(struct bufobj *, daddr_t);
491 struct buf *getblk(struct vnode *, daddr_t, int, int, int, int);
492 struct buf *geteblk(int, int);
493 int	bufwait(struct buf *);
494 int	bufwrite(struct buf *);
495 void	bufdone(struct buf *);
496 void	bufdone_finish(struct buf *);
497 void	bd_speedup(void);
498 
499 int	cluster_read(struct vnode *, u_quad_t, daddr_t, long,
500 	    struct ucred *, long, int, struct buf **);
501 int	cluster_wbuild(struct vnode *, long, daddr_t, int);
502 void	cluster_write(struct vnode *, struct buf *, u_quad_t, int);
503 void	vfs_bio_set_valid(struct buf *, int base, int size);
504 void	vfs_bio_clrbuf(struct buf *);
505 void	vfs_busy_pages(struct buf *, int clear_modify);
506 void	vfs_unbusy_pages(struct buf *);
507 int	vmapbuf(struct buf *);
508 void	vunmapbuf(struct buf *);
509 void	relpbuf(struct buf *, int *);
510 void	brelvp(struct buf *);
511 void	bgetvp(struct vnode *, struct buf *);
512 void	pbgetbo(struct bufobj *bo, struct buf *bp);
513 void	pbgetvp(struct vnode *, struct buf *);
514 void	pbrelbo(struct buf *);
515 void	pbrelvp(struct buf *);
516 int	allocbuf(struct buf *bp, int size);
517 void	reassignbuf(struct buf *);
518 struct	buf *trypbuf(int *);
519 void	bwait(struct buf *, u_char, const char *);
520 void	bdone(struct buf *);
521 void	bpin(struct buf *);
522 void	bunpin(struct buf *);
523 void 	bunpin_wait(struct buf *);
524 
525 #endif /* _KERNEL */
526 
527 #endif /* !_SYS_BUF_H_ */
528