/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)buf.h	8.9 (Berkeley) 3/30/95
 * $FreeBSD: src/sys/sys/buf.h,v 1.88.2.10 2003/01/25 19:02:23 dillon Exp $
 */

#ifndef _SYS_BUF2_H_
#define	_SYS_BUF2_H_

#ifdef _KERNEL

#ifndef _SYS_BUF_H_
#include <sys/buf.h>
#endif
#ifndef _SYS_MOUNT_H_
#include <sys/mount.h>
#endif
#ifndef _SYS_VNODE_H_
#include <sys/vnode.h>
#endif
#ifndef _VM_VM_PAGE_H_
#include <vm/vm_page.h>
#endif

/*
 * Initialize a lock.
 */
#define BUF_LOCKINIT(bp) \
	lockinit(&(bp)->b_lock, buf_wmesg, 0, LK_NOCOLLSTATS)

/*
 * Get a lock, sleeping non-interruptibly until it becomes available.
 *
 * XXX lk_wmesg can race, but should not result in any operational issues.
 */
static __inline int
BUF_LOCK(struct buf *bp, int locktype)
{
	bp->b_lock.lk_wmesg = buf_wmesg;
	return (lockmgr(&(bp)->b_lock, locktype));
}
/*
 * Get a lock, sleeping with the specified interruptibility and timeout.
 *
 * XXX lk_timo can race against other entities calling BUF_TIMELOCK,
 * but will not interfere with entities calling BUF_LOCK since LK_TIMELOCK
 * will not be set in that case.
 *
 * XXX lk_wmesg can race, but should not result in any operational issues.
 */
static __inline int
BUF_TIMELOCK(struct buf *bp, int locktype, char *wmesg, int timo)
{
	bp->b_lock.lk_wmesg = wmesg;
	bp->b_lock.lk_timo = timo;
	return (lockmgr(&(bp)->b_lock, locktype | LK_TIMELOCK));
}
/*
 * Release a lock.  Only the acquiring process may free the lock unless
 * it has been handed off to biodone.
 */
static __inline void
BUF_UNLOCK(struct buf *bp)
{
	lockmgr(&(bp)->b_lock, LK_RELEASE);
}

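/*
 * Illustrative sketch (not part of the original header): a typical
 * exclusive acquisition and release of a buffer lock.  The buffer
 * pointer "bp" is assumed to be supplied and held by the caller.
 *
 *	if (BUF_LOCK(bp, LK_EXCLUSIVE) == 0) {
 *		... operate on the locked buffer ...
 *		BUF_UNLOCK(bp);
 *	}
 */
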
/*
 * When initiating asynchronous I/O, change ownership of the lock to the
 * kernel.  Once done, the lock may legally be released by biodone.  The
 * original owning process can no longer acquire it recursively, but must
 * wait until the I/O is completed and the lock has been freed by biodone.
 */
static __inline void
BUF_KERNPROC(struct buf *bp)
{
	lockmgr_kernproc(&(bp)->b_lock);
}
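
/*
 * Illustrative sketch (not part of the original header): handing a
 * locked buffer to the kernel before asynchronous submission.  The
 * vn_strategy() call on b_bio1 is shown only as a stand-in for the
 * real submission path; biodone() then releases the lock.
 *
 *	BUF_KERNPROC(bp);
 *	vn_strategy(vp, &bp->b_bio1);
 */
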
/*
 * Determine whether the lock has any references (i.e. is in use).
 *
 * This non-blocking check should only be used for assertions in cases
 * where the buffer is expected to be owned or otherwise data stable.
 */
static __inline int
BUF_LOCKINUSE(struct buf *bp)
{
	return (lockinuse(&(bp)->b_lock));
}

/*
 * Free a buffer lock.
 */
#define BUF_LOCKFREE(bp) 			\
	if (BUF_LOCKINUSE(bp))			\
		panic("free locked buf")

static __inline void
bioq_init(struct bio_queue_head *bioq)
{
	TAILQ_INIT(&bioq->queue);
	bioq->off_unused = 0;
	bioq->reorder = 0;
	bioq->transition = NULL;
	bioq->bio_unused = NULL;
}

static __inline void
bioq_insert_tail(struct bio_queue_head *bioq, struct bio *bio)
{
	bioq->transition = NULL;
	TAILQ_INSERT_TAIL(&bioq->queue, bio, bio_act);
}

static __inline void
bioq_remove(struct bio_queue_head *bioq, struct bio *bio)
{
	/*
	 * Adjust the read insertion point when removing a bio.  If the
	 * removed bio is the transition point, advance the transition to
	 * the next bio (NULL indicates all the reads have cleared).
	 */
	if (bio == bioq->transition)
		bioq->transition = TAILQ_NEXT(bio, bio_act);
	TAILQ_REMOVE(&bioq->queue, bio, bio_act);
}

static __inline struct bio *
bioq_first(struct bio_queue_head *bioq)
{
	return (TAILQ_FIRST(&bioq->queue));
}

static __inline struct bio *
bioq_takefirst(struct bio_queue_head *bioq)
{
	struct bio *bp;

	bp = TAILQ_FIRST(&bioq->queue);
	if (bp != NULL)
		bioq_remove(bioq, bp);
	return (bp);
}

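/*
 * Illustrative sketch (not part of the original header): initializing a
 * bio queue, queueing a request, and draining the queue.  "some_bio" and
 * the dispatch step are placeholders supplied by the caller.
 *
 *	struct bio_queue_head bioq;
 *	struct bio *bio;
 *
 *	bioq_init(&bioq);
 *	bioq_insert_tail(&bioq, some_bio);
 *	while ((bio = bioq_takefirst(&bioq)) != NULL)
 *		... dispatch or complete bio ...
 */
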
/*
 * Adjust a buffer cache buffer's activity count.  This works similarly
 * to vm_page->act_count.
 */
static __inline void
buf_act_advance(struct buf *bp)
{
	if (bp->b_act_count > ACT_MAX - ACT_ADVANCE)
		bp->b_act_count = ACT_MAX;
	else
		bp->b_act_count += ACT_ADVANCE;
}

static __inline void
buf_act_decline(struct buf *bp)
{
	if (bp->b_act_count < ACT_DECLINE)
		bp->b_act_count = 0;
	else
		bp->b_act_count -= ACT_DECLINE;
}

/*
 * biodeps inlines - used by softupdates and HAMMER.
 *
 * All bioops are MPSAFE
 */
static __inline void
buf_dep_init(struct buf *bp)
{
	bp->b_ops = NULL;
	LIST_INIT(&bp->b_dep);
}

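/*
 * Illustrative sketch (not part of the original header): a filesystem
 * supplying its own bio_ops table.  The myfs_* callback names are
 * hypothetical; only a few of the callbacks dispatched by the buf_*()
 * inlines below are shown.
 *
 *	static struct bio_ops myfs_bioops = {
 *		.io_start      = myfs_io_start,
 *		.io_complete   = myfs_io_complete,
 *		.io_deallocate = myfs_io_deallocate,
 *		.io_fsync      = myfs_io_fsync,
 *	};
 *
 * A buffer opts in by pointing b_ops at such a table (and a mount does
 * so via mnt_bioops for buf_fsync()).
 */
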
/*
 * Precondition: the buffer has some dependencies.
 *
 * MPSAFE
 */
static __inline void
buf_deallocate(struct buf *bp)
{
	struct bio_ops *ops = bp->b_ops;

	KKASSERT(! LIST_EMPTY(&bp->b_dep));
	if (ops)
		ops->io_deallocate(bp);
}

/*
 * This callback is made from flushbufqueues(), which uses BUF_LOCK().
 * Since the buffer isn't going through the normal buffer acquisition
 * mechanism and we are calling back into the filesystem, enforce the
 * vnode's KVABIO support here.
 */
static __inline int
buf_countdeps(struct buf *bp, int n)
{
	struct bio_ops *ops = bp->b_ops;
	int r;

	if (ops) {
		if (bp->b_vp == NULL || (bp->b_vp->v_flag & VKVABIO) == 0)
			bkvasync_all(bp);
		r = ops->io_countdeps(bp, n);
	} else {
		r = 0;
	}
	return(r);
}

/*
 * MPSAFE
 */
static __inline void
buf_start(struct buf *bp)
{
	struct bio_ops *ops = bp->b_ops;

	if (ops)
		ops->io_start(bp);
}

/*
 * MPSAFE
 */
static __inline void
buf_complete(struct buf *bp)
{
	struct bio_ops *ops = bp->b_ops;

	if (ops)
		ops->io_complete(bp);
}

/*
 * MPSAFE
 */
static __inline int
buf_fsync(struct vnode *vp)
{
	struct bio_ops *ops = vp->v_mount->mnt_bioops;
	int r;

	if (ops)
		r = ops->io_fsync(vp);
	else
		r = 0;
	return(r);
}

/*
 * MPSAFE
 */
static __inline void
buf_movedeps(struct buf *bp1, struct buf *bp2)
{
	struct bio_ops *ops = bp1->b_ops;

	if (ops)
		ops->io_movedeps(bp1, bp2);
}

/*
 * MPSAFE
 */
static __inline int
buf_checkread(struct buf *bp)
{
	struct bio_ops *ops = bp->b_ops;

	if (ops)
		return(ops->io_checkread(bp));
	return(0);
}

/*
 * This callback is made from flushbufqueues(), which uses BUF_LOCK().
 * Since the buffer isn't going through the normal buffer acquisition
 * mechanism and we are calling back into the filesystem, enforce the
 * vnode's KVABIO support here.
 */
static __inline int
buf_checkwrite(struct buf *bp)
{
	struct bio_ops *ops = bp->b_ops;

	if (ops) {
		if (bp->b_vp == NULL || (bp->b_vp->v_flag & VKVABIO) == 0)
			bkvasync_all(bp);
		return(ops->io_checkwrite(bp));
	}
	return(0);
}

/*
 * Chained biodone.  The bio callback was made and the callback function
 * wishes to chain the biodone.  If no BIOs are left we call bpdone()
 * with elseit=TRUE (asynchronous completion).
 *
 * MPSAFE
 */
static __inline void
biodone_chain(struct bio *bio)
{
	if (bio->bio_prev)
		biodone(bio->bio_prev);
	else
		bpdone(bio->bio_buf, 1);
}

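/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * layered driver's bio_done callback performing its own cleanup and then
 * passing completion back up the chain.
 *
 *	static void
 *	mylayer_iodone(struct bio *bio)
 *	{
 *		... per-layer cleanup for this bio ...
 *		biodone_chain(bio);
 *	}
 */
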
static __inline int
bread(struct vnode *vp, off_t loffset, int size, struct buf **bpp)
{
	*bpp = NULL;
	return(breadnx(vp, loffset, size, B_NOTMETA,
		       NULL, NULL, 0, bpp));
}

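/*
 * Illustrative sketch (not part of the original header): a synchronous
 * read of one logical block.  "vp", "loffset" and "blksize" come from
 * the caller; error-path disposal of the buffer follows the caller's
 * usual convention and is omitted here.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, loffset, blksize, &bp);
 *	if (error == 0) {
 *		... consume bp->b_data ...
 *		brelse(bp);
 *	}
 */
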
static __inline int
bread_kvabio(struct vnode *vp, off_t loffset, int size, struct buf **bpp)
{
	*bpp = NULL;
	return(breadnx(vp, loffset, size, B_NOTMETA | B_KVABIO,
		       NULL, NULL, 0, bpp));
}

static __inline int
breadn(struct vnode *vp, off_t loffset, int size, off_t *raoffset,
       int *rabsize, int cnt, struct buf **bpp)
{
	*bpp = NULL;
	return(breadnx(vp, loffset, size, B_NOTMETA, raoffset,
		       rabsize, cnt, bpp));
}

static __inline int
cluster_read(struct vnode *vp, off_t filesize, off_t loffset,
             int blksize, size_t minreq, size_t maxreq, struct buf **bpp)
{
	*bpp = NULL;
	return(cluster_readx(vp, filesize, loffset, blksize, B_NOTMETA,
			     minreq, maxreq, bpp));
}

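/*
 * Illustrative sketch (not part of the original header): a clustered
 * read from a filesystem read path.  "ip->i_size" stands in for the
 * file's size and the minreq/maxreq values are placeholders only.
 *
 *	error = cluster_read(vp, ip->i_size, loffset, blksize,
 *			     blksize, blksize * 8, &bp);
 */
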
static __inline int
cluster_read_kvabio(struct vnode *vp, off_t filesize, off_t loffset,
             int blksize, size_t minreq, size_t maxreq, struct buf **bpp)
{
	*bpp = NULL;
	return(cluster_readx(vp, filesize, loffset, blksize,
			     B_NOTMETA | B_KVABIO,
			     minreq, maxreq, bpp));
}

#endif /* _KERNEL */

#endif /* !_SYS_BUF2_H_ */