/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)buf.h	8.9 (Berkeley) 3/30/95
 * $FreeBSD: src/sys/sys/buf.h,v 1.88.2.10 2003/01/25 19:02:23 dillon Exp $
 * $DragonFly: src/sys/sys/buf2.h,v 1.21 2008/01/28 07:19:06 nth Exp $
 */

#ifndef _SYS_BUF2_H_
#define	_SYS_BUF2_H_

#ifdef _KERNEL

#ifndef _SYS_BUF_H_
#include <sys/buf.h>		/* struct buf, buf_wmesg */
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>	/* curthread */
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>	/* crit_*() functions */
#endif
#ifndef _SYS_SPINLOCK2_H_
#include <sys/spinlock2.h>	/* spinlock functions */
#endif
#ifndef _SYS_MOUNT_H_
#include <sys/mount.h>
#endif
#ifndef _SYS_VNODE_H_
#include <sys/vnode.h>
#endif
#ifndef _VM_VM_PAGE_H_
#include <vm/vm_page.h>
#endif

/*
 * Initialize a buffer's lock.
 */
#define BUF_LOCKINIT(bp) \
	lockinit(&(bp)->b_lock, buf_wmesg, 0, 0)

/*
 * Get a lock, sleeping non-interruptibly until it becomes available.
 *
 * XXX lk_wmesg can race, but should not result in any operational issues.
 */
static __inline int
BUF_LOCK(struct buf *bp, int locktype)
{
	bp->b_lock.lk_wmesg = buf_wmesg;
	return (lockmgr(&(bp)->b_lock, locktype));
}
/*
 * Get a lock, sleeping with the specified interruptibility and timeout.
 *
 * XXX lk_timo can race against other entities calling BUF_TIMELOCK,
 * but will not interfere with entities calling BUF_LOCK since LK_TIMELOCK
 * will not be set in that case.
 *
 * XXX lk_wmesg can race, but should not result in any operational issues.
 */
static __inline int
BUF_TIMELOCK(struct buf *bp, int locktype, char *wmesg, int timo)
{
	bp->b_lock.lk_wmesg = wmesg;
	bp->b_lock.lk_timo = timo;
	return (lockmgr(&(bp)->b_lock, locktype | LK_TIMELOCK));
}
/*
 * Release a lock.  Only the acquiring process may release the lock unless
 * it has been handed off to biodone.
 */
static __inline void
BUF_UNLOCK(struct buf *bp)
{
	lockmgr(&(bp)->b_lock, LK_RELEASE);
}
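
/*
 * Example usage (illustrative sketch only; the helper shown here is
 * hypothetical and not part of this header):
 *
 *	static void
 *	example_touch_buf(struct buf *bp)
 *	{
 *		BUF_LOCK(bp, LK_EXCLUSIVE);	// sleep until the lock is ours
 *		// ... examine or modify the buffer ...
 *		BUF_UNLOCK(bp);			// only the owner may release it
 *	}
 */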

/*
 * When initiating asynchronous I/O, change ownership of the lock to the
 * kernel.  Once done, the lock may legally be released by biodone.  The
 * original owning process can no longer acquire it recursively, but must
 * wait until the I/O is completed and the lock has been freed by biodone.
 */
static __inline void
BUF_KERNPROC(struct buf *bp)
{
	lockmgr_kernproc(&(bp)->b_lock);
}
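
/*
 * Example (illustrative sketch only; the strategy call shown is the
 * generic async-write pattern, not code mandated by this header): the
 * issuer hands the buffer lock to the kernel before starting the I/O so
 * that biodone() may legally release it on completion.
 *
 *	BUF_LOCK(bp, LK_EXCLUSIVE);
 *	BUF_KERNPROC(bp);		// lock is now owned by the kernel
 *	vn_strategy(vp, &bp->b_bio1);	// completes asynchronously;
 *					// biodone() releases the lock
 */
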
/*
 * Find out the number of references to a lock.
 *
 * The non-blocking version should only be used for assertions in cases
 * where the buffer is expected to be owned or otherwise data stable.
 */
static __inline int
BUF_REFCNT(struct buf *bp)
{
	return (lockcount(&(bp)->b_lock));
}

static __inline int
BUF_REFCNTNB(struct buf *bp)
{
	return (lockcountnb(&(bp)->b_lock));
}

/*
 * Free a buffer lock.
 */
#define BUF_LOCKFREE(bp) 			\
	if (BUF_REFCNTNB(bp) > 0)		\
		panic("free locked buf")

static __inline void
bioq_init(struct bio_queue_head *bioq)
{
	TAILQ_INIT(&bioq->queue);
	bioq->off_unused = 0;
	bioq->reorder = 0;
	bioq->transition = NULL;
	bioq->bio_unused = NULL;
}

static __inline void
bioq_insert_tail(struct bio_queue_head *bioq, struct bio *bio)
{
	bioq->transition = NULL;
	TAILQ_INSERT_TAIL(&bioq->queue, bio, bio_act);
}

static __inline void
bioq_remove(struct bio_queue_head *bioq, struct bio *bio)
{
	/*
	 * Adjust the read insertion point when removing a bio from the
	 * queue.  The bio after the insertion point is a write, so move
	 * the point back one entry (NULL indicates all the reads have
	 * cleared).
	 */
	if (bio == bioq->transition)
		bioq->transition = TAILQ_NEXT(bio, bio_act);
	TAILQ_REMOVE(&bioq->queue, bio, bio_act);
}

static __inline struct bio *
bioq_first(struct bio_queue_head *bioq)
{
	return (TAILQ_FIRST(&bioq->queue));
}

static __inline struct bio *
bioq_takefirst(struct bio_queue_head *bioq)
{
	struct bio *bp;

	bp = TAILQ_FIRST(&bioq->queue);
	if (bp != NULL)
		bioq_remove(bioq, bp);
	return (bp);
}
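
/*
 * Example (illustrative sketch only; the queue head and the bios placed on
 * it are hypothetical, not something this header prescribes): a driver
 * queues incoming bios and drains them in FIFO order.
 *
 *	struct bio_queue_head myq;
 *	struct bio *bio;
 *
 *	bioq_init(&myq);
 *	bioq_insert_tail(&myq, somebio);
 *	while ((bio = bioq_takefirst(&myq)) != NULL) {
 *		// hand each dequeued bio to the device
 *	}
 */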

/*
 * Adjust a buffer cache buffer's activity count.  This works similarly
 * to vm_page->act_count.
 */
static __inline void
buf_act_advance(struct buf *bp)
{
	if (bp->b_act_count > ACT_MAX - ACT_ADVANCE)
		bp->b_act_count = ACT_MAX;
	else
		bp->b_act_count += ACT_ADVANCE;
}

static __inline void
buf_act_decline(struct buf *bp)
{
	if (bp->b_act_count < ACT_DECLINE)
		bp->b_act_count = 0;
	else
		bp->b_act_count -= ACT_DECLINE;
}
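
/*
 * Example (illustrative sketch only): as with vm_page->act_count, callers
 * bump the count when a buffer is used and age it during periodic scans,
 * so rarely used buffers decay toward zero.
 *
 *	buf_act_advance(bp);	// on access/reuse of the buffer
 *	...
 *	buf_act_decline(bp);	// during a periodic aging scan
 */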

/*
 * biodeps inlines - used by softupdates and HAMMER.
 *
 * All bioops are MPSAFE.
 */
static __inline void
buf_dep_init(struct buf *bp)
{
	bp->b_ops = NULL;
	LIST_INIT(&bp->b_dep);
}
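
/*
 * Example (illustrative sketch only; "myfs" and its callbacks are
 * hypothetical): a filesystem that tracks dependencies points b_ops (and
 * its mount's mnt_bioops) at a bio_ops table; the wrappers below dispatch
 * to these hooks when they are present.
 *
 *	static struct bio_ops myfs_bioops = {
 *		.io_start	= myfs_io_start,
 *		.io_complete	= myfs_io_complete,
 *		.io_deallocate	= myfs_io_deallocate,
 *		.io_fsync	= myfs_io_fsync,
 *		.io_movedeps	= myfs_io_movedeps,
 *		.io_countdeps	= myfs_io_countdeps,
 *		.io_checkread	= myfs_io_checkread,
 *		.io_checkwrite	= myfs_io_checkwrite,
 *	};
 */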

/*
 * Precondition: the buffer has some dependencies.
 *
 * MPSAFE
 */
static __inline void
buf_deallocate(struct buf *bp)
{
	struct bio_ops *ops = bp->b_ops;

	KKASSERT(!LIST_EMPTY(&bp->b_dep));
	if (ops)
		ops->io_deallocate(bp);
}

/*
 * MPSAFE
 */
static __inline int
buf_countdeps(struct buf *bp, int n)
{
	struct bio_ops *ops = bp->b_ops;
	int r;

	if (ops)
		r = ops->io_countdeps(bp, n);
	else
		r = 0;
	return(r);
}

/*
 * MPSAFE
 */
static __inline void
buf_start(struct buf *bp)
{
	struct bio_ops *ops = bp->b_ops;

	if (ops)
		ops->io_start(bp);
}

/*
 * MPSAFE
 */
static __inline void
buf_complete(struct buf *bp)
{
	struct bio_ops *ops = bp->b_ops;

	if (ops)
		ops->io_complete(bp);
}

/*
 * MPSAFE
 */
static __inline int
buf_fsync(struct vnode *vp)
{
	struct bio_ops *ops = vp->v_mount->mnt_bioops;
	int r;

	if (ops)
		r = ops->io_fsync(vp);
	else
		r = 0;
	return(r);
}

/*
 * MPSAFE
 */
static __inline void
buf_movedeps(struct buf *bp1, struct buf *bp2)
{
	struct bio_ops *ops = bp1->b_ops;

	if (ops)
		ops->io_movedeps(bp1, bp2);
}

/*
 * MPSAFE
 */
static __inline int
buf_checkread(struct buf *bp)
{
	struct bio_ops *ops = bp->b_ops;

	if (ops)
		return(ops->io_checkread(bp));
	return(0);
}

/*
 * MPSAFE
 */
static __inline int
buf_checkwrite(struct buf *bp)
{
	struct bio_ops *ops = bp->b_ops;

	if (ops)
		return(ops->io_checkwrite(bp));
	return(0);
}

/*
 * Chained biodone.  The bio callback was made and the callback function
 * wishes to chain the biodone.  If no BIOs are left we call bpdone()
 * with elseit=TRUE (asynchronous completion).
 *
 * MPSAFE
 */
static __inline void
biodone_chain(struct bio *bio)
{
	if (bio->bio_prev)
		biodone(bio->bio_prev);
	else
		bpdone(bio->bio_buf, 1);
}
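
/*
 * Example (illustrative sketch only; "mydev_done" is hypothetical): a
 * driver's completion callback finishes its own bookkeeping and then
 * chains completion back toward the originator, falling through to
 * bpdone() when it holds the last bio on the chain.
 *
 *	static void
 *	mydev_done(struct bio *bio)
 *	{
 *		// driver-private completion work here
 *		biodone_chain(bio);
 *	}
 */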

static __inline int
bread(struct vnode *vp, off_t loffset, int size, struct buf **bpp)
{
	*bpp = NULL;
	return(breadnx(vp, loffset, size, NULL, NULL, 0, bpp));
}

static __inline int
breadn(struct vnode *vp, off_t loffset, int size, off_t *raoffset,
      int *rabsize, int cnt, struct buf **bpp)
{
	*bpp = NULL;
	return(breadnx(vp, loffset, size, raoffset, rabsize, cnt, bpp));
}

static __inline int
cluster_read(struct vnode *vp, off_t filesize, off_t loffset,
             int blksize, size_t minreq, size_t maxreq, struct buf **bpp)
{
	*bpp = NULL;
	return(cluster_readx(vp, filesize, loffset, blksize, minreq,
			     maxreq, bpp));
}
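
/*
 * Example (illustrative sketch only; the block size and error handling
 * are hypothetical): a filesystem read path fetches a logical block
 * through the buffer cache, consumes b_data, and releases the buffer.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, loffset, 16384, &bp);
 *	if (error == 0) {
 *		// use bp->b_data (the file's data at loffset)
 *		brelse(bp);
 *	}
 */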

#endif /* _KERNEL */

#endif /* !_SYS_BUF2_H_ */