1 /* 2 * Copyright (c) 1982, 1986, 1989, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 
 * @(#)buf.h	8.9 (Berkeley) 3/30/95
 * $FreeBSD: src/sys/sys/buf.h,v 1.88.2.10 2003/01/25 19:02:23 dillon Exp $
 * $DragonFly: src/sys/sys/buf2.h,v 1.21 2008/01/28 07:19:06 nth Exp $
 */

#ifndef _SYS_BUF2_H_
#define _SYS_BUF2_H_

#ifdef _KERNEL

#ifndef _SYS_BUF_H_
#include <sys/buf.h>		/* crit_*() functions */
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>	/* curthread */
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>	/* crit_*() functions */
#endif
#ifndef _SYS_SPINLOCK2_H_
#include <sys/spinlock2.h>	/* crit_*() functions */
#endif
#ifndef _SYS_MOUNT_H_
#include <sys/mount.h>
#endif
#ifndef _SYS_VNODE_H_
#include <sys/vnode.h>
#endif
#ifndef _VM_VM_PAGE_H_
#include <vm/vm_page.h>
#endif

/*
 * Initialize a buffer's lock.
 */
#define BUF_LOCKINIT(bp) \
	lockinit(&(bp)->b_lock, buf_wmesg, 0, LK_NOCOLLSTATS)

/*
 * Get a lock, sleeping non-interruptibly until it becomes available.
 *
 * XXX lk_wmesg can race, but should not result in any operational issues.
 */
static __inline int
BUF_LOCK(struct buf *bp, int locktype)
{
	bp->b_lock.lk_wmesg = buf_wmesg;
	return (lockmgr(&(bp)->b_lock, locktype));
}

/*
 * Get a lock, sleeping with the specified interruptibility and timeout.
 *
 * XXX lk_timo can race against other entities calling BUF_TIMELOCK,
 * but will not interfere with entities calling BUF_LOCK since LK_TIMELOCK
 * will not be set in that case.
 *
 * XXX lk_wmesg can race, but should not result in any operational issues.
 */
static __inline int
BUF_TIMELOCK(struct buf *bp, int locktype, char *wmesg, int timo)
{
	bp->b_lock.lk_wmesg = wmesg;
	bp->b_lock.lk_timo = timo;
	return (lockmgr(&(bp)->b_lock, locktype | LK_TIMELOCK));
}

/*
 * Release a lock. Only the acquiring process may free the lock unless
 * it has been handed off to biodone.
103 */ 104 static __inline void 105 BUF_UNLOCK(struct buf *bp) 106 { 107 lockmgr(&(bp)->b_lock, LK_RELEASE); 108 } 109 110 /* 111 * When initiating asynchronous I/O, change ownership of the lock to the 112 * kernel. Once done, the lock may legally released by biodone. The 113 * original owning process can no longer acquire it recursively, but must 114 * wait until the I/O is completed and the lock has been freed by biodone. 115 */ 116 static __inline void 117 BUF_KERNPROC(struct buf *bp) 118 { 119 lockmgr_kernproc(&(bp)->b_lock); 120 } 121 /* 122 * Find out the number of references to a lock. 123 * 124 * The non-blocking version should only be used for assertions in cases 125 * where the buffer is expected to be owned or otherwise data stable. 126 */ 127 static __inline int 128 BUF_LOCKINUSE(struct buf *bp) 129 { 130 return (lockinuse(&(bp)->b_lock)); 131 } 132 133 /* 134 * Free a buffer lock. 135 */ 136 #define BUF_LOCKFREE(bp) \ 137 if (BUF_LOCKINUSE(bp)) \ 138 panic("free locked buf") 139 140 static __inline void 141 bioq_init(struct bio_queue_head *bioq) 142 { 143 TAILQ_INIT(&bioq->queue); 144 bioq->off_unused = 0; 145 bioq->reorder = 0; 146 bioq->transition = NULL; 147 bioq->bio_unused = NULL; 148 } 149 150 static __inline void 151 bioq_insert_tail(struct bio_queue_head *bioq, struct bio *bio) 152 { 153 bioq->transition = NULL; 154 TAILQ_INSERT_TAIL(&bioq->queue, bio, bio_act); 155 } 156 157 static __inline void 158 bioq_remove(struct bio_queue_head *bioq, struct bio *bio) 159 { 160 /* 161 * Adjust read insertion point when removing the bioq. The 162 * bio after the insert point is a write so move backwards 163 * one (NULL will indicate all the reads have cleared). 
	 */
	if (bio == bioq->transition)
		bioq->transition = TAILQ_NEXT(bio, bio_act);
	TAILQ_REMOVE(&bioq->queue, bio, bio_act);
}

/*
 * Return the first BIO in the queue without removing it, or NULL
 * if the queue is empty.
 */
static __inline struct bio *
bioq_first(struct bio_queue_head *bioq)
{
	return (TAILQ_FIRST(&bioq->queue));
}

/*
 * Remove and return the first BIO in the queue, or NULL if the
 * queue is empty.
 */
static __inline struct bio *
bioq_takefirst(struct bio_queue_head *bioq)
{
	struct bio *bp;

	bp = TAILQ_FIRST(&bioq->queue);
	if (bp != NULL)
		bioq_remove(bioq, bp);
	return (bp);
}

/*
 * Adjust buffer cache buffer's activity count.  This
 * works similarly to vm_page->act_count.
 */
static __inline void
buf_act_advance(struct buf *bp)
{
	/* saturate at ACT_MAX instead of overshooting */
	if (bp->b_act_count > ACT_MAX - ACT_ADVANCE)
		bp->b_act_count = ACT_MAX;
	else
		bp->b_act_count += ACT_ADVANCE;
}

/*
 * Reduce the buffer's activity count, saturating at zero.
 */
static __inline void
buf_act_decline(struct buf *bp)
{
	if (bp->b_act_count < ACT_DECLINE)
		bp->b_act_count = 0;
	else
		bp->b_act_count -= ACT_DECLINE;
}

/*
 * biodeps inlines - used by softupdates and HAMMER.
 *
 * All bioops are MPSAFE
 */

/*
 * Clear the bioops hook and dependency list on a buffer.
 */
static __inline void
buf_dep_init(struct buf *bp)
{
	bp->b_ops = NULL;
	LIST_INIT(&bp->b_dep);
}

/*
 * Dispatch io_deallocate to the buffer's bioops, if any.
 *
 * Precondition: the buffer has some dependencies.
 *
 * MPSAFE
 */
static __inline void
buf_deallocate(struct buf *bp)
{
	struct bio_ops *ops = bp->b_ops;

	KKASSERT(! LIST_EMPTY(&bp->b_dep));
	if (ops)
		ops->io_deallocate(bp);
}

/*
 * This callback is made from flushbufqueues() which uses BUF_LOCK().
 * Since it isn't going through a normal buffer acquisition mechanic
 * and calling the filesystem back, enforce the vnode's KVABIO support.
 */
static __inline int
buf_countdeps(struct buf *bp, int n)
{
	struct bio_ops *ops = bp->b_ops;
	int r;

	if (ops) {
		/*
		 * NOTE(review): when the vnode is absent or not flagged
		 * VKVABIO, bkvasync_all() is called first — presumably to
		 * synchronize the buffer's KVA for the callback; confirm
		 * against bkvasync_all()'s contract.
		 */
		if (bp->b_vp == NULL || (bp->b_vp->v_flag & VKVABIO) == 0)
			bkvasync_all(bp);
		r = ops->io_countdeps(bp, n);
	} else {
		r = 0;
	}
	return(r);
}

/*
 * Dispatch io_start to the buffer's bioops, if any.
 *
 * MPSAFE
 */
static __inline void
buf_start(struct buf *bp)
{
	struct bio_ops *ops = bp->b_ops;

	if (ops)
		ops->io_start(bp);
}

/*
 * Dispatch io_complete to the buffer's bioops, if any.
 *
 * MPSAFE
 */
static __inline void
buf_complete(struct buf *bp)
{
	struct bio_ops *ops = bp->b_ops;

	if (ops)
		ops->io_complete(bp);
}

/*
 * Dispatch io_fsync to the mount's bioops, if any; returns 0 when
 * no bioops are installed.
 *
 * MPSAFE
 */
static __inline int
buf_fsync(struct vnode *vp)
{
	struct bio_ops *ops = vp->v_mount->mnt_bioops;
	int r;

	if (ops)
		r = ops->io_fsync(vp);
	else
		r = 0;
	return(r);
}

/*
 * Dispatch io_movedeps (bp1 -> bp2) to bp1's bioops, if any.
 *
 * MPSAFE
 */
static __inline void
buf_movedeps(struct buf *bp1, struct buf *bp2)
{
	struct bio_ops *ops = bp1->b_ops;

	if (ops)
		ops->io_movedeps(bp1, bp2);
}

/*
 * Dispatch io_checkread to the buffer's bioops; returns 0 when no
 * bioops are installed.
 *
 * MPSAFE
 */
static __inline int
buf_checkread(struct buf *bp)
{
	struct bio_ops *ops = bp->b_ops;

	if (ops)
		return(ops->io_checkread(bp));
	return(0);
}

/*
 * This callback is made from flushbufqueues() which uses BUF_LOCK().
 * Since it isn't going through a normal buffer acquisition mechanic
 * and calling the filesystem back, enforce the vnode's KVABIO support.
 */
static __inline int
buf_checkwrite(struct buf *bp)
{
	struct bio_ops *ops = bp->b_ops;

	if (ops) {
		if (bp->b_vp == NULL || (bp->b_vp->v_flag & VKVABIO) == 0)
			bkvasync_all(bp);
		return(ops->io_checkwrite(bp));
	}
	return(0);
}

/*
 * Chained biodone.  The bio callback was made and the callback function
 * wishes to chain the biodone.
 * If no BIO's are left we call bpdone()
 * with elseit=TRUE (asynchronous completion).
 *
 * MPSAFE
 */
static __inline void
biodone_chain(struct bio *bio)
{
	if (bio->bio_prev)
		biodone(bio->bio_prev);
	else
		bpdone(bio->bio_buf, 1);
}

/*
 * Read (size) bytes at (loffset) via breadnx() with B_NOTMETA and no
 * read-ahead; *bpp is cleared to NULL before the call.
 */
static __inline int
bread(struct vnode *vp, off_t loffset, int size, struct buf **bpp)
{
	*bpp = NULL;
	return(breadnx(vp, loffset, size, B_NOTMETA,
		       NULL, NULL, 0, bpp));
}

/*
 * As bread(), but additionally sets B_KVABIO.
 */
static __inline int
bread_kvabio(struct vnode *vp, off_t loffset, int size, struct buf **bpp)
{
	*bpp = NULL;
	return(breadnx(vp, loffset, size, B_NOTMETA | B_KVABIO,
		       NULL, NULL, 0, bpp));
}

/*
 * As bread(), but passes (cnt) read-ahead requests described by the
 * raoffset/rabsize arrays through to breadnx().
 */
static __inline int
breadn(struct vnode *vp, off_t loffset, int size, off_t *raoffset,
       int *rabsize, int cnt, struct buf **bpp)
{
	*bpp = NULL;
	return(breadnx(vp, loffset, size, B_NOTMETA, raoffset,
		       rabsize, cnt, bpp));
}

/*
 * Clustered read via cluster_readx() with B_NOTMETA; *bpp is cleared
 * to NULL before the call.
 */
static __inline int
cluster_read(struct vnode *vp, off_t filesize, off_t loffset,
	     int blksize, size_t minreq, size_t maxreq, struct buf **bpp)
{
	*bpp = NULL;
	return(cluster_readx(vp, filesize, loffset, blksize, B_NOTMETA,
			     minreq, maxreq, bpp));
}

/*
 * As cluster_read(), but additionally sets B_KVABIO.
 */
static __inline int
cluster_read_kvabio(struct vnode *vp, off_t filesize, off_t loffset,
		    int blksize, size_t minreq, size_t maxreq, struct buf **bpp)
{
	*bpp = NULL;
	return(cluster_readx(vp, filesize, loffset, blksize,
			     B_NOTMETA | B_KVABIO,
			     minreq, maxreq, bpp));
}

#endif	/* _KERNEL */

#endif	/* !_SYS_BUF2_H_ */