/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 * 38 * @(#)buf.h 8.9 (Berkeley) 3/30/95 39 * $FreeBSD: src/sys/sys/buf.h,v 1.88.2.10 2003/01/25 19:02:23 dillon Exp $ 40 * $DragonFly: src/sys/sys/buf2.h,v 1.12 2006/02/17 19:18:07 dillon Exp $ 41 */ 42 43 #ifndef _SYS_BUF2_H_ 44 #define _SYS_BUF2_H_ 45 46 #ifdef _KERNEL 47 48 #ifndef _SYS_GLOBALDATA_H_ 49 #include <sys/globaldata.h> /* curthread */ 50 #endif 51 #ifndef _SYS_THREAD2_H_ 52 #include <sys/thread2.h> /* crit_*() functions */ 53 #endif 54 #ifndef _SYS_SPINLOCK2_H_ 55 #include <sys/spinlock2.h> /* crit_*() functions */ 56 #endif 57 58 /* 59 * Initialize a lock. 60 */ 61 #define BUF_LOCKINIT(bp) \ 62 lockinit(&(bp)->b_lock, 0, buf_wmesg, 0, 0) 63 64 /* 65 * 66 * Get a lock sleeping non-interruptably until it becomes available. 67 */ 68 static __inline int 69 BUF_LOCK(struct buf *bp, int locktype) 70 { 71 int ret; 72 73 spin_lock(&buftimespinlock); 74 bp->b_lock.lk_wmesg = buf_wmesg; 75 bp->b_lock.lk_prio = 0; /* tsleep flags */ 76 /* bp->b_lock.lk_timo = 0; not necessary */ 77 ret = lockmgr(&(bp)->b_lock, locktype | LK_INTERLOCK, 78 &buftimespinlock, curthread); 79 return ret; 80 } 81 /* 82 * Get a lock sleeping with specified interruptably and timeout. 
83 */ 84 static __inline int 85 BUF_TIMELOCK(struct buf *bp, int locktype, char *wmesg, int catch, int timo) 86 { 87 int ret; 88 89 spin_lock(&buftimespinlock); 90 bp->b_lock.lk_wmesg = wmesg; 91 bp->b_lock.lk_prio = catch; /* tsleep flags */ 92 bp->b_lock.lk_timo = timo; 93 ret = lockmgr(&(bp)->b_lock, locktype | LK_INTERLOCK | LK_TIMELOCK, 94 &buftimespinlock, curthread); 95 return ret; 96 } 97 /* 98 * Release a lock. Only the acquiring process may free the lock unless 99 * it has been handed off to biodone. 100 */ 101 static __inline void 102 BUF_UNLOCK(struct buf *bp) 103 { 104 lockmgr(&(bp)->b_lock, LK_RELEASE, NULL, curthread); 105 } 106 107 /* 108 * When initiating asynchronous I/O, change ownership of the lock to the 109 * kernel. Once done, the lock may legally released by biodone. The 110 * original owning process can no longer acquire it recursively, but must 111 * wait until the I/O is completed and the lock has been freed by biodone. 112 */ 113 static __inline void 114 BUF_KERNPROC(struct buf *bp) 115 { 116 struct thread *td = curthread; 117 118 if (bp->b_lock.lk_lockholder == td) 119 td->td_locks--; 120 bp->b_lock.lk_lockholder = LK_KERNTHREAD; 121 } 122 /* 123 * Find out the number of references to a lock. 124 * 125 * The non-blocking version should only be used for assertions in cases 126 * where the buffer is expected to be owned or otherwise data stable. 127 */ 128 static __inline int 129 BUF_REFCNT(struct buf *bp) 130 { 131 return (lockcount(&(bp)->b_lock)); 132 } 133 134 static __inline int 135 BUF_REFCNTNB(struct buf *bp) 136 { 137 return (lockcountnb(&(bp)->b_lock)); 138 } 139 140 /* 141 * Free a buffer lock. 
142 */ 143 #define BUF_LOCKFREE(bp) \ 144 if (BUF_REFCNTNB(bp) > 0) \ 145 panic("free locked buf") 146 147 static __inline void 148 bioq_init(struct bio_queue_head *head) 149 { 150 TAILQ_INIT(&head->queue); 151 head->last_blkno = 0; 152 head->insert_point = NULL; 153 head->switch_point = NULL; 154 } 155 156 static __inline void 157 bioq_insert_tail(struct bio_queue_head *head, struct bio *bio) 158 { 159 if ((bio->bio_buf->b_flags & B_ORDERED) != 0) { 160 head->insert_point = bio; 161 head->switch_point = NULL; 162 } 163 TAILQ_INSERT_TAIL(&head->queue, bio, bio_act); 164 } 165 166 static __inline void 167 bioq_remove(struct bio_queue_head *head, struct bio *bio) 168 { 169 if (bio == head->switch_point) 170 head->switch_point = TAILQ_NEXT(bio, bio_act); 171 if (bio == head->insert_point) { 172 head->insert_point = TAILQ_PREV(bio, bio_queue, bio_act); 173 if (head->insert_point == NULL) 174 head->last_blkno = 0; 175 } else if (bio == TAILQ_FIRST(&head->queue)) 176 head->last_blkno = bio->bio_blkno; 177 TAILQ_REMOVE(&head->queue, bio, bio_act); 178 if (TAILQ_FIRST(&head->queue) == head->switch_point) 179 head->switch_point = NULL; 180 } 181 182 static __inline struct bio * 183 bioq_first(struct bio_queue_head *head) 184 { 185 return (TAILQ_FIRST(&head->queue)); 186 } 187 188 #endif /* _KERNEL */ 189 190 #endif /* !_SYS_BUF2_H_ */ 191