FreeBSD/Linux Kernel Cross Reference
sys/sys/buf2.h
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)buf.h	8.9 (Berkeley) 3/30/95
 * $FreeBSD: src/sys/sys/buf.h,v 1.88.2.10 2003/01/25 19:02:23 dillon Exp $
 * $DragonFly: src/sys/sys/buf2.h,v 1.21 2008/01/28 07:19:06 nth Exp $
 */

#ifndef _SYS_BUF2_H_
#define _SYS_BUF2_H_

#ifdef _KERNEL

#ifndef _SYS_BUF_H_
#include <sys/buf.h>		/* struct buf, buf_wmesg */
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>	/* curthread */
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>	/* crit_*() functions */
#endif
#ifndef _SYS_SPINLOCK2_H_
#include <sys/spinlock2.h>	/* spin_*() functions */
#endif
#ifndef _SYS_MOUNT_H_
#include <sys/mount.h>
#endif
#ifndef _SYS_VNODE_H_
#include <sys/vnode.h>
#endif
#ifndef _VM_VM_PAGE_H_
#include <vm/vm_page.h>
#endif

/*
 * Initialize a lock.
 */
#define BUF_LOCKINIT(bp) \
	lockinit(&(bp)->b_lock, buf_wmesg, 0, 0)

/*
 * Get a lock, sleeping non-interruptibly until it becomes available.
 *
 * XXX lk_wmesg can race, but should not result in any operational issues.
 */
static __inline int
BUF_LOCK(struct buf *bp, int locktype)
{
	bp->b_lock.lk_wmesg = buf_wmesg;
	return (lockmgr(&(bp)->b_lock, locktype));
}
/*
 * Get a lock, sleeping with the specified interruptibility and timeout.
 *
 * XXX lk_timo can race against other entities calling BUF_TIMELOCK,
 * but will not interfere with entities calling BUF_LOCK since LK_TIMELOCK
 * will not be set in that case.
 *
 * XXX lk_wmesg can race, but should not result in any operational issues.
 */
static __inline int
BUF_TIMELOCK(struct buf *bp, int locktype, char *wmesg, int timo)
{
	bp->b_lock.lk_wmesg = wmesg;
	bp->b_lock.lk_timo = timo;
	return (lockmgr(&(bp)->b_lock, locktype | LK_TIMELOCK));
}
/*
 * Release a lock.  Only the acquiring process may free the lock unless
 * it has been handed off to biodone.
 */
static __inline void
BUF_UNLOCK(struct buf *bp)
{
	lockmgr(&(bp)->b_lock, LK_RELEASE);
}
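
/*
 * Example (illustrative sketch, not part of the original header): a
 * typical synchronous caller pairs BUF_LOCK()/BUF_UNLOCK() around its
 * use of the buffer.  The lock-type flags come from <sys/lock.h>.
 *
 *	if (BUF_LOCK(bp, LK_EXCLUSIVE) == 0) {
 *		... examine or modify bp->b_data ...
 *		BUF_UNLOCK(bp);
 *	}
 */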

/*
 * When initiating asynchronous I/O, change ownership of the lock to the
 * kernel.  Once done, the lock may legally be released by biodone.  The
 * original owning process can no longer acquire it recursively, but must
 * wait until the I/O is completed and the lock has been freed by biodone.
 */
static __inline void
BUF_KERNPROC(struct buf *bp)
{
	lockmgr_kernproc(&(bp)->b_lock);
}
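
/*
 * Example (illustrative sketch, not part of the original header): when a
 * buffer is handed off for asynchronous I/O, the owner transfers the lock
 * to the kernel first; the completion path (biodone) then releases it.
 * vn_strategy() is the usual way a filesystem issues the I/O.
 *
 *	BUF_KERNPROC(bp);		-- lock now owned by the kernel
 *	vn_strategy(vp, &bp->b_bio1);	-- biodone() will release the lock
 */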
/*
 * Find out the number of references to a lock.
 *
 * The non-blocking version should only be used for assertions in cases
 * where the buffer is expected to be owned or otherwise data stable.
 */
static __inline int
BUF_REFCNT(struct buf *bp)
{
	return (lockcount(&(bp)->b_lock));
}

static __inline int
BUF_REFCNTNB(struct buf *bp)
{
	return (lockcountnb(&(bp)->b_lock));
}

/*
 * Free a buffer lock.
 */
#define BUF_LOCKFREE(bp)			\
	if (BUF_REFCNTNB(bp) > 0)		\
		panic("free locked buf")

static __inline void
bioq_init(struct bio_queue_head *bioq)
{
	TAILQ_INIT(&bioq->queue);
	bioq->off_unused = 0;
	bioq->reorder = 0;
	bioq->transition = NULL;
	bioq->bio_unused = NULL;
}

static __inline void
bioq_insert_tail(struct bio_queue_head *bioq, struct bio *bio)
{
	bioq->transition = NULL;
	TAILQ_INSERT_TAIL(&bioq->queue, bio, bio_act);
}

static __inline void
bioq_remove(struct bio_queue_head *bioq, struct bio *bio)
{
	/*
	 * Adjust the read insertion point when removing a bio from the
	 * bioq.  The bio after the insert point is a write so move
	 * backwards one (NULL will indicate all the reads have cleared).
	 */
	if (bio == bioq->transition)
		bioq->transition = TAILQ_NEXT(bio, bio_act);
	TAILQ_REMOVE(&bioq->queue, bio, bio_act);
}

static __inline struct bio *
bioq_first(struct bio_queue_head *bioq)
{
	return (TAILQ_FIRST(&bioq->queue));
}

static __inline struct bio *
bioq_takefirst(struct bio_queue_head *bioq)
{
	struct bio *bp;

	bp = TAILQ_FIRST(&bioq->queue);
	if (bp != NULL)
		bioq_remove(bioq, bp);
	return (bp);
}
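
/*
 * Example (illustrative sketch, not part of the original header): a disk
 * driver commonly queues incoming BIOs in its strategy routine and drains
 * them from its worker or completion path.  "my_bioq" is hypothetical.
 *
 *	static struct bio_queue_head my_bioq;
 *
 *	bioq_init(&my_bioq);			-- once, at attach time
 *	bioq_insert_tail(&my_bioq, bio);	-- strategy: enqueue request
 *
 *	while ((bio = bioq_takefirst(&my_bioq)) != NULL)
 *		... start or complete the transfer ...
 */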

/*
 * Adjust a buffer cache buffer's activity count.  This works similarly
 * to vm_page->act_count.
 */
static __inline void
buf_act_advance(struct buf *bp)
{
	if (bp->b_act_count > ACT_MAX - ACT_ADVANCE)
		bp->b_act_count = ACT_MAX;
	else
		bp->b_act_count += ACT_ADVANCE;
}

static __inline void
buf_act_decline(struct buf *bp)
{
	if (bp->b_act_count < ACT_DECLINE)
		bp->b_act_count = 0;
	else
		bp->b_act_count -= ACT_DECLINE;
}

/*
 * biodeps inlines - used by softupdates and HAMMER.
 *
 * All bioops are MPSAFE
 */
static __inline void
buf_dep_init(struct buf *bp)
{
	bp->b_ops = NULL;
	LIST_INIT(&bp->b_dep);
}

/*
 * Precondition: the buffer has some dependencies.
 *
 * MPSAFE
 */
static __inline void
buf_deallocate(struct buf *bp)
{
	struct bio_ops *ops = bp->b_ops;

	KKASSERT(!LIST_EMPTY(&bp->b_dep));
	if (ops)
		ops->io_deallocate(bp);
}
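
/*
 * Example (illustrative sketch, not part of the original header): a
 * filesystem that tracks dependencies supplies a bio_ops table and points
 * bp->b_ops at it; the buf_*() accessors in this file then dispatch
 * through the table.  The field names match the io_* callbacks used in
 * this file; the "myfs_*" handlers are hypothetical.
 *
 *	static struct bio_ops myfs_bioops = {
 *		.io_start	= myfs_io_start,
 *		.io_complete	= myfs_io_complete,
 *		.io_deallocate	= myfs_io_deallocate,
 *		.io_fsync	= myfs_io_fsync,
 *	};
 *
 *	bp->b_ops = &myfs_bioops;	-- attach when a dependency is added
 */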

/*
 * MPSAFE
 */
static __inline int
buf_countdeps(struct buf *bp, int n)
{
	struct bio_ops *ops = bp->b_ops;
	int r;

	if (ops)
		r = ops->io_countdeps(bp, n);
	else
		r = 0;
	return(r);
}

/*
 * MPSAFE
 */
static __inline void
buf_start(struct buf *bp)
{
	struct bio_ops *ops = bp->b_ops;

	if (ops)
		ops->io_start(bp);
}

/*
 * MPSAFE
 */
static __inline void
buf_complete(struct buf *bp)
{
	struct bio_ops *ops = bp->b_ops;

	if (ops)
		ops->io_complete(bp);
}

/*
 * MPSAFE
 */
static __inline int
buf_fsync(struct vnode *vp)
{
	struct bio_ops *ops = vp->v_mount->mnt_bioops;
	int r;

	if (ops)
		r = ops->io_fsync(vp);
	else
		r = 0;
	return(r);
}

/*
 * MPSAFE
 */
static __inline void
buf_movedeps(struct buf *bp1, struct buf *bp2)
{
	struct bio_ops *ops = bp1->b_ops;

	if (ops)
		ops->io_movedeps(bp1, bp2);
}

/*
 * MPSAFE
 */
static __inline int
buf_checkread(struct buf *bp)
{
	struct bio_ops *ops = bp->b_ops;

	if (ops)
		return(ops->io_checkread(bp));
	return(0);
}

/*
 * MPSAFE
 */
static __inline int
buf_checkwrite(struct buf *bp)
{
	struct bio_ops *ops = bp->b_ops;

	if (ops)
		return(ops->io_checkwrite(bp));
	return(0);
}

/*
 * Chained biodone.  The bio callback was made and the callback function
 * wishes to chain the biodone.  If no BIOs are left we call bpdone()
 * with elseit=TRUE (asynchronous completion).
 *
 * MPSAFE
 */
static __inline void
biodone_chain(struct bio *bio)
{
	if (bio->bio_prev)
		biodone(bio->bio_prev);
	else
		bpdone(bio->bio_buf, 1);
}
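
/*
 * Example (illustrative sketch, not part of the original header): a
 * layered driver's completion callback typically does its own bookkeeping
 * and then propagates the completion back toward the originating buffer
 * via the previous BIO, finishing the buffer itself at the end of the
 * chain.  "mydev_strategy_done" is hypothetical.
 *
 *	static void
 *	mydev_strategy_done(struct bio *bio)
 *	{
 *		... per-layer cleanup ...
 *		biodone_chain(bio);	-- previous BIO, or bpdone() the buf
 *	}
 */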

static __inline int
bread(struct vnode *vp, off_t loffset, int size, struct buf **bpp)
{
	*bpp = NULL;
	return(breadnx(vp, loffset, size, NULL, NULL, 0, bpp));
}
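
/*
 * Example (illustrative sketch, not part of the original header): the
 * common bread() pattern reads one logical block and releases the buffer
 * with brelse(), including on error.  "loffset" and "blksize" are just
 * placeholder locals.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, loffset, blksize, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... consume bp->b_data ...
 *	brelse(bp);		-- or bdwrite()/bawrite() if modified
 */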

static __inline int
breadn(struct vnode *vp, off_t loffset, int size, off_t *raoffset,
       int *rabsize, int cnt, struct buf **bpp)
{
	*bpp = NULL;
	return(breadnx(vp, loffset, size, raoffset, rabsize, cnt, bpp));
}

static __inline int
cluster_read(struct vnode *vp, off_t filesize, off_t loffset,
	     int blksize, size_t minreq, size_t maxreq, struct buf **bpp)
{
	*bpp = NULL;
	return(cluster_readx(vp, filesize, loffset, blksize, minreq,
			     maxreq, bpp));
}

#endif	/* _KERNEL */

#endif	/* !_SYS_BUF2_H_ */