FreeBSD/Linux Kernel Cross Reference
sys/sys/buf.h
1 /*
2 * Copyright (c) 1982, 1986, 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * @(#)buf.h 8.9 (Berkeley) 3/30/95
39 * $FreeBSD: releng/5.0/sys/sys/buf.h 102600 2002-08-30 04:04:37Z peter $
40 */
41
42 #ifndef _SYS_BUF_H_
43 #define _SYS_BUF_H_
44
45 #include <sys/queue.h>
46 #include <sys/lock.h>
47 #include <sys/lockmgr.h>
48
49 struct bio;
50 struct buf;
51 struct mount;
52 struct vnode;
53
54 /*
55 * To avoid including <ufs/ffs/softdep.h>
56 */
57 LIST_HEAD(workhead, worklist);
58 /*
59 * These are currently used only by the soft dependency code, hence
60 * are stored once in a global variable. If other subsystems wanted
61 * to use these hooks, a pointer to a set of bio_ops could be added
62 * to each buffer.
63 */
64 extern struct bio_ops {
65 void (*io_start)(struct buf *);
66 void (*io_complete)(struct buf *);
67 void (*io_deallocate)(struct buf *);
68 void (*io_movedeps)(struct buf *, struct buf *);
69 int (*io_countdeps)(struct buf *, int);
70 } bioops;
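
/*
 * Illustrative sketch only (not part of this header): roughly how a
 * subsystem such as the soft-dependency code installs its hooks in the
 * global bioops at initialization time.  The handler names here are
 * hypothetical.
 */
static void example_io_start(struct buf *bp)    { /* queue dependency work */ }
static void example_io_complete(struct buf *bp) { /* retire dependencies */ }

static void
example_hooks_init(void)
{
	bioops.io_start = example_io_start;
	bioops.io_complete = example_io_complete;
}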
71
72 struct buf_ops {
73 char *bop_name;
74 int (*bop_write)(struct buf *);
75 };
76
77 extern struct buf_ops buf_ops_bio;
78
79 struct vm_object;
80
81 typedef unsigned char b_xflags_t;
82
83 /*
84 * The buffer header describes an I/O operation in the kernel.
85 *
86 * NOTES:
87 * b_bufsize, b_bcount. b_bufsize is the allocation size of the
88 * buffer, either DEV_BSIZE or PAGE_SIZE aligned. b_bcount is the
89 * originally requested buffer size and can serve as a bounds check
90 * against EOF. For most, but not all uses, b_bcount == b_bufsize.
91 *
92 * b_dirtyoff, b_dirtyend. Buffers support piecemeal, unaligned
93 * ranges of dirty data that need to be written to backing store.
94 * The range is typically clipped at b_bcount ( not b_bufsize ).
95 *
96 * b_resid. Number of bytes remaining in I/O. After an I/O operation
97 * completes, b_resid is usually 0 indicating 100% success.
98 */
99 struct buf {
100 /* XXX: b_io must be the first element of struct buf for now /phk */
101 /* XXX: if you change this, fix BIOTOBUF macro below */
102 struct bio b_io; /* "Builtin" I/O request. */
103 #define BIOTOBUF(biop) ((struct buf *)(biop))
104 #define b_bcount b_io.bio_bcount
105 #define b_blkno b_io.bio_blkno
106 #define b_caller1 b_io.bio_caller1
107 #define b_data b_io.bio_data
108 #define b_dev b_io.bio_dev
109 #define b_driver1 b_io.bio_driver1
110 #define b_driver2 b_io.bio_driver2
111 #define b_error b_io.bio_error
112 #define b_iocmd b_io.bio_cmd
113 #define b_ioflags b_io.bio_flags
114 #define b_pblkno b_io.bio_pblkno
115 #define b_resid b_io.bio_resid
116 struct buf_ops *b_op;
117 unsigned b_magic;
118 #define B_MAGIC_BIO 0x10b10b10
119 #define B_MAGIC_NFS 0x67238234
120 void (*b_iodone)(struct buf *);
121 off_t b_offset; /* Offset into file. */
122 #ifdef USE_BUFHASH
123 LIST_ENTRY(buf) b_hash; /* Hash chain. */
124 #endif
125 TAILQ_ENTRY(buf) b_vnbufs; /* Buffer's associated vnode. */
126 struct buf *b_left; /* splay tree link (V) */
127 struct buf *b_right; /* splay tree link (V) */
128 TAILQ_ENTRY(buf) b_freelist; /* Free list position if not active. */
129 long b_flags; /* B_* flags. */
130 unsigned short b_qindex; /* buffer queue index */
131 b_xflags_t b_xflags; /* extra flags */
132 struct lock b_lock; /* Buffer lock */
133 long b_bufsize; /* Allocated buffer size. */
134 long b_runningbufspace; /* when I/O is running, pipelining */
135 caddr_t b_kvabase; /* base kva for buffer */
136 int b_kvasize; /* size of kva for buffer */
137 daddr_t b_lblkno; /* Logical block number. */
138 struct vnode *b_vp; /* Device vnode. */
139 struct vm_object *b_object; /* Object for vp */
140 int b_dirtyoff; /* Offset in buffer of dirty region. */
141 int b_dirtyend; /* Offset of end of dirty region. */
142 struct ucred *b_rcred; /* Read credentials reference. */
143 struct ucred *b_wcred; /* Write credentials reference. */
144 void *b_saveaddr; /* Original b_addr for physio. */
145 union pager_info {
146 void *pg_spc;
147 int pg_reqpage;
148 } b_pager;
149 union cluster_info {
150 TAILQ_HEAD(cluster_list_head, buf) cluster_head;
151 TAILQ_ENTRY(buf) cluster_entry;
152 } b_cluster;
153 struct vm_page *b_pages[btoc(MAXPHYS)];
154 int b_npages;
155 struct workhead b_dep; /* List of filesystem dependencies. */
156 };
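
/*
 * Illustrative sketch only (not part of this header): how the byte-granular
 * fields described in the NOTES above are typically consulted.  A short
 * transfer leaves b_resid non-zero; a partial overwrite grows the
 * b_dirtyoff/b_dirtyend range, clipped at b_bcount rather than b_bufsize.
 */
static __inline int
example_io_was_short(struct buf *bp)
{
	return (bp->b_resid != 0);		/* bytes left untransferred */
}

static __inline void
example_extend_dirty_range(struct buf *bp, int off, int end)
{
	if (end > bp->b_bcount)
		end = bp->b_bcount;		/* clip at b_bcount */
	if (bp->b_dirtyend > 0) {		/* extend an existing range */
		if (off < bp->b_dirtyoff)
			bp->b_dirtyoff = off;
		if (end > bp->b_dirtyend)
			bp->b_dirtyend = end;
	} else {				/* first dirty range */
		bp->b_dirtyoff = off;
		bp->b_dirtyend = end;
	}
}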
157
158 #define b_spc b_pager.pg_spc
159
160 /*
161 * These flags are kept in b_flags.
162 *
163 * Notes:
164 *
165 * B_ASYNC VOP calls on bp's are usually async whether or not
166 * B_ASYNC is set, but some subsystems, such as NFS, like
167 * to know what is best for the caller so they can
168 * optimize the I/O.
169 *
170 * B_PAGING Indicates that bp is being used by the paging system (or
171 * some other paging subsystem) and that the bp is not linked into
172 * the b_vp's clean/dirty linked lists or ref counts.
173 * Buffer vp reassignments are illegal in this case.
174 *
175 * B_CACHE This may only be set if the buffer is entirely valid.
176 * The situation where B_DELWRI is set and B_CACHE is
177 * clear MUST be committed to disk by getblk() so
178 * B_DELWRI can also be cleared. See the comments for
179 * getblk() in kern/vfs_bio.c. If B_CACHE is clear,
180 * the caller is expected to clear BIO_ERROR and B_INVAL,
181 * set BIO_READ, and initiate an I/O.
182 *
183 * The 'entire buffer' is defined to be the range from
184 * 0 through b_bcount.
185 *
186 * B_MALLOC Request that the buffer be allocated from the malloc
187 * pool, DEV_BSIZE aligned instead of PAGE_SIZE aligned.
188 *
189 * B_CLUSTEROK This flag is typically set for B_DELWRI buffers
190 * by filesystems that allow clustering when the buffer
191 * is fully dirty and indicates that it may be clustered
192 * with other adjacent dirty buffers. Note the clustering
193 * may not be used with the stage 1 data write under NFS
194 * but may be used for the commit rpc portion.
195 *
196 * B_VMIO Indicates that the buffer is tied into a VM object.
197 * The buffer's data is always PAGE_SIZE aligned even
198 * if b_bufsize and b_bcount are not. ( b_bufsize is
199 * always at least DEV_BSIZE aligned, though ).
200 *
201 * B_DIRECT Hint that we should attempt to completely free
202 * the pages underlying the buffer. B_DIRECT is
203 * sticky until the buffer is released and typically
204 * only has an effect when B_RELBUF is also set.
205 *
206 * B_NOWDRAIN This flag should be set when a device (like MD)
207 * does a turn-around VOP_WRITE from its strategy
208 * routine. This flag prevents bwrite() from blocking
209 * in wdrain, avoiding a deadlock situation.
210 */
211
212 #define B_AGE 0x00000001 /* Move to age queue when I/O done. */
213 #define B_NEEDCOMMIT 0x00000002 /* Append-write in progress. */
214 #define B_ASYNC 0x00000004 /* Start I/O, do not wait. */
215 #define B_DIRECT 0x00000008 /* direct I/O flag (pls free vmio) */
216 #define B_DEFERRED 0x00000010 /* Skipped over for cleaning */
217 #define B_CACHE 0x00000020 /* Bread found us in the cache. */
218 #define B_VALIDSUSPWRT 0x00000040 /* Valid write during suspension. */
219 #define B_DELWRI 0x00000080 /* Delay I/O until buffer reused. */
220 #define B_00000100 0x00000100 /* Available flag. */
221 #define B_DONE 0x00000200 /* I/O completed. */
222 #define B_EINTR 0x00000400 /* I/O was interrupted */
223 #define B_NOWDRAIN 0x00000800 /* Avoid wdrain deadlock */
224 #define B_SCANNED 0x00001000 /* VOP_FSYNC funcs mark written bufs */
225 #define B_INVAL 0x00002000 /* Does not contain valid info. */
226 #define B_LOCKED 0x00004000 /* Locked in core (not reusable). */
227 #define B_NOCACHE 0x00008000 /* Do not cache block after use. */
228 #define B_MALLOC 0x00010000 /* malloced b_data */
229 #define B_CLUSTEROK 0x00020000 /* May be clustered with adjacent bufs. */
230 #define B_PHYS 0x00040000 /* I/O to user memory. */
231 #define B_00080000 0x00080000 /* Available flag. */
232 #define B_00100000 0x00100000 /* Available flag. */
233 #define B_DIRTY 0x00200000 /* Needs writing later (in EXT2FS). */
234 #define B_RELBUF 0x00400000 /* Release VMIO buffer. */
235 #define B_00800000 0x00800000 /* Available flag. */
236 #define B_WRITEINPROG 0x01000000 /* Write in progress. */
237 #define B_02000000 0x02000000 /* Available flag. */
238 #define B_PAGING 0x04000000 /* volatile paging I/O -- bypass VMIO */
239 #define B_08000000 0x08000000 /* Available flag. */
240 #define B_RAM 0x10000000 /* Read ahead mark (flag) */
241 #define B_VMIO 0x20000000 /* VMIO flag */
242 #define B_CLUSTER 0x40000000 /* pagein op, so swap() can count it */
243 #define B_80000000 0x80000000 /* Available flag. */
244
245 #define PRINT_BUF_FLAGS "\2\40b31\37cluster\36vmio\35ram\34b27" \
246 "\33paging\32b25\31writeinprog\30b23\27relbuf\26dirty\25b20" \
247 "\24b19\23phys\22clusterok\21malloc\20nocache\17locked\16inval" \
248 "\15scanned\14nowdrain\13eintr\12done\11b8\10delwri\7validsuspwrt" \
249 "\6cache\5deferred\4direct\3async\2needcommit\1age"
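
/*
 * Illustrative sketch only (not part of this header): the B_CACHE protocol
 * described in the notes above, as a caller of getblk() would follow it.
 * If B_CACHE is clear, the caller clears B_INVAL and BIO_ERROR, marks the
 * request a read, and starts the I/O itself; this is essentially what
 * bread() does.  BIO_READ/BIO_ERROR come from <sys/bio.h>, and the
 * functions used here are declared later in this file.  Error handling is
 * omitted for brevity.
 */
static __inline struct buf *
example_read_block(struct vnode *vp, daddr_t lblkno, int size)
{
	struct buf *bp;

	bp = getblk(vp, lblkno, size, 0, 0);
	if ((bp->b_flags & B_CACHE) == 0) {	/* not fully valid: must read */
		bp->b_iocmd = BIO_READ;
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		vfs_busy_pages(bp, 0);
		BUF_STRATEGY(bp);
		(void)bufwait(bp);		/* sleep until the read completes */
	}
	return (bp);
}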
250
251 /*
252 * These flags are kept in b_xflags.
253 */
254 #define BX_VNDIRTY 0x00000001 /* On vnode dirty list */
255 #define BX_VNCLEAN 0x00000002 /* On vnode clean list */
256 #define BX_BKGRDWRITE 0x00000004 /* Do writes in background */
257 #define BX_BKGRDINPROG 0x00000008 /* Background write in progress */
258 #define BX_BKGRDWAIT 0x00000010 /* Background write waiting */
259 #define BX_BKGRDMARKER 0x00000020 /* Mark buffer for splay tree */
260 #define BX_ALTDATA 0x00000040 /* Holds extended data */
261
262 #define NOOFFSET (-1LL) /* No buffer offset calculated yet */
263
264 #ifdef _KERNEL
265 /*
266 * Buffer locking
267 */
268 extern struct mtx buftimelock; /* Interlock on setting prio and timo */
269 extern const char *buf_wmesg; /* Default buffer lock message */
270 #define BUF_WMESG "bufwait"
271 #include <sys/proc.h> /* XXX for curthread */
272 #include <sys/mutex.h>
273
274 /*
275 * Initialize a lock.
276 */
277 #define BUF_LOCKINIT(bp) \
278 lockinit(&(bp)->b_lock, PRIBIO + 4, buf_wmesg, 0, 0)
279 /*
280 *
281 * Get a lock, sleeping non-interruptibly until it becomes available.
282 */
283 static __inline int BUF_LOCK(struct buf *, int);
284 static __inline int
285 BUF_LOCK(struct buf *bp, int locktype)
286 {
287 int s, ret;
288
289 s = splbio();
290 mtx_lock(&buftimelock);
291 locktype |= LK_INTERLOCK;
292 bp->b_lock.lk_wmesg = buf_wmesg;
293 bp->b_lock.lk_prio = PRIBIO + 4;
294 ret = lockmgr(&(bp)->b_lock, locktype, &buftimelock, curthread);
295 splx(s);
296 return ret;
297 }
298 /*
299 * Get a lock, sleeping with the specified interruptibility and timeout.
300 */
301 static __inline int BUF_TIMELOCK(struct buf *, int, char *, int, int);
302 static __inline int
303 BUF_TIMELOCK(struct buf *bp, int locktype, char *wmesg, int catch, int timo)
304 {
305 int s, ret;
306
307 s = splbio();
308 mtx_lock(&buftimelock);
309 locktype |= LK_INTERLOCK | LK_TIMELOCK;
310 bp->b_lock.lk_wmesg = wmesg;
311 bp->b_lock.lk_prio = (PRIBIO + 4) | catch;
312 bp->b_lock.lk_timo = timo;
313 ret = lockmgr(&(bp)->b_lock, (locktype), &buftimelock, curthread);
314 splx(s);
315 return ret;
316 }
317 /*
318 * Release a lock. Only the acquiring process may release the lock unless
319 * it has been handed off to biodone.
320 */
321 static __inline void BUF_UNLOCK(struct buf *);
322 static __inline void
323 BUF_UNLOCK(struct buf *bp)
324 {
325 int s;
326
327 s = splbio();
328 lockmgr(&(bp)->b_lock, LK_RELEASE, NULL, curthread);
329 splx(s);
330 }
331
332 /*
333 * Free a buffer lock.
334 */
335 #define BUF_LOCKFREE(bp) \
336 do { \
337 if (BUF_REFCNT(bp) > 0) \
338 panic("free locked buf"); \
339 lockdestroy(&(bp)->b_lock); \
340 } while (0)
341
342 #ifdef _SYS_PROC_H_ /* Avoid #include <sys/proc.h> pollution */
343 /*
344 * When initiating asynchronous I/O, change ownership of the lock to the
345 * kernel. Once done, the lock may legally be released by biodone. The
346 * original owning process can no longer acquire it recursively, but must
347 * wait until the I/O is completed and the lock has been freed by biodone.
348 */
349 static __inline void BUF_KERNPROC(struct buf *);
350 static __inline void
351 BUF_KERNPROC(struct buf *bp)
352 {
353 struct thread *td = curthread;
354
355 if ((td != PCPU_GET(idlethread))
356 && bp->b_lock.lk_lockholder == td->td_proc->p_pid)
357 td->td_locks--;
358 bp->b_lock.lk_lockholder = LK_KERNPROC;
359 }
360 #endif
361 /*
362 * Find out the number of references to a lock.
363 */
364 static __inline int BUF_REFCNT(struct buf *);
365 static __inline int
366 BUF_REFCNT(struct buf *bp)
367 {
368 int s, ret;
369
370 s = splbio();
371 ret = lockcount(&(bp)->b_lock);
372 splx(s);
373 return ret;
374 }
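
/*
 * Illustrative sketch only (not part of this header): the usual buffer lock
 * discipline.  A synchronous user locks, works, and unlocks; an async
 * writer hands lock ownership to the kernel with BUF_KERNPROC() so that
 * biodone() may drop the lock when the I/O completes.  BIO_WRITE comes
 * from <sys/bio.h>.
 */
static __inline void
example_sync_use(struct buf *bp)
{
	(void)BUF_LOCK(bp, LK_EXCLUSIVE);	/* sleep until available */
	/* ... inspect or modify bp->b_data ... */
	BUF_UNLOCK(bp);
}

static __inline void
example_async_write(struct buf *bp)
{
	(void)BUF_LOCK(bp, LK_EXCLUSIVE);
	bp->b_iocmd = BIO_WRITE;
	BUF_KERNPROC(bp);	/* lock will now be released by biodone() */
	/* ... hand the buffer to the driver, e.g. via BUF_STRATEGY() ... */
}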
375
376 #endif /* _KERNEL */
377
378 struct buf_queue_head {
379 TAILQ_HEAD(buf_queue, buf) queue;
380 daddr_t last_pblkno;
381 struct buf *insert_point;
382 struct buf *switch_point;
383 };
384
385 /*
386 * This structure describes a clustered I/O. It is stored in the b_saveaddr
387 * field of the buffer on which I/O is done. At I/O completion, the cluster
388 * callback uses the structure to parcel the I/O out to the individual
389 * buffers, and then frees this structure.
390 */
391 struct cluster_save {
392 long bs_bcount; /* Saved b_bcount. */
393 long bs_bufsize; /* Saved b_bufsize. */
394 void *bs_saveaddr; /* Saved b_addr. */
395 int bs_nchildren; /* Number of associated buffers. */
396 struct buf **bs_children; /* List of associated buffers. */
397 };
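
/*
 * Illustrative sketch only, under stated assumptions: at completion the
 * cluster callback recovers the cluster_save from b_saveaddr, finishes each
 * constituent buffer, and frees the bookkeeping, roughly as below.  Error
 * propagation is omitted and the malloc type (M_SEGMENT) is an assumption;
 * see cluster_callback() in kern/vfs_cluster.c for the real code.
 */
static __inline void
example_cluster_done(struct buf *bp)
{
	struct cluster_save *cs = (struct cluster_save *)bp->b_saveaddr;
	int i;

	for (i = 0; i < cs->bs_nchildren; i++)
		bufdone(cs->bs_children[i]);	/* complete each child buffer */
	free(cs->bs_children, M_SEGMENT);
	free(cs, M_SEGMENT);
}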
398
399 #ifdef _KERNEL
400
401 #define BUF_WRITE(bp) \
402 (bp)->b_op->bop_write(bp)
403
404 #define BUF_STRATEGY(bp) VOP_STRATEGY((bp)->b_vp, (bp))
405
406 static __inline void
407 buf_start(struct buf *bp)
408 {
409 if (bioops.io_start)
410 (*bioops.io_start)(bp);
411 }
412
413 static __inline void
414 buf_complete(struct buf *bp)
415 {
416 if (bioops.io_complete)
417 (*bioops.io_complete)(bp);
418 }
419
420 static __inline void
421 buf_deallocate(struct buf *bp)
422 {
423 if (bioops.io_deallocate)
424 (*bioops.io_deallocate)(bp);
425 BUF_LOCKFREE(bp);
426 }
427
428 static __inline void
429 buf_movedeps(struct buf *bp, struct buf *bp2)
430 {
431 if (bioops.io_movedeps)
432 (*bioops.io_movedeps)(bp, bp2);
433 }
434
435 static __inline int
436 buf_countdeps(struct buf *bp, int i)
437 {
438 if (bioops.io_countdeps)
439 return ((*bioops.io_countdeps)(bp, i));
440 else
441 return (0);
442 }
443
444 #endif /* _KERNEL */
445
446 /*
447 * Zero out the buffer's data area.
448 */
449 #define clrbuf(bp) { \
450 bzero((bp)->b_data, (u_int)(bp)->b_bcount); \
451 (bp)->b_resid = 0; \
452 }
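
/*
 * Illustrative sketch only (not part of this header): when a filesystem
 * allocates a brand-new block, the buffer returned by getblk() holds no
 * valid data unless B_CACHE is set, so it is cleared with clrbuf() before
 * use.  "lblkno" and "size" are hypothetical arguments.
 */
static __inline struct buf *
example_new_zeroed_block(struct vnode *vp, daddr_t lblkno, int size)
{
	struct buf *bp;

	bp = getblk(vp, lblkno, size, 0, 0);	/* declared below */
	if ((bp->b_flags & B_CACHE) == 0)
		clrbuf(bp);			/* zero b_data, clear b_resid */
	return (bp);
}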
453
454 #ifdef _KERNEL
455 extern int nbuf; /* The number of buffer headers */
456 extern int maxswzone; /* Max KVA for swap structures */
457 extern int maxbcache; /* Max KVA for buffer cache */
458 extern int runningbufspace;
459 extern int buf_maxio; /* nominal maximum I/O for buffer */
460 extern struct buf *buf; /* The buffer headers. */
461 extern char *buffers; /* The buffer contents. */
462 extern int bufpages; /* Number of memory pages in the buffer pool. */
463 extern struct buf *swbuf; /* Swap I/O buffer headers. */
464 extern int nswbuf; /* Number of swap I/O buffer headers. */
465
466 struct uio;
467
468 caddr_t kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est);
469 void bufinit(void);
470 void bwillwrite(void);
471 int buf_dirty_count_severe(void);
472 void bremfree(struct buf *);
473 int bread(struct vnode *, daddr_t, int, struct ucred *, struct buf **);
474 int breadn(struct vnode *, daddr_t, int, daddr_t *, int *, int,
475 struct ucred *, struct buf **);
476 int bwrite(struct buf *);
477 void bdwrite(struct buf *);
478 void bawrite(struct buf *);
479 void bdirty(struct buf *);
480 void bundirty(struct buf *);
481 void brelse(struct buf *);
482 void bqrelse(struct buf *);
483 int vfs_bio_awrite(struct buf *);
484 struct buf * getpbuf(int *);
485 struct buf *incore(struct vnode *, daddr_t);
486 struct buf *gbincore(struct vnode *, daddr_t);
487 int inmem(struct vnode *, daddr_t);
488 struct buf *getblk(struct vnode *, daddr_t, int, int, int);
489 struct buf *geteblk(int);
490 int bufwait(struct buf *);
491 void bufdone(struct buf *);
492 void bufdonebio(struct bio *);
493
494 void cluster_callback(struct buf *);
495 int cluster_read(struct vnode *, u_quad_t, daddr_t, long,
496 struct ucred *, long, int, struct buf **);
497 int cluster_wbuild(struct vnode *, long, daddr_t, int);
498 void cluster_write(struct buf *, u_quad_t, int);
499 void vfs_bio_set_validclean(struct buf *, int base, int size);
500 void vfs_bio_clrbuf(struct buf *);
501 void vfs_busy_pages(struct buf *, int clear_modify);
502 void vfs_unbusy_pages(struct buf *);
503 void vwakeup(struct buf *);
504 void vmapbuf(struct buf *);
505 void vunmapbuf(struct buf *);
506 void relpbuf(struct buf *, int *);
507 void brelvp(struct buf *);
508 void bgetvp(struct vnode *, struct buf *);
509 void pbgetvp(struct vnode *, struct buf *);
510 void pbrelvp(struct buf *);
511 int allocbuf(struct buf *bp, int size);
512 void reassignbuf(struct buf *, struct vnode *);
513 struct buf *trypbuf(int *);
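
/*
 * Illustrative sketch only (not part of this header): the classic
 * read-modify-write cycle built on the interfaces declared above.  NOCRED
 * comes from <sys/ucred.h>; "lblkno" and "size" are hypothetical.
 */
static __inline int
example_read_modify_write(struct vnode *vp, daddr_t lblkno, int size)
{
	struct buf *bp;
	int error;

	error = bread(vp, lblkno, size, NOCRED, &bp);
	if (error != 0) {
		brelse(bp);	/* even on error the buffer must be released */
		return (error);
	}
	/* ... modify bp->b_data here ... */
	bdwrite(bp);		/* delayed write; use bwrite() to write now */
	return (0);
}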
514
515 #endif /* _KERNEL */
516
517 #endif /* !_SYS_BUF_H_ */