/*
 * FreeBSD/Linux Kernel Cross Reference — sys/sys/buf.h
 */
1 /*
2 * Copyright (c) 1982, 1986, 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * @(#)buf.h 8.9 (Berkeley) 3/30/95
39 * $FreeBSD$
40 */
41
42 #ifndef _SYS_BUF_H_
43 #define _SYS_BUF_H_
44
45 #include <sys/queue.h>
46
47 struct buf;
48 struct mount;
49 struct vnode;
50
51 /*
52 * To avoid including <ufs/ffs/softdep.h>
53 */
54 LIST_HEAD(workhead, worklist);
55 /*
56 * These are currently used only by the soft dependency code, hence
57 * are stored once in a global variable. If other subsystems wanted
58 * to use these hooks, a pointer to a set of bio_ops could be added
59 * to each buffer.
60 */
/*
 * Soft-updates hook table (see comment above); a single global instance,
 * bioops, is consulted by the buffer I/O paths.  Member roles below are
 * inferred from their names — confirm against the softdep code.
 */
extern struct bio_ops {
	void	(*io_start) __P((struct buf *));	/* I/O is starting on a buffer. */
	void	(*io_complete) __P((struct buf *));	/* I/O on a buffer has completed. */
	void	(*io_deallocate) __P((struct buf *));	/* Buffer is being torn down. */
	int	(*io_fsync) __P((struct vnode *));	/* Flush dependencies for a vnode. */
	int	(*io_sync) __P((struct mount *));	/* Flush dependencies for a mount. */
} bioops;
68
/*
 * Saved state for nesting b_iodone handlers: an interposing completion
 * handler records the previous callback (plus flags and the previous
 * chain link) here so it can restore and invoke it when done.
 */
struct iodone_chain {
	long	ic_prev_flags;		/* Saved b_flags of the buffer. */
	void	(*ic_prev_iodone) __P((struct buf *)); /* Previous completion callback. */
	void	*ic_prev_iodone_chain;	/* Previous chain link (next level down). */
	struct {
		long	ia_long;	/* Handler-private scratch: integer arg. */
		void	*ia_ptr;	/* Handler-private scratch: pointer arg. */
	} ic_args[5];
};
78
/*
 * The buffer header describes an I/O operation in the kernel.
 *
 * NOTE(review): field layout is relied on by drivers and filesystems;
 * do not reorder or resize members.
 */
struct buf {
	LIST_ENTRY(buf) b_hash;		/* Hash chain. */
	TAILQ_ENTRY(buf) b_vnbufs;	/* Buffer's associated vnode. */
	TAILQ_ENTRY(buf) b_freelist;	/* Free list position if not active. */
	TAILQ_ENTRY(buf) b_act;		/* Device driver queue when active. *new* */
	struct	proc *b_proc;		/* Associated proc; NULL if kernel. */
	long	b_flags;		/* B_* flags. */
	unsigned short b_qindex;	/* buffer queue index (QUEUE_*) */
	unsigned char b_usecount;	/* buffer use count */
	unsigned char b_xflags;		/* extra flags (B_VNDIRTY/B_VNCLEAN) */
	int	b_error;		/* Errno value. */
	long	b_bufsize;		/* Allocated buffer size. */
	long	b_bcount;		/* Valid bytes in buffer. */
	long	b_resid;		/* Remaining I/O. */
	dev_t	b_dev;			/* Device associated with buffer. */
	caddr_t	b_data;			/* Memory, superblocks, indirect etc. */
	caddr_t	b_kvabase;		/* base kva for buffer */
	int	b_kvasize;		/* size of kva for buffer */
	daddr_t	b_lblkno;		/* Logical block number. */
	daddr_t	b_blkno;		/* Underlying physical block number. */
	off_t	b_offset;		/* Offset into file */
					/* Function to call upon completion. */
	void	(*b_iodone) __P((struct buf *));
					/* For nested b_iodone's. */
	struct	iodone_chain *b_iodone_chain;
	struct	vnode *b_vp;		/* Device vnode. */
	int	b_dirtyoff;		/* Offset in buffer of dirty region. */
	int	b_dirtyend;		/* Offset of end of dirty region. */
	struct	ucred *b_rcred;		/* Read credentials reference. */
	struct	ucred *b_wcred;		/* Write credentials reference. */
	int	b_validoff;		/* Offset in buffer of valid region. */
	int	b_validend;		/* Offset of end of valid region. */
	daddr_t	b_pblkno;		/* physical block number */
	void	*b_saveaddr;		/* Original b_addr for physio. */
	caddr_t	b_savekva;		/* saved kva for transfer while bouncing */
	void	*b_driver1;		/* for private use by the driver */
	void	*b_driver2;		/* for private use by the driver */
	void	*b_spc;			/* NOTE(review): purpose not evident from this file — confirm before use. */
	union	cluster_info {
		TAILQ_HEAD(cluster_list_head, buf) cluster_head; /* Head: buffers in this cluster. */
		TAILQ_ENTRY(buf) cluster_entry;	/* Member: linkage within a cluster. */
	} b_cluster;			/* Clustered-I/O linkage (head or member). */
	struct	vm_page *b_pages[btoc(MAXPHYS)]; /* Pages backing b_data; sized for MAXPHYS bytes. */
	int	b_npages;		/* Number of valid entries in b_pages. */
	struct	workhead b_dep;		/* List of filesystem dependencies. */
};
128
/*
 * These flags are kept in b_flags.
 */
#define	B_AGE		0x00000001	/* Move to age queue when I/O done. */
#define	B_NEEDCOMMIT	0x00000002	/* Append-write in progress. */
#define	B_ASYNC		0x00000004	/* Start I/O, do not wait. */
#define	B_BAD		0x00000008	/* Bad block revectoring in progress. */
#define	B_BUSY		0x00000010	/* I/O in progress. */
#define	B_CACHE		0x00000020	/* Bread found us in the cache. */
#define	B_CALL		0x00000040	/* Call b_iodone from biodone. */
#define	B_DELWRI	0x00000080	/* Delay I/O until buffer reused. */
#define	B_FREEBUF	0x00000100	/* Instruct driver: free blocks */
#define	B_DONE		0x00000200	/* I/O completed. */
#define	B_EINTR		0x00000400	/* I/O was interrupted */
#define	B_ERROR		0x00000800	/* I/O error occurred. */
#define	B_SCANNED	0x00001000	/* VOP_FSYNC funcs mark written bufs */
#define	B_INVAL		0x00002000	/* Does not contain valid info. */
#define	B_LOCKED	0x00004000	/* Locked in core (not reusable). */
#define	B_NOCACHE	0x00008000	/* Do not cache block after use. */
#define	B_MALLOC	0x00010000	/* malloced b_data */
/*
 * NOTE(review): the B_CLUSTEROK comment below appears copy-pasted from
 * B_CLUSTER; the name suggests "clustering is permitted on this buffer" —
 * confirm against vfs_cluster.c before relying on the comment text.
 */
#define	B_CLUSTEROK	0x00020000	/* Pagein op, so swap() can count it. */
#define	B_PHYS		0x00040000	/* I/O to user memory. */
#define	B_RAW		0x00080000	/* Set by physio for raw transfers. */
#define	B_READ		0x00100000	/* Read buffer. */
#define	B_DIRTY		0x00200000	/* Needs writing later. */
#define	B_RELBUF	0x00400000	/* Release VMIO buffer. */
#define	B_WANTED	0x00800000	/* Process wants this buffer. */
#define	B_WRITE		0x00000000	/* Write buffer (pseudo flag). */
#define	B_WRITEINPROG	0x01000000	/* Write in progress. */
#define	B_XXX		0x02000000	/* Debugging flag. */
#define	B_PAGING	0x04000000	/* volatile paging I/O -- bypass VMIO */
#define	B_ORDERED	0x08000000	/* Must guarantee I/O ordering */
#define	B_RAM		0x10000000	/* Read ahead mark (flag) */
#define	B_VMIO		0x20000000	/* VMIO flag */
#define	B_CLUSTER	0x40000000	/* pagein op, so swap() can count it */
#define	B_AVAIL1	0x80000000	/* Available flag */
165
/*
 * Flag names for %b-style formatting of b_flags: the leading byte is the
 * numeric base and each subsequent "\<n>name" labels bit <n> (1-origin,
 * octal escape).  Bit \15 corresponds to 0x00001000, which is B_SCANNED
 * above — the old "avail2" label predated that flag and was stale.
 */
#define	PRINT_BUF_FLAGS "\2\40avail1\37cluster\36vmio\35ram\34ordered" \
	"\33paging\32xxx\31writeinprog\30wanted\27relbuf\26dirty" \
	"\25read\24raw\23phys\22clusterok\21malloc\20nocache" \
	"\17locked\16inval\15scanned\14error\13eintr\12done\11freebuf" \
	"\10delwri\7call\6cache\5busy\4bad\3async\2needcommit\1age"
171
/*
 * These flags are kept in b_xflags.
 */
#define	B_VNDIRTY	0x01	/* On vnode dirty list */
#define	B_VNCLEAN	0x02	/* On vnode clean list */

/* Sentinel for b_offset: no byte offset has been computed yet. */
#define	NOOFFSET	(-1LL)	/* No buffer offset calculated yet */
179
/*
 * A device's queue of active buffers plus the bookkeeping the bufq
 * routines below maintain for B_ORDERED barriers and disksort.
 */
struct buf_queue_head {
	TAILQ_HEAD(buf_queue, buf) queue;	/* The queued buffers (b_act linkage). */
	daddr_t	last_pblkno;		/* b_pblkno of the last buffer dequeued from the head. */
	struct	buf *insert_point;	/* Most recent B_ORDERED barrier, or NULL. */
	struct	buf *switch_point;	/* NOTE(review): disksort direction-switch marker — confirm vs bufqdisksort. */
};
186
/*
 * This structure describes a clustered I/O.  It is stored in the b_saveaddr
 * field of the buffer on which I/O is done.  At I/O completion, cluster
 * callback uses the structure to parcel I/O's to individual buffers, and
 * then frees this structure.
 */
struct cluster_save {
	long	bs_bcount;		/* Saved b_bcount. */
	long	bs_bufsize;		/* Saved b_bufsize. */
	void	*bs_saveaddr;		/* Saved b_addr. */
	int	bs_nchildren;		/* Number of associated buffers. */
	struct	buf **bs_children;	/* List of associated buffers. */
};
200
/* Inline device-queue (bufq) primitives; definitions follow below. */
static __inline void bufq_init __P((struct buf_queue_head *head));

static __inline void bufq_insert_tail __P((struct buf_queue_head *head,
					   struct buf *bp));

static __inline void bufq_remove __P((struct buf_queue_head *head,
				      struct buf *bp));

static __inline struct buf *bufq_first __P((struct buf_queue_head *head));
210
211 static __inline void
212 bufq_init(struct buf_queue_head *head)
213 {
214 TAILQ_INIT(&head->queue);
215 head->last_pblkno = 0;
216 head->insert_point = NULL;
217 head->switch_point = NULL;
218 }
219
220 static __inline void
221 bufq_insert_tail(struct buf_queue_head *head, struct buf *bp)
222 {
223 if ((bp->b_flags & B_ORDERED) != 0) {
224 head->insert_point = bp;
225 head->switch_point = NULL;
226 }
227 TAILQ_INSERT_TAIL(&head->queue, bp, b_act);
228 }
229
/*
 * Unlink a buffer from the device queue, fixing up the queue's
 * auxiliary pointers first.  Order matters: TAILQ_NEXT/TAILQ_PREV
 * must be evaluated while bp is still linked on the queue.
 */
static __inline void
bufq_remove(struct buf_queue_head *head, struct buf *bp)
{
	if (bp == head->switch_point)
		/* Advance the switch point past the departing buffer. */
		head->switch_point = TAILQ_NEXT(bp, b_act);
	if (bp == head->insert_point) {
		/* The B_ORDERED barrier is leaving; back it up one entry. */
		head->insert_point = TAILQ_PREV(bp, buf_queue, b_act);
		if (head->insert_point == NULL)
			head->last_pblkno = 0;
	} else if (bp == TAILQ_FIRST(&head->queue))
		/* Dequeuing the head: remember its physical block for sorting. */
		head->last_pblkno = bp->b_pblkno;
	TAILQ_REMOVE(&head->queue, bp, b_act);
	if (TAILQ_FIRST(&head->queue) == head->switch_point)
		/* Switch point reached the new head; it is no longer needed. */
		head->switch_point = NULL;
}
245
246 static __inline struct buf *
247 bufq_first(struct buf_queue_head *head)
248 {
249 return (TAILQ_FIRST(&head->queue));
250 }
251
252
/*
 * number of buffer hash entries
 */
#define BUFHSZ 512

/*
 * buffer hash table calculation, originally by David Greenman:
 * mixes the vnode pointer (shifted past low alignment bits) with the
 * block number, modulo the table size.
 */
#define BUFHASH(vnp, bn)        \
	(&bufhashtbl[(((uintptr_t)(vnp) >> 7)+(int)(bn)) % BUFHSZ])
263
/*
 * Definitions for the buffer free lists.
 * A buffer's current queue is recorded in its b_qindex field.
 */
#define BUFFER_QUEUES	6	/* number of free buffer queues */

#define QUEUE_NONE	0	/* on no queue */
#define QUEUE_LOCKED	1	/* locked buffers */
#define QUEUE_LRU	2	/* useful buffers */
#define QUEUE_VMIO	3	/* VMIO buffers */
#define QUEUE_AGE	4	/* not-useful buffers */
#define QUEUE_EMPTY	5	/* empty buffer headers */
275
276 /*
277 * Zero out the buffer's data area.
278 */
279 #define clrbuf(bp) { \
280 bzero((bp)->b_data, (u_int)(bp)->b_bcount); \
281 (bp)->b_resid = 0; \
282 }
283
/* Flags to low-level allocation routines (e.g. balloc). */
#define	B_CLRBUF	0x01	/* Request allocated buffer be cleared. */
#define	B_SYNC		0x02	/* Do all allocations synchronously. */
287
#ifdef KERNEL
/* Global buffer-cache state (defined elsewhere in the kernel). */
extern int	nbuf;			/* The number of buffer headers */
extern struct	buf *buf;		/* The buffer headers. */
extern char	*buffers;		/* The buffer contents. */
extern int	bufpages;		/* Number of memory pages in the buffer pool. */
extern struct	buf *swbuf;		/* Swap I/O buffer headers. */
extern int	nswbuf;			/* Number of swap I/O buffer headers. */
extern int	needsbuffer, numdirtybuffers;
extern TAILQ_HEAD(swqueue, buf) bswlist;
extern TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES];

struct	uio;

/* Buffer-cache lifecycle: lookup, read, write, release. */
void	bufinit __P((void));
void	bremfree __P((struct buf *));
int	bread __P((struct vnode *, daddr_t, int,
	    struct ucred *, struct buf **));
int	breadn __P((struct vnode *, daddr_t, int, daddr_t *, int *, int,
	    struct ucred *, struct buf **));
int	bwrite __P((struct buf *));
void	bdwrite __P((struct buf *));
void	bawrite __P((struct buf *));
void	bdirty __P((struct buf *));
int	bowrite __P((struct buf *));
void	brelse __P((struct buf *));
void	bqrelse __P((struct buf *));
int	vfs_bio_awrite __P((struct buf *));
struct buf *	getpbuf __P((void));
struct buf *incore __P((struct vnode *, daddr_t));
struct buf *gbincore __P((struct vnode *, daddr_t));
int	inmem __P((struct vnode *, daddr_t));
struct buf *getblk __P((struct vnode *, daddr_t, int, int, int));
struct buf *geteblk __P((int));
int	allocbuf __P((struct buf *, int));
int	biowait __P((struct buf *));
void	biodone __P((struct buf *));

/* Clustered and physical (raw) I/O helpers. */
void	cluster_callback __P((struct buf *));
int	cluster_read __P((struct vnode *, u_quad_t, daddr_t, long,
	    struct ucred *, long, int, struct buf **));
int	cluster_wbuild __P((struct vnode *, long, daddr_t, int));
void	cluster_write __P((struct buf *, u_quad_t));
int	physio __P((void (*)(struct buf *), struct buf *, dev_t,
	    int, u_int (*)(struct buf *), struct uio *));
u_int	minphys __P((struct buf *));
void	vfs_bio_clrbuf __P((struct buf *));
void	vfs_busy_pages __P((struct buf *, int clear_modify));
void	vfs_unbusy_pages __P((struct buf *));
void	vwakeup __P((struct buf *));
void	vmapbuf __P((struct buf *));
void	vunmapbuf __P((struct buf *));
void	relpbuf __P((struct buf *));
/* Buffer/vnode association management. */
void	brelvp __P((struct buf *));
void	bgetvp __P((struct vnode *, struct buf *));
void	pbgetvp __P((struct vnode *, struct buf *));
void	pbrelvp __P((struct buf *));
void	reassignbuf __P((struct buf *, struct vnode *));
struct	buf *trypbuf __P((void));
void	vfs_bio_need_satisfy __P((void));
#endif /* KERNEL */
348
349 #endif /* !_SYS_BUF_H_ */
/* Cache object: 408040772e376540554b22dbac8293a9 (cross-reference site artifact) */