FreeBSD/Linux Kernel Cross Reference
sys/sys/buf_ring.h
/*-
 * Copyright (c) 2007-2009 Kip Macy <kmacy@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: releng/8.4/sys/sys/buf_ring.h 209061 2010-06-11 19:17:36Z ken $
 *
 */

#ifndef _SYS_BUF_RING_H_
#define _SYS_BUF_RING_H_

#include <machine/cpu.h>

#if defined(INVARIANTS) && !defined(DEBUG_BUFRING)
#define DEBUG_BUFRING 1
#endif

#ifdef DEBUG_BUFRING
#include <sys/lock.h>
#include <sys/mutex.h>
#endif

struct buf_ring {
        /* Fields updated by producers (enqueue side). */
        volatile uint32_t       br_prod_head;
        volatile uint32_t       br_prod_tail;
        int                     br_prod_size;
        int                     br_prod_mask;
        uint64_t                br_drops;
        uint64_t                br_prod_bufs;
        uint64_t                br_prod_bytes;
        /*
         * Pad out to next L2 cache line
         */
        uint64_t                _pad0[11];

        /* Fields updated by consumers (dequeue side). */
        volatile uint32_t       br_cons_head;
        volatile uint32_t       br_cons_tail;
        int                     br_cons_size;
        int                     br_cons_mask;

        /*
         * Pad out to next L2 cache line
         */
        uint64_t                _pad1[14];
#ifdef DEBUG_BUFRING
        struct mtx              *br_lock;
#endif
        void                    *br_ring[0];
};

/*
 * Multi-producer safe lock-free ring buffer enqueue.
 *
 * A slot is reserved by advancing br_prod_head with a CAS; the buffer
 * is then published by advancing br_prod_tail once all earlier
 * enqueues have completed.
 */
static __inline int
buf_ring_enqueue_bytes(struct buf_ring *br, void *buf, int nbytes)
{
        uint32_t prod_head, prod_next;
        uint32_t cons_tail;
        int success;
#ifdef DEBUG_BUFRING
        int i;
        for (i = br->br_cons_head; i != br->br_prod_head;
             i = ((i + 1) & br->br_cons_mask))
                if (br->br_ring[i] == buf)
                        panic("buf=%p already enqueued at %d prod=%d cons=%d",
                            buf, i, br->br_prod_tail, br->br_cons_tail);
#endif
        critical_enter();
        do {
                prod_head = br->br_prod_head;
                cons_tail = br->br_cons_tail;

                prod_next = (prod_head + 1) & br->br_prod_mask;

                if (prod_next == cons_tail) {
                        critical_exit();
                        return (ENOBUFS);
                }

                success = atomic_cmpset_int(&br->br_prod_head, prod_head,
                    prod_next);
        } while (success == 0);
#ifdef DEBUG_BUFRING
        if (br->br_ring[prod_head] != NULL)
                panic("dangling value in enqueue");
#endif
        br->br_ring[prod_head] = buf;
        wmb();

        /*
         * If there are other enqueues in progress
         * that preceded us, we need to wait for them
         * to complete.
         */
        while (br->br_prod_tail != prod_head)
                cpu_spinwait();
        br->br_prod_bufs++;
        br->br_prod_bytes += nbytes;
        br->br_prod_tail = prod_next;
        critical_exit();
        return (0);
}

static __inline int
buf_ring_enqueue(struct buf_ring *br, void *buf)
{

        return (buf_ring_enqueue_bytes(br, buf, 0));
}

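/*
 * Illustrative sketch (not part of the original header): one way a
 * driver's transmit path might hand an mbuf to the ring from any
 * context.  The softc layout and the my_transmit() name below are
 * hypothetical; only buf_ring_enqueue() comes from this file.  Note
 * that on ENOBUFS the buffer was not stored, so the caller still owns
 * it and must free it (or otherwise account for the drop).
 *
 *      struct my_softc {
 *              struct buf_ring *sc_br;
 *              struct mtx      sc_tx_mtx;
 *      };
 *
 *      static int
 *      my_transmit(struct my_softc *sc, struct mbuf *m)
 *      {
 *              int error;
 *
 *              error = buf_ring_enqueue(sc->sc_br, m);
 *              if (error == ENOBUFS)
 *                      m_freem(m);
 *              return (error);
 *      }
 */
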
/*
 * Multi-consumer safe dequeue.
 */
static __inline void *
buf_ring_dequeue_mc(struct buf_ring *br)
{
        uint32_t cons_head, cons_next;
        uint32_t prod_tail;
        void *buf;
        int success;

        critical_enter();
        do {
                cons_head = br->br_cons_head;
                prod_tail = br->br_prod_tail;

                cons_next = (cons_head + 1) & br->br_cons_mask;

                if (cons_head == prod_tail) {
                        critical_exit();
                        return (NULL);
                }

                success = atomic_cmpset_int(&br->br_cons_head, cons_head,
                    cons_next);
        } while (success == 0);

        buf = br->br_ring[cons_head];
#ifdef DEBUG_BUFRING
        br->br_ring[cons_head] = NULL;
#endif
        rmb();

        /*
         * If there are other dequeues in progress
         * that preceded us, we need to wait for them
         * to complete.
         */
        while (br->br_cons_tail != cons_head)
                cpu_spinwait();

        br->br_cons_tail = cons_next;
        critical_exit();

        return (buf);
}

/*
 * Single-consumer dequeue.
 * Use where the dequeue is protected by a lock,
 * e.g. a network driver's tx queue lock.
 */
static __inline void *
buf_ring_dequeue_sc(struct buf_ring *br)
{
        uint32_t cons_head, cons_next, cons_next_next;
        uint32_t prod_tail;
        void *buf;

        cons_head = br->br_cons_head;
        prod_tail = br->br_prod_tail;

        cons_next = (cons_head + 1) & br->br_cons_mask;
        cons_next_next = (cons_head + 2) & br->br_cons_mask;

        if (cons_head == prod_tail)
                return (NULL);

#ifdef PREFETCH_DEFINED
        if (cons_next != prod_tail) {
                prefetch(br->br_ring[cons_next]);
                if (cons_next_next != prod_tail)
                        prefetch(br->br_ring[cons_next_next]);
        }
#endif
        br->br_cons_head = cons_next;
        buf = br->br_ring[cons_head];

#ifdef DEBUG_BUFRING
        br->br_ring[cons_head] = NULL;
        if (!mtx_owned(br->br_lock))
                panic("lock not held on single consumer dequeue");
        if (br->br_cons_tail != cons_head)
                panic("inconsistent list cons_tail=%d cons_head=%d",
                    br->br_cons_tail, cons_head);
#endif
        br->br_cons_tail = cons_next;
        return (buf);
}

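/*
 * Illustrative sketch (not part of the original header): the intended
 * single-consumer pattern is to drain the ring while holding the same
 * lock that was passed to buf_ring_alloc(), as in a driver's start
 * routine.  The softc fields and the my_start_locked()/my_encap()
 * names below are hypothetical.
 *
 *      static void
 *      my_start_locked(struct my_softc *sc)
 *      {
 *              struct mbuf *m;
 *
 *              mtx_assert(&sc->sc_tx_mtx, MA_OWNED);
 *              while ((m = buf_ring_dequeue_sc(sc->sc_br)) != NULL)
 *                      my_encap(sc, m);
 *      }
 */
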
/*
 * Return a pointer to the first entry in the ring
 * without modifying it, or NULL if the ring is empty.
 * Race-prone if not protected by a lock.
 */
static __inline void *
buf_ring_peek(struct buf_ring *br)
{

#ifdef DEBUG_BUFRING
        if ((br->br_lock != NULL) && !mtx_owned(br->br_lock))
                panic("lock not held on single consumer dequeue");
#endif
        /*
         * I believe it is safe to not have a memory barrier here
         * because we control cons, and tail is at worst a lagging
         * indicator, so in the worst case we might return NULL
         * immediately after a buffer has been enqueued.
         */
        if (br->br_cons_head == br->br_prod_tail)
                return (NULL);

        return (br->br_ring[br->br_cons_head]);
}

static __inline int
buf_ring_full(struct buf_ring *br)
{

        return (((br->br_prod_head + 1) & br->br_prod_mask) == br->br_cons_tail);
}

static __inline int
buf_ring_empty(struct buf_ring *br)
{

        return (br->br_cons_head == br->br_prod_tail);
}

static __inline int
buf_ring_count(struct buf_ring *br)
{

        return ((br->br_prod_size + br->br_prod_tail - br->br_cons_tail)
            & br->br_prod_mask);
}

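/*
 * Editorial note (not in the original header): because the ring is
 * considered full when the producer's next slot would collide with the
 * consumer tail, a ring allocated with count slots holds at most
 * count - 1 buffers.  For example, with count = 8 (mask 7), after
 * enqueuing 7 buffers buf_ring_full() is true and buf_ring_count()
 * returns (8 + 7 - 0) & 7 = 7.
 */
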
struct buf_ring *buf_ring_alloc(int count, struct malloc_type *type, int flags,
    struct mtx *);
void buf_ring_free(struct buf_ring *br, struct malloc_type *type);

#endif