sys/sys/buf_ring.h
/*-
 * Copyright (c) 2007-2009 Kip Macy <kmacy@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: releng/10.1/sys/sys/buf_ring.h 246482 2013-02-07 15:20:54Z rrs $
 *
 */

#ifndef _SYS_BUF_RING_H_
#define	_SYS_BUF_RING_H_

#include <machine/cpu.h>

#if defined(INVARIANTS) && !defined(DEBUG_BUFRING)
#define DEBUG_BUFRING 1
#endif

#ifdef DEBUG_BUFRING
#include <sys/lock.h>
#include <sys/mutex.h>
#endif

/*
 * One ring slot is always left unused, so a ring of size N holds at most
 * N - 1 buffers.  The size is expected to be a power of two so that
 * br_prod_mask and br_cons_mask (size - 1) can wrap the indices.
 * Producer and consumer state are kept on separate cache lines to avoid
 * false sharing between enqueueing and dequeueing CPUs.
 */
struct buf_ring {
	volatile uint32_t	br_prod_head;
	volatile uint32_t	br_prod_tail;
	int			br_prod_size;
	int			br_prod_mask;
	uint64_t		br_drops;
	volatile uint32_t	br_cons_head __aligned(CACHE_LINE_SIZE);
	volatile uint32_t	br_cons_tail;
	int			br_cons_size;
	int			br_cons_mask;
#ifdef DEBUG_BUFRING
	struct mtx		*br_lock;
#endif
	void			*br_ring[0] __aligned(CACHE_LINE_SIZE);
};

/*
 * multi-producer safe lock-free ring buffer enqueue
 */
static __inline int
buf_ring_enqueue(struct buf_ring *br, void *buf)
{
	uint32_t prod_head, prod_next;
	uint32_t cons_tail;
#ifdef DEBUG_BUFRING
	int i;
	for (i = br->br_cons_head; i != br->br_prod_head;
	     i = ((i + 1) & br->br_cons_mask))
		if (br->br_ring[i] == buf)
			panic("buf=%p already enqueued at %d prod=%d cons=%d",
			    buf, i, br->br_prod_tail, br->br_cons_tail);
#endif
	critical_enter();
	do {
		prod_head = br->br_prod_head;
		cons_tail = br->br_cons_tail;

		prod_next = (prod_head + 1) & br->br_prod_mask;

		if (prod_next == cons_tail) {
			br->br_drops++;
			critical_exit();
			return (ENOBUFS);
		}
	} while (!atomic_cmpset_int(&br->br_prod_head, prod_head, prod_next));
#ifdef DEBUG_BUFRING
	if (br->br_ring[prod_head] != NULL)
		panic("dangling value in enqueue");
#endif
	br->br_ring[prod_head] = buf;

	/*
	 * The full memory barrier also prevents the store to br_prod_tail
	 * from being reordered before the store to br_ring[prod_head] is
	 * complete.
	 */
	mb();

	/*
	 * If there are other enqueues in progress
	 * that preceded us, we need to wait for them
	 * to complete.
	 */
	while (br->br_prod_tail != prod_head)
		cpu_spinwait();
	br->br_prod_tail = prod_next;
	critical_exit();
	return (0);
}
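
#if 0
/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * driver transmit path enqueueing an mbuf.  Any number of threads may
 * call this concurrently; only the consumer side needs serialization.
 * "struct example_softc" and its sc_br field are assumed names.
 */
static int
example_transmit(struct example_softc *sc, struct mbuf *m)
{
	int error;

	error = buf_ring_enqueue(sc->sc_br, m);
	if (error == ENOBUFS) {
		/* Ring full: the drop was already counted in br_drops. */
		m_freem(m);
	}
	return (error);
}
#endif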

/*
 * multi-consumer safe dequeue
 */
static __inline void *
buf_ring_dequeue_mc(struct buf_ring *br)
{
	uint32_t cons_head, cons_next;
	uint32_t prod_tail;
	void *buf;
	int success;

	critical_enter();
	do {
		cons_head = br->br_cons_head;
		prod_tail = br->br_prod_tail;

		cons_next = (cons_head + 1) & br->br_cons_mask;

		if (cons_head == prod_tail) {
			critical_exit();
			return (NULL);
		}

		success = atomic_cmpset_int(&br->br_cons_head, cons_head,
		    cons_next);
	} while (success == 0);

	buf = br->br_ring[cons_head];
#ifdef DEBUG_BUFRING
	br->br_ring[cons_head] = NULL;
#endif

	/*
	 * The full memory barrier also prevents the load of
	 * br_ring[cons_head] from being reordered after br_cons_tail
	 * is updated.
	 */
	mb();

	/*
	 * If there are other dequeues in progress
	 * that preceded us, we need to wait for them
	 * to complete.
	 */
	while (br->br_cons_tail != cons_head)
		cpu_spinwait();

	br->br_cons_tail = cons_next;
	critical_exit();

	return (buf);
}
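
#if 0
/*
 * Illustrative sketch, not part of the original header: several worker
 * threads can drain the same ring with the multi-consumer variant; no
 * external lock is needed because br_cons_head is claimed with an
 * atomic cmpset.  The m_freem() call stands in for real consumption.
 */
static void
example_drain_mc(struct buf_ring *br)
{
	struct mbuf *m;

	while ((m = buf_ring_dequeue_mc(br)) != NULL)
		m_freem(m);
}
#endif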

/*
 * single-consumer dequeue
 * use where dequeue is protected by a lock
 * e.g. a network driver's tx queue lock
 */
static __inline void *
buf_ring_dequeue_sc(struct buf_ring *br)
{
	uint32_t cons_head, cons_next, cons_next_next;
	uint32_t prod_tail;
	void *buf;

	cons_head = br->br_cons_head;
	prod_tail = br->br_prod_tail;

	cons_next = (cons_head + 1) & br->br_cons_mask;
	cons_next_next = (cons_head + 2) & br->br_cons_mask;

	if (cons_head == prod_tail)
		return (NULL);

#ifdef PREFETCH_DEFINED
	if (cons_next != prod_tail) {
		prefetch(br->br_ring[cons_next]);
		if (cons_next_next != prod_tail)
			prefetch(br->br_ring[cons_next_next]);
	}
#endif
	br->br_cons_head = cons_next;
	buf = br->br_ring[cons_head];

#ifdef DEBUG_BUFRING
	br->br_ring[cons_head] = NULL;
	if (!mtx_owned(br->br_lock))
		panic("lock not held on single consumer dequeue");
	if (br->br_cons_tail != cons_head)
		panic("inconsistent list cons_tail=%d cons_head=%d",
		    br->br_cons_tail, cons_head);
#endif
	br->br_cons_tail = cons_next;
	return (buf);
}
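
#if 0
/*
 * Illustrative sketch, not part of the original header: the
 * single-consumer variant relies on the caller serializing all
 * dequeues, typically with the driver's tx queue mutex.
 * "struct example_softc", sc_mtx, sc_br and example_hw_xmit() are
 * assumed names for this example.
 */
static void
example_drain_sc(struct example_softc *sc)
{
	struct mbuf *m;

	mtx_lock(&sc->sc_mtx);
	while ((m = buf_ring_dequeue_sc(sc->sc_br)) != NULL)
		example_hw_xmit(sc, m);
	mtx_unlock(&sc->sc_mtx);
}
#endif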

/*
 * single-consumer advance after a peek
 * use where dequeueing is protected by a lock
 * e.g. a network driver's tx queue lock
 */
static __inline void
buf_ring_advance_sc(struct buf_ring *br)
{
	uint32_t cons_head, cons_next;
	uint32_t prod_tail;

	cons_head = br->br_cons_head;
	prod_tail = br->br_prod_tail;

	cons_next = (cons_head + 1) & br->br_cons_mask;
	if (cons_head == prod_tail)
		return;
	br->br_cons_head = cons_next;
#ifdef DEBUG_BUFRING
	br->br_ring[cons_head] = NULL;
#endif
	br->br_cons_tail = cons_next;
}

/*
 * Used to return a buffer (most likely already there)
 * to the top of the ring. The caller should *not*
 * have used any dequeue to pull it out of the ring
 * but instead should have used the peek() function.
 * This is normally used where the transmit queue
 * of a driver is full, and an mbuf must be returned.
 * Most likely what is in the ring buffer is what
 * is being put back (since it was not removed), but
 * sometimes the lower transmit function may have
 * done a pullup or other operation that will have
 * changed it. As an optimization we always put it
 * back (since jhb says the store is probably cheaper);
 * if we have to do a multi-queue version we will need
 * the compare and an atomic.
 * (See the illustrative sketch after buf_ring_peek() below
 * for the peek/putback/advance pattern.)
 */
static __inline void
buf_ring_putback_sc(struct buf_ring *br, void *new)
{
	KASSERT(br->br_cons_head != br->br_prod_tail,
	    ("Buf-Ring has none in putback"));
	br->br_ring[br->br_cons_head] = new;
}

/*
 * return a pointer to the first entry in the ring
 * without modifying it, or NULL if the ring is empty
 * race-prone if not protected by a lock
 */
static __inline void *
buf_ring_peek(struct buf_ring *br)
{

#ifdef DEBUG_BUFRING
	if ((br->br_lock != NULL) && !mtx_owned(br->br_lock))
		panic("lock not held on single consumer dequeue");
#endif
	/*
	 * It should be safe to omit a memory barrier here: we control
	 * the consumer index, and br_prod_tail is at worst a lagging
	 * indicator, so the worst case is that we return NULL just
	 * after a buffer has been enqueued.
	 */
	if (br->br_cons_head == br->br_prod_tail)
		return (NULL);

	return (br->br_ring[br->br_cons_head]);
}
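
#if 0
/*
 * Illustrative sketch, not part of the original header: the
 * peek/putback/advance pattern described above buf_ring_putback_sc().
 * Peek at the head entry, try to hand it to hardware, and either
 * consume it or put the (possibly modified) mbuf back.  The names
 * "struct example_softc", sc_mtx, sc_br and example_hw_xmit() are
 * assumptions for illustration.
 */
static void
example_start(struct example_softc *sc)
{
	struct mbuf *m;

	mtx_assert(&sc->sc_mtx, MA_OWNED);
	while ((m = buf_ring_peek(sc->sc_br)) != NULL) {
		if (example_hw_xmit(sc, m) != 0) {
			/* Hardware ring full: return the mbuf to the head. */
			buf_ring_putback_sc(sc->sc_br, m);
			break;
		}
		/* Transmitted: now consume the entry we peeked at. */
		buf_ring_advance_sc(sc->sc_br);
	}
}
#endif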

static __inline int
buf_ring_full(struct buf_ring *br)
{

	return (((br->br_prod_head + 1) & br->br_prod_mask) == br->br_cons_tail);
}

static __inline int
buf_ring_empty(struct buf_ring *br)
{

	return (br->br_cons_head == br->br_prod_tail);
}

static __inline int
buf_ring_count(struct buf_ring *br)
{

	return ((br->br_prod_size + br->br_prod_tail - br->br_cons_tail)
	    & br->br_prod_mask);
}

struct buf_ring *buf_ring_alloc(int count, struct malloc_type *type, int flags,
    struct mtx *);
void buf_ring_free(struct buf_ring *br, struct malloc_type *type);
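
#if 0
/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * driver allocating a 2048-entry ring at attach time and releasing it
 * at detach.  The count should be a power of two so that the index
 * masks work.  M_DEVBUF, "struct example_softc", sc_mtx and sc_br are
 * assumed names for this example.
 */
static int
example_attach(struct example_softc *sc)
{

	mtx_init(&sc->sc_mtx, "example txq", NULL, MTX_DEF);
	sc->sc_br = buf_ring_alloc(2048, M_DEVBUF, M_NOWAIT, &sc->sc_mtx);
	if (sc->sc_br == NULL)
		return (ENOMEM);
	return (0);
}

static void
example_detach(struct example_softc *sc)
{

	buf_ring_free(sc->sc_br, M_DEVBUF);
	mtx_destroy(&sc->sc_mtx);
}
#endif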


#endif