FreeBSD/Linux Kernel Cross Reference
sys/sys/queue.h
1 /* $NetBSD: queue.h,v 1.45.14.2 2009/06/05 16:23:34 snj Exp $ */
2
3 /*
4 * Copyright (c) 1991, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * @(#)queue.h 8.5 (Berkeley) 8/20/94
32 */
33
34 #ifndef _SYS_QUEUE_H_
35 #define _SYS_QUEUE_H_
36
37 #include <sys/null.h>
38
39 /*
40 * This file defines five types of data structures: singly-linked lists,
41 * lists, simple queues, tail queues, and circular queues.
42 *
43 * A singly-linked list is headed by a single forward pointer. The
44 * elements are singly linked for minimum space and pointer manipulation
45 * overhead at the expense of O(n) removal for arbitrary elements. New
46 * elements can be added to the list after an existing element or at the
47 * head of the list. Elements being removed from the head of the list
48 * should use the explicit macro for this purpose for optimum
49 * efficiency. A singly-linked list may only be traversed in the forward
50 * direction. Singly-linked lists are ideal for applications with large
51 * datasets and few or no removals or for implementing a LIFO queue.
52 *
53 * A list is headed by a single forward pointer (or an array of forward
54 * pointers for a hash table header). The elements are doubly linked
55 * so that an arbitrary element can be removed without a need to
56 * traverse the list. New elements can be added to the list before
57 * or after an existing element or at the head of the list. A list
58 * may only be traversed in the forward direction.
59 *
60 * A simple queue is headed by a pair of pointers, one the head of the
61 * list and the other to the tail of the list. The elements are singly
62 * linked to save space, so elements can only be removed from the
63 * head of the list. New elements can be added to the list after
64 * an existing element, at the head of the list, or at the end of the
65 * list. A simple queue may only be traversed in the forward direction.
66 *
67 * A tail queue is headed by a pair of pointers, one to the head of the
68 * list and the other to the tail of the list. The elements are doubly
69 * linked so that an arbitrary element can be removed without a need to
70 * traverse the list. New elements can be added to the list before or
71 * after an existing element, at the head of the list, or at the end of
72 * the list. A tail queue may be traversed in either direction.
73 *
74 * A circle queue is headed by a pair of pointers, one to the head of the
75 * list and the other to the tail of the list. The elements are doubly
76 * linked so that an arbitrary element can be removed without a need to
77 * traverse the list. New elements can be added to the list before or after
78 * an existing element, at the head of the list, or at the end of the list.
79 * A circle queue may be traversed in either direction, but has a more
80 * complex end of list detection.
81 *
82 * For details on the use of these macros, see the queue(3) manual page.
83 */
84
/*
 * List definitions.
 *
 * Each entry stores a pointer to the next element and the ADDRESS of
 * the previous element's next pointer (le_prev), which is why an
 * element can unlink itself in O(1) without knowing the list head.
 */
#define	LIST_HEAD(name, type)						\
struct name {								\
	struct type *lh_first;	/* first element */			\
}

#define	LIST_HEAD_INITIALIZER(head)					\
	{ NULL }

#define	LIST_ENTRY(type)						\
struct {								\
	struct type *le_next;	/* next element */			\
	struct type **le_prev;	/* address of previous next element */	\
}

/*
 * List functions.
 *
 * The QUEUEDEBUG_* hooks expand to consistency checks (panic on a
 * corrupt link) only in kernels built with QUEUEDEBUG; otherwise
 * they expand to nothing.
 */
#if defined(_KERNEL) && defined(QUEUEDEBUG)
#define	QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field)			\
	if ((head)->lh_first &&						\
	    (head)->lh_first->field.le_prev != &(head)->lh_first)	\
		panic("LIST_INSERT_HEAD %p %s:%d", (head), __FILE__, __LINE__);
#define	QUEUEDEBUG_LIST_OP(elm, field)					\
	if ((elm)->field.le_next &&					\
	    (elm)->field.le_next->field.le_prev !=			\
	    &(elm)->field.le_next)					\
		panic("LIST_* forw %p %s:%d", (elm), __FILE__, __LINE__);\
	if (*(elm)->field.le_prev != (elm))				\
		panic("LIST_* back %p %s:%d", (elm), __FILE__, __LINE__);
#define	QUEUEDEBUG_LIST_POSTREMOVE(elm, field)				\
	(elm)->field.le_next = (void *)1L;				\
	(elm)->field.le_prev = (void *)1L;
#else
#define	QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field)
#define	QUEUEDEBUG_LIST_OP(elm, field)
#define	QUEUEDEBUG_LIST_POSTREMOVE(elm, field)
#endif

#define	LIST_INIT(head) do {						\
	(head)->lh_first = NULL;					\
} while (/*CONSTCOND*/0)

/* Insert elm immediately after listelm (listelm must be on a list). */
#define	LIST_INSERT_AFTER(listelm, elm, field) do {			\
	QUEUEDEBUG_LIST_OP((listelm), field)				\
	if (((elm)->field.le_next = (listelm)->field.le_next) != NULL)	\
		(listelm)->field.le_next->field.le_prev =		\
		    &(elm)->field.le_next;				\
	(listelm)->field.le_next = (elm);				\
	(elm)->field.le_prev = &(listelm)->field.le_next;		\
} while (/*CONSTCOND*/0)

/* Insert elm immediately before listelm (listelm must be on a list). */
#define	LIST_INSERT_BEFORE(listelm, elm, field) do {			\
	QUEUEDEBUG_LIST_OP((listelm), field)				\
	(elm)->field.le_prev = (listelm)->field.le_prev;		\
	(elm)->field.le_next = (listelm);				\
	*(listelm)->field.le_prev = (elm);				\
	(listelm)->field.le_prev = &(elm)->field.le_next;		\
} while (/*CONSTCOND*/0)

/* Insert elm at the head of the list. */
#define	LIST_INSERT_HEAD(head, elm, field) do {				\
	QUEUEDEBUG_LIST_INSERT_HEAD((head), (elm), field)		\
	if (((elm)->field.le_next = (head)->lh_first) != NULL)		\
		(head)->lh_first->field.le_prev = &(elm)->field.le_next;\
	(head)->lh_first = (elm);					\
	(elm)->field.le_prev = &(head)->lh_first;			\
} while (/*CONSTCOND*/0)

/* Unlink elm; O(1) via the stored back-pointer. No head needed. */
#define	LIST_REMOVE(elm, field) do {					\
	QUEUEDEBUG_LIST_OP((elm), field)				\
	if ((elm)->field.le_next != NULL)				\
		(elm)->field.le_next->field.le_prev = 			\
		    (elm)->field.le_prev;				\
	*(elm)->field.le_prev = (elm)->field.le_next;			\
	QUEUEDEBUG_LIST_POSTREMOVE((elm), field)			\
} while (/*CONSTCOND*/0)

#define	LIST_FOREACH(var, head, field)					\
	for ((var) = ((head)->lh_first);				\
	    (var);							\
	    (var) = ((var)->field.le_next))

/*
 * List access methods.
 */
#define	LIST_EMPTY(head)		((head)->lh_first == NULL)
#define	LIST_FIRST(head)		((head)->lh_first)
#define	LIST_NEXT(elm, field)		((elm)->field.le_next)
175
176
/*
 * Singly-linked List definitions.
 *
 * Minimal space: one forward pointer per entry. Arbitrary removal is
 * O(n) because the predecessor must be found by walking from the head.
 */
#define	SLIST_HEAD(name, type)						\
struct name {								\
	struct type *slh_first;	/* first element */			\
}

#define	SLIST_HEAD_INITIALIZER(head)					\
	{ NULL }

#define	SLIST_ENTRY(type)						\
struct {								\
	struct type *sle_next;	/* next element */			\
}

/*
 * Singly-linked List functions.
 */
#define	SLIST_INIT(head) do {						\
	(head)->slh_first = NULL;					\
} while (/*CONSTCOND*/0)

#define	SLIST_INSERT_AFTER(slistelm, elm, field) do {			\
	(elm)->field.sle_next = (slistelm)->field.sle_next;		\
	(slistelm)->field.sle_next = (elm);				\
} while (/*CONSTCOND*/0)

#define	SLIST_INSERT_HEAD(head, elm, field) do {			\
	(elm)->field.sle_next = (head)->slh_first;			\
	(head)->slh_first = (elm);					\
} while (/*CONSTCOND*/0)

/* Caller must ensure the list is non-empty. */
#define	SLIST_REMOVE_HEAD(head, field) do {				\
	(head)->slh_first = (head)->slh_first->field.sle_next;		\
} while (/*CONSTCOND*/0)

/* Remove elm anywhere in the list; O(n) walk to find its predecessor. */
#define	SLIST_REMOVE(head, elm, type, field) do {			\
	if ((head)->slh_first == (elm)) {				\
		SLIST_REMOVE_HEAD((head), field);			\
	}								\
	else {								\
		struct type *curelm = (head)->slh_first;		\
		while(curelm->field.sle_next != (elm))			\
			curelm = curelm->field.sle_next;		\
		curelm->field.sle_next =				\
		    curelm->field.sle_next->field.sle_next;		\
	}								\
} while (/*CONSTCOND*/0)

#define	SLIST_FOREACH(var, head, field)					\
	for((var) = (head)->slh_first; (var); (var) = (var)->field.sle_next)

/*
 * Singly-linked List access methods.
 */
#define	SLIST_EMPTY(head)	((head)->slh_first == NULL)
#define	SLIST_FIRST(head)	((head)->slh_first)
#define	SLIST_NEXT(elm, field)	((elm)->field.sle_next)
236
237
/*
 * Singly-linked Tail queue declarations.
 *
 * Like SLIST but the head also tracks the ADDRESS of the last entry's
 * next pointer (stqh_last), making tail insertion O(1). On an empty
 * queue stqh_last points back at stqh_first.
 */
#define	STAILQ_HEAD(name, type)						\
struct name {								\
	struct type *stqh_first;	/* first element */		\
	struct type **stqh_last;	/* addr of last next element */	\
}

#define	STAILQ_HEAD_INITIALIZER(head)					\
	{ NULL, &(head).stqh_first }

#define	STAILQ_ENTRY(type)						\
struct {								\
	struct type *stqe_next;	/* next element */			\
}

/*
 * Singly-linked Tail queue functions.
 */
#define	STAILQ_INIT(head) do {						\
	(head)->stqh_first = NULL;					\
	(head)->stqh_last = &(head)->stqh_first;			\
} while (/*CONSTCOND*/0)

#define	STAILQ_INSERT_HEAD(head, elm, field) do {			\
	if (((elm)->field.stqe_next = (head)->stqh_first) == NULL)	\
		(head)->stqh_last = &(elm)->field.stqe_next;		\
	(head)->stqh_first = (elm);					\
} while (/*CONSTCOND*/0)

#define	STAILQ_INSERT_TAIL(head, elm, field) do {			\
	(elm)->field.stqe_next = NULL;					\
	*(head)->stqh_last = (elm);					\
	(head)->stqh_last = &(elm)->field.stqe_next;			\
} while (/*CONSTCOND*/0)

#define	STAILQ_INSERT_AFTER(head, listelm, elm, field) do {		\
	if (((elm)->field.stqe_next = (listelm)->field.stqe_next) == NULL)\
		(head)->stqh_last = &(elm)->field.stqe_next;		\
	(listelm)->field.stqe_next = (elm);				\
} while (/*CONSTCOND*/0)

/* Caller must ensure the queue is non-empty. */
#define	STAILQ_REMOVE_HEAD(head, field) do {				\
	if (((head)->stqh_first = (head)->stqh_first->field.stqe_next) == NULL) \
		(head)->stqh_last = &(head)->stqh_first;		\
} while (/*CONSTCOND*/0)

/* Remove elm anywhere in the queue; O(n) walk to find its predecessor. */
#define	STAILQ_REMOVE(head, elm, type, field) do {			\
	if ((head)->stqh_first == (elm)) {				\
		STAILQ_REMOVE_HEAD((head), field);			\
	} else {							\
		struct type *curelm = (head)->stqh_first;		\
		while (curelm->field.stqe_next != (elm))		\
			curelm = curelm->field.stqe_next;		\
		if ((curelm->field.stqe_next =				\
			curelm->field.stqe_next->field.stqe_next) == NULL) \
			    (head)->stqh_last = &(curelm)->field.stqe_next; \
	}								\
} while (/*CONSTCOND*/0)

#define	STAILQ_FOREACH(var, head, field)				\
	for ((var) = ((head)->stqh_first);				\
		(var);							\
		(var) = ((var)->field.stqe_next))

/*
 * Singly-linked Tail queue access methods.
 */
#define	STAILQ_EMPTY(head)	((head)->stqh_first == NULL)
#define	STAILQ_FIRST(head)	((head)->stqh_first)
#define	STAILQ_NEXT(elm, field)	((elm)->field.stqe_next)
310
311
/*
 * Simple queue definitions.
 *
 * Structurally identical to STAILQ (NetBSD keeps both names for
 * historical compatibility): singly linked entries plus a tail
 * pointer-to-pointer for O(1) tail insertion.
 */
#define	SIMPLEQ_HEAD(name, type)					\
struct name {								\
	struct type *sqh_first;	/* first element */			\
	struct type **sqh_last;	/* addr of last next element */		\
}

#define	SIMPLEQ_HEAD_INITIALIZER(head)					\
	{ NULL, &(head).sqh_first }

#define	SIMPLEQ_ENTRY(type)						\
struct {								\
	struct type *sqe_next;	/* next element */			\
}

/*
 * Simple queue functions.
 */
#define	SIMPLEQ_INIT(head) do {						\
	(head)->sqh_first = NULL;					\
	(head)->sqh_last = &(head)->sqh_first;				\
} while (/*CONSTCOND*/0)

#define	SIMPLEQ_INSERT_HEAD(head, elm, field) do {			\
	if (((elm)->field.sqe_next = (head)->sqh_first) == NULL)	\
		(head)->sqh_last = &(elm)->field.sqe_next;		\
	(head)->sqh_first = (elm);					\
} while (/*CONSTCOND*/0)

#define	SIMPLEQ_INSERT_TAIL(head, elm, field) do {			\
	(elm)->field.sqe_next = NULL;					\
	*(head)->sqh_last = (elm);					\
	(head)->sqh_last = &(elm)->field.sqe_next;			\
} while (/*CONSTCOND*/0)

#define	SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do {		\
	if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
		(head)->sqh_last = &(elm)->field.sqe_next;		\
	(listelm)->field.sqe_next = (elm);				\
} while (/*CONSTCOND*/0)

/* Caller must ensure the queue is non-empty. */
#define	SIMPLEQ_REMOVE_HEAD(head, field) do {				\
	if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \
		(head)->sqh_last = &(head)->sqh_first;			\
} while (/*CONSTCOND*/0)

/* Remove elm anywhere in the queue; O(n) walk to find its predecessor. */
#define	SIMPLEQ_REMOVE(head, elm, type, field) do {			\
	if ((head)->sqh_first == (elm)) {				\
		SIMPLEQ_REMOVE_HEAD((head), field);			\
	} else {							\
		struct type *curelm = (head)->sqh_first;		\
		while (curelm->field.sqe_next != (elm))			\
			curelm = curelm->field.sqe_next;		\
		if ((curelm->field.sqe_next =				\
			curelm->field.sqe_next->field.sqe_next) == NULL) \
			    (head)->sqh_last = &(curelm)->field.sqe_next; \
	}								\
} while (/*CONSTCOND*/0)

#define	SIMPLEQ_FOREACH(var, head, field)				\
	for ((var) = ((head)->sqh_first);				\
	    (var);							\
	    (var) = ((var)->field.sqe_next))

/*
 * Simple queue access methods.
 */
#define	SIMPLEQ_EMPTY(head)		((head)->sqh_first == NULL)
#define	SIMPLEQ_FIRST(head)		((head)->sqh_first)
#define	SIMPLEQ_NEXT(elm, field)	((elm)->field.sqe_next)
384
385
/*
 * Tail queue definitions.
 *
 * Doubly linked with a tail pointer-to-pointer: O(1) insertion at
 * both ends, O(1) removal of any element, traversal in either
 * direction. The _TAILQ_* variants take a type qualifier argument
 * (e.g. volatile) for qualified element types; the plain TAILQ_*
 * forms pass an empty qualifier.
 */
#define	_TAILQ_HEAD(name, type, qual)					\
struct name {								\
	qual type *tqh_first;		/* first element */		\
	qual type *qual *tqh_last;	/* addr of last next element */	\
}
#define TAILQ_HEAD(name, type)	_TAILQ_HEAD(name, struct type,)

#define	TAILQ_HEAD_INITIALIZER(head)					\
	{ NULL, &(head).tqh_first }

#define	_TAILQ_ENTRY(type, qual)					\
struct {								\
	qual type *tqe_next;		/* next element */		\
	qual type *qual *tqe_prev;	/* address of previous next element */\
}
#define TAILQ_ENTRY(type)	_TAILQ_ENTRY(struct type,)

/*
 * Tail queue functions.
 *
 * The QUEUEDEBUG_* hooks expand to consistency checks only in kernels
 * built with QUEUEDEBUG; otherwise they expand to nothing.
 */
#if defined(_KERNEL) && defined(QUEUEDEBUG)
#define	QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field)			\
	if ((head)->tqh_first &&					\
	    (head)->tqh_first->field.tqe_prev != &(head)->tqh_first)	\
		panic("TAILQ_INSERT_HEAD %p %s:%d", (head), __FILE__, __LINE__);
#define	QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field)			\
	if (*(head)->tqh_last != NULL)					\
		panic("TAILQ_INSERT_TAIL %p %s:%d", (head), __FILE__, __LINE__);
#define	QUEUEDEBUG_TAILQ_OP(elm, field)					\
	if ((elm)->field.tqe_next &&					\
	    (elm)->field.tqe_next->field.tqe_prev !=			\
	    &(elm)->field.tqe_next)					\
		panic("TAILQ_* forw %p %s:%d", (elm), __FILE__, __LINE__);\
	if (*(elm)->field.tqe_prev != (elm))				\
		panic("TAILQ_* back %p %s:%d", (elm), __FILE__, __LINE__);
#define	QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field)			\
	if ((elm)->field.tqe_next == NULL &&				\
	    (head)->tqh_last != &(elm)->field.tqe_next)			\
		panic("TAILQ_PREREMOVE head %p elm %p %s:%d",		\
		      (head), (elm), __FILE__, __LINE__);
#define	QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field)				\
	(elm)->field.tqe_next = (void *)1L;				\
	(elm)->field.tqe_prev = (void *)1L;
#else
#define	QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field)
#define	QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field)
#define	QUEUEDEBUG_TAILQ_OP(elm, field)
#define	QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field)
#define	QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field)
#endif

#define	TAILQ_INIT(head) do {						\
	(head)->tqh_first = NULL;					\
	(head)->tqh_last = &(head)->tqh_first;				\
} while (/*CONSTCOND*/0)

#define	TAILQ_INSERT_HEAD(head, elm, field) do {			\
	QUEUEDEBUG_TAILQ_INSERT_HEAD((head), (elm), field)		\
	if (((elm)->field.tqe_next = (head)->tqh_first) != NULL)	\
		(head)->tqh_first->field.tqe_prev =			\
		    &(elm)->field.tqe_next;				\
	else								\
		(head)->tqh_last = &(elm)->field.tqe_next;		\
	(head)->tqh_first = (elm);					\
	(elm)->field.tqe_prev = &(head)->tqh_first;			\
} while (/*CONSTCOND*/0)

#define	TAILQ_INSERT_TAIL(head, elm, field) do {			\
	QUEUEDEBUG_TAILQ_INSERT_TAIL((head), (elm), field)		\
	(elm)->field.tqe_next = NULL;					\
	(elm)->field.tqe_prev = (head)->tqh_last;			\
	*(head)->tqh_last = (elm);					\
	(head)->tqh_last = &(elm)->field.tqe_next;			\
} while (/*CONSTCOND*/0)

#define	TAILQ_INSERT_AFTER(head, listelm, elm, field) do {		\
	QUEUEDEBUG_TAILQ_OP((listelm), field)				\
	if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
		(elm)->field.tqe_next->field.tqe_prev = 		\
		    &(elm)->field.tqe_next;				\
	else								\
		(head)->tqh_last = &(elm)->field.tqe_next;		\
	(listelm)->field.tqe_next = (elm);				\
	(elm)->field.tqe_prev = &(listelm)->field.tqe_next;		\
} while (/*CONSTCOND*/0)

#define	TAILQ_INSERT_BEFORE(listelm, elm, field) do {			\
	QUEUEDEBUG_TAILQ_OP((listelm), field)				\
	(elm)->field.tqe_prev = (listelm)->field.tqe_prev;		\
	(elm)->field.tqe_next = (listelm);				\
	*(listelm)->field.tqe_prev = (elm);				\
	(listelm)->field.tqe_prev = &(elm)->field.tqe_next;		\
} while (/*CONSTCOND*/0)

/* Unlink elm; O(1), keeps the head's tail pointer consistent. */
#define	TAILQ_REMOVE(head, elm, field) do {				\
	QUEUEDEBUG_TAILQ_PREREMOVE((head), (elm), field)		\
	QUEUEDEBUG_TAILQ_OP((elm), field)				\
	if (((elm)->field.tqe_next) != NULL)				\
		(elm)->field.tqe_next->field.tqe_prev = 		\
		    (elm)->field.tqe_prev;				\
	else								\
		(head)->tqh_last = (elm)->field.tqe_prev;		\
	*(elm)->field.tqe_prev = (elm)->field.tqe_next;			\
	QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field);			\
} while (/*CONSTCOND*/0)

#define	TAILQ_FOREACH(var, head, field)					\
	for ((var) = ((head)->tqh_first);				\
	    (var);							\
	    (var) = ((var)->field.tqe_next))

/* Removal-safe variant: next is latched before the body runs. */
#define	TAILQ_FOREACH_SAFE(var, head, field, next)			\
	for ((var) = ((head)->tqh_first);				\
	    (var) != NULL && ((next) = TAILQ_NEXT(var, field), 1);	\
	    (var) = (next))

/*
 * Reverse traversal relies on tqe_prev/tqh_last pointing at a "next"
 * slot whose layout matches the head struct; hence the headname cast.
 */
#define	TAILQ_FOREACH_REVERSE(var, head, headname, field)		\
	for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last));\
	    (var);							\
	    (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)))

/*
 * Tail queue access methods.
 */
#define	TAILQ_EMPTY(head)		((head)->tqh_first == NULL)
#define	TAILQ_FIRST(head)		((head)->tqh_first)
#define	TAILQ_NEXT(elm, field)		((elm)->field.tqe_next)

#define	TAILQ_LAST(head, headname) \
	(*(((struct headname *)((head)->tqh_last))->tqh_last))
#define	TAILQ_PREV(elm, headname, field) \
	(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
521
522
/*
 * Circular queue definitions.
 *
 * Doubly linked ring: the head itself acts as the sentinel, so an
 * empty queue has cqh_first == cqh_last == (void *)head, and end of
 * traversal is detected by comparing against the head address.
 *
 * The QUEUEDEBUG_* hooks expand to consistency checks only in kernels
 * built with QUEUEDEBUG; otherwise they expand to nothing.
 */
#if defined(_KERNEL) && defined(QUEUEDEBUG)
#define QUEUEDEBUG_CIRCLEQ_HEAD(head, field)				\
	if ((head)->cqh_first != (void *)(head) &&			\
	    (head)->cqh_first->field.cqe_prev != (void *)(head))	\
		panic("CIRCLEQ head forw %p %s:%d", (head),		\
		      __FILE__, __LINE__);				\
	if ((head)->cqh_last != (void *)(head) &&			\
	    (head)->cqh_last->field.cqe_next != (void *)(head))		\
		panic("CIRCLEQ head back %p %s:%d", (head),		\
		      __FILE__, __LINE__);
#define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field)			\
	if ((elm)->field.cqe_next == (void *)(head)) {			\
		if ((head)->cqh_last != (elm))				\
			panic("CIRCLEQ elm last %p %s:%d", (elm),	\
			      __FILE__, __LINE__);			\
	} else {							\
		if ((elm)->field.cqe_next->field.cqe_prev != (elm))	\
			panic("CIRCLEQ elm forw %p %s:%d", (elm),	\
			      __FILE__, __LINE__);			\
	}								\
	if ((elm)->field.cqe_prev == (void *)(head)) {			\
		if ((head)->cqh_first != (elm))				\
			panic("CIRCLEQ elm first %p %s:%d", (elm),	\
			      __FILE__, __LINE__);			\
	} else {							\
		if ((elm)->field.cqe_prev->field.cqe_next != (elm))	\
			panic("CIRCLEQ elm prev %p %s:%d", (elm),	\
			      __FILE__, __LINE__);			\
	}
#define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field)			\
	(elm)->field.cqe_next = (void *)1L;				\
	(elm)->field.cqe_prev = (void *)1L;
#else
#define QUEUEDEBUG_CIRCLEQ_HEAD(head, field)
#define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field)
#define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field)
#endif

#define	CIRCLEQ_HEAD(name, type)					\
struct name {								\
	struct type *cqh_first;		/* first element */		\
	struct type *cqh_last;		/* last element */		\
}

#define	CIRCLEQ_HEAD_INITIALIZER(head)					\
	{ (void *)&head, (void *)&head }

#define	CIRCLEQ_ENTRY(type)						\
struct {								\
	struct type *cqe_next;		/* next element */		\
	struct type *cqe_prev;		/* previous element */		\
}

/*
 * Circular queue functions.
 */
#define	CIRCLEQ_INIT(head) do {						\
	(head)->cqh_first = (void *)(head);				\
	(head)->cqh_last = (void *)(head);				\
} while (/*CONSTCOND*/0)

#define	CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do {		\
	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
	QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field)		\
	(elm)->field.cqe_next = (listelm)->field.cqe_next;		\
	(elm)->field.cqe_prev = (listelm);				\
	if ((listelm)->field.cqe_next == (void *)(head))		\
		(head)->cqh_last = (elm);				\
	else								\
		(listelm)->field.cqe_next->field.cqe_prev = (elm);	\
	(listelm)->field.cqe_next = (elm);				\
} while (/*CONSTCOND*/0)

#define	CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do {		\
	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
	QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field)		\
	(elm)->field.cqe_next = (listelm);				\
	(elm)->field.cqe_prev = (listelm)->field.cqe_prev;		\
	if ((listelm)->field.cqe_prev == (void *)(head))		\
		(head)->cqh_first = (elm);				\
	else								\
		(listelm)->field.cqe_prev->field.cqe_next = (elm);	\
	(listelm)->field.cqe_prev = (elm);				\
} while (/*CONSTCOND*/0)

#define	CIRCLEQ_INSERT_HEAD(head, elm, field) do {			\
	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
	(elm)->field.cqe_next = (head)->cqh_first;			\
	(elm)->field.cqe_prev = (void *)(head);				\
	if ((head)->cqh_last == (void *)(head))				\
		(head)->cqh_last = (elm);				\
	else								\
		(head)->cqh_first->field.cqe_prev = (elm);		\
	(head)->cqh_first = (elm);					\
} while (/*CONSTCOND*/0)

#define	CIRCLEQ_INSERT_TAIL(head, elm, field) do {			\
	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
	(elm)->field.cqe_next = (void *)(head);				\
	(elm)->field.cqe_prev = (head)->cqh_last;			\
	if ((head)->cqh_first == (void *)(head))			\
		(head)->cqh_first = (elm);				\
	else								\
		(head)->cqh_last->field.cqe_next = (elm);		\
	(head)->cqh_last = (elm);					\
} while (/*CONSTCOND*/0)

/* Unlink elm; O(1). Either neighbor may be the head sentinel. */
#define	CIRCLEQ_REMOVE(head, elm, field) do {				\
	QUEUEDEBUG_CIRCLEQ_HEAD((head), field)				\
	QUEUEDEBUG_CIRCLEQ_ELM((head), (elm), field)			\
	if ((elm)->field.cqe_next == (void *)(head))			\
		(head)->cqh_last = (elm)->field.cqe_prev;		\
	else								\
		(elm)->field.cqe_next->field.cqe_prev =			\
		    (elm)->field.cqe_prev;				\
	if ((elm)->field.cqe_prev == (void *)(head))			\
		(head)->cqh_first = (elm)->field.cqe_next;		\
	else								\
		(elm)->field.cqe_prev->field.cqe_next =			\
		    (elm)->field.cqe_next;				\
	QUEUEDEBUG_CIRCLEQ_POSTREMOVE((elm), field)			\
} while (/*CONSTCOND*/0)

#define	CIRCLEQ_FOREACH(var, head, field)				\
	for ((var) = ((head)->cqh_first);				\
	    (var) != (const void *)(head);				\
	    (var) = ((var)->field.cqe_next))

#define	CIRCLEQ_FOREACH_REVERSE(var, head, field)			\
	for ((var) = ((head)->cqh_last);				\
	    (var) != (const void *)(head);				\
	    (var) = ((var)->field.cqe_prev))

/*
 * Circular queue access methods.
 */
#define	CIRCLEQ_EMPTY(head)		((head)->cqh_first == (void *)(head))
#define	CIRCLEQ_FIRST(head)		((head)->cqh_first)
#define	CIRCLEQ_LAST(head)		((head)->cqh_last)
#define	CIRCLEQ_NEXT(elm, field)	((elm)->field.cqe_next)
#define	CIRCLEQ_PREV(elm, field)	((elm)->field.cqe_prev)

/*
 * Wrap-around successor/predecessor: skip the head sentinel and
 * continue at the opposite end. (elm) is fully parenthesized in both
 * arms — the original left the false arm unparenthesized, which
 * mis-expands for argument expressions like *p or p + 1.
 */
#define CIRCLEQ_LOOP_NEXT(head, elm, field)				\
	(((elm)->field.cqe_next == (void *)(head))			\
	    ? ((head)->cqh_first)					\
	    : ((elm)->field.cqe_next))
#define CIRCLEQ_LOOP_PREV(head, elm, field)				\
	(((elm)->field.cqe_prev == (void *)(head))			\
	    ? ((head)->cqh_last)					\
	    : ((elm)->field.cqe_prev))
676
677 #endif /* !_SYS_QUEUE_H_ */
Cache object: d3844279a455e6d359ff03112aa3a722
|