1 /* $NetBSD: queue.h,v 1.42 2005/07/13 15:08:24 wiz Exp $ */
2
3 /*
4 * Copyright (c) 1991, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * @(#)queue.h 8.5 (Berkeley) 8/20/94
32 */
33 /*
34 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
35 * Use is subject to license terms.
36 */
37
38 #ifndef _SYS_QUEUE_H
39 #define _SYS_QUEUE_H
40
41 #include <sys/note.h>
42
43 #ifdef __cplusplus
44 extern "C" {
45 #endif
46
47 /*
48 * This file defines five types of data structures: singly-linked lists,
49 * lists, simple queues, tail queues, and circular queues.
50 *
51 * A singly-linked list is headed by a single forward pointer. The
52 * elements are singly linked for minimum space and pointer manipulation
53 * overhead at the expense of O(n) removal for arbitrary elements. New
54 * elements can be added to the list after an existing element or at the
55 * head of the list. Elements being removed from the head of the list
56 * should use the explicit macro for this purpose for optimum
57 * efficiency. A singly-linked list may only be traversed in the forward
58 * direction. Singly-linked lists are ideal for applications with large
59 * datasets and few or no removals or for implementing a LIFO queue.
60 *
61 * A list is headed by a single forward pointer (or an array of forward
91 */
92
/*
 * List definitions.
 */
/* Head of a list: a single pointer to the first element (NULL if empty). */
#define	LIST_HEAD(name, type)						\
struct name {								\
	struct type *lh_first;	/* first element */			\
}

/* Static initializer for an empty LIST_HEAD. */
#define	LIST_HEAD_INITIALIZER(head)					\
	{ NULL }

/*
 * Per-element linkage.  le_prev points at the previous element's
 * le_next pointer (or at lh_first), which allows O(1) removal
 * without knowing the list head.
 */
#define	LIST_ENTRY(type)						\
struct {								\
	struct type *le_next;	/* next element */			\
	struct type **le_prev;	/* address of previous next element */	\
}
109
110 /*
111 * List functions.
112 */
113 #if defined(_KERNEL) && defined(QUEUEDEBUG)
114 #define QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field) \
115 if ((head)->lh_first && \
116 (head)->lh_first->field.le_prev != &(head)->lh_first) \
117 panic("LIST_INSERT_HEAD %p %s:%d", (head), __FILE__, __LINE__);
118 #define QUEUEDEBUG_LIST_OP(elm, field) \
119 if ((elm)->field.le_next && \
120 (elm)->field.le_next->field.le_prev != \
121 &(elm)->field.le_next) \
122 panic("LIST_* forw %p %s:%d", (elm), __FILE__, __LINE__);\
123 if (*(elm)->field.le_prev != (elm)) \
124 panic("LIST_* back %p %s:%d", (elm), __FILE__, __LINE__);
125 #define QUEUEDEBUG_LIST_POSTREMOVE(elm, field) \
126 (elm)->field.le_next = (void *)1L; \
127 (elm)->field.le_prev = (void *)1L;
128 #else
129 #define QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field)
130 #define QUEUEDEBUG_LIST_OP(elm, field)
157
/*
 * Insert elm at the head of the list.  The QUEUEDEBUG_* hook expands
 * to nothing unless _KERNEL && QUEUEDEBUG are defined.
 */
#define	LIST_INSERT_HEAD(head, elm, field) do {				\
	QUEUEDEBUG_LIST_INSERT_HEAD((head), (elm), field)		\
	if (((elm)->field.le_next = (head)->lh_first) != NULL)		\
		(head)->lh_first->field.le_prev = &(elm)->field.le_next;\
	(head)->lh_first = (elm);					\
	(elm)->field.le_prev = &(head)->lh_first;			\
	_NOTE(CONSTCOND)						\
} while (0)

/*
 * Unlink elm in O(1): patch the successor's back pointer, then the
 * predecessor's forward pointer through le_prev.  The removed
 * element's own linkage is left dangling (poisoned under QUEUEDEBUG).
 */
#define	LIST_REMOVE(elm, field) do {					\
	QUEUEDEBUG_LIST_OP((elm), field)				\
	if ((elm)->field.le_next != NULL)				\
		(elm)->field.le_next->field.le_prev = 			\
		    (elm)->field.le_prev;				\
	*(elm)->field.le_prev = (elm)->field.le_next;			\
	QUEUEDEBUG_LIST_POSTREMOVE((elm), field)			\
	_NOTE(CONSTCOND)						\
} while (0)

/* Forward iteration; var must not be removed inside the loop body. */
#define	LIST_FOREACH(var, head, field)					\
	for ((var) = ((head)->lh_first);				\
	    (var);							\
	    (var) = ((var)->field.le_next))
181
/*
 * List access methods.  Pure expressions with no side effects; each
 * argument is evaluated exactly once.
 */
#define	LIST_EMPTY(head)		((head)->lh_first == NULL)
#define	LIST_FIRST(head)		((head)->lh_first)
#define	LIST_NEXT(elm, field)		((elm)->field.le_next)
188
189
/*
 * Singly-linked List definitions.  Minimal per-element overhead: one
 * forward pointer; arbitrary removal is O(n).
 */
#define	SLIST_HEAD(name, type)						\
struct name {								\
	struct type *slh_first;	/* first element */			\
}

/* Static initializer for an empty SLIST_HEAD. */
#define	SLIST_HEAD_INITIALIZER(head)					\
	{ NULL }

/* Per-element linkage: forward pointer only. */
#define	SLIST_ENTRY(type)						\
struct {								\
	struct type *sle_next;	/* next element */			\
}
205
/*
 * Singly-linked List functions.
 */
/* Reset the list to empty. */
#define	SLIST_INIT(head) do {						\
	(head)->slh_first = NULL;					\
	_NOTE(CONSTCOND)						\
} while (0)

/* Link elm immediately after slistelm.  O(1). */
#define	SLIST_INSERT_AFTER(slistelm, elm, field) do {			\
	(elm)->field.sle_next = (slistelm)->field.sle_next;		\
	(slistelm)->field.sle_next = (elm);				\
	_NOTE(CONSTCOND)						\
} while (0)

/* Push elm onto the front of the list.  O(1). */
#define	SLIST_INSERT_HEAD(head, elm, field) do {			\
	(elm)->field.sle_next = (head)->slh_first;			\
	(head)->slh_first = (elm);					\
	_NOTE(CONSTCOND)						\
} while (0)

/* Pop the first element; the list must not be empty. */
#define	SLIST_REMOVE_HEAD(head, field) do {				\
	(head)->slh_first = (head)->slh_first->field.sle_next;		\
	_NOTE(CONSTCOND)						\
} while (0)

/*
 * Unlink elm wherever it is: O(n) scan for its predecessor, then
 * splice it out.  elm must be on the list.
 */
#define	SLIST_REMOVE(head, elm, type, field) do {			\
	if ((head)->slh_first == (elm)) {				\
		SLIST_REMOVE_HEAD((head), field);			\
	}								\
	else {								\
		struct type *curelm = (head)->slh_first;		\
		while (curelm->field.sle_next != (elm))			\
			curelm = curelm->field.sle_next;		\
		curelm->field.sle_next =				\
		    curelm->field.sle_next->field.sle_next;		\
	}								\
	_NOTE(CONSTCOND)						\
} while (0)

/* Forward iteration; var must not be unlinked inside the body. */
#define	SLIST_FOREACH(var, head, field)					\
	for ((var) = (head)->slh_first; (var); (var) = (var)->field.sle_next)
247
/*
 * Singly-linked List access methods.  Pure expressions.
 */
#define	SLIST_EMPTY(head)	((head)->slh_first == NULL)
#define	SLIST_FIRST(head)	((head)->slh_first)
#define	SLIST_NEXT(elm, field)	((elm)->field.sle_next)
254
255
/*
 * Singly-linked Tail queue declarations.  Like SLIST, but the head
 * also caches the address of the last element's next pointer so
 * insertion at the tail is O(1).
 */
#define	STAILQ_HEAD(name, type)						\
struct name {								\
	struct type *stqh_first;	/* first element */		\
	struct type **stqh_last;	/* addr of last next element */	\
}

/* Static initializer: empty queue, stqh_last points at stqh_first. */
#define	STAILQ_HEAD_INITIALIZER(head)					\
	{ NULL, &(head).stqh_first }

/* Per-element linkage: forward pointer only. */
#define	STAILQ_ENTRY(type)						\
struct {								\
	struct type *stqe_next;	/* next element */			\
}
272
/*
 * Singly-linked Tail queue functions.
 */
/* Reset to empty; stqh_last must always point at the final next slot. */
#define	STAILQ_INIT(head) do {						\
	(head)->stqh_first = NULL;					\
	(head)->stqh_last = &(head)->stqh_first;			\
	_NOTE(CONSTCOND)						\
} while (0)

/* Push elm on the front; fix stqh_last when the queue was empty. */
#define	STAILQ_INSERT_HEAD(head, elm, field) do {			\
	if (((elm)->field.stqe_next = (head)->stqh_first) == NULL)	\
		(head)->stqh_last = &(elm)->field.stqe_next;		\
	(head)->stqh_first = (elm);					\
	_NOTE(CONSTCOND)						\
} while (0)

/* Append elm at the tail in O(1) via the cached stqh_last slot. */
#define	STAILQ_INSERT_TAIL(head, elm, field) do {			\
	(elm)->field.stqe_next = NULL;					\
	*(head)->stqh_last = (elm);					\
	(head)->stqh_last = &(elm)->field.stqe_next;			\
	_NOTE(CONSTCOND)						\
} while (0)

/* Link elm after listelm; update stqh_last if listelm was the tail. */
#define	STAILQ_INSERT_AFTER(head, listelm, elm, field) do {		\
	if (((elm)->field.stqe_next = (listelm)->field.stqe_next)	\
	    == NULL)							\
		(head)->stqh_last = &(elm)->field.stqe_next;		\
	(listelm)->field.stqe_next = (elm);				\
	_NOTE(CONSTCOND)						\
} while (0)

/*
 * Pop the first element (queue must be non-empty); reset stqh_last
 * when the queue becomes empty.
 */
#define	STAILQ_REMOVE_HEAD(head, field) do {				\
	if (((head)->stqh_first = (head)->stqh_first->field.stqe_next)	\
	    == NULL)							\
		(head)->stqh_last = &(head)->stqh_first;		\
	_NOTE(CONSTCOND)						\
} while (0)

/* O(n) removal of an arbitrary element that must be on the queue. */
#define	STAILQ_REMOVE(head, elm, type, field) do {			\
	if ((head)->stqh_first == (elm)) {				\
		STAILQ_REMOVE_HEAD((head), field);			\
	} else {							\
		struct type *curelm = (head)->stqh_first;		\
		while (curelm->field.stqe_next != (elm))		\
			curelm = curelm->field.stqe_next;		\
		if ((curelm->field.stqe_next =				\
			curelm->field.stqe_next->field.stqe_next) == NULL) \
			(head)->stqh_last = &(curelm)->field.stqe_next; \
	}								\
	_NOTE(CONSTCOND)						\
} while (0)

/* Forward iteration; var must not be removed inside the body. */
#define	STAILQ_FOREACH(var, head, field)				\
	for ((var) = ((head)->stqh_first);				\
	    (var);							\
	    (var) = ((var)->field.stqe_next))
329
/*
 * Singly-linked Tail queue access methods.  Pure expressions.
 */
#define	STAILQ_EMPTY(head)	((head)->stqh_first == NULL)
#define	STAILQ_FIRST(head)	((head)->stqh_first)
#define	STAILQ_NEXT(elm, field)	((elm)->field.stqe_next)
336
337
/*
 * Simple queue definitions.  Functionally identical to STAILQ: the
 * head holds the first element and the address of the last element's
 * next pointer (O(1) tail insertion).
 */
#define	SIMPLEQ_HEAD(name, type)					\
struct name {								\
	struct type *sqh_first;	/* first element */			\
	struct type **sqh_last;	/* addr of last next element */		\
}

/* Static initializer: empty queue, sqh_last points at sqh_first. */
#define	SIMPLEQ_HEAD_INITIALIZER(head)					\
	{ NULL, &(head).sqh_first }

/* Per-element linkage: forward pointer only. */
#define	SIMPLEQ_ENTRY(type)						\
struct {								\
	struct type *sqe_next;	/* next element */			\
}
354
/*
 * Simple queue functions.
 */
/* Reset to empty; sqh_last tracks the final next-pointer slot. */
#define	SIMPLEQ_INIT(head) do {						\
	(head)->sqh_first = NULL;					\
	(head)->sqh_last = &(head)->sqh_first;				\
	_NOTE(CONSTCOND)						\
} while (0)

/* Push elm on the front; fix sqh_last when the queue was empty. */
#define	SIMPLEQ_INSERT_HEAD(head, elm, field) do {			\
	if (((elm)->field.sqe_next = (head)->sqh_first) == NULL)	\
		(head)->sqh_last = &(elm)->field.sqe_next;		\
	(head)->sqh_first = (elm);					\
	_NOTE(CONSTCOND)						\
} while (0)
370
371 #define SIMPLEQ_INSERT_TAIL(head, elm, field) do { \
372 (elm)->field.sqe_next = NULL; \
373 *(head)->sqh_last = (elm); \
374 (head)->sqh_last = &(elm)->field.sqe_next; \
375 _NOTE(CONSTCOND) \
/*
 * Pop the first element (queue must be non-empty); reset sqh_last
 * when the queue becomes empty.
 */
#define	SIMPLEQ_REMOVE_HEAD(head, field) do {				\
	if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \
		(head)->sqh_last = &(head)->sqh_first;			\
	_NOTE(CONSTCOND)						\
} while (0)

/* O(n) removal of an arbitrary element that must be on the queue. */
#define	SIMPLEQ_REMOVE(head, elm, type, field) do {			\
	if ((head)->sqh_first == (elm)) {				\
		SIMPLEQ_REMOVE_HEAD((head), field);			\
	} else {							\
		struct type *curelm = (head)->sqh_first;		\
		while (curelm->field.sqe_next != (elm))			\
			curelm = curelm->field.sqe_next;		\
		if ((curelm->field.sqe_next =				\
			curelm->field.sqe_next->field.sqe_next) == NULL) \
			(head)->sqh_last = &(curelm)->field.sqe_next;	\
	}								\
	_NOTE(CONSTCOND)						\
} while (0)

/* Forward iteration; var must not be removed inside the body. */
#define	SIMPLEQ_FOREACH(var, head, field)				\
	for ((var) = ((head)->sqh_first);				\
	    (var);							\
	    (var) = ((var)->field.sqe_next))
409
/*
 * Simple queue access methods.  Pure expressions.
 */
#define	SIMPLEQ_EMPTY(head)	((head)->sqh_first == NULL)
#define	SIMPLEQ_FIRST(head)	((head)->sqh_first)
#define	SIMPLEQ_NEXT(elm, field)	((elm)->field.sqe_next)
416
417
/*
 * Tail queue definitions.  Doubly linked: O(1) insertion/removal at
 * both ends and relative to any element.  The _TAILQ_* forms take a
 * fully spelled element type so heads/entries of qualified types can
 * be declared as well.
 */
#define	_TAILQ_HEAD(name, type)						\
struct name {								\
	type *tqh_first;	/* first element */			\
	type **tqh_last;	/* addr of last next element */		\
}
#define	TAILQ_HEAD(name, type)	_TAILQ_HEAD(name, struct type)

/* Static initializer: empty queue, tqh_last points at tqh_first. */
#define	TAILQ_HEAD_INITIALIZER(head)					\
	{ NULL, &(head).tqh_first }

/* Per-element linkage; tqe_prev points at the previous next slot. */
#define	_TAILQ_ENTRY(type)						\
struct {								\
	type *tqe_next;		/* next element */			\
	type **tqe_prev;	/* address of previous next element */\
}
#define	TAILQ_ENTRY(type)	_TAILQ_ENTRY(struct type)
437
438 /*
439 * Tail queue functions.
440 */
441 #if defined(_KERNEL) && defined(QUEUEDEBUG)
442 #define QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field) \
443 if ((head)->tqh_first && \
444 (head)->tqh_first->field.tqe_prev != &(head)->tqh_first) \
445 panic("TAILQ_INSERT_HEAD %p %s:%d", (void *)(head), \
446 __FILE__, __LINE__);
447 #define QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field) \
448 if (*(head)->tqh_last != NULL) \
449 panic("TAILQ_INSERT_TAIL %p %s:%d", (void *)(head), \
450 __FILE__, __LINE__);
451 #define QUEUEDEBUG_TAILQ_OP(elm, field) \
452 if ((elm)->field.tqe_next && \
453 (elm)->field.tqe_next->field.tqe_prev != \
454 &(elm)->field.tqe_next) \
455 panic("TAILQ_* forw %p %s:%d", (void *)(elm), \
456 __FILE__, __LINE__);\
457 if (*(elm)->field.tqe_prev != (elm)) \
458 panic("TAILQ_* back %p %s:%d", (void *)(elm), \
517 (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
518 (elm)->field.tqe_next = (listelm); \
519 *(listelm)->field.tqe_prev = (elm); \
520 (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
521 _NOTE(CONSTCOND) \
522 } while (0)
523
/*
 * Unlink elm in O(1): patch the successor's back pointer (or
 * tqh_last when elm was last), then the predecessor's next pointer
 * through tqe_prev.  QUEUEDEBUG_* hooks expand to nothing unless
 * _KERNEL && QUEUEDEBUG.
 */
#define	TAILQ_REMOVE(head, elm, field) do {				\
	QUEUEDEBUG_TAILQ_PREREMOVE((head), (elm), field)		\
	QUEUEDEBUG_TAILQ_OP((elm), field)				\
	if (((elm)->field.tqe_next) != NULL)				\
		(elm)->field.tqe_next->field.tqe_prev = 		\
		    (elm)->field.tqe_prev;				\
	else								\
		(head)->tqh_last = (elm)->field.tqe_prev;		\
	*(elm)->field.tqe_prev = (elm)->field.tqe_next;			\
	QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field);			\
	_NOTE(CONSTCOND)						\
} while (0)

/* Forward iteration; var must not be removed inside the body. */
#define	TAILQ_FOREACH(var, head, field)					\
	for ((var) = ((head)->tqh_first);				\
	    (var);							\
	    (var) = ((var)->field.tqe_next))

/*
 * Reverse iteration: reinterprets tqh_last/tqe_prev as pointers into
 * a struct headname in order to step backwards (the classic TAILQ
 * last/prev trick).
 */
#define	TAILQ_FOREACH_REVERSE(var, head, headname, field)		\
	for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last));\
	    (var);							\
	    (var) =							\
	    (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)))
547
/*
 * Tail queue access methods.  Pure expressions.  TAILQ_LAST and
 * TAILQ_PREV reinterpret the prev slot as a struct headname to walk
 * backwards; headname must be the tag used with TAILQ_HEAD().
 */
#define	TAILQ_EMPTY(head)		((head)->tqh_first == NULL)
#define	TAILQ_FIRST(head)		((head)->tqh_first)
#define	TAILQ_NEXT(elm, field)		((elm)->field.tqe_next)

#define	TAILQ_LAST(head, headname) \
	(*(((struct headname *)((head)->tqh_last))->tqh_last))
#define	TAILQ_PREV(elm, headname, field) \
	(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
559
560
/*
 * Circular queue definitions.  Doubly-linked ring threaded through
 * the head; end-of-queue is marked by a pointer back to the head
 * itself rather than by NULL.
 */
#define	CIRCLEQ_HEAD(name, type)					\
struct name {								\
	struct type *cqh_first;		/* first element */		\
	struct type *cqh_last;		/* last element */		\
}

/*
 * Static initializer: both links point at the head (empty ring).
 * The argument is parenthesized so any lvalue expression works,
 * not just a plain identifier.
 */
#define	CIRCLEQ_HEAD_INITIALIZER(head)					\
	{ (void *)&(head), (void *)&(head) }

/* Per-element linkage: next/previous element, or the head sentinel. */
#define	CIRCLEQ_ENTRY(type)						\
struct {								\
	struct type *cqe_next;		/* next element */		\
	struct type *cqe_prev;		/* previous element */		\
}
578
/*
 * Circular queue functions.
 */
/* Reset to an empty ring: both links point back at the head. */
#define	CIRCLEQ_INIT(head) do {						\
	(head)->cqh_first = (void *)(head);				\
	(head)->cqh_last = (void *)(head);				\
	_NOTE(CONSTCOND)						\
} while (0)

/* Link elm after listelm; update cqh_last if listelm was last. */
#define	CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do {		\
	(elm)->field.cqe_next = (listelm)->field.cqe_next;		\
	(elm)->field.cqe_prev = (listelm);				\
	if ((listelm)->field.cqe_next == (void *)(head))		\
		(head)->cqh_last = (elm);				\
	else								\
		(listelm)->field.cqe_next->field.cqe_prev = (elm);	\
	(listelm)->field.cqe_next = (elm);				\
	_NOTE(CONSTCOND)						\
} while (0)
598
599 #define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \
626 else \
627 (head)->cqh_last->field.cqe_next = (elm); \
628 (head)->cqh_last = (elm); \
629 _NOTE(CONSTCOND) \
630 } while (0)
631
/*
 * Unlink elm in O(1).  Either neighbour may be the head itself (the
 * end-of-ring sentinel); in that case the head's first/last slot is
 * patched instead of a neighbour's linkage.
 */
#define	CIRCLEQ_REMOVE(head, elm, field) do {				\
	if ((elm)->field.cqe_next == (void *)(head))			\
		(head)->cqh_last = (elm)->field.cqe_prev;		\
	else								\
		(elm)->field.cqe_next->field.cqe_prev =			\
		    (elm)->field.cqe_prev;				\
	if ((elm)->field.cqe_prev == (void *)(head))			\
		(head)->cqh_first = (elm)->field.cqe_next;		\
	else								\
		(elm)->field.cqe_prev->field.cqe_next =			\
		    (elm)->field.cqe_next;				\
	_NOTE(CONSTCOND)						\
} while (0)
645
/* Forward iteration; terminates when var wraps around to the head. */
#define	CIRCLEQ_FOREACH(var, head, field)				\
	for ((var) = ((head)->cqh_first);				\
	    (var) != (void *)(head);					\
	    (var) = ((var)->field.cqe_next))

/* Reverse iteration over the ring. */
#define	CIRCLEQ_FOREACH_REVERSE(var, head, field)			\
	for ((var) = ((head)->cqh_last);				\
	    (var) != (void *)(head);					\
	    (var) = ((var)->field.cqe_prev))
655
/*
 * Circular queue access methods.  Pure expressions; an empty ring is
 * detected by the first link pointing back at the head.
 */
#define	CIRCLEQ_EMPTY(head)		((head)->cqh_first == (void *)(head))
#define	CIRCLEQ_FIRST(head)		((head)->cqh_first)
#define	CIRCLEQ_LAST(head)		((head)->cqh_last)
#define	CIRCLEQ_NEXT(elm, field)	((elm)->field.cqe_next)
#define	CIRCLEQ_PREV(elm, field)	((elm)->field.cqe_prev)
664
/*
 * CIRCLEQ_LOOP_NEXT / CIRCLEQ_LOOP_PREV: like CIRCLEQ_NEXT/PREV but
 * wrap past the head, so iteration never yields the head sentinel.
 * (elm) is parenthesized in the false branch as well — the original
 * expanded bare `elm`, which miscompiles for expression arguments
 * such as `&node` (`&node->field.cqe_next` binds as an address-of).
 */
#define	CIRCLEQ_LOOP_NEXT(head, elm, field)				\
	(((elm)->field.cqe_next == (void *)(head))			\
	    ? ((head)->cqh_first)					\
	    : ((elm)->field.cqe_next))
#define	CIRCLEQ_LOOP_PREV(head, elm, field)				\
	(((elm)->field.cqe_prev == (void *)(head))			\
	    ? ((head)->cqh_last)					\
	    : ((elm)->field.cqe_prev))
673
674 #ifdef __cplusplus
675 }
676 #endif
677
678 #endif /* !_SYS_QUEUE_H */
|
1 /*
2 * Copyright (c) 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of the University nor the names of its contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * @(#)queue.h 8.5 (Berkeley) 8/20/94
30 */
31 /*
32 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
33 * Use is subject to license terms.
34 */
35
36 #ifndef _SYS_QUEUE_H
37 #define _SYS_QUEUE_H
38
39 #include <sys/note.h>
40 #include <sys/stddef.h>
41
42 #ifdef __cplusplus
43 extern "C" {
44 #endif
45
46 /*
47 * This file defines five types of data structures: singly-linked lists,
48 * lists, simple queues, tail queues, and circular queues.
49 *
50 * A singly-linked list is headed by a single forward pointer. The
51 * elements are singly linked for minimum space and pointer manipulation
52 * overhead at the expense of O(n) removal for arbitrary elements. New
53 * elements can be added to the list after an existing element or at the
54 * head of the list. Elements being removed from the head of the list
55 * should use the explicit macro for this purpose for optimum
56 * efficiency. A singly-linked list may only be traversed in the forward
57 * direction. Singly-linked lists are ideal for applications with large
58 * datasets and few or no removals or for implementing a LIFO queue.
59 *
60 * A list is headed by a single forward pointer (or an array of forward
90 */
91
/*
 * List definitions.
 */
/* Head of a list: a single pointer to the first element (NULL if empty). */
#define	LIST_HEAD(name, type)						\
struct name {								\
	struct type *lh_first;	/* first element */			\
}

/* Static initializer for an empty LIST_HEAD. */
#define	LIST_HEAD_INITIALIZER(head)					\
	{ NULL }

/* Per-element linkage; le_prev points at the previous next slot. */
#define	LIST_ENTRY(type)						\
struct {								\
	struct type *le_next;	/* next element */			\
	struct type **le_prev;	/* address of previous next element */	\
}

/*
 * List access methods.  Pure expressions.
 */
#define	LIST_EMPTY(head)	((head)->lh_first == NULL)
#define	LIST_FIRST(head)	((head)->lh_first)
#define	LIST_NEXT(elm, field)	((elm)->field.le_next)
/*
 * LIST_PREV recovers the previous element from le_prev with
 * container_of (hence the <sys/stddef.h> include above); yields NULL
 * for the first element.
 */
#define	LIST_PREV(elm, head, type, field)				\
	((elm)->field.le_prev == &LIST_FIRST((head)) ? NULL :		\
	    container_of((elm)->field.le_prev, type, field.le_next))

/* Forward iteration; var must not be removed inside the body. */
#define	LIST_FOREACH(var, head, field)					\
	for ((var) = LIST_FIRST((head));				\
	    (var);							\
	    (var) = LIST_NEXT((var), field))

/*
 * Safe variant: tvar caches the successor so var may be removed (or
 * freed) inside the loop body.
 */
#define	LIST_FOREACH_SAFE(var, head, field, tvar)			\
	for ((var) = LIST_FIRST((head));				\
	    (var) && ((tvar) = LIST_NEXT((var), field), 1);		\
	    (var) = (tvar))
128
129 /*
130 * List functions.
131 */
132 #if defined(_KERNEL) && defined(QUEUEDEBUG)
133 #define QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field) \
134 if ((head)->lh_first && \
135 (head)->lh_first->field.le_prev != &(head)->lh_first) \
136 panic("LIST_INSERT_HEAD %p %s:%d", (head), __FILE__, __LINE__);
137 #define QUEUEDEBUG_LIST_OP(elm, field) \
138 if ((elm)->field.le_next && \
139 (elm)->field.le_next->field.le_prev != \
140 &(elm)->field.le_next) \
141 panic("LIST_* forw %p %s:%d", (elm), __FILE__, __LINE__);\
142 if (*(elm)->field.le_prev != (elm)) \
143 panic("LIST_* back %p %s:%d", (elm), __FILE__, __LINE__);
144 #define QUEUEDEBUG_LIST_POSTREMOVE(elm, field) \
145 (elm)->field.le_next = (void *)1L; \
146 (elm)->field.le_prev = (void *)1L;
147 #else
148 #define QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field)
149 #define QUEUEDEBUG_LIST_OP(elm, field)
176
/*
 * Insert elm at the head of the list.  The QUEUEDEBUG_* hook expands
 * to nothing unless _KERNEL && QUEUEDEBUG are defined.
 */
#define	LIST_INSERT_HEAD(head, elm, field) do {				\
	QUEUEDEBUG_LIST_INSERT_HEAD((head), (elm), field)		\
	if (((elm)->field.le_next = (head)->lh_first) != NULL)		\
		(head)->lh_first->field.le_prev = &(elm)->field.le_next;\
	(head)->lh_first = (elm);					\
	(elm)->field.le_prev = &(head)->lh_first;			\
	_NOTE(CONSTCOND)						\
} while (0)

/*
 * Unlink elm in O(1): patch the successor's back pointer, then the
 * predecessor's next pointer through le_prev.
 */
#define	LIST_REMOVE(elm, field) do {					\
	QUEUEDEBUG_LIST_OP((elm), field)				\
	if ((elm)->field.le_next != NULL)				\
		(elm)->field.le_next->field.le_prev = 			\
		    (elm)->field.le_prev;				\
	*(elm)->field.le_prev = (elm)->field.le_next;			\
	QUEUEDEBUG_LIST_POSTREMOVE((elm), field)			\
	_NOTE(CONSTCOND)						\
} while (0)
195
196
/*
 * Singly-linked List definitions.  One forward pointer per element;
 * arbitrary removal is O(n).
 */
#define	SLIST_HEAD(name, type)						\
struct name {								\
	struct type *slh_first;	/* first element */			\
}

/* Static initializer for an empty SLIST_HEAD. */
#define	SLIST_HEAD_INITIALIZER(head)					\
	{ NULL }

/* Per-element linkage: forward pointer only. */
#define	SLIST_ENTRY(type)						\
struct {								\
	struct type *sle_next;	/* next element */			\
}

/*
 * Singly-linked List access methods.  Pure expressions.
 */
#define	SLIST_EMPTY(head)	((head)->slh_first == NULL)
#define	SLIST_FIRST(head)	((head)->slh_first)
#define	SLIST_NEXT(elm, field)	((elm)->field.sle_next)

/* Forward iteration; var must not be unlinked inside the body. */
#define	SLIST_FOREACH(var, head, field)					\
	for ((var) = SLIST_FIRST((head));				\
	    (var);							\
	    (var) = SLIST_NEXT((var), field))

/* Safe variant: tvar caches the successor so var may be removed. */
#define	SLIST_FOREACH_SAFE(var, head, field, tvar)			\
	for ((var) = SLIST_FIRST((head));				\
	    (var) && ((tvar) = SLIST_NEXT((var), field), 1);		\
	    (var) = (tvar))
229
/*
 * Singly-linked List functions.
 */
/* Reset the list to empty. */
#define	SLIST_INIT(head) do {						\
	(head)->slh_first = NULL;					\
	_NOTE(CONSTCOND)						\
} while (0)

/* Link elm immediately after slistelm.  O(1). */
#define	SLIST_INSERT_AFTER(slistelm, elm, field) do {			\
	(elm)->field.sle_next = (slistelm)->field.sle_next;		\
	(slistelm)->field.sle_next = (elm);				\
	_NOTE(CONSTCOND)						\
} while (0)

/* Push elm onto the front of the list.  O(1). */
#define	SLIST_INSERT_HEAD(head, elm, field) do {			\
	(elm)->field.sle_next = (head)->slh_first;			\
	(head)->slh_first = (elm);					\
	_NOTE(CONSTCOND)						\
} while (0)

/* Pop the first element; the list must not be empty. */
#define	SLIST_REMOVE_HEAD(head, field) do {				\
	(head)->slh_first = (head)->slh_first->field.sle_next;		\
	_NOTE(CONSTCOND)						\
} while (0)

/*
 * Unlink elm wherever it is: O(n) scan for its predecessor, then
 * splice it out.  elm must be on the list.
 */
#define	SLIST_REMOVE(head, elm, type, field) do {			\
	if ((head)->slh_first == (elm)) {				\
		SLIST_REMOVE_HEAD((head), field);			\
	}								\
	else {								\
		struct type *curelm = (head)->slh_first;		\
		while (curelm->field.sle_next != (elm))			\
			curelm = curelm->field.sle_next;		\
		curelm->field.sle_next =				\
		    curelm->field.sle_next->field.sle_next;		\
	}								\
	_NOTE(CONSTCOND)						\
} while (0)
268
269
/*
 * Singly-linked Tail queue declarations: SLIST plus a cached pointer
 * to the last element's next slot for O(1) tail insertion.
 */
#define	STAILQ_HEAD(name, type)						\
struct name {								\
	struct type *stqh_first;	/* first element */		\
	struct type **stqh_last;	/* addr of last next element */	\
}

/* Static initializer: empty queue, stqh_last points at stqh_first. */
#define	STAILQ_HEAD_INITIALIZER(head)					\
	{ NULL, &(head).stqh_first }

/* Per-element linkage: forward pointer only. */
#define	STAILQ_ENTRY(type)						\
struct {								\
	struct type *stqe_next;	/* next element */			\
}

/*
 * Singly-linked Tail queue access methods.  Pure expressions.
 */
#define	STAILQ_EMPTY(head)	((head)->stqh_first == NULL)
#define	STAILQ_FIRST(head)	((head)->stqh_first)
#define	STAILQ_NEXT(elm, field)	((elm)->field.stqe_next)

/* Forward iteration; var must not be removed inside the body. */
#define	STAILQ_FOREACH(var, head, field)				\
	for ((var) = STAILQ_FIRST(head);				\
	    (var);							\
	    (var) = STAILQ_NEXT((var), field))

/* Safe variant: tvar caches the successor so var may be removed. */
#define	STAILQ_FOREACH_SAFE(var, head, field, tvar)			\
	for ((var) = STAILQ_FIRST(head);				\
	    (var) && ((tvar) = STAILQ_NEXT((var), field), 1);		\
	    (var) = (tvar))
303
/*
 * Singly-linked Tail queue functions.
 */
/* Reset to empty; stqh_last must always point at the final next slot. */
#define	STAILQ_INIT(head) do {						\
	(head)->stqh_first = NULL;					\
	(head)->stqh_last = &(head)->stqh_first;			\
	_NOTE(CONSTCOND)						\
} while (0)

/* Append all of head2 to head1 in O(1) and leave head2 empty. */
#define	STAILQ_CONCAT(head1, head2) do {				\
	if (!STAILQ_EMPTY((head2))) {					\
		*(head1)->stqh_last = (head2)->stqh_first;		\
		(head1)->stqh_last = (head2)->stqh_last;		\
		STAILQ_INIT((head2));					\
	}								\
	_NOTE(CONSTCOND)						\
} while (0)

/* Push elm on the front; fix stqh_last when the queue was empty. */
#define	STAILQ_INSERT_HEAD(head, elm, field) do {			\
	if (((elm)->field.stqe_next = (head)->stqh_first) == NULL)	\
		(head)->stqh_last = &(elm)->field.stqe_next;		\
	(head)->stqh_first = (elm);					\
	_NOTE(CONSTCOND)						\
} while (0)

/* Append elm at the tail in O(1) via the cached stqh_last slot. */
#define	STAILQ_INSERT_TAIL(head, elm, field) do {			\
	(elm)->field.stqe_next = NULL;					\
	*(head)->stqh_last = (elm);					\
	(head)->stqh_last = &(elm)->field.stqe_next;			\
	_NOTE(CONSTCOND)						\
} while (0)

/* Link elm after listelm; update stqh_last if listelm was the tail. */
#define	STAILQ_INSERT_AFTER(head, listelm, elm, field) do {		\
	if (((elm)->field.stqe_next = (listelm)->field.stqe_next)	\
	    == NULL)							\
		(head)->stqh_last = &(elm)->field.stqe_next;		\
	(listelm)->field.stqe_next = (elm);				\
	_NOTE(CONSTCOND)						\
} while (0)

/* Pop the first element (queue must be non-empty). */
#define	STAILQ_REMOVE_HEAD(head, field) do {				\
	if (((head)->stqh_first = (head)->stqh_first->field.stqe_next)	\
	    == NULL)							\
		(head)->stqh_last = &(head)->stqh_first;		\
	_NOTE(CONSTCOND)						\
} while (0)

/* Unlink the element after elm; elm must have a successor. */
#define	STAILQ_REMOVE_AFTER(head, elm, field) do {			\
	if ((STAILQ_NEXT(elm, field) =					\
	    STAILQ_NEXT(STAILQ_NEXT(elm, field), field)) == NULL)	\
		(head)->stqh_last = &STAILQ_NEXT((elm), field);		\
	_NOTE(CONSTCOND)						\
} while (0)

/* O(n) removal of an arbitrary element that must be on the queue. */
#define	STAILQ_REMOVE(head, elm, type, field) do {			\
	if (STAILQ_FIRST((head)) == (elm)) {				\
		STAILQ_REMOVE_HEAD((head), field);			\
	} else {							\
		struct type *curelm = STAILQ_FIRST(head);		\
		while (STAILQ_NEXT(curelm, field) != (elm))		\
			curelm = STAILQ_NEXT(curelm, field);		\
		STAILQ_REMOVE_AFTER(head, curelm, field);		\
	}								\
	_NOTE(CONSTCOND)						\
} while (0)

/*
 * Last element, recovered from the cached next-slot address with
 * container_of; NULL when the queue is empty.
 */
#define	STAILQ_LAST(head, type, field)					\
	(STAILQ_EMPTY((head)) ? NULL :					\
	    container_of((head)->stqh_last, struct type, field.stqe_next))
373
/*
 * Simple queue definitions: the head keeps the first element and the
 * address of the last element's next slot (O(1) tail insertion).
 */
#define	SIMPLEQ_HEAD(name, type)					\
struct name {								\
	struct type *sqh_first;	/* first element */			\
	struct type **sqh_last;	/* addr of last next element */		\
}

/* Static initializer: empty queue, sqh_last points at sqh_first. */
#define	SIMPLEQ_HEAD_INITIALIZER(head)					\
	{ NULL, &(head).sqh_first }

/* Per-element linkage: forward pointer only. */
#define	SIMPLEQ_ENTRY(type)						\
struct {								\
	struct type *sqe_next;	/* next element */			\
}

/*
 * Simple queue access methods.  Pure expressions.
 */
#define	SIMPLEQ_EMPTY(head)	((head)->sqh_first == NULL)
#define	SIMPLEQ_FIRST(head)	((head)->sqh_first)
#define	SIMPLEQ_NEXT(elm, field)	((elm)->field.sqe_next)

/* Forward iteration; var must not be removed inside the body. */
#define	SIMPLEQ_FOREACH(var, head, field)				\
	for ((var) = SIMPLEQ_FIRST((head));				\
	    (var);							\
	    (var) = SIMPLEQ_NEXT((var), field))

/* Safe variant: tvar caches the successor so var may be removed. */
#define	SIMPLEQ_FOREACH_SAFE(var, head, field, tvar)			\
	for ((var) = SIMPLEQ_FIRST((head));				\
	    (var) && ((tvar) = SIMPLEQ_NEXT((var), field), 1);		\
	    (var) = (tvar))
407
/*
 * Simple queue functions.
 */
/* Reset to empty; sqh_last tracks the final next-pointer slot. */
#define	SIMPLEQ_INIT(head) do {						\
	(head)->sqh_first = NULL;					\
	(head)->sqh_last = &(head)->sqh_first;				\
	_NOTE(CONSTCOND)						\
} while (0)

/* Push elm on the front; fix sqh_last when the queue was empty. */
#define	SIMPLEQ_INSERT_HEAD(head, elm, field) do {			\
	if (((elm)->field.sqe_next = (head)->sqh_first) == NULL)	\
		(head)->sqh_last = &(elm)->field.sqe_next;		\
	(head)->sqh_first = (elm);					\
	_NOTE(CONSTCOND)						\
} while (0)
423
424 #define SIMPLEQ_INSERT_TAIL(head, elm, field) do { \
425 (elm)->field.sqe_next = NULL; \
426 *(head)->sqh_last = (elm); \
427 (head)->sqh_last = &(elm)->field.sqe_next; \
428 _NOTE(CONSTCOND) \
/*
 * Pop the first element (queue must be non-empty); reset sqh_last
 * when the queue becomes empty.
 */
#define	SIMPLEQ_REMOVE_HEAD(head, field) do {				\
	if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \
		(head)->sqh_last = &(head)->sqh_first;			\
	_NOTE(CONSTCOND)						\
} while (0)

/* O(n) removal of an arbitrary element that must be on the queue. */
#define	SIMPLEQ_REMOVE(head, elm, type, field) do {			\
	if ((head)->sqh_first == (elm)) {				\
		SIMPLEQ_REMOVE_HEAD((head), field);			\
	} else {							\
		struct type *curelm = (head)->sqh_first;		\
		while (curelm->field.sqe_next != (elm))			\
			curelm = curelm->field.sqe_next;		\
		if ((curelm->field.sqe_next =				\
			curelm->field.sqe_next->field.sqe_next) == NULL) \
			(head)->sqh_last = &(curelm)->field.sqe_next;	\
	}								\
	_NOTE(CONSTCOND)						\
} while (0)
457
458
/*
 * Tail queue definitions.  Doubly linked: O(1) insertion/removal at
 * both ends and relative to any element.  The _TAILQ_* forms take a
 * fully spelled element type so heads/entries of qualified types can
 * be declared as well.
 */
#define	_TAILQ_HEAD(name, type)						\
struct name {								\
	type *tqh_first;	/* first element */			\
	type **tqh_last;	/* addr of last next element */		\
}
#define	TAILQ_HEAD(name, type)	_TAILQ_HEAD(name, struct type)

/* Static initializer: empty queue, tqh_last points at tqh_first. */
#define	TAILQ_HEAD_INITIALIZER(head)					\
	{ NULL, &(head).tqh_first }

/* Per-element linkage; tqe_prev points at the previous next slot. */
#define	_TAILQ_ENTRY(type)						\
struct {								\
	type *tqe_next;		/* next element */			\
	type **tqe_prev;	/* address of previous next element */\
}
#define	TAILQ_ENTRY(type)	_TAILQ_ENTRY(struct type)
478
/*
 * Tail queue access methods.  Pure expressions.  TAILQ_LAST and
 * TAILQ_PREV reinterpret the prev slot as a struct headname to walk
 * backwards; headname must be the tag used with TAILQ_HEAD().
 */
#define	TAILQ_EMPTY(head)		((head)->tqh_first == NULL)
#define	TAILQ_FIRST(head)		((head)->tqh_first)
#define	TAILQ_NEXT(elm, field)		((elm)->field.tqe_next)
#define	TAILQ_LAST(head, headname) \
	(*(((struct headname *)((head)->tqh_last))->tqh_last))
#define	TAILQ_PREV(elm, headname, field) \
	(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))


/* Forward iteration; var must not be removed inside the body. */
#define	TAILQ_FOREACH(var, head, field)					\
	for ((var) = TAILQ_FIRST((head));				\
	    (var);							\
	    (var) = TAILQ_NEXT((var), field))

/* Safe variant: tvar caches the successor so var may be removed. */
#define	TAILQ_FOREACH_SAFE(var, head, field, tvar)			\
	for ((var) = TAILQ_FIRST((head));				\
	    (var) && ((tvar) = TAILQ_NEXT((var), field), 1);		\
	    (var) = (tvar))

/* Reverse iteration. */
#define	TAILQ_FOREACH_REVERSE(var, head, headname, field)		\
	for ((var) = TAILQ_LAST((head), headname);			\
	    (var);							\
	    (var) = TAILQ_PREV((var), headname, field))

/* Reverse-safe variant: tvar caches the predecessor. */
#define	TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar)	\
	for ((var) = TAILQ_LAST((head), headname);			\
	    (var) && ((tvar) = TAILQ_PREV((var), headname, field), 1);	\
	    (var) = (tvar))
510
511 /*
512 * Tail queue functions.
513 */
/*
 * Kernel-only consistency checks, compiled in only when both _KERNEL
 * and QUEUEDEBUG are defined; they panic with the call site's file and
 * line on detecting a corrupted queue.  Each expands to an unbraced
 * `if' statement, so they are only safe inside the do/while bodies of
 * the TAILQ_* operation macros that invoke them.
 */
#if defined(_KERNEL) && defined(QUEUEDEBUG)
/* Before head insertion: first element's back pointer must point at
 * the head's tqh_first slot.  (elm is unused; kept for symmetry.) */
#define	QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field)			\
	if ((head)->tqh_first &&					\
	    (head)->tqh_first->field.tqe_prev != &(head)->tqh_first)	\
		panic("TAILQ_INSERT_HEAD %p %s:%d", (void *)(head),	\
		      __FILE__, __LINE__);
/* Before tail insertion: the slot tqh_last points at must be NULL. */
#define	QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field)			\
	if (*(head)->tqh_last != NULL)					\
		panic("TAILQ_INSERT_TAIL %p %s:%d", (void *)(head),	\
		      __FILE__, __LINE__);
524 #define QUEUEDEBUG_TAILQ_OP(elm, field) \
525 if ((elm)->field.tqe_next && \
526 (elm)->field.tqe_next->field.tqe_prev != \
527 &(elm)->field.tqe_next) \
528 panic("TAILQ_* forw %p %s:%d", (void *)(elm), \
529 __FILE__, __LINE__);\
530 if (*(elm)->field.tqe_prev != (elm)) \
531 panic("TAILQ_* back %p %s:%d", (void *)(elm), \
590 (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
591 (elm)->field.tqe_next = (listelm); \
592 *(listelm)->field.tqe_prev = (elm); \
593 (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
594 _NOTE(CONSTCOND) \
595 } while (0)
596
/*
 * Unlink elm from the queue: redirect the successor's back pointer
 * (or, when elm is last, the head's tqh_last) to elm's predecessor
 * slot, then store elm's successor through that predecessor slot.
 * elm itself is not freed; its own linkage is left untouched except
 * for whatever QUEUEDEBUG_TAILQ_POSTREMOVE does (the QUEUEDEBUG_*
 * hooks are presumably empty in non-debug builds -- their non-debug
 * definitions are earlier in this file; confirm there).
 * _NOTE(CONSTCOND) silences lint about the constant while(0).
 */
#define	TAILQ_REMOVE(head, elm, field) do {				\
	QUEUEDEBUG_TAILQ_PREREMOVE((head), (elm), field)		\
	QUEUEDEBUG_TAILQ_OP((elm), field)				\
	if (((elm)->field.tqe_next) != NULL)				\
		(elm)->field.tqe_next->field.tqe_prev = 		\
		    (elm)->field.tqe_prev;				\
	else								\
		(head)->tqh_last = (elm)->field.tqe_prev;		\
	*(elm)->field.tqe_prev = (elm)->field.tqe_next; 		\
	QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field);			\
	_NOTE(CONSTCOND) \
} while (0)
609
610
611 /*
612 * Circular queue definitions.
613 */
/*
 * Circular queue head.  The head itself acts as the list sentinel:
 * in an empty queue both members point back at the head, and
 * traversal terminates by comparing an element pointer against
 * (void *)head rather than against NULL.
 */
#define	CIRCLEQ_HEAD(name, type)					\
struct name {								\
	struct type *cqh_first;		/* first element */		\
	struct type *cqh_last;		/* last element */		\
}

/*
 * Static initializer for an empty circular queue; the casts are needed
 * because the head is not a struct type *.
 */
#define	CIRCLEQ_HEAD_INITIALIZER(head)					\
	{ (void *)&head, (void *)&head }

/*
 * Per-element linkage; next/prev point at neighbouring elements, or at
 * the head (cast) at either end of the queue.
 */
#define	CIRCLEQ_ENTRY(type)						\
struct {								\
	struct type *cqe_next;		/* next element */		\
	struct type *cqe_prev;		/* previous element */		\
}
628
629 /*
630 * Circular queue access methods.
631 */
/* True iff the queue is empty (first "element" is the head itself). */
#define	CIRCLEQ_EMPTY(head)		((head)->cqh_first == (void *)(head))
/* First/last element; equals (void *)head, not NULL, when empty. */
#define	CIRCLEQ_FIRST(head)		((head)->cqh_first)
#define	CIRCLEQ_LAST(head)		((head)->cqh_last)
/* Neighbours of elm; equals (void *)head past either end. */
#define	CIRCLEQ_NEXT(elm, field)	((elm)->field.cqe_next)
#define	CIRCLEQ_PREV(elm, field)	((elm)->field.cqe_prev)
637
/*
 * Wrapping traversal helpers: CIRCLEQ_LOOP_NEXT yields elm's successor,
 * skipping over the head sentinel back to the first element, and
 * CIRCLEQ_LOOP_PREV yields the predecessor, wrapping to the last
 * element.  On a queue these never evaluate to the head pointer.
 *
 * Fix: the else branches previously expanded (elm->field.cqe_next)
 * with elm unparenthesized, unlike every other argument use in this
 * file, so arguments that are not plain identifiers (e.g. &node or
 * p + i) mis-parsed.  Both uses of elm are now parenthesized.
 */
#define	CIRCLEQ_LOOP_NEXT(head, elm, field)				\
	(((elm)->field.cqe_next == (void *)(head))			\
	    ? ((head)->cqh_first)					\
	    : ((elm)->field.cqe_next))
#define	CIRCLEQ_LOOP_PREV(head, elm, field)				\
	(((elm)->field.cqe_prev == (void *)(head))			\
	    ? ((head)->cqh_last)					\
	    : ((elm)->field.cqe_prev))
646
/*
 * Forward iteration over a circular queue.  Termination compares var
 * against the head cast to void *, so after a complete loop var equals
 * (void *)head, not NULL.  var must not be removed inside the body
 * (there is no _SAFE variant for circular queues in this file).
 */
#define	CIRCLEQ_FOREACH(var, head, field)				\
	for ((var) = CIRCLEQ_FIRST((head));				\
	    (var) != (void *)(head);					\
	    (var) = CIRCLEQ_NEXT((var), field))

/* Reverse iteration; same termination convention as CIRCLEQ_FOREACH. */
#define	CIRCLEQ_FOREACH_REVERSE(var, head, field)			\
	for ((var) = CIRCLEQ_LAST((head));				\
	    (var) != (void *)(head);					\
	    (var) = CIRCLEQ_PREV((var), field))
656
657 /*
658 * Circular queue functions.
659 */
/*
 * Make the queue empty at run time: both head pointers refer back to
 * the head sentinel itself.  _NOTE(CONSTCOND) (from <sys/note.h>)
 * silences lint about the constant while(0) condition.
 */
#define	CIRCLEQ_INIT(head) do {						\
	(head)->cqh_first = (void *)(head);				\
	(head)->cqh_last = (void *)(head);				\
	_NOTE(CONSTCOND) \
} while (0)
665
/*
 * Insert elm immediately after listelm.  If listelm was the last
 * element (its next pointer is the head sentinel) the head's cqh_last
 * must be updated instead of a successor's back pointer.
 */
#define	CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do {		\
	(elm)->field.cqe_next = (listelm)->field.cqe_next;		\
	(elm)->field.cqe_prev = (listelm);				\
	if ((listelm)->field.cqe_next == (void *)(head))		\
		(head)->cqh_last = (elm);				\
	else								\
		(listelm)->field.cqe_next->field.cqe_prev = (elm);	\
	(listelm)->field.cqe_next = (elm);				\
	_NOTE(CONSTCOND) \
} while (0)
676
677 #define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \
704 else \
705 (head)->cqh_last->field.cqe_next = (elm); \
706 (head)->cqh_last = (elm); \
707 _NOTE(CONSTCOND) \
708 } while (0)
709
/*
 * Unlink elm from the queue.  Each neighbour pointer update has a head
 * case: when elm's next (resp. prev) is the head sentinel, the head's
 * cqh_last (resp. cqh_first) is rewritten instead of an element's
 * linkage.  elm's own cqe_next/cqe_prev are left untouched and elm is
 * not freed.
 */
#define	CIRCLEQ_REMOVE(head, elm, field) do {				\
	if ((elm)->field.cqe_next == (void *)(head))			\
		(head)->cqh_last = (elm)->field.cqe_prev;		\
	else								\
		(elm)->field.cqe_next->field.cqe_prev =			\
		    (elm)->field.cqe_prev;				\
	if ((elm)->field.cqe_prev == (void *)(head))			\
		(head)->cqh_first = (elm)->field.cqe_next;		\
	else								\
		(elm)->field.cqe_prev->field.cqe_next =			\
		    (elm)->field.cqe_next;				\
	_NOTE(CONSTCOND) \
} while (0)
723
724
725 #ifdef __cplusplus
726 }
727 #endif
728
729 #endif /* !_SYS_QUEUE_H */
|