blob: f93223feb46df15ee99442e6d8efefc3e50fa3db [file] [log] [blame]
musl does not provide sys/queue.h implementation. Borrow queue.h from
the NetBSD project
http://cvsweb.netbsd.org/bsdweb.cgi/src/sys/sys/queue.h?rev=1.68

Upstream-Status: Inappropriate [musl specific]

Signed-off-by: Jörg Krause <joerg.krause@embedded.rocks>
Signed-off-by: Maxin B. John <maxin.john@intel.com>
---
10diff -Naur libtirpc-1.0.1-orig/src/clnt_bcast.c libtirpc-1.0.1/src/clnt_bcast.c
11--- libtirpc-1.0.1-orig/src/clnt_bcast.c 2015-10-30 17:15:14.000000000 +0200
12+++ libtirpc-1.0.1/src/clnt_bcast.c 2015-12-21 17:03:52.066008311 +0200
13@@ -40,7 +40,6 @@
14 */
15 #include <sys/socket.h>
16 #include <sys/types.h>
17-#include <sys/queue.h>
18
19 #include <net/if.h>
20 #include <netinet/in.h>
21@@ -62,6 +61,7 @@
22 #include <err.h>
23 #include <string.h>
24
25+#include "queue.h"
26 #include "rpc_com.h"
27 #include "debug.h"
28
29diff -Naur libtirpc-1.0.1-orig/tirpc/queue.h libtirpc-1.0.1/tirpc/queue.h
30--- libtirpc-1.0.1-orig/tirpc/queue.h 1970-01-01 02:00:00.000000000 +0200
31+++ libtirpc-1.0.1/tirpc/queue.h 2015-12-21 17:02:44.427853905 +0200
32@@ -0,0 +1,846 @@
33+/* $NetBSD: queue.h,v 1.68 2014/11/19 08:10:01 uebayasi Exp $ */
34+
35+/*
36+ * Copyright (c) 1991, 1993
37+ * The Regents of the University of California. All rights reserved.
38+ *
39+ * Redistribution and use in source and binary forms, with or without
40+ * modification, are permitted provided that the following conditions
41+ * are met:
42+ * 1. Redistributions of source code must retain the above copyright
43+ * notice, this list of conditions and the following disclaimer.
44+ * 2. Redistributions in binary form must reproduce the above copyright
45+ * notice, this list of conditions and the following disclaimer in the
46+ * documentation and/or other materials provided with the distribution.
47+ * 3. Neither the name of the University nor the names of its contributors
48+ * may be used to endorse or promote products derived from this software
49+ * without specific prior written permission.
50+ *
51+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
52+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
53+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
54+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
55+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
56+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
57+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
58+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
59+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
60+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
61+ * SUCH DAMAGE.
62+ *
63+ * @(#)queue.h 8.5 (Berkeley) 8/20/94
64+ */
65+
66+#ifndef _SYS_QUEUE_H_
67+#define _SYS_QUEUE_H_
68+
69+/*
70+ * This file defines five types of data structures: singly-linked lists,
71+ * lists, simple queues, tail queues, and circular queues.
72+ *
73+ * A singly-linked list is headed by a single forward pointer. The
74+ * elements are singly linked for minimum space and pointer manipulation
75+ * overhead at the expense of O(n) removal for arbitrary elements. New
76+ * elements can be added to the list after an existing element or at the
77+ * head of the list. Elements being removed from the head of the list
78+ * should use the explicit macro for this purpose for optimum
79+ * efficiency. A singly-linked list may only be traversed in the forward
80+ * direction. Singly-linked lists are ideal for applications with large
81+ * datasets and few or no removals or for implementing a LIFO queue.
82+ *
83+ * A list is headed by a single forward pointer (or an array of forward
84+ * pointers for a hash table header). The elements are doubly linked
85+ * so that an arbitrary element can be removed without a need to
86+ * traverse the list. New elements can be added to the list before
87+ * or after an existing element or at the head of the list. A list
88+ * may only be traversed in the forward direction.
89+ *
90+ * A simple queue is headed by a pair of pointers, one the head of the
91+ * list and the other to the tail of the list. The elements are singly
92+ * linked to save space, so elements can only be removed from the
93+ * head of the list. New elements can be added to the list after
94+ * an existing element, at the head of the list, or at the end of the
95+ * list. A simple queue may only be traversed in the forward direction.
96+ *
97+ * A tail queue is headed by a pair of pointers, one to the head of the
98+ * list and the other to the tail of the list. The elements are doubly
99+ * linked so that an arbitrary element can be removed without a need to
100+ * traverse the list. New elements can be added to the list before or
101+ * after an existing element, at the head of the list, or at the end of
102+ * the list. A tail queue may be traversed in either direction.
103+ *
104+ * A circle queue is headed by a pair of pointers, one to the head of the
105+ * list and the other to the tail of the list. The elements are doubly
106+ * linked so that an arbitrary element can be removed without a need to
107+ * traverse the list. New elements can be added to the list before or after
108+ * an existing element, at the head of the list, or at the end of the list.
109+ * A circle queue may be traversed in either direction, but has a more
110+ * complex end of list detection.
111+ *
112+ * For details on the use of these macros, see the queue(3) manual page.
113+ */
114+
115+/*
116+ * Include the definition of NULL only on NetBSD because sys/null.h
117+ * is not available elsewhere. This conditional makes the header
118+ * portable and it can simply be dropped verbatim into any system.
119+ * The caveat is that on other systems some other header
120+ * must provide NULL before the macros can be used.
121+ */
122+#ifdef __NetBSD__
123+#include <sys/null.h>
124+#endif
125+
126+#if defined(QUEUEDEBUG)
127+# if defined(_KERNEL)
128+# define QUEUEDEBUG_ABORT(...) panic(__VA_ARGS__)
129+# else
130+# include <err.h>
131+# define QUEUEDEBUG_ABORT(...) err(1, __VA_ARGS__)
132+# endif
133+#endif
134+
135+/*
136+ * Singly-linked List definitions.
137+ */
138+#define SLIST_HEAD(name, type) \
139+struct name { \
140+ struct type *slh_first; /* first element */ \
141+}
142+
143+#define SLIST_HEAD_INITIALIZER(head) \
144+ { NULL }
145+
146+#define SLIST_ENTRY(type) \
147+struct { \
148+ struct type *sle_next; /* next element */ \
149+}
150+
151+/*
152+ * Singly-linked List access methods.
153+ */
154+#define SLIST_FIRST(head) ((head)->slh_first)
155+#define SLIST_END(head) NULL
156+#define SLIST_EMPTY(head) ((head)->slh_first == NULL)
157+#define SLIST_NEXT(elm, field) ((elm)->field.sle_next)
158+
159+#define SLIST_FOREACH(var, head, field) \
160+ for((var) = (head)->slh_first; \
161+ (var) != SLIST_END(head); \
162+ (var) = (var)->field.sle_next)
163+
164+#define SLIST_FOREACH_SAFE(var, head, field, tvar) \
165+ for ((var) = SLIST_FIRST((head)); \
166+ (var) != SLIST_END(head) && \
167+ ((tvar) = SLIST_NEXT((var), field), 1); \
168+ (var) = (tvar))
169+
170+/*
171+ * Singly-linked List functions.
172+ */
173+#define SLIST_INIT(head) do { \
174+ (head)->slh_first = SLIST_END(head); \
175+} while (/*CONSTCOND*/0)
176+
177+#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
178+ (elm)->field.sle_next = (slistelm)->field.sle_next; \
179+ (slistelm)->field.sle_next = (elm); \
180+} while (/*CONSTCOND*/0)
181+
182+#define SLIST_INSERT_HEAD(head, elm, field) do { \
183+ (elm)->field.sle_next = (head)->slh_first; \
184+ (head)->slh_first = (elm); \
185+} while (/*CONSTCOND*/0)
186+
187+#define SLIST_REMOVE_AFTER(slistelm, field) do { \
188+ (slistelm)->field.sle_next = \
189+ SLIST_NEXT(SLIST_NEXT((slistelm), field), field); \
190+} while (/*CONSTCOND*/0)
191+
192+#define SLIST_REMOVE_HEAD(head, field) do { \
193+ (head)->slh_first = (head)->slh_first->field.sle_next; \
194+} while (/*CONSTCOND*/0)
195+
196+#define SLIST_REMOVE(head, elm, type, field) do { \
197+ if ((head)->slh_first == (elm)) { \
198+ SLIST_REMOVE_HEAD((head), field); \
199+ } \
200+ else { \
201+ struct type *curelm = (head)->slh_first; \
202+ while(curelm->field.sle_next != (elm)) \
203+ curelm = curelm->field.sle_next; \
204+ curelm->field.sle_next = \
205+ curelm->field.sle_next->field.sle_next; \
206+ } \
207+} while (/*CONSTCOND*/0)
208+
209+
210+/*
211+ * List definitions.
212+ */
213+#define LIST_HEAD(name, type) \
214+struct name { \
215+ struct type *lh_first; /* first element */ \
216+}
217+
218+#define LIST_HEAD_INITIALIZER(head) \
219+ { NULL }
220+
221+#define LIST_ENTRY(type) \
222+struct { \
223+ struct type *le_next; /* next element */ \
224+ struct type **le_prev; /* address of previous next element */ \
225+}
226+
227+/*
228+ * List access methods.
229+ */
230+#define LIST_FIRST(head) ((head)->lh_first)
231+#define LIST_END(head) NULL
232+#define LIST_EMPTY(head) ((head)->lh_first == LIST_END(head))
233+#define LIST_NEXT(elm, field) ((elm)->field.le_next)
234+
235+#define LIST_FOREACH(var, head, field) \
236+ for ((var) = ((head)->lh_first); \
237+ (var) != LIST_END(head); \
238+ (var) = ((var)->field.le_next))
239+
240+#define LIST_FOREACH_SAFE(var, head, field, tvar) \
241+ for ((var) = LIST_FIRST((head)); \
242+ (var) != LIST_END(head) && \
243+ ((tvar) = LIST_NEXT((var), field), 1); \
244+ (var) = (tvar))
245+
246+#define LIST_MOVE(head1, head2) do { \
247+ LIST_INIT((head2)); \
248+ if (!LIST_EMPTY((head1))) { \
249+ (head2)->lh_first = (head1)->lh_first; \
250+ LIST_INIT((head1)); \
251+ } \
252+} while (/*CONSTCOND*/0)
253+
254+/*
255+ * List functions.
256+ */
257+#if defined(QUEUEDEBUG)
258+#define QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field) \
259+ if ((head)->lh_first && \
260+ (head)->lh_first->field.le_prev != &(head)->lh_first) \
261+ QUEUEDEBUG_ABORT("LIST_INSERT_HEAD %p %s:%d", (head), \
262+ __FILE__, __LINE__);
263+#define QUEUEDEBUG_LIST_OP(elm, field) \
264+ if ((elm)->field.le_next && \
265+ (elm)->field.le_next->field.le_prev != \
266+ &(elm)->field.le_next) \
267+ QUEUEDEBUG_ABORT("LIST_* forw %p %s:%d", (elm), \
268+ __FILE__, __LINE__); \
269+ if (*(elm)->field.le_prev != (elm)) \
270+ QUEUEDEBUG_ABORT("LIST_* back %p %s:%d", (elm), \
271+ __FILE__, __LINE__);
272+#define QUEUEDEBUG_LIST_POSTREMOVE(elm, field) \
273+ (elm)->field.le_next = (void *)1L; \
274+ (elm)->field.le_prev = (void *)1L;
275+#else
276+#define QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field)
277+#define QUEUEDEBUG_LIST_OP(elm, field)
278+#define QUEUEDEBUG_LIST_POSTREMOVE(elm, field)
279+#endif
280+
281+#define LIST_INIT(head) do { \
282+ (head)->lh_first = LIST_END(head); \
283+} while (/*CONSTCOND*/0)
284+
285+#define LIST_INSERT_AFTER(listelm, elm, field) do { \
286+ QUEUEDEBUG_LIST_OP((listelm), field) \
287+ if (((elm)->field.le_next = (listelm)->field.le_next) != \
288+ LIST_END(head)) \
289+ (listelm)->field.le_next->field.le_prev = \
290+ &(elm)->field.le_next; \
291+ (listelm)->field.le_next = (elm); \
292+ (elm)->field.le_prev = &(listelm)->field.le_next; \
293+} while (/*CONSTCOND*/0)
294+
295+#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
296+ QUEUEDEBUG_LIST_OP((listelm), field) \
297+ (elm)->field.le_prev = (listelm)->field.le_prev; \
298+ (elm)->field.le_next = (listelm); \
299+ *(listelm)->field.le_prev = (elm); \
300+ (listelm)->field.le_prev = &(elm)->field.le_next; \
301+} while (/*CONSTCOND*/0)
302+
303+#define LIST_INSERT_HEAD(head, elm, field) do { \
304+ QUEUEDEBUG_LIST_INSERT_HEAD((head), (elm), field) \
305+ if (((elm)->field.le_next = (head)->lh_first) != LIST_END(head))\
306+ (head)->lh_first->field.le_prev = &(elm)->field.le_next;\
307+ (head)->lh_first = (elm); \
308+ (elm)->field.le_prev = &(head)->lh_first; \
309+} while (/*CONSTCOND*/0)
310+
311+#define LIST_REMOVE(elm, field) do { \
312+ QUEUEDEBUG_LIST_OP((elm), field) \
313+ if ((elm)->field.le_next != NULL) \
314+ (elm)->field.le_next->field.le_prev = \
315+ (elm)->field.le_prev; \
316+ *(elm)->field.le_prev = (elm)->field.le_next; \
317+ QUEUEDEBUG_LIST_POSTREMOVE((elm), field) \
318+} while (/*CONSTCOND*/0)
319+
320+#define LIST_REPLACE(elm, elm2, field) do { \
321+ if (((elm2)->field.le_next = (elm)->field.le_next) != NULL) \
322+ (elm2)->field.le_next->field.le_prev = \
323+ &(elm2)->field.le_next; \
324+ (elm2)->field.le_prev = (elm)->field.le_prev; \
325+ *(elm2)->field.le_prev = (elm2); \
326+ QUEUEDEBUG_LIST_POSTREMOVE((elm), field) \
327+} while (/*CONSTCOND*/0)
328+
329+/*
330+ * Simple queue definitions.
331+ */
332+#define SIMPLEQ_HEAD(name, type) \
333+struct name { \
334+ struct type *sqh_first; /* first element */ \
335+ struct type **sqh_last; /* addr of last next element */ \
336+}
337+
338+#define SIMPLEQ_HEAD_INITIALIZER(head) \
339+ { NULL, &(head).sqh_first }
340+
341+#define SIMPLEQ_ENTRY(type) \
342+struct { \
343+ struct type *sqe_next; /* next element */ \
344+}
345+
346+/*
347+ * Simple queue access methods.
348+ */
349+#define SIMPLEQ_FIRST(head) ((head)->sqh_first)
350+#define SIMPLEQ_END(head) NULL
351+#define SIMPLEQ_EMPTY(head) ((head)->sqh_first == SIMPLEQ_END(head))
352+#define SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next)
353+
354+#define SIMPLEQ_FOREACH(var, head, field) \
355+ for ((var) = ((head)->sqh_first); \
356+ (var) != SIMPLEQ_END(head); \
357+ (var) = ((var)->field.sqe_next))
358+
359+#define SIMPLEQ_FOREACH_SAFE(var, head, field, next) \
360+ for ((var) = ((head)->sqh_first); \
361+ (var) != SIMPLEQ_END(head) && \
362+ ((next = ((var)->field.sqe_next)), 1); \
363+ (var) = (next))
364+
365+/*
366+ * Simple queue functions.
367+ */
368+#define SIMPLEQ_INIT(head) do { \
369+ (head)->sqh_first = NULL; \
370+ (head)->sqh_last = &(head)->sqh_first; \
371+} while (/*CONSTCOND*/0)
372+
373+#define SIMPLEQ_INSERT_HEAD(head, elm, field) do { \
374+ if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \
375+ (head)->sqh_last = &(elm)->field.sqe_next; \
376+ (head)->sqh_first = (elm); \
377+} while (/*CONSTCOND*/0)
378+
379+#define SIMPLEQ_INSERT_TAIL(head, elm, field) do { \
380+ (elm)->field.sqe_next = NULL; \
381+ *(head)->sqh_last = (elm); \
382+ (head)->sqh_last = &(elm)->field.sqe_next; \
383+} while (/*CONSTCOND*/0)
384+
385+#define SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
386+ if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
387+ (head)->sqh_last = &(elm)->field.sqe_next; \
388+ (listelm)->field.sqe_next = (elm); \
389+} while (/*CONSTCOND*/0)
390+
391+#define SIMPLEQ_REMOVE_HEAD(head, field) do { \
392+ if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \
393+ (head)->sqh_last = &(head)->sqh_first; \
394+} while (/*CONSTCOND*/0)
395+
396+#define SIMPLEQ_REMOVE_AFTER(head, elm, field) do { \
397+ if (((elm)->field.sqe_next = (elm)->field.sqe_next->field.sqe_next) \
398+ == NULL) \
399+ (head)->sqh_last = &(elm)->field.sqe_next; \
400+} while (/*CONSTCOND*/0)
401+
402+#define SIMPLEQ_REMOVE(head, elm, type, field) do { \
403+ if ((head)->sqh_first == (elm)) { \
404+ SIMPLEQ_REMOVE_HEAD((head), field); \
405+ } else { \
406+ struct type *curelm = (head)->sqh_first; \
407+ while (curelm->field.sqe_next != (elm)) \
408+ curelm = curelm->field.sqe_next; \
409+ if ((curelm->field.sqe_next = \
410+ curelm->field.sqe_next->field.sqe_next) == NULL) \
411+ (head)->sqh_last = &(curelm)->field.sqe_next; \
412+ } \
413+} while (/*CONSTCOND*/0)
414+
415+#define SIMPLEQ_CONCAT(head1, head2) do { \
416+ if (!SIMPLEQ_EMPTY((head2))) { \
417+ *(head1)->sqh_last = (head2)->sqh_first; \
418+ (head1)->sqh_last = (head2)->sqh_last; \
419+ SIMPLEQ_INIT((head2)); \
420+ } \
421+} while (/*CONSTCOND*/0)
422+
423+#define SIMPLEQ_LAST(head, type, field) \
424+ (SIMPLEQ_EMPTY((head)) ? \
425+ NULL : \
426+ ((struct type *)(void *) \
427+ ((char *)((head)->sqh_last) - offsetof(struct type, field))))
428+
429+/*
430+ * Tail queue definitions.
431+ */
432+#define _TAILQ_HEAD(name, type, qual) \
433+struct name { \
434+ qual type *tqh_first; /* first element */ \
435+ qual type *qual *tqh_last; /* addr of last next element */ \
436+}
437+#define TAILQ_HEAD(name, type) _TAILQ_HEAD(name, struct type,)
438+
439+#define TAILQ_HEAD_INITIALIZER(head) \
440+ { TAILQ_END(head), &(head).tqh_first }
441+
442+#define _TAILQ_ENTRY(type, qual) \
443+struct { \
444+ qual type *tqe_next; /* next element */ \
445+ qual type *qual *tqe_prev; /* address of previous next element */\
446+}
447+#define TAILQ_ENTRY(type) _TAILQ_ENTRY(struct type,)
448+
449+/*
450+ * Tail queue access methods.
451+ */
452+#define TAILQ_FIRST(head) ((head)->tqh_first)
453+#define TAILQ_END(head) (NULL)
454+#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
455+#define TAILQ_LAST(head, headname) \
456+ (*(((struct headname *)((head)->tqh_last))->tqh_last))
457+#define TAILQ_PREV(elm, headname, field) \
458+ (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
459+#define TAILQ_EMPTY(head) (TAILQ_FIRST(head) == TAILQ_END(head))
460+
461+
462+#define TAILQ_FOREACH(var, head, field) \
463+ for ((var) = ((head)->tqh_first); \
464+ (var) != TAILQ_END(head); \
465+ (var) = ((var)->field.tqe_next))
466+
467+#define TAILQ_FOREACH_SAFE(var, head, field, next) \
468+ for ((var) = ((head)->tqh_first); \
469+ (var) != TAILQ_END(head) && \
470+ ((next) = TAILQ_NEXT(var, field), 1); (var) = (next))
471+
472+#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
473+ for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last));\
474+ (var) != TAILQ_END(head); \
475+ (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)))
476+
477+#define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, prev) \
478+ for ((var) = TAILQ_LAST((head), headname); \
479+ (var) != TAILQ_END(head) && \
480+ ((prev) = TAILQ_PREV((var), headname, field), 1); (var) = (prev))
481+
482+/*
483+ * Tail queue functions.
484+ */
485+#if defined(QUEUEDEBUG)
486+#define QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field) \
487+ if ((head)->tqh_first && \
488+ (head)->tqh_first->field.tqe_prev != &(head)->tqh_first) \
489+ QUEUEDEBUG_ABORT("TAILQ_INSERT_HEAD %p %s:%d", (head), \
490+ __FILE__, __LINE__);
491+#define QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field) \
492+ if (*(head)->tqh_last != NULL) \
493+ QUEUEDEBUG_ABORT("TAILQ_INSERT_TAIL %p %s:%d", (head), \
494+ __FILE__, __LINE__);
495+#define QUEUEDEBUG_TAILQ_OP(elm, field) \
496+ if ((elm)->field.tqe_next && \
497+ (elm)->field.tqe_next->field.tqe_prev != \
498+ &(elm)->field.tqe_next) \
499+ QUEUEDEBUG_ABORT("TAILQ_* forw %p %s:%d", (elm), \
500+ __FILE__, __LINE__); \
501+ if (*(elm)->field.tqe_prev != (elm)) \
502+ QUEUEDEBUG_ABORT("TAILQ_* back %p %s:%d", (elm), \
503+ __FILE__, __LINE__);
504+#define QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field) \
505+ if ((elm)->field.tqe_next == NULL && \
506+ (head)->tqh_last != &(elm)->field.tqe_next) \
507+ QUEUEDEBUG_ABORT("TAILQ_PREREMOVE head %p elm %p %s:%d",\
508+ (head), (elm), __FILE__, __LINE__);
509+#define QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field) \
510+ (elm)->field.tqe_next = (void *)1L; \
511+ (elm)->field.tqe_prev = (void *)1L;
512+#else
513+#define QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field)
514+#define QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field)
515+#define QUEUEDEBUG_TAILQ_OP(elm, field)
516+#define QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field)
517+#define QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field)
518+#endif
519+
520+#define TAILQ_INIT(head) do { \
521+ (head)->tqh_first = TAILQ_END(head); \
522+ (head)->tqh_last = &(head)->tqh_first; \
523+} while (/*CONSTCOND*/0)
524+
525+#define TAILQ_INSERT_HEAD(head, elm, field) do { \
526+ QUEUEDEBUG_TAILQ_INSERT_HEAD((head), (elm), field) \
527+ if (((elm)->field.tqe_next = (head)->tqh_first) != TAILQ_END(head))\
528+ (head)->tqh_first->field.tqe_prev = \
529+ &(elm)->field.tqe_next; \
530+ else \
531+ (head)->tqh_last = &(elm)->field.tqe_next; \
532+ (head)->tqh_first = (elm); \
533+ (elm)->field.tqe_prev = &(head)->tqh_first; \
534+} while (/*CONSTCOND*/0)
535+
536+#define TAILQ_INSERT_TAIL(head, elm, field) do { \
537+ QUEUEDEBUG_TAILQ_INSERT_TAIL((head), (elm), field) \
538+ (elm)->field.tqe_next = TAILQ_END(head); \
539+ (elm)->field.tqe_prev = (head)->tqh_last; \
540+ *(head)->tqh_last = (elm); \
541+ (head)->tqh_last = &(elm)->field.tqe_next; \
542+} while (/*CONSTCOND*/0)
543+
544+#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
545+ QUEUEDEBUG_TAILQ_OP((listelm), field) \
546+ if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != \
547+ TAILQ_END(head)) \
548+ (elm)->field.tqe_next->field.tqe_prev = \
549+ &(elm)->field.tqe_next; \
550+ else \
551+ (head)->tqh_last = &(elm)->field.tqe_next; \
552+ (listelm)->field.tqe_next = (elm); \
553+ (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \
554+} while (/*CONSTCOND*/0)
555+
556+#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
557+ QUEUEDEBUG_TAILQ_OP((listelm), field) \
558+ (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
559+ (elm)->field.tqe_next = (listelm); \
560+ *(listelm)->field.tqe_prev = (elm); \
561+ (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
562+} while (/*CONSTCOND*/0)
563+
564+#define TAILQ_REMOVE(head, elm, field) do { \
565+ QUEUEDEBUG_TAILQ_PREREMOVE((head), (elm), field) \
566+ QUEUEDEBUG_TAILQ_OP((elm), field) \
567+ if (((elm)->field.tqe_next) != TAILQ_END(head)) \
568+ (elm)->field.tqe_next->field.tqe_prev = \
569+ (elm)->field.tqe_prev; \
570+ else \
571+ (head)->tqh_last = (elm)->field.tqe_prev; \
572+ *(elm)->field.tqe_prev = (elm)->field.tqe_next; \
573+ QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field); \
574+} while (/*CONSTCOND*/0)
575+
576+#define TAILQ_REPLACE(head, elm, elm2, field) do { \
577+ if (((elm2)->field.tqe_next = (elm)->field.tqe_next) != \
578+ TAILQ_END(head)) \
579+ (elm2)->field.tqe_next->field.tqe_prev = \
580+ &(elm2)->field.tqe_next; \
581+ else \
582+ (head)->tqh_last = &(elm2)->field.tqe_next; \
583+ (elm2)->field.tqe_prev = (elm)->field.tqe_prev; \
584+ *(elm2)->field.tqe_prev = (elm2); \
585+ QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field); \
586+} while (/*CONSTCOND*/0)
587+
588+#define TAILQ_CONCAT(head1, head2, field) do { \
589+ if (!TAILQ_EMPTY(head2)) { \
590+ *(head1)->tqh_last = (head2)->tqh_first; \
591+ (head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \
592+ (head1)->tqh_last = (head2)->tqh_last; \
593+ TAILQ_INIT((head2)); \
594+ } \
595+} while (/*CONSTCOND*/0)
596+
597+/*
598+ * Singly-linked Tail queue declarations.
599+ */
600+#define STAILQ_HEAD(name, type) \
601+struct name { \
602+ struct type *stqh_first; /* first element */ \
603+ struct type **stqh_last; /* addr of last next element */ \
604+}
605+
606+#define STAILQ_HEAD_INITIALIZER(head) \
607+ { NULL, &(head).stqh_first }
608+
609+#define STAILQ_ENTRY(type) \
610+struct { \
611+ struct type *stqe_next; /* next element */ \
612+}
613+
614+/*
615+ * Singly-linked Tail queue access methods.
616+ */
617+#define STAILQ_FIRST(head) ((head)->stqh_first)
618+#define STAILQ_END(head) NULL
619+#define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next)
620+#define STAILQ_EMPTY(head) (STAILQ_FIRST(head) == STAILQ_END(head))
621+
622+/*
623+ * Singly-linked Tail queue functions.
624+ */
625+#define STAILQ_INIT(head) do { \
626+ (head)->stqh_first = NULL; \
627+ (head)->stqh_last = &(head)->stqh_first; \
628+} while (/*CONSTCOND*/0)
629+
630+#define STAILQ_INSERT_HEAD(head, elm, field) do { \
631+ if (((elm)->field.stqe_next = (head)->stqh_first) == NULL) \
632+ (head)->stqh_last = &(elm)->field.stqe_next; \
633+ (head)->stqh_first = (elm); \
634+} while (/*CONSTCOND*/0)
635+
636+#define STAILQ_INSERT_TAIL(head, elm, field) do { \
637+ (elm)->field.stqe_next = NULL; \
638+ *(head)->stqh_last = (elm); \
639+ (head)->stqh_last = &(elm)->field.stqe_next; \
640+} while (/*CONSTCOND*/0)
641+
642+#define STAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
643+ if (((elm)->field.stqe_next = (listelm)->field.stqe_next) == NULL)\
644+ (head)->stqh_last = &(elm)->field.stqe_next; \
645+ (listelm)->field.stqe_next = (elm); \
646+} while (/*CONSTCOND*/0)
647+
648+#define STAILQ_REMOVE_HEAD(head, field) do { \
649+ if (((head)->stqh_first = (head)->stqh_first->field.stqe_next) == NULL) \
650+ (head)->stqh_last = &(head)->stqh_first; \
651+} while (/*CONSTCOND*/0)
652+
653+#define STAILQ_REMOVE(head, elm, type, field) do { \
654+ if ((head)->stqh_first == (elm)) { \
655+ STAILQ_REMOVE_HEAD((head), field); \
656+ } else { \
657+ struct type *curelm = (head)->stqh_first; \
658+ while (curelm->field.stqe_next != (elm)) \
659+ curelm = curelm->field.stqe_next; \
660+ if ((curelm->field.stqe_next = \
661+ curelm->field.stqe_next->field.stqe_next) == NULL) \
662+ (head)->stqh_last = &(curelm)->field.stqe_next; \
663+ } \
664+} while (/*CONSTCOND*/0)
665+
666+#define STAILQ_FOREACH(var, head, field) \
667+ for ((var) = ((head)->stqh_first); \
668+ (var); \
669+ (var) = ((var)->field.stqe_next))
670+
671+#define STAILQ_FOREACH_SAFE(var, head, field, tvar) \
672+ for ((var) = STAILQ_FIRST((head)); \
673+ (var) && ((tvar) = STAILQ_NEXT((var), field), 1); \
674+ (var) = (tvar))
675+
676+#define STAILQ_CONCAT(head1, head2) do { \
677+ if (!STAILQ_EMPTY((head2))) { \
678+ *(head1)->stqh_last = (head2)->stqh_first; \
679+ (head1)->stqh_last = (head2)->stqh_last; \
680+ STAILQ_INIT((head2)); \
681+ } \
682+} while (/*CONSTCOND*/0)
683+
684+#define STAILQ_LAST(head, type, field) \
685+ (STAILQ_EMPTY((head)) ? \
686+ NULL : \
687+ ((struct type *)(void *) \
688+ ((char *)((head)->stqh_last) - offsetof(struct type, field))))
689+
690+
691+#ifndef _KERNEL
692+/*
693+ * Circular queue definitions. Do not use. We still keep the macros
694+ * for compatibility but because of pointer aliasing issues their use
695+ * is discouraged!
696+ */
697+
698+/*
699+ * __launder_type(): We use this ugly hack to work around the the compiler
700+ * noticing that two types may not alias each other and elide tests in code.
701+ * We hit this in the CIRCLEQ macros when comparing 'struct name *' and
702+ * 'struct type *' (see CIRCLEQ_HEAD()). Modern compilers (such as GCC
703+ * 4.8) declare these comparisons as always false, causing the code to
704+ * not run as designed.
705+ *
706+ * This hack is only to be used for comparisons and thus can be fully const.
707+ * Do not use for assignment.
708+ *
709+ * If we ever choose to change the ABI of the CIRCLEQ macros, we could fix
710+ * this by changing the head/tail sentinal values, but see the note above
711+ * this one.
712+ */
713+static __inline const void * __launder_type(const void *);
714+static __inline const void *
715+__launder_type(const void *__x)
716+{
717+ __asm __volatile("" : "+r" (__x));
718+ return __x;
719+}
720+
721+#if defined(QUEUEDEBUG)
722+#define QUEUEDEBUG_CIRCLEQ_HEAD(head, field) \
723+ if ((head)->cqh_first != CIRCLEQ_ENDC(head) && \
724+ (head)->cqh_first->field.cqe_prev != CIRCLEQ_ENDC(head)) \
725+ QUEUEDEBUG_ABORT("CIRCLEQ head forw %p %s:%d", (head), \
726+ __FILE__, __LINE__); \
727+ if ((head)->cqh_last != CIRCLEQ_ENDC(head) && \
728+ (head)->cqh_last->field.cqe_next != CIRCLEQ_ENDC(head)) \
729+ QUEUEDEBUG_ABORT("CIRCLEQ head back %p %s:%d", (head), \
730+ __FILE__, __LINE__);
731+#define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field) \
732+ if ((elm)->field.cqe_next == CIRCLEQ_ENDC(head)) { \
733+ if ((head)->cqh_last != (elm)) \
734+ QUEUEDEBUG_ABORT("CIRCLEQ elm last %p %s:%d", \
735+ (elm), __FILE__, __LINE__); \
736+ } else { \
737+ if ((elm)->field.cqe_next->field.cqe_prev != (elm)) \
738+ QUEUEDEBUG_ABORT("CIRCLEQ elm forw %p %s:%d", \
739+ (elm), __FILE__, __LINE__); \
740+ } \
741+ if ((elm)->field.cqe_prev == CIRCLEQ_ENDC(head)) { \
742+ if ((head)->cqh_first != (elm)) \
743+ QUEUEDEBUG_ABORT("CIRCLEQ elm first %p %s:%d", \
744+ (elm), __FILE__, __LINE__); \
745+ } else { \
746+ if ((elm)->field.cqe_prev->field.cqe_next != (elm)) \
747+ QUEUEDEBUG_ABORT("CIRCLEQ elm prev %p %s:%d", \
748+ (elm), __FILE__, __LINE__); \
749+ }
750+#define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field) \
751+ (elm)->field.cqe_next = (void *)1L; \
752+ (elm)->field.cqe_prev = (void *)1L;
753+#else
754+#define QUEUEDEBUG_CIRCLEQ_HEAD(head, field)
755+#define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field)
756+#define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field)
757+#endif
758+
759+#define CIRCLEQ_HEAD(name, type) \
760+struct name { \
761+ struct type *cqh_first; /* first element */ \
762+ struct type *cqh_last; /* last element */ \
763+}
764+
765+#define CIRCLEQ_HEAD_INITIALIZER(head) \
766+ { CIRCLEQ_END(&head), CIRCLEQ_END(&head) }
767+
768+#define CIRCLEQ_ENTRY(type) \
769+struct { \
770+ struct type *cqe_next; /* next element */ \
771+ struct type *cqe_prev; /* previous element */ \
772+}
773+
774+/*
775+ * Circular queue functions.
776+ */
777+#define CIRCLEQ_INIT(head) do { \
778+ (head)->cqh_first = CIRCLEQ_END(head); \
779+ (head)->cqh_last = CIRCLEQ_END(head); \
780+} while (/*CONSTCOND*/0)
781+
782+#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
783+ QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
784+ QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field) \
785+ (elm)->field.cqe_next = (listelm)->field.cqe_next; \
786+ (elm)->field.cqe_prev = (listelm); \
787+ if ((listelm)->field.cqe_next == CIRCLEQ_ENDC(head)) \
788+ (head)->cqh_last = (elm); \
789+ else \
790+ (listelm)->field.cqe_next->field.cqe_prev = (elm); \
791+ (listelm)->field.cqe_next = (elm); \
792+} while (/*CONSTCOND*/0)
793+
794+#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \
795+ QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
796+ QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field) \
797+ (elm)->field.cqe_next = (listelm); \
798+ (elm)->field.cqe_prev = (listelm)->field.cqe_prev; \
799+ if ((listelm)->field.cqe_prev == CIRCLEQ_ENDC(head)) \
800+ (head)->cqh_first = (elm); \
801+ else \
802+ (listelm)->field.cqe_prev->field.cqe_next = (elm); \
803+ (listelm)->field.cqe_prev = (elm); \
804+} while (/*CONSTCOND*/0)
805+
806+#define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \
807+ QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
808+ (elm)->field.cqe_next = (head)->cqh_first; \
809+ (elm)->field.cqe_prev = CIRCLEQ_END(head); \
810+ if ((head)->cqh_last == CIRCLEQ_ENDC(head)) \
811+ (head)->cqh_last = (elm); \
812+ else \
813+ (head)->cqh_first->field.cqe_prev = (elm); \
814+ (head)->cqh_first = (elm); \
815+} while (/*CONSTCOND*/0)
816+
817+#define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \
818+ QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
819+ (elm)->field.cqe_next = CIRCLEQ_END(head); \
820+ (elm)->field.cqe_prev = (head)->cqh_last; \
821+ if ((head)->cqh_first == CIRCLEQ_ENDC(head)) \
822+ (head)->cqh_first = (elm); \
823+ else \
824+ (head)->cqh_last->field.cqe_next = (elm); \
825+ (head)->cqh_last = (elm); \
826+} while (/*CONSTCOND*/0)
827+
828+#define CIRCLEQ_REMOVE(head, elm, field) do { \
829+ QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
830+ QUEUEDEBUG_CIRCLEQ_ELM((head), (elm), field) \
831+ if ((elm)->field.cqe_next == CIRCLEQ_ENDC(head)) \
832+ (head)->cqh_last = (elm)->field.cqe_prev; \
833+ else \
834+ (elm)->field.cqe_next->field.cqe_prev = \
835+ (elm)->field.cqe_prev; \
836+ if ((elm)->field.cqe_prev == CIRCLEQ_ENDC(head)) \
837+ (head)->cqh_first = (elm)->field.cqe_next; \
838+ else \
839+ (elm)->field.cqe_prev->field.cqe_next = \
840+ (elm)->field.cqe_next; \
841+ QUEUEDEBUG_CIRCLEQ_POSTREMOVE((elm), field) \
842+} while (/*CONSTCOND*/0)
843+
844+#define CIRCLEQ_FOREACH(var, head, field) \
845+ for ((var) = ((head)->cqh_first); \
846+ (var) != CIRCLEQ_ENDC(head); \
847+ (var) = ((var)->field.cqe_next))
848+
849+#define CIRCLEQ_FOREACH_REVERSE(var, head, field) \
850+ for ((var) = ((head)->cqh_last); \
851+ (var) != CIRCLEQ_ENDC(head); \
852+ (var) = ((var)->field.cqe_prev))
853+
854+/*
855+ * Circular queue access methods.
856+ */
857+#define CIRCLEQ_FIRST(head) ((head)->cqh_first)
858+#define CIRCLEQ_LAST(head) ((head)->cqh_last)
859+/* For comparisons */
860+#define CIRCLEQ_ENDC(head) (__launder_type(head))
861+/* For assignments */
862+#define CIRCLEQ_END(head) ((void *)(head))
863+#define CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next)
864+#define CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev)
865+#define CIRCLEQ_EMPTY(head) \
866+ (CIRCLEQ_FIRST(head) == CIRCLEQ_ENDC(head))
867+
868+#define CIRCLEQ_LOOP_NEXT(head, elm, field) \
869+ (((elm)->field.cqe_next == CIRCLEQ_ENDC(head)) \
870+ ? ((head)->cqh_first) \
871+ : (elm->field.cqe_next))
872+#define CIRCLEQ_LOOP_PREV(head, elm, field) \
873+ (((elm)->field.cqe_prev == CIRCLEQ_ENDC(head)) \
874+ ? ((head)->cqh_last) \
875+ : (elm->field.cqe_prev))
876+#endif /* !_KERNEL */
877+
878+#endif /* !_SYS_QUEUE_H_ */