/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include "libuutil_common.h"

#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/time.h>

#define	ELEM_TO_NODE(lp, e) \
	((uu_list_node_impl_t *)((uintptr_t)(e) + (lp)->ul_offset))

#define	NODE_TO_ELEM(lp, n) \
	((void *)((uintptr_t)(n) - (lp)->ul_offset))

/*
 * A uu_list_index_t defines a location for insertion.  It is simply a
 * pointer to the object after the insertion point.  We store a mark
 * in the low bits of the index to help prevent mistakes.
 *
 * When debugging, the index mark changes on every insert and delete, to
 * catch stale references.
 */
#define	INDEX_MAX		(sizeof (uintptr_t) - 1)
#define	INDEX_NEXT(m)		(((m) == INDEX_MAX)? 1 : ((m) + 1) & INDEX_MAX)

#define	INDEX_TO_NODE(i)	((uu_list_node_impl_t *)((i) & ~INDEX_MAX))
#define	NODE_TO_INDEX(p, n)	(((uintptr_t)(n) & ~INDEX_MAX) | (p)->ul_index)
#define	INDEX_VALID(p, i)	(((i) & INDEX_MAX) == (p)->ul_index)
#define	INDEX_CHECK(i)		(((i) & INDEX_MAX) != 0)

#define	POOL_TO_MARKER(pp)	((void *)((uintptr_t)(pp) | 1))

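/*
 * Illustrative sketch only (not part of the library): the index encoding
 * above is normally exercised through the find-then-insert idiom.  "list",
 * "struct foo", and "f" are hypothetical caller-side names; f is assumed to
 * point at an object whose embedded node has been initialized.
 *
 *	uu_list_index_t idx;
 *	struct foo *f;
 *
 *	if (uu_list_find(list, f, NULL, &idx) == NULL)
 *		uu_list_insert(list, f, idx);
 *
 * On a UU_LIST_SORTED list this inserts f at its sorted position; the mark
 * stored in the low bits lets the debug checks in uu_list_insert() reject an
 * index saved from before an intervening insert or remove.
 */
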
static uu_list_pool_t	uu_null_lpool = { &uu_null_lpool, &uu_null_lpool };
static pthread_mutex_t	uu_lpool_list_lock = PTHREAD_MUTEX_INITIALIZER;

uu_list_pool_t *
uu_list_pool_create(const char *name, size_t objsize,
    size_t nodeoffset, uu_compare_fn_t *compare_func, uint32_t flags)
{
	uu_list_pool_t *pp, *next, *prev;

	if (name == NULL ||
	    uu_check_name(name, UU_NAME_DOMAIN) == -1 ||
	    nodeoffset + sizeof (uu_list_node_t) > objsize) {
		uu_set_error(UU_ERROR_INVALID_ARGUMENT);
		return (NULL);
	}

	if (flags & ~UU_LIST_POOL_DEBUG) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (NULL);
	}

	pp = uu_zalloc(sizeof (uu_list_pool_t));
	if (pp == NULL) {
		uu_set_error(UU_ERROR_NO_MEMORY);
		return (NULL);
	}

	(void) strlcpy(pp->ulp_name, name, sizeof (pp->ulp_name));
	pp->ulp_nodeoffset = nodeoffset;
	pp->ulp_objsize = objsize;
	pp->ulp_cmp = compare_func;
	if (flags & UU_LIST_POOL_DEBUG)
		pp->ulp_debug = 1;
	pp->ulp_last_index = 0;

	(void) pthread_mutex_init(&pp->ulp_lock, NULL);

	pp->ulp_null_list.ul_next_enc = UU_PTR_ENCODE(&pp->ulp_null_list);
	pp->ulp_null_list.ul_prev_enc = UU_PTR_ENCODE(&pp->ulp_null_list);

	(void) pthread_mutex_lock(&uu_lpool_list_lock);
	pp->ulp_next = next = &uu_null_lpool;
	pp->ulp_prev = prev = next->ulp_prev;
	next->ulp_prev = pp;
	prev->ulp_next = pp;
	(void) pthread_mutex_unlock(&uu_lpool_list_lock);

	return (pp);
}

void
uu_list_pool_destroy(uu_list_pool_t *pp)
{
	if (pp->ulp_debug) {
		if (pp->ulp_null_list.ul_next_enc !=
		    UU_PTR_ENCODE(&pp->ulp_null_list) ||
		    pp->ulp_null_list.ul_prev_enc !=
		    UU_PTR_ENCODE(&pp->ulp_null_list)) {
			uu_panic("uu_list_pool_destroy: Pool \"%.*s\" (%p) has "
			    "outstanding lists, or is corrupt.\n",
			    (int)sizeof (pp->ulp_name), pp->ulp_name,
			    (void *)pp);
		}
	}
	(void) pthread_mutex_lock(&uu_lpool_list_lock);
	pp->ulp_next->ulp_prev = pp->ulp_prev;
	pp->ulp_prev->ulp_next = pp->ulp_next;
	(void) pthread_mutex_unlock(&uu_lpool_list_lock);
	pp->ulp_prev = NULL;
	pp->ulp_next = NULL;
	uu_free(pp);
}

void
uu_list_node_init(void *base, uu_list_node_t *np_arg, uu_list_pool_t *pp)
{
	uu_list_node_impl_t *np = (uu_list_node_impl_t *)np_arg;

	if (pp->ulp_debug) {
		uintptr_t offset = (uintptr_t)np - (uintptr_t)base;
		if (offset + sizeof (*np) > pp->ulp_objsize) {
			uu_panic("uu_list_node_init(%p, %p, %p (\"%s\")): "
			    "offset %ld doesn't fit in object (size %ld)\n",
			    base, (void *)np, (void *)pp, pp->ulp_name,
			    (long)offset, (long)pp->ulp_objsize);
		}
		if (offset != pp->ulp_nodeoffset) {
			uu_panic("uu_list_node_init(%p, %p, %p (\"%s\")): "
			    "offset %ld doesn't match pool's offset (%ld)\n",
			    base, (void *)np, (void *)pp, pp->ulp_name,
			    (long)offset, (long)pp->ulp_nodeoffset);
		}
	}
	np->uln_next = POOL_TO_MARKER(pp);
	np->uln_prev = NULL;
}

void
uu_list_node_fini(void *base, uu_list_node_t *np_arg, uu_list_pool_t *pp)
{
	uu_list_node_impl_t *np = (uu_list_node_impl_t *)np_arg;

	if (pp->ulp_debug) {
		if (np->uln_next == NULL &&
		    np->uln_prev == NULL) {
			uu_panic("uu_list_node_fini(%p, %p, %p (\"%s\")): "
			    "node already finied\n",
			    base, (void *)np_arg, (void *)pp, pp->ulp_name);
		}
		if (np->uln_next != POOL_TO_MARKER(pp) ||
		    np->uln_prev != NULL) {
			uu_panic("uu_list_node_fini(%p, %p, %p (\"%s\")): "
			    "node corrupt or on list\n",
			    base, (void *)np_arg, (void *)pp, pp->ulp_name);
		}
	}
	np->uln_next = NULL;
	np->uln_prev = NULL;
}

uu_list_t *
uu_list_create(uu_list_pool_t *pp, void *parent, uint32_t flags)
{
	uu_list_t *lp, *next, *prev;

	if (flags & ~(UU_LIST_DEBUG | UU_LIST_SORTED)) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (NULL);
	}

	if ((flags & UU_LIST_SORTED) && pp->ulp_cmp == NULL) {
		if (pp->ulp_debug)
			uu_panic("uu_list_create(%p, ...): requested "
			    "UU_LIST_SORTED, but pool has no comparison func\n",
			    (void *)pp);
		uu_set_error(UU_ERROR_NOT_SUPPORTED);
		return (NULL);
	}

	lp = uu_zalloc(sizeof (*lp));
	if (lp == NULL) {
		uu_set_error(UU_ERROR_NO_MEMORY);
		return (NULL);
	}

	lp->ul_pool = pp;
	lp->ul_parent_enc = UU_PTR_ENCODE(parent);
	lp->ul_offset = pp->ulp_nodeoffset;
	lp->ul_debug = pp->ulp_debug || (flags & UU_LIST_DEBUG);
	lp->ul_sorted = (flags & UU_LIST_SORTED);
	lp->ul_numnodes = 0;
	lp->ul_index = (pp->ulp_last_index = INDEX_NEXT(pp->ulp_last_index));

	lp->ul_null_node.uln_next = &lp->ul_null_node;
	lp->ul_null_node.uln_prev = &lp->ul_null_node;

	lp->ul_null_walk.ulw_next = &lp->ul_null_walk;
	lp->ul_null_walk.ulw_prev = &lp->ul_null_walk;

	(void) pthread_mutex_lock(&pp->ulp_lock);
	next = &pp->ulp_null_list;
	prev = UU_PTR_DECODE(next->ul_prev_enc);
	lp->ul_next_enc = UU_PTR_ENCODE(next);
	lp->ul_prev_enc = UU_PTR_ENCODE(prev);
	next->ul_prev_enc = UU_PTR_ENCODE(lp);
	prev->ul_next_enc = UU_PTR_ENCODE(lp);
	(void) pthread_mutex_unlock(&pp->ulp_lock);

	return (lp);
}
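
/*
 * Illustrative sketch only (not part of the library): a typical caller embeds
 * a uu_list_node_t in its own structure and describes that layout to the
 * pool, which is what ELEM_TO_NODE()/NODE_TO_ELEM() rely on.  "struct foo",
 * "foo_compare", "pool", and "list" are hypothetical caller-side names;
 * offsetof() comes from <stddef.h>.
 *
 *	struct foo {
 *		int foo_key;
 *		uu_list_node_t foo_node;
 *	};
 *
 *	static int
 *	foo_compare(const void *l, const void *r, void *private)
 *	{
 *		const struct foo *a = l, *b = r;
 *
 *		return (a->foo_key < b->foo_key ? -1 :
 *		    a->foo_key > b->foo_key ? 1 : 0);
 *	}
 *
 *	uu_list_pool_t *pool;
 *	uu_list_t *list;
 *
 *	pool = uu_list_pool_create("foo_pool", sizeof (struct foo),
 *	    offsetof(struct foo, foo_node), foo_compare, UU_LIST_POOL_DEBUG);
 *	list = uu_list_create(pool, NULL, UU_LIST_SORTED);
 *
 * Each object is then prepared with uu_list_node_init(f, &f->foo_node, pool)
 * before insertion; the debug checks above verify that the node sits at the
 * offset the pool was created with.
 */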

void
uu_list_destroy(uu_list_t *lp)
{
	uu_list_pool_t *pp = lp->ul_pool;

	if (lp->ul_debug) {
		if (lp->ul_null_node.uln_next != &lp->ul_null_node ||
		    lp->ul_null_node.uln_prev != &lp->ul_null_node) {
			uu_panic("uu_list_destroy(%p): list not empty\n",
			    (void *)lp);
		}
		if (lp->ul_numnodes != 0) {
			uu_panic("uu_list_destroy(%p): numnodes is nonzero, "
			    "but list is empty\n", (void *)lp);
		}
		if (lp->ul_null_walk.ulw_next != &lp->ul_null_walk ||
		    lp->ul_null_walk.ulw_prev != &lp->ul_null_walk) {
			uu_panic("uu_list_destroy(%p): outstanding walkers\n",
			    (void *)lp);
		}
	}

	(void) pthread_mutex_lock(&pp->ulp_lock);
	UU_LIST_PTR(lp->ul_next_enc)->ul_prev_enc = lp->ul_prev_enc;
	UU_LIST_PTR(lp->ul_prev_enc)->ul_next_enc = lp->ul_next_enc;
	(void) pthread_mutex_unlock(&pp->ulp_lock);
	lp->ul_prev_enc = UU_PTR_ENCODE(NULL);
	lp->ul_next_enc = UU_PTR_ENCODE(NULL);
	lp->ul_pool = NULL;
	uu_free(lp);
}

static void
list_insert(uu_list_t *lp, uu_list_node_impl_t *np, uu_list_node_impl_t *prev,
    uu_list_node_impl_t *next)
{
	if (lp->ul_debug) {
		if (next->uln_prev != prev || prev->uln_next != next)
			uu_panic("insert(%p): internal error: %p and %p not "
			    "neighbors\n", (void *)lp, (void *)next,
			    (void *)prev);

		if (np->uln_next != POOL_TO_MARKER(lp->ul_pool) ||
		    np->uln_prev != NULL) {
			uu_panic("insert(%p): elem %p node %p corrupt, "
			    "not initialized, or already in a list.\n",
			    (void *)lp, NODE_TO_ELEM(lp, np), (void *)np);
		}
		/*
		 * invalidate outstanding uu_list_index_ts.
		 */
		lp->ul_index = INDEX_NEXT(lp->ul_index);
	}
	np->uln_next = next;
	np->uln_prev = prev;
	next->uln_prev = np;
	prev->uln_next = np;

	lp->ul_numnodes++;
}

void
uu_list_insert(uu_list_t *lp, void *elem, uu_list_index_t idx)
{
	uu_list_node_impl_t *np;

	np = INDEX_TO_NODE(idx);
	if (np == NULL)
		np = &lp->ul_null_node;

	if (lp->ul_debug) {
		if (!INDEX_VALID(lp, idx))
			uu_panic("uu_list_insert(%p, %p, %p): %s\n",
			    (void *)lp, elem, (void *)idx,
			    INDEX_CHECK(idx)? "outdated index" :
			    "invalid index");
		if (np->uln_prev == NULL)
			uu_panic("uu_list_insert(%p, %p, %p): out-of-date "
			    "index\n", (void *)lp, elem, (void *)idx);
	}

	list_insert(lp, ELEM_TO_NODE(lp, elem), np->uln_prev, np);
}

void *
uu_list_find(uu_list_t *lp, void *elem, void *private, uu_list_index_t *out)
{
	int sorted = lp->ul_sorted;
	uu_compare_fn_t *func = lp->ul_pool->ulp_cmp;
	uu_list_node_impl_t *np;

	if (func == NULL) {
		if (out != NULL)
			*out = 0;
		uu_set_error(UU_ERROR_NOT_SUPPORTED);
		return (NULL);
	}
	for (np = lp->ul_null_node.uln_next; np != &lp->ul_null_node;
	    np = np->uln_next) {
		void *ep = NODE_TO_ELEM(lp, np);
		int cmp = func(ep, elem, private);
		if (cmp == 0) {
			if (out != NULL)
				*out = NODE_TO_INDEX(lp, np);
			return (ep);
		}
		if (sorted && cmp > 0) {
			if (out != NULL)
				*out = NODE_TO_INDEX(lp, np);
			return (NULL);
		}
	}
	if (out != NULL)
		*out = NODE_TO_INDEX(lp, 0);
	return (NULL);
}

void *
uu_list_nearest_next(uu_list_t *lp, uu_list_index_t idx)
{
	uu_list_node_impl_t *np = INDEX_TO_NODE(idx);

	if (np == NULL)
		np = &lp->ul_null_node;

	if (lp->ul_debug) {
		if (!INDEX_VALID(lp, idx))
			uu_panic("uu_list_nearest_next(%p, %p): %s\n",
			    (void *)lp, (void *)idx,
			    INDEX_CHECK(idx)? "outdated index" :
			    "invalid index");
		if (np->uln_prev == NULL)
			uu_panic("uu_list_nearest_next(%p, %p): out-of-date "
			    "index\n", (void *)lp, (void *)idx);
	}

	if (np == &lp->ul_null_node)
		return (NULL);
	else
		return (NODE_TO_ELEM(lp, np));
}

void *
uu_list_nearest_prev(uu_list_t *lp, uu_list_index_t idx)
{
	uu_list_node_impl_t *np = INDEX_TO_NODE(idx);

	if (np == NULL)
		np = &lp->ul_null_node;

	if (lp->ul_debug) {
		if (!INDEX_VALID(lp, idx))
			uu_panic("uu_list_nearest_prev(%p, %p): %s\n",
			    (void *)lp, (void *)idx, INDEX_CHECK(idx)?
			    "outdated index" : "invalid index");
		if (np->uln_prev == NULL)
			uu_panic("uu_list_nearest_prev(%p, %p): out-of-date "
			    "index\n", (void *)lp, (void *)idx);
	}

	if ((np = np->uln_prev) == &lp->ul_null_node)
		return (NULL);
	else
		return (NODE_TO_ELEM(lp, np));
}

static void
list_walk_init(uu_list_walk_t *wp, uu_list_t *lp, uint32_t flags)
{
	uu_list_walk_t *next, *prev;

	int robust = (flags & UU_WALK_ROBUST);
	int direction = (flags & UU_WALK_REVERSE)? -1 : 1;

	(void) memset(wp, 0, sizeof (*wp));
	wp->ulw_list = lp;
	wp->ulw_robust = robust;
	wp->ulw_dir = direction;
	if (direction > 0)
		wp->ulw_next_result = lp->ul_null_node.uln_next;
	else
		wp->ulw_next_result = lp->ul_null_node.uln_prev;

	if (lp->ul_debug || robust) {
		/*
		 * Add this walker to the list's list of walkers so
		 * uu_list_remove() can advance us if somebody tries to
		 * remove ulw_next_result.
		 */
		wp->ulw_next = next = &lp->ul_null_walk;
		wp->ulw_prev = prev = next->ulw_prev;
		next->ulw_prev = wp;
		prev->ulw_next = wp;
	}
}
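
/*
 * Illustrative sketch only (not part of the library): list_walk_init() above
 * backs both uu_list_walk_start() and the robust/debug path of the
 * callback-style uu_list_walk().  A typical callback walk looks like this;
 * "list", "struct foo", and print_foo() are hypothetical caller-side names.
 *
 *	static int
 *	print_foo(void *elem, void *private)
 *	{
 *		struct foo *f = elem;
 *
 *		(void) printf("%d\n", f->foo_key);
 *		return (UU_WALK_NEXT);	// UU_WALK_DONE stops the walk early
 *	}
 *
 *	if (uu_list_walk(list, print_foo, NULL, 0) != 0)
 *		(void) fprintf(stderr, "%s\n", uu_strerror(uu_error()));
 *
 * A negative return value from the callback makes uu_list_walk() fail with
 * UU_ERROR_CALLBACK_FAILED; any non-negative value other than UU_WALK_NEXT
 * ends the walk successfully.
 */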

static uu_list_node_impl_t *
list_walk_advance(uu_list_walk_t *wp, uu_list_t *lp)
{
	uu_list_node_impl_t *np = wp->ulw_next_result;
	uu_list_node_impl_t *next;

	if (np == &lp->ul_null_node)
		return (NULL);

	next = (wp->ulw_dir > 0)? np->uln_next : np->uln_prev;

	wp->ulw_next_result = next;
	return (np);
}

static void
list_walk_fini(uu_list_walk_t *wp)
{
	/* GLXXX debugging? */
	if (wp->ulw_next != NULL) {
		wp->ulw_next->ulw_prev = wp->ulw_prev;
		wp->ulw_prev->ulw_next = wp->ulw_next;
		wp->ulw_next = NULL;
		wp->ulw_prev = NULL;
	}
	wp->ulw_list = NULL;
	wp->ulw_next_result = NULL;
}

uu_list_walk_t *
uu_list_walk_start(uu_list_t *lp, uint32_t flags)
{
	uu_list_walk_t *wp;

	if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (NULL);
	}

	wp = uu_zalloc(sizeof (*wp));
	if (wp == NULL) {
		uu_set_error(UU_ERROR_NO_MEMORY);
		return (NULL);
	}

	list_walk_init(wp, lp, flags);
	return (wp);
}

void *
uu_list_walk_next(uu_list_walk_t *wp)
{
	uu_list_t *lp = wp->ulw_list;
	uu_list_node_impl_t *np = list_walk_advance(wp, lp);

	if (np == NULL)
		return (NULL);

	return (NODE_TO_ELEM(lp, np));
}

void
uu_list_walk_end(uu_list_walk_t *wp)
{
	list_walk_fini(wp);
	uu_free(wp);
}

int
uu_list_walk(uu_list_t *lp, uu_walk_fn_t *func, void *private, uint32_t flags)
{
	uu_list_node_impl_t *np;

	int status = UU_WALK_NEXT;

	int robust = (flags & UU_WALK_ROBUST);
	int reverse = (flags & UU_WALK_REVERSE);

	if (flags & ~(UU_WALK_ROBUST | UU_WALK_REVERSE)) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (-1);
	}

	if (lp->ul_debug || robust) {
		uu_list_walk_t my_walk;
		void *e;

		list_walk_init(&my_walk, lp, flags);
		while (status == UU_WALK_NEXT &&
		    (e = uu_list_walk_next(&my_walk)) != NULL)
			status = (*func)(e, private);
		list_walk_fini(&my_walk);
	} else {
		if (!reverse) {
			for (np = lp->ul_null_node.uln_next;
			    status == UU_WALK_NEXT && np != &lp->ul_null_node;
			    np = np->uln_next) {
				status = (*func)(NODE_TO_ELEM(lp, np), private);
			}
		} else {
			for (np = lp->ul_null_node.uln_prev;
			    status == UU_WALK_NEXT && np != &lp->ul_null_node;
			    np = np->uln_prev) {
				status = (*func)(NODE_TO_ELEM(lp, np), private);
			}
		}
	}
	if (status >= 0)
		return (0);
	uu_set_error(UU_ERROR_CALLBACK_FAILED);
	return (-1);
}

void
uu_list_remove(uu_list_t *lp, void *elem)
{
	uu_list_node_impl_t *np = ELEM_TO_NODE(lp, elem);
	uu_list_walk_t *wp;

	if (lp->ul_debug) {
		if (np->uln_prev == NULL)
			uu_panic("uu_list_remove(%p, %p): elem not on list\n",
			    (void *)lp, elem);
		/*
		 * invalidate outstanding uu_list_index_ts.
		 */
		lp->ul_index = INDEX_NEXT(lp->ul_index);
	}

	/*
	 * robust walkers must be advanced. In debug mode, non-robust
	 * walkers are also on the list. If there are any, it's an error.
	 */
	for (wp = lp->ul_null_walk.ulw_next; wp != &lp->ul_null_walk;
	    wp = wp->ulw_next) {
		if (wp->ulw_robust) {
			if (np == wp->ulw_next_result)
				(void) list_walk_advance(wp, lp);
		} else if (wp->ulw_next_result != NULL) {
			uu_panic("uu_list_remove(%p, %p): active non-robust "
			    "walker\n", (void *)lp, elem);
		}
	}

	np->uln_next->uln_prev = np->uln_prev;
	np->uln_prev->uln_next = np->uln_next;

	lp->ul_numnodes--;

	np->uln_next = POOL_TO_MARKER(lp->ul_pool);
	np->uln_prev = NULL;
}
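
/*
 * Illustrative sketch only (not part of the library): the walker-advancing
 * loop above is what makes removal during a UU_WALK_ROBUST walk safe.
 * "list", "struct foo", and should_drop() are hypothetical caller-side names.
 *
 *	uu_list_walk_t *wp;
 *	struct foo *f;
 *
 *	if ((wp = uu_list_walk_start(list, UU_WALK_ROBUST)) == NULL)
 *		return;
 *	while ((f = uu_list_walk_next(wp)) != NULL) {
 *		if (should_drop(f))
 *			uu_list_remove(list, f);
 *	}
 *	uu_list_walk_end(wp);
 *
 * In debug mode, removing any element while a non-robust walker is active
 * trips the "active non-robust walker" panic above instead.
 */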

void *
uu_list_teardown(uu_list_t *lp, void **cookie)
{
	void *ep;

	/*
	 * XXX: disable list modification until list is empty
	 */
	if (lp->ul_debug && *cookie != NULL)
		uu_panic("uu_list_teardown(%p, %p): unexpected cookie\n",
		    (void *)lp, (void *)cookie);

	ep = uu_list_first(lp);
	if (ep)
		uu_list_remove(lp, ep);
	return (ep);
}
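
/*
 * Illustrative sketch only (not part of the library): the usual caller-side
 * teardown loop.  "list" and destroy_foo() are hypothetical names; the
 * cookie must start out NULL (it is only sanity-checked in debug mode above).
 *
 *	void *cookie = NULL;
 *	struct foo *f;
 *
 *	while ((f = uu_list_teardown(list, &cookie)) != NULL)
 *		destroy_foo(f);
 *	uu_list_destroy(list);
 */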

int
uu_list_insert_before(uu_list_t *lp, void *target, void *elem)
{
	uu_list_node_impl_t *np = ELEM_TO_NODE(lp, target);

	if (target == NULL)
		np = &lp->ul_null_node;

	if (lp->ul_debug) {
		if (np->uln_prev == NULL)
			uu_panic("uu_list_insert_before(%p, %p, %p): %p is "
			    "not currently on a list\n",
			    (void *)lp, target, elem, target);
	}
	if (lp->ul_sorted) {
		if (lp->ul_debug)
			uu_panic("uu_list_insert_before(%p, ...): list is "
			    "UU_LIST_SORTED\n", (void *)lp);
		uu_set_error(UU_ERROR_NOT_SUPPORTED);
		return (-1);
	}

	list_insert(lp, ELEM_TO_NODE(lp, elem), np->uln_prev, np);
	return (0);
}

int
uu_list_insert_after(uu_list_t *lp, void *target, void *elem)
{
	uu_list_node_impl_t *np = ELEM_TO_NODE(lp, target);

	if (target == NULL)
		np = &lp->ul_null_node;

	if (lp->ul_debug) {
		if (np->uln_prev == NULL)
			uu_panic("uu_list_insert_after(%p, %p, %p): %p is "
			    "not currently on a list\n",
			    (void *)lp, target, elem, target);
	}
	if (lp->ul_sorted) {
		if (lp->ul_debug)
			uu_panic("uu_list_insert_after(%p, ...): list is "
			    "UU_LIST_SORTED\n", (void *)lp);
		uu_set_error(UU_ERROR_NOT_SUPPORTED);
		return (-1);
	}

	list_insert(lp, ELEM_TO_NODE(lp, elem), np, np->uln_next);
	return (0);
}

size_t
uu_list_numnodes(uu_list_t *lp)
{
	return (lp->ul_numnodes);
}

void *
uu_list_first(uu_list_t *lp)
{
	uu_list_node_impl_t *n = lp->ul_null_node.uln_next;
	if (n == &lp->ul_null_node)
		return (NULL);
	return (NODE_TO_ELEM(lp, n));
}

void *
uu_list_last(uu_list_t *lp)
{
	uu_list_node_impl_t *n = lp->ul_null_node.uln_prev;
	if (n == &lp->ul_null_node)
		return (NULL);
	return (NODE_TO_ELEM(lp, n));
}

void *
uu_list_next(uu_list_t *lp, void *elem)
{
	uu_list_node_impl_t *n = ELEM_TO_NODE(lp, elem);

	n = n->uln_next;
	if (n == &lp->ul_null_node)
		return (NULL);
	return (NODE_TO_ELEM(lp, n));
}

void *
uu_list_prev(uu_list_t *lp, void *elem)
{
	uu_list_node_impl_t *n = ELEM_TO_NODE(lp, elem);

	n = n->uln_prev;
	if (n == &lp->ul_null_node)
		return (NULL);
	return (NODE_TO_ELEM(lp, n));
}

/*
 * called from uu_lockup() and uu_release(), as part of our fork1()-safety.
 */
void
uu_list_lockup(void)
{
	uu_list_pool_t *pp;

	(void) pthread_mutex_lock(&uu_lpool_list_lock);
	for (pp = uu_null_lpool.ulp_next; pp != &uu_null_lpool;
	    pp = pp->ulp_next)
		(void) pthread_mutex_lock(&pp->ulp_lock);
}

void
uu_list_release(void)
{
	uu_list_pool_t *pp;

	for (pp = uu_null_lpool.ulp_next; pp != &uu_null_lpool;
	    pp = pp->ulp_next)
		(void) pthread_mutex_unlock(&pp->ulp_lock);
	(void) pthread_mutex_unlock(&uu_lpool_list_lock);
}
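
/*
 * A sketch of the wiring the comment above refers to; the actual
 * registration lives in the library's common code, not in this file, and
 * uu_release_child() is an assumed handler name:
 *
 *	(void) pthread_atfork(uu_lockup, uu_release, uu_release_child);
 *
 * Taking uu_lpool_list_lock and every per-pool lock before fork() and
 * releasing them afterwards keeps a child process from inheriting a mutex
 * that was held by a thread which does not exist in the child.
 */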