/*
 * include/common/mini-clist.h
 * Circular list manipulation macros and structures.
 *
 * Copyright (C) 2002-2014 Willy Tarreau - w@1wt.eu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation, version 2.1
 * exclusively.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef _COMMON_MINI_CLIST_H
#define _COMMON_MINI_CLIST_H

#include <common/config.h>

/* these are circular or bidirectional lists only. Each list pointer points to
 * another list pointer in a structure, and not the structure itself. The
 * pointer to the next element MUST be the first one so that the list is easily
 * cast as a single linked list or pointer.
 */
struct list {
	struct list *n;	/* next */
	struct list *p;	/* prev */
};

/* This is similar to struct list, but we want to be sure the compiler will
 * yell at you if you use macros for one when you're using the other. You have
 * to explicitly cast if that's really what you want to do.
 */
struct mt_list {
	struct mt_list *next;	/* next */
	struct mt_list *prev;	/* prev */
};


/* a back-ref is a pointer to a target list entry. It is used to detect when an
 * element being deleted is currently being tracked by another user. The best
 * example is a user dumping the session table. The table does not fit in the
 * output buffer so we have to set a mark on a session and go on later.
 But if
 * that marked session gets deleted, we don't want the user's pointer to go in
 * the wild. So we can simply link this user's request to the list of this
 * session's users, and put a pointer to the list element in ref, that will be
 * used as the mark for next iteration.
 */
struct bref {
	struct list users;	/* links this tracker into the target's list of users */
	struct list *ref;	/* pointer to the target's list entry */
};

/* a word list is a generic list with a pointer to a string in each element. */
struct wordlist {
	struct list list;	/* chaining element */
	char *s;		/* the string */
};

/* this is the same as above with an additional pointer to a condition. */
struct cond_wordlist {
	struct list list;	/* chaining element */
	void *cond;		/* opaque condition attached to this word */
	char *s;		/* the string */
};

/* First undefine some macros which happen to also be defined on OpenBSD,
 * in sys/queue.h, used by sys/event.h
 */
#undef LIST_HEAD
#undef LIST_INIT
#undef LIST_NEXT

/* ILH = Initialized List Head : used to prevent gcc from moving an empty
 * list to BSS. Some older version tend to trim all the array and cause
 * corruption.
 */
#define ILH		{ .n = (struct list *)1, .p = (struct list *)2 }

/* returns the address of <a>, usable as an anonymous list head */
#define LIST_HEAD(a)	((void *)(&(a)))

/* makes list head <l> an empty list (both pointers point to itself) */
#define LIST_INIT(l) ((l)->n = (l)->p = (l))

/* static initializer for a list head <l> */
#define LIST_HEAD_INIT(l) { &l, &l }

/* adds an element at the beginning of a list ; returns the element.
 * Note: <lh> and <el> are expanded several times, so they must not carry
 * side effects.
 */
#define LIST_ADD(lh, el) ({ (el)->n = (lh)->n; (el)->n->p = (lh)->n = (el); (el)->p = (lh); (el); })

/* adds an element at the end of a list ; returns the element.
 * Note: <lh> and <el> are expanded several times, so they must not carry
 * side effects.
 */
#define LIST_ADDQ(lh, el) ({ (el)->p = (lh)->p; (el)->p->n = (lh)->p = (el); (el)->n = (lh); (el); })

/* adds the contents of a list <old> at the beginning of another list <new>.
 * The old list head remains untouched.
 */
#define LIST_SPLICE(new, old) do {				     \
		if (!LIST_ISEMPTY(old)) {			     \
			(old)->p->n = (new)->n; (old)->n->p = (new); \
			(new)->n->p = (old)->p; (new)->n = (old)->n; \
		}						     \
	} while (0)

/* adds the contents of a list whose first element is <old> and last one is
 * <old->prev> at the end of another list <new>. The old list DOES NOT have
 * any head here.
 */
#define LIST_SPLICE_END_DETACHED(new, old) do {              \
		typeof(new) __t;                             \
		(new)->p->n = (old);                         \
		(old)->p->n = (new);                         \
		__t = (old)->p;                              \
		(old)->p = (new)->p;                         \
		(new)->p = __t;                              \
	} while (0)

/* removes an element from a list and returns it. The element itself is left
 * untouched (its pointers still reference its former neighbors).
 */
#define LIST_DEL(el) ({ typeof(el) __ret = (el); (el)->n->p = (el)->p; (el)->p->n = (el)->n; (__ret); })

/* removes an element from a list, initializes it and returns it.
 * This is faster than LIST_DEL+LIST_INIT as we avoid reloading the pointers.
 */
#define LIST_DEL_INIT(el) ({ \
	typeof(el)       __ret = (el);                   \
	typeof(__ret->n) __n   = __ret->n;               \
	typeof(__ret->p) __p   = __ret->p;               \
	__n->p = __p; __p->n = __n;                      \
	__ret->n = __ret->p = __ret;                     \
	__ret;                                           \
})

/* returns a pointer of type <pt> to a structure containing a list head called
 * <el> at address <lh>. Note that <lh> can be the result of a function or macro
 * since it's used only once.
 * Example: LIST_ELEM(cur_node->args.next, struct node *, args)
 */
#define LIST_ELEM(lh, pt, el) ((pt)(((const char *)(lh)) - ((size_t)&((pt)NULL)->el)))

/* checks if the list head <lh> is empty or not */
#define LIST_ISEMPTY(lh) ((lh)->n == (lh))

/* checks if the list element <el> was added to a list or not.
 This only
 * works when detached elements are reinitialized (using LIST_DEL_INIT)
 */
#define LIST_ADDED(el) ((el)->n != (el))

/* returns a pointer of type <pt> to a structure following the element
 * which contains list head <lh>, which is known as element <el> in
 * struct pt.
 * Example: LIST_NEXT(args, struct node *, list)
 */
#define LIST_NEXT(lh, pt, el) (LIST_ELEM((lh)->n, pt, el))


/* returns a pointer of type <pt> to a structure preceding the element
 * which contains list head <lh>, which is known as element <el> in
 * struct pt.
 */
#undef LIST_PREV
#define LIST_PREV(lh, pt, el) (LIST_ELEM((lh)->p, pt, el))

/*
 * Simpler FOREACH_ITEM macro inspired from Linux sources.
 * Iterates <item> through a list of items of type "typeof(*item)" which are
 * linked via a "struct list" member named <member>. A pointer to the head of
 * the list is passed in <list_head>. No temporary variable is needed. Note
 * that <item> must not be modified during the loop.
 * Example: list_for_each_entry(cur_acl, known_acl, list) { ... };
 */
#define list_for_each_entry(item, list_head, member)                 \
	for (item = LIST_ELEM((list_head)->n, typeof(item), member); \
	     &item->member != (list_head);                           \
	     item = LIST_ELEM(item->member.n, typeof(item), member))

/*
 * Same as list_for_each_entry but starting from current point
 * Iterates <item> through the list starting from <item>
 * It's basically the same macro but without initializing item to the head of
 * the list.
 */
#define list_for_each_entry_from(item, list_head, member) \
	for ( ; &item->member != (list_head);             \
	     item = LIST_ELEM(item->member.n, typeof(item), member))

/*
 * Simpler FOREACH_ITEM_SAFE macro inspired from Linux sources.
 * Iterates <item> through a list of items of type "typeof(*item)" which are
 * linked via a "struct list" member named <member>. A pointer to the head of
 * the list is passed in <list_head>. A temporary variable <back> of same type
 * as <item> is needed so that <item> may safely be deleted if needed.
 * Example: list_for_each_entry_safe(cur_acl, tmp, known_acl, list) { ... };
 */
#define list_for_each_entry_safe(item, back, list_head, member)           \
	for (item = LIST_ELEM((list_head)->n, typeof(item), member),      \
	     back = LIST_ELEM(item->member.n, typeof(item), member);      \
	     &item->member != (list_head);                                \
	     item = back, back = LIST_ELEM(back->member.n, typeof(back), member))


/*
 * Same as list_for_each_entry_safe but starting from current point
 * Iterates <item> through the list starting from <item>
 * It's basically the same macro but without initializing item to the head of
 * the list.
 */
#define list_for_each_entry_safe_from(item, back, list_head, member) \
	for (back = LIST_ELEM(item->member.n, typeof(item), member); \
	     &item->member != (list_head);                           \
	     item = back, back = LIST_ELEM(back->member.n, typeof(back), member))

#include <common/hathreads.h>

/* Sentinel temporarily stored in a next/prev pointer while the link is being
 * changed; concurrent MT_* macros seeing it spin until the pointer is
 * restored, so it acts as a per-pointer lock tag.
 */
#define MT_LIST_BUSY ((struct mt_list *)1)

/*
 * Locked version of list manipulation macros.
 * It is OK to use those concurrently from multiple threads, as long as the
 * list is only used with the locked variants.
 */

/*
 * Add an item at the beginning of a list.
 * Returns 1 if we added the item, 0 otherwise (because it was already in a
 * list).
 */
#define MT_LIST_ADD(_lh, _el)                                              \
	({                                                                 \
		int _ret = 0; /* set to 1 only on successful insertion */  \
		struct mt_list *lh = (_lh), *el = (_el);                   \
		while (1) {                                                \
			struct mt_list *n, *n2;                            \
			struct mt_list *p, *p2;                            \
			/* lock the head's next pointer */                 \
			n = _HA_ATOMIC_XCHG(&(lh)->next, MT_LIST_BUSY);    \
			if (n == MT_LIST_BUSY)                             \
				continue;                                  \
			/* lock the first element's prev pointer */        \
			p = _HA_ATOMIC_XCHG(&n->prev, MT_LIST_BUSY);       \
			if (p == MT_LIST_BUSY) {                           \
				(lh)->next = n;                            \
				__ha_barrier_store();                      \
				continue;                                  \
			}                                                  \
			n2 = _HA_ATOMIC_XCHG(&el->next, MT_LIST_BUSY);     \
			if (n2 != el) { /* element already linked */       \
				/* roll everything back, retry if busy */  \
				if (n2 != MT_LIST_BUSY)                    \
					el->next = n2;                     \
				n->prev = p;                               \
				__ha_barrier_store();                      \
				lh->next = n;                              \
				__ha_barrier_store();                      \
				if (n2 == MT_LIST_BUSY)                    \
					continue;                          \
				break;                                     \
			}                                                  \
			p2 = _HA_ATOMIC_XCHG(&el->prev, MT_LIST_BUSY);     \
			if (p2 != el) { /* element already linked */       \
				/* roll everything back, retry if busy */  \
				if (p2 != MT_LIST_BUSY)                    \
					el->prev = p2;                     \
				n->prev = p;                               \
				el->next = el;                             \
				__ha_barrier_store();                      \
				lh->next = n;                              \
				__ha_barrier_store();                      \
				if (p2 == MT_LIST_BUSY)                    \
					continue;                          \
				break;                                     \
			}                                                  \
			/* all four pointers held: link the element in */  \
			(el)->next = n;                                    \
			(el)->prev = p;                                    \
			__ha_barrier_store();                              \
			n->prev = (el);                                    \
			__ha_barrier_store();                              \
			p->next = (el);                                    \
			__ha_barrier_store();                              \
			_ret = 1;                                          \
			break;                                             \
		}                                                          \
		(_ret);                                                    \
	})

/*
 * Add an item at the end of a list.
 * Returns 1 if we added the item, 0 otherwise (because it was already in a
 * list).
 */
#define MT_LIST_ADDQ(_lh, _el)                                             \
	({                                                                 \
		int _ret = 0; /* set to 1 only on successful insertion */  \
		struct mt_list *lh = (_lh), *el = (_el);                   \
		while (1) {                                                \
			struct mt_list *n, *n2;                            \
			struct mt_list *p, *p2;                            \
			/* lock the head's prev pointer */                 \
			p = _HA_ATOMIC_XCHG(&(lh)->prev, MT_LIST_BUSY);    \
			if (p == MT_LIST_BUSY)                             \
				continue;                                  \
			/* lock the last element's next pointer */         \
			n = _HA_ATOMIC_XCHG(&p->next, MT_LIST_BUSY);       \
			if (n == MT_LIST_BUSY) {                           \
				(lh)->prev = p;                            \
				__ha_barrier_store();                      \
				continue;                                  \
			}                                                  \
			p2 = _HA_ATOMIC_XCHG(&el->prev, MT_LIST_BUSY);     \
			if (p2 != el) { /* element already linked */       \
				/* roll everything back, retry if busy */  \
				if (p2 != MT_LIST_BUSY)                    \
					el->prev = p2;                     \
				p->next = n;                               \
				__ha_barrier_store();                      \
				lh->prev = p;                              \
				__ha_barrier_store();                      \
				if (p2 == MT_LIST_BUSY)                    \
					continue;                          \
				break;                                     \
			}                                                  \
			n2 = _HA_ATOMIC_XCHG(&el->next, MT_LIST_BUSY);     \
			if (n2 != el) { /* element already linked */       \
				/* roll everything back, retry if busy */  \
				if (n2 != MT_LIST_BUSY)                    \
					el->next = n2;                     \
				p->next = n;                               \
				el->prev = el;                             \
				__ha_barrier_store();                      \
				lh->prev = p;                              \
				__ha_barrier_store();                      \
				if (n2 == MT_LIST_BUSY)                    \
					continue;                          \
				break;                                     \
			}                                                  \
			/* all four pointers held: link the element in */  \
			(el)->next = n;                                    \
			(el)->prev = p;                                    \
			__ha_barrier_store();                              \
			p->next = (el);                                    \
			__ha_barrier_store();                              \
			n->prev = (el);                                    \
			__ha_barrier_store();                              \
			_ret = 1;                                          \
			break;                                             \
		}                                                          \
		(_ret);                                                    \
	})

/*
 * Detach a list from its head. A pointer to the first element is returned
 * and the list is closed. If the list was empty, NULL is returned. This may
 * exclusively be used with lists modified by MT_LIST_ADD/MT_LIST_ADDQ. This
 * is incompatible with MT_LIST_DEL run concurrently.
 * If there's at least one element, the next of the last element will always
 * be NULL.
 */
#define MT_LIST_BEHEAD(_lh) ({                                      \
	struct mt_list *lh = (_lh);                                 \
	struct mt_list *_n;                                         \
	struct mt_list *_p;                                         \
	while (1) {                                                 \
		/* lock the head's prev pointer */                  \
		_p = _HA_ATOMIC_XCHG(&(lh)->prev, MT_LIST_BUSY);    \
		if (_p == MT_LIST_BUSY)                             \
			continue;                                   \
		if (_p == (lh)) { /* empty list */                  \
			(lh)->prev = _p;                            \
			__ha_barrier_store();                       \
			_n = NULL;                                  \
			break;                                      \
		}                                                   \
		/* lock the head's next pointer */                  \
		_n = _HA_ATOMIC_XCHG(&(lh)->next, MT_LIST_BUSY);    \
		if (_n == MT_LIST_BUSY) {                           \
			(lh)->prev = _p;                            \
			__ha_barrier_store();                       \
			continue;                                   \
		}                                                   \
		if (_n == (lh)) { /* empty list */                  \
			(lh)->next = _n;                            \
			(lh)->prev = _p;                            \
			__ha_barrier_store();                       \
			_n = NULL;                                  \
			break;                                      \
		}                                                   \
		/* close the head, terminate the detached chain */  \
		(lh)->next = (lh);                                  \
		(lh)->prev = (lh);                                  \
		__ha_barrier_store();                               \
		_n->prev = _p;                                      \
		__ha_barrier_store();                               \
		_p->next = NULL;                                    \
		__ha_barrier_store();                               \
		break;                                              \
	}                                                           \
	(_n);                                                       \
})


/* Remove an item from a list.
 * Returns 1 if we removed the item, 0 otherwise (because it was in no list).
 */
#define MT_LIST_DEL(_el)                                                   \
	({                                                                 \
		int _ret = 0; /* set to 1 only if really unlinked */       \
		struct mt_list *el = (_el);                                \
		do {                                                       \
			while (1) {                                        \
				struct mt_list *n, *n2;                    \
				struct mt_list *p, *p2 = NULL;             \
				/* lock the element's own pointers */      \
				n = _HA_ATOMIC_XCHG(&(el)->next, MT_LIST_BUSY); \
				if (n == MT_LIST_BUSY)                     \
					continue;                          \
				p = _HA_ATOMIC_XCHG(&(el)->prev, MT_LIST_BUSY); \
				if (p == MT_LIST_BUSY) {                   \
					(el)->next = n;                    \
					__ha_barrier_store();              \
					continue;                          \
				}                                          \
				/* lock the neighbours' facing pointers */ \
				if (p != (el)) {                           \
					p2 = _HA_ATOMIC_XCHG(&p->next, MT_LIST_BUSY);\
					if (p2 == MT_LIST_BUSY) {          \
						(el)->prev = p;            \
						(el)->next = n;            \
						__ha_barrier_store();      \
						continue;                  \
					}                                  \
				}                                          \
				if (n != (el)) {                           \
					n2 = _HA_ATOMIC_XCHG(&n->prev, MT_LIST_BUSY);\
					if (n2 == MT_LIST_BUSY) {          \
						if (p2 != NULL)            \
							p->next = p2;      \
						(el)->prev = p;            \
						(el)->next = n;            \
						__ha_barrier_store();      \
						continue;                  \
					}                                  \
				}                                          \
				n->prev = p;                               \
				p->next = n;                               \
				/* self-pointing means it was detached */  \
				if (p != (el) && n != (el))                \
					_ret = 1;                          \
				__ha_barrier_store();                      \
				(el)->prev = (el);                         \
				(el)->next = (el);                         \
				__ha_barrier_store();                      \
				break;                                     \
			}                                                  \
		} while (0);                                               \
		(_ret);                                                    \
	})


/* Remove the first element from the list, and return it */
#define MT_LIST_POP(_lh, pt, el)                                           \
	({                                                                 \
		void *_ret;                                                \
		struct mt_list *lh = (_lh);                                \
		while (1) {                                                \
			struct mt_list *n, *n2;                            \
			struct mt_list *p, *p2;                            \
			/* lock the head's next pointer */                 \
			n = _HA_ATOMIC_XCHG(&(lh)->next, MT_LIST_BUSY);    \
			if (n == MT_LIST_BUSY)                             \
				continue;                                  \
			if (n == (lh)) { /* empty list */                  \
				(lh)->next = lh;                           \
				__ha_barrier_store();                      \
				_ret = NULL;                               \
				break;                                     \
			}                                                  \
			/* lock the first element's pointers */            \
			p = _HA_ATOMIC_XCHG(&n->prev, MT_LIST_BUSY);       \
			if (p == MT_LIST_BUSY) {                           \
				(lh)->next = n;                            \
				__ha_barrier_store();                      \
				continue;                                  \
			}                                                  \
			n2 = _HA_ATOMIC_XCHG(&n->next, MT_LIST_BUSY);      \
			if (n2 == MT_LIST_BUSY) {                          \
				n->prev = p;                               \
				__ha_barrier_store();                      \
				(lh)->next = n;                            \
				__ha_barrier_store();                      \
				continue;                                  \
			}                                                  \
			/* lock the second element's prev pointer */       \
			p2 = _HA_ATOMIC_XCHG(&n2->prev, MT_LIST_BUSY);     \
			if (p2 == MT_LIST_BUSY) {                          \
				n->next = n2;                              \
				n->prev = p;                               \
				__ha_barrier_store();                      \
				(lh)->next = n;                            \
				__ha_barrier_store();                      \
				continue;                                  \
			}                                                  \
			/* unlink <n> and make it self-point (detached) */ \
			(lh)->next = n2;                                   \
			(n2)->prev = (lh);                                 \
			__ha_barrier_store();                              \
			(n)->prev = (n);                                   \
			(n)->next = (n);                                   \
			__ha_barrier_store();                              \
			_ret = MT_LIST_ELEM(n, pt, el);                    \
			break;                                             \
		}                                                          \
		(_ret);                                                    \
	})

/* returns the address of <a>, usable as an anonymous list head */
#define MT_LIST_HEAD(a)	((void *)(&(a)))

/* makes list head <l> an empty list (both pointers point to itself) */
#define MT_LIST_INIT(l) ((l)->next = (l)->prev = (l))

/* static initializer for a list head <l> */
#define MT_LIST_HEAD_INIT(l) { &l, &l }

/* returns a pointer of type <pt> to a structure containing a list head called
 * <el> at address <lh>. Note that <lh> can be the result of a function or macro
 * since it's used only once.
 * Example: MT_LIST_ELEM(cur_node->args.next, struct node *, args)
 */
#define MT_LIST_ELEM(lh, pt, el) ((pt)(((const char *)(lh)) - ((size_t)&((pt)NULL)->el)))

/* checks if the list head <lh> is empty or not */
#define MT_LIST_ISEMPTY(lh) ((lh)->next == (lh))

/* returns a pointer of type <pt> to a structure following the element
 * which contains list head <lh>, which is known as element <el> in
 * struct pt.
 * Example: MT_LIST_NEXT(args, struct node *, list)
 */
#define MT_LIST_NEXT(lh, pt, el) (MT_LIST_ELEM((lh)->next, pt, el))


/* returns a pointer of type <pt> to a structure preceding the element
 * which contains list head <lh>, which is known as element <el> in
 * struct pt.
 */
#undef MT_LIST_PREV
#define MT_LIST_PREV(lh, pt, el) (MT_LIST_ELEM((lh)->prev, pt, el))

/* checks if the list element <el> was added to a list or not. This only
 * works when detached elements are reinitialized to point to themselves
 * (which MT_LIST_DEL and MT_LIST_POP do).
 */
#define MT_LIST_ADDED(el) ((el)->next != (el))

/* Lock an element in the list, to be sure it won't be removed.
532 * It needs to be synchronized somehow to be sure it's not removed 533 * from the list in the meanwhile. 534 * This returns a struct mt_list, that will be needed at unlock time. 535 */ 536 #define MT_LIST_LOCK_ELT(_el) \ 537 ({ \ 538 struct mt_list ret; \ 539 struct mt_liet *el = (_el); \ 540 while (1) { \ 541 struct mt_list *n, *n2; \ 542 struct mt_list *p, *p2 = NULL; \ 543 n = _HA_ATOMIC_XCHG(&(el)->next, MT_LIST_BUSY); \ 544 if (n == MT_LIST_BUSY) \ 545 continue; \ 546 p = _HA_ATOMIC_XCHG(&(el)->prev, MT_LIST_BUSY); \ 547 if (p == MT_LIST_BUSY) { \ 548 (el)->next = n; \ 549 __ha_barrier_store(); \ 550 continue; \ 551 } \ 552 if (p != (el)) { \ 553 p2 = _HA_ATOMIC_XCHG(&p->next, MT_LIST_BUSY);\ 554 if (p2 == MT_LIST_BUSY) { \ 555 (el)->prev = p; \ 556 (el)->next = n; \ 557 __ha_barrier_store(); \ 558 continue; \ 559 } \ 560 } \ 561 if (n != (el)) { \ 562 n2 = _HA_ATOMIC_XCHG(&n->prev, MT_LIST_BUSY);\ 563 if (n2 == MT_LIST_BUSY) { \ 564 if (p2 != NULL) \ 565 p->next = p2; \ 566 (el)->prev = p; \ 567 (el)->next = n; \ 568 __ha_barrier_store(); \ 569 continue; \ 570 } \ 571 } \ 572 ret.next = n; \ 573 ret.prev = p; \ 574 break; \ 575 } \ 576 ret; \ 577 }) 578 579 /* Unlock an element previously locked by MT_LIST_LOCK_ELT. "np" is the 580 * struct mt_list returned by MT_LIST_LOCK_ELT(). 
581 */ 582 #define MT_LIST_UNLOCK_ELT(_el, np) \ 583 do { \ 584 struct mt_list *n = (np).next, *p = (np).prev; \ 585 struct mt_list *el = (_el); \ 586 (el)->next = n; \ 587 (el)->prev = p; \ 588 if (n != (el)) \ 589 n->prev = (el); \ 590 if (p != (el)) \ 591 p->next = (el); \ 592 } while (0) 593 594 /* Internal macroes for the foreach macroes */ 595 #define _MT_LIST_UNLOCK_NEXT(el, np) \ 596 do { \ 597 struct mt_list *n = (np); \ 598 (el)->next = n; \ 599 if (n != (el)) \ 600 n->prev = (el); \ 601 } while (0) 602 603 /* Internal macroes for the foreach macroes */ 604 #define _MT_LIST_UNLOCK_PREV(el, np) \ 605 do { \ 606 struct mt_list *p = (np); \ 607 (el)->prev = p; \ 608 if (p != (el)) \ 609 p->next = (el); \ 610 } while (0) 611 612 #define _MT_LIST_LOCK_NEXT(el) \ 613 ({ \ 614 struct mt_list *n = NULL; \ 615 while (1) { \ 616 struct mt_list *n2; \ 617 n = _HA_ATOMIC_XCHG(&((el)->next), MT_LIST_BUSY); \ 618 if (n == MT_LIST_BUSY) \ 619 continue; \ 620 if (n != (el)) { \ 621 n2 = _HA_ATOMIC_XCHG(&n->prev, MT_LIST_BUSY);\ 622 if (n2 == MT_LIST_BUSY) { \ 623 (el)->next = n; \ 624 __ha_barrier_store(); \ 625 continue; \ 626 } \ 627 } \ 628 break; \ 629 } \ 630 n; \ 631 }) 632 633 #define _MT_LIST_LOCK_PREV(el) \ 634 ({ \ 635 struct mt_list *p = NULL; \ 636 while (1) { \ 637 struct mt_list *p2; \ 638 p = _HA_ATOMIC_XCHG(&((el)->prev), MT_LIST_BUSY); \ 639 if (p == MT_LIST_BUSY) \ 640 continue; \ 641 if (p != (el)) { \ 642 p2 = _HA_ATOMIC_XCHG(&p->next, MT_LIST_BUSY);\ 643 if (p2 == MT_LIST_BUSY) { \ 644 (el)->prev = p; \ 645 __ha_barrier_store(); \ 646 continue; \ 647 } \ 648 } \ 649 break; \ 650 } \ 651 p; \ 652 }) 653 654 #define _MT_LIST_RELINK_DELETED(elt2) \ 655 do { \ 656 struct mt_list *n = elt2.next, *p = elt2.prev; \ 657 ALREADY_CHECKED(p); \ 658 n->prev = p; \ 659 p->next = n; \ 660 } while (0); 661 662 /* Equivalent of MT_LIST_DEL(), to be used when parsing the list with mt_list_entry_for_each_safe(). 
 * It should be the element currently parsed (tmpelt1)
 */
#define MT_LIST_DEL_SAFE(_el)                                              \
	do {                                                               \
		struct mt_list *el = (_el);                                \
		(el)->prev = (el);                                         \
		(el)->next = (el);                                         \
		/* NULLing <_el> tells the iterator the element is gone */ \
		(_el) = NULL;                                              \
	} while (0)

/* Simpler FOREACH_ITEM_SAFE macro inspired from Linux sources.
 * Iterates <item> through a list of items of type "typeof(*item)" which are
 * linked via a "struct list" member named <member>. A pointer to the head of
 * the list is passed in <list_head>. A temporary variable <back> of same type
 * as <item> is needed so that <item> may safely be deleted if needed.
 * tmpelt1 is a temporary struct mt_list *, and tmpelt2 is a temporary
 * struct mt_list, used internally, both are needed for MT_LIST_DEL_SAFE.
 * Example: mt_list_for_each_entry_safe(cur_acl, known_acl, list, elt1, elt2)
 * { ... };
 * If you want to remove the current element, please use MT_LIST_DEL_SAFE.
 */
#define mt_list_for_each_entry_safe(item, list_head, member, tmpelt, tmpelt2)           \
	/* post-loop cleanup: unlock (or relink past) the last visited element */       \
	for ((tmpelt) = NULL; (tmpelt) != MT_LIST_BUSY; ({                              \
		if (tmpelt) {                                                           \
			if (tmpelt2.prev)                                               \
				MT_LIST_UNLOCK_ELT(tmpelt, tmpelt2);                    \
			else                                                            \
				_MT_LIST_UNLOCK_NEXT(tmpelt, tmpelt2.next);             \
		} else                                                                  \
			_MT_LIST_RELINK_DELETED(tmpelt2);                               \
		(tmpelt) = MT_LIST_BUSY;                                                \
	}))                                                                             \
	/* walk: keep the current element and its links locked while the body runs */   \
	for ((tmpelt) = (list_head), (tmpelt2).prev = NULL, (tmpelt2).next = _MT_LIST_LOCK_NEXT(tmpelt); ({ \
		(item) = MT_LIST_ELEM((tmpelt2.next), typeof(item), member);            \
		if (&item->member != (list_head)) {                                     \
			if (tmpelt2.prev != &item->member)                              \
				tmpelt2.next = _MT_LIST_LOCK_NEXT(&item->member);       \
			else                                                            \
				tmpelt2.next = tmpelt;                                  \
			if (tmpelt != NULL) {                                           \
				if (tmpelt2.prev)                                       \
					_MT_LIST_UNLOCK_PREV(tmpelt, tmpelt2.prev);     \
				tmpelt2.prev = tmpelt;                                  \
			}                                                               \
			(tmpelt) = &item->member;                                       \
		}                                                                       \
	}),                                                                             \
	     &item->member != (list_head);)
#endif /* _COMMON_MINI_CLIST_H */