Changes in kernel/generic/src/adt/cht.c [63e27ef:8f9c808] in mainline
- Files: 1 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/generic/src/adt/cht.c
r63e27ef r8f9c808 290 290 #include <adt/cht.h> 291 291 #include <adt/hash.h> 292 #include <assert.h> 292 #include <debug.h> 293 #include <memstr.h> 293 294 #include <mm/slab.h> 294 295 #include <arch/barrier.h> … … 522 523 bool can_block, cht_ops_t *op) 523 524 { 524 assert(h);525 assert(op && op->hash && op->key_hash && op->equal && op->key_equal);525 ASSERT(h); 526 ASSERT(op && op->hash && op->key_hash && op->equal && op->key_equal); 526 527 /* Memoized hashes are stored in the rcu_link.func function pointer. */ 527 static_assert(sizeof(size_t) == sizeof(rcu_func_t), "");528 assert(sentinel.hash == (uintptr_t)sentinel.rcu_link.func);528 STATIC_ASSERT(sizeof(size_t) == sizeof(rcu_func_t)); 529 ASSERT(sentinel.hash == (uintptr_t)sentinel.rcu_link.func); 529 530 530 531 /* All operations are compulsory. */ … … 625 626 626 627 /* You must clear the table of items. Otherwise cht_destroy will leak. */ 627 assert(atomic_get(&h->item_cnt) == 0);628 ASSERT(atomic_get(&h->item_cnt) == 0); 628 629 } 629 630 … … 685 686 static inline cht_link_t *find_lazy(cht_t *h, void *key) 686 687 { 687 assert(h);688 ASSERT(h); 688 689 /* See docs to cht_find() and cht_find_lazy(). 
*/ 689 assert(rcu_read_locked());690 ASSERT(rcu_read_locked()); 690 691 691 692 size_t hash = calc_key_hash(h, key); … … 731 732 cht_link_t *cht_find_next_lazy(cht_t *h, const cht_link_t *item) 732 733 { 733 assert(h);734 assert(rcu_read_locked());735 assert(item);734 ASSERT(h); 735 ASSERT(rcu_read_locked()); 736 ASSERT(item); 736 737 737 738 return find_duplicate(h, item, calc_node_hash(h, item), get_next(item->link)); … … 755 756 do { 756 757 cur = get_next(prev); 757 assert(cur);758 ASSERT(cur); 758 759 prev = cur->link; 759 760 } while (node_hash(h, cur) < search_hash); … … 770 771 771 772 cur = get_next(cur->link); 772 assert(cur);773 ASSERT(cur); 773 774 } 774 775 … … 790 791 marked_ptr_t old_head, size_t old_idx) 791 792 { 792 assert(N_INVALID == get_mark(old_head));793 assert(h->new_b);793 ASSERT(N_INVALID == get_mark(old_head)); 794 ASSERT(h->new_b); 794 795 795 796 size_t new_idx = calc_bucket_idx(hash, h->new_b->order); … … 903 904 * traversing search_head. 904 905 */ 905 assert(N_JOIN & get_mark(get_next(old_head)->link));906 ASSERT(N_JOIN & get_mark(get_next(old_head)->link)); 906 907 return search_bucket(h, old_head, key, hash); 907 908 } … … 913 914 * sure all cpus see that the new table replaced the old one. 914 915 */ 915 assert(h->b->order == h->new_b->order);916 ASSERT(h->b->order == h->new_b->order); 916 917 /* 917 918 * The resizer must ensure all new bucket heads are visible before 918 919 * replacing the old table. 
919 920 */ 920 assert(N_NORMAL == get_mark(new_head));921 ASSERT(N_NORMAL == get_mark(new_head)); 921 922 return search_bucket(h, new_head, key, hash); 922 923 } … … 961 962 bool cht_insert_unique(cht_t *h, cht_link_t *item, cht_link_t **dup_item) 962 963 { 963 assert(rcu_read_locked());964 assert(dup_item);964 ASSERT(rcu_read_locked()); 965 ASSERT(dup_item); 965 966 return insert_impl(h, item, dup_item); 966 967 } … … 1060 1061 return ret == make_link(wnd->cur, jf_mark); 1061 1062 } else { 1062 assert(walk_mode == WM_LEAVE_JOIN);1063 ASSERT(walk_mode == WM_LEAVE_JOIN); 1063 1064 1064 1065 item->link = make_link(wnd->cur, N_NORMAL); … … 1087 1088 cht_link_t *cur, cht_link_t **dup_item) 1088 1089 { 1089 assert(cur);1090 assert(cur == &sentinel || hash <= node_hash(h, cur)1090 ASSERT(cur); 1091 ASSERT(cur == &sentinel || hash <= node_hash(h, cur) 1091 1092 || node_hash(h, cur) == h->invalid_hash); 1092 1093 … … 1110 1111 cht_link_t *start) 1111 1112 { 1112 assert(hash <= node_hash(h, start) || h->invalid_hash == node_hash(h, start));1113 ASSERT(hash <= node_hash(h, start) || h->invalid_hash == node_hash(h, start)); 1113 1114 1114 1115 cht_link_t *cur = start; 1115 1116 1116 1117 try_again: 1117 assert(cur);1118 ASSERT(cur); 1118 1119 1119 1120 while (node_hash(h, cur) == hash) { 1120 assert(cur != &sentinel);1121 ASSERT(cur != &sentinel); 1121 1122 1122 1123 bool deleted = (N_DELETED & get_mark(cur->link)); … … 1127 1128 1128 1129 cur = get_next(cur->link); 1129 assert(cur);1130 ASSERT(cur); 1130 1131 } 1131 1132 … … 1142 1143 size_t cht_remove_key(cht_t *h, void *key) 1143 1144 { 1144 assert(h);1145 ASSERT(h); 1145 1146 1146 1147 size_t hash = calc_key_hash(h, key); … … 1163 1164 bool cht_remove_item(cht_t *h, cht_link_t *item) 1164 1165 { 1165 assert(h);1166 assert(item);1166 ASSERT(h); 1167 ASSERT(item); 1167 1168 /* Otherwise a concurrent cht_remove_key might free the item. 
*/ 1168 assert(rcu_read_locked());1169 ASSERT(rcu_read_locked()); 1169 1170 1170 1171 /* … … 1262 1263 bool *deleted_but_gc, bool *resizing) 1263 1264 { 1264 assert(wnd->cur && wnd->cur != &sentinel);1265 ASSERT(wnd->cur && wnd->cur != &sentinel); 1265 1266 1266 1267 *deleted_but_gc = false; … … 1292 1293 bool *resizing) 1293 1294 { 1294 assert(cur && cur != &sentinel);1295 ASSERT(cur && cur != &sentinel); 1295 1296 1296 1297 /* … … 1310 1311 } 1311 1312 } else { 1312 static_assert(N_JOIN == N_JOIN_FOLLOWS, "");1313 STATIC_ASSERT(N_JOIN == N_JOIN_FOLLOWS); 1313 1314 1314 1315 /* Keep the N_JOIN/N_JOIN_FOLLOWS mark but strip N_DELETED. */ … … 1329 1330 bool *resizing) 1330 1331 { 1331 assert(wnd->cur != &sentinel);1332 assert(wnd->cur && (N_DELETED & get_mark(wnd->cur->link)));1332 ASSERT(wnd->cur != &sentinel); 1333 ASSERT(wnd->cur && (N_DELETED & get_mark(wnd->cur->link))); 1333 1334 1334 1335 cht_link_t *next = get_next(wnd->cur->link); … … 1336 1337 if (walk_mode == WM_LEAVE_JOIN) { 1337 1338 /* Never try to unlink join nodes. */ 1338 assert(!(N_JOIN & get_mark(wnd->cur->link)));1339 ASSERT(!(N_JOIN & get_mark(wnd->cur->link))); 1339 1340 1340 1341 mark_t pred_mark = get_mark(*wnd->ppred); … … 1348 1349 return false; 1349 1350 } else { 1350 assert(walk_mode == WM_MOVE_JOIN_FOLLOWS || walk_mode == WM_NORMAL);1351 ASSERT(walk_mode == WM_MOVE_JOIN_FOLLOWS || walk_mode == WM_NORMAL); 1351 1352 /* Move the JF mark if set. Clear DEL mark. */ 1352 1353 mark_t cur_mark = N_JOIN_FOLLOWS & get_mark(wnd->cur->link); … … 1398 1399 equal_pred_t pred, void *pred_arg, wnd_t *wnd, bool *resizing) 1399 1400 { 1400 assert(wnd->cur);1401 ASSERT(wnd->cur); 1401 1402 1402 1403 if (wnd->cur == &sentinel) … … 1415 1416 1416 1417 while (cur_hash <= hash) { 1417 assert(wnd->cur && wnd->cur != &sentinel);1418 ASSERT(wnd->cur && wnd->cur != &sentinel); 1418 1419 1419 1420 /* GC any deleted nodes on the way. 
*/ … … 1436 1437 if (cur_hash == h->invalid_hash) { 1437 1438 next_wnd(wnd); 1438 assert(wnd->cur);1439 ASSERT(wnd->cur); 1439 1440 goto try_again; 1440 1441 } … … 1469 1470 { 1470 1471 try_again: 1471 assert(wnd->cur);1472 ASSERT(wnd->cur); 1472 1473 1473 1474 while (node_hash(h, wnd->cur) < hash) { … … 1482 1483 } 1483 1484 1484 assert(wnd->cur);1485 ASSERT(wnd->cur); 1485 1486 } 1486 1487 … … 1498 1499 bool *resizing) 1499 1500 { 1500 assert(N_DELETED & get_mark(wnd->cur->link));1501 ASSERT(N_DELETED & get_mark(wnd->cur->link)); 1501 1502 1502 1503 /* Skip deleted JOIN nodes. */ … … 1505 1506 } else { 1506 1507 /* Ordinary deleted node or a deleted JOIN_FOLLOWS. */ 1507 assert(walk_mode != WM_LEAVE_JOIN1508 ASSERT(walk_mode != WM_LEAVE_JOIN 1508 1509 || !((N_JOIN | N_JOIN_FOLLOWS) & get_mark(wnd->cur->link))); 1509 1510 … … 1546 1547 * it 1547 1548 */ 1548 assert(h->b->order > h->new_b->order);1549 assert(wnd->cur);1549 ASSERT(h->b->order > h->new_b->order); 1550 ASSERT(wnd->cur); 1550 1551 1551 1552 /* Either we did not need the joining link or we have already followed it.*/ … … 1620 1621 /* The hash belongs to the moved bucket. */ 1621 1622 if (move_dest_idx == new_idx) { 1622 assert(pmoved_head == pnew_head);1623 ASSERT(pmoved_head == pnew_head); 1623 1624 /* 1624 1625 * move_head() makes the new head of the moved bucket visible. 1625 1626 * The new head may be marked with a JOIN_FOLLOWS 1626 1627 */ 1627 assert(!(N_CONST & get_mark(*pmoved_head)));1628 ASSERT(!(N_CONST & get_mark(*pmoved_head))); 1628 1629 *walk_mode = WM_MOVE_JOIN_FOLLOWS; 1629 1630 } else { 1630 assert(pmoved_head != pnew_head);1631 ASSERT(pmoved_head != pnew_head); 1631 1632 /* 1632 1633 * The hash belongs to the bucket that is the result of splitting … … 1643 1644 * JOIN_FOLLOWS in this part of split bucket. 
1644 1645 */ 1645 assert(N_NORMAL == get_mark(*pnew_head));1646 ASSERT(N_NORMAL == get_mark(*pnew_head)); 1646 1647 } 1647 1648 … … 1675 1676 1676 1677 /* move_head() or join_buckets() makes it so or makes the mark visible.*/ 1677 assert(N_INVALID == get_mark(*pold_head));1678 ASSERT(N_INVALID == get_mark(*pold_head)); 1678 1679 /* move_head() makes it visible. No JOIN_FOLLOWS used when shrinking. */ 1679 assert(N_NORMAL == get_mark(*pnew_head));1680 ASSERT(N_NORMAL == get_mark(*pnew_head)); 1680 1681 1681 1682 *walk_mode = WM_LEAVE_JOIN; … … 1685 1686 * readers to notice that the old table had been replaced. 1686 1687 */ 1687 assert(b == h->new_b);1688 ASSERT(b == h->new_b); 1688 1689 *walk_mode = WM_NORMAL; 1689 1690 } … … 1712 1713 { 1713 1714 /* Head move has to in progress already when calling this func. */ 1714 assert(N_CONST & get_mark(*psrc_head));1715 ASSERT(N_CONST & get_mark(*psrc_head)); 1715 1716 1716 1717 /* Head already moved. */ … … 1725 1726 } 1726 1727 1727 assert(!(N_CONST & get_mark(*pdest_head)));1728 ASSERT(!(N_CONST & get_mark(*pdest_head))); 1728 1729 } 1729 1730 … … 1756 1757 static void complete_head_move(marked_ptr_t *psrc_head, marked_ptr_t *pdest_head) 1757 1758 { 1758 assert(N_JOIN_FOLLOWS != get_mark(*psrc_head));1759 assert(N_CONST & get_mark(*psrc_head));1759 ASSERT(N_JOIN_FOLLOWS != get_mark(*psrc_head)); 1760 ASSERT(N_CONST & get_mark(*psrc_head)); 1760 1761 1761 1762 cht_link_t *next = get_next(*psrc_head); … … 1763 1764 DBG(marked_ptr_t ret = ) 1764 1765 cas_link(pdest_head, &sentinel, N_INVALID, next, N_NORMAL); 1765 assert(ret == make_link(&sentinel, N_INVALID) || (N_NORMAL == get_mark(ret)));1766 ASSERT(ret == make_link(&sentinel, N_INVALID) || (N_NORMAL == get_mark(ret))); 1766 1767 cas_order_barrier(); 1767 1768 1768 1769 DBG(ret = ) 1769 1770 cas_link(psrc_head, next, N_CONST, next, N_INVALID); 1770 assert(ret == make_link(next, N_CONST) || (N_INVALID == get_mark(ret)));1771 ASSERT(ret == make_link(next, N_CONST) || 
(N_INVALID == get_mark(ret))); 1771 1772 cas_order_barrier(); 1772 1773 } … … 1861 1862 DBG(marked_ptr_t ret = ) 1862 1863 cas_link(pdest_head, &sentinel, N_INVALID, wnd.cur, N_NORMAL); 1863 assert(ret == make_link(&sentinel, N_INVALID) || (N_NORMAL == get_mark(ret)));1864 ASSERT(ret == make_link(&sentinel, N_INVALID) || (N_NORMAL == get_mark(ret))); 1864 1865 cas_order_barrier(); 1865 1866 … … 1905 1906 1906 1907 /* Must not report that the table is resizing if WM_MOVE_JOIN_FOLLOWS.*/ 1907 assert(!resizing);1908 ASSERT(!resizing); 1908 1909 /* 1909 1910 * Mark the last node of the first half of the split bucket … … 2042 2043 DBG(marked_ptr_t ret = ) 2043 2044 cas_link(psrc_head, join_node, N_CONST, join_node, N_INVALID); 2044 assert(ret == make_link(join_node, N_CONST) || (N_INVALID == get_mark(ret)));2045 ASSERT(ret == make_link(join_node, N_CONST) || (N_INVALID == get_mark(ret))); 2045 2046 cas_order_barrier(); 2046 2047 … … 2073 2074 continue; 2074 2075 2075 assert(!resizing);2076 ASSERT(!resizing); 2076 2077 2077 2078 if (wnd.cur != &sentinel) { 2078 2079 /* Must be from the new appended bucket. */ 2079 assert(split_hash <= node_hash(h, wnd.cur)2080 ASSERT(split_hash <= node_hash(h, wnd.cur) 2080 2081 || h->invalid_hash == node_hash(h, wnd.cur)); 2081 2082 return; … … 2096 2097 static void free_later(cht_t *h, cht_link_t *item) 2097 2098 { 2098 assert(item != &sentinel);2099 ASSERT(item != &sentinel); 2099 2100 2100 2101 /* … … 2156 2157 2157 2158 #ifdef CONFIG_DEBUG 2158 assert(h->b);2159 ASSERT(h->b); 2159 2160 /* Make resize_reqs visible. */ 2160 2161 read_barrier(); 2161 assert(0 < atomic_get(&h->resize_reqs));2162 ASSERT(0 < atomic_get(&h->resize_reqs)); 2162 2163 #endif 2163 2164 … … 2336 2337 /* Set the invalid joinee head to NULL. 
*/ 2337 2338 if (old_idx != move_src_idx) { 2338 assert(N_INVALID == get_mark(h->b->head[old_idx]));2339 ASSERT(N_INVALID == get_mark(h->b->head[old_idx])); 2339 2340 2340 2341 if (&sentinel != get_next(h->b->head[old_idx])) … … 2392 2393 marked_ptr_t *new_head) 2393 2394 { 2394 assert(join_node != &sentinel);2395 assert(join_node && (N_JOIN & get_mark(join_node->link)));2395 ASSERT(join_node != &sentinel); 2396 ASSERT(join_node && (N_JOIN & get_mark(join_node->link))); 2396 2397 2397 2398 bool done; … … 2409 2410 /* Done if the mark was cleared. Retry if a new node was inserted. */ 2410 2411 done = (ret == jn_link); 2411 assert(ret == jn_link || (get_mark(ret) & N_JOIN));2412 ASSERT(ret == jn_link || (get_mark(ret) & N_JOIN)); 2412 2413 } while (!done); 2413 2414 … … 2432 2433 join_node, &wnd, &resizing); 2433 2434 2434 assert(!resizing);2435 ASSERT(!resizing); 2435 2436 } while (!done); 2436 2437 } … … 2439 2440 static void cleanup_join_follows(cht_t *h, marked_ptr_t *new_head) 2440 2441 { 2441 assert(new_head);2442 ASSERT(new_head); 2442 2443 2443 2444 rcu_read_lock(); … … 2464 2465 /* GC any deleted nodes on the way - even deleted JOIN_FOLLOWS. */ 2465 2466 if (N_DELETED & get_mark(*cur_link)) { 2466 assert(cur_link != new_head);2467 assert(wnd.ppred && wnd.cur && wnd.cur != &sentinel);2468 assert(cur_link == &wnd.cur->link);2467 ASSERT(cur_link != new_head); 2468 ASSERT(wnd.ppred && wnd.cur && wnd.cur != &sentinel); 2469 ASSERT(cur_link == &wnd.cur->link); 2469 2470 2470 2471 bool dummy; … … 2484 2485 cas_link(cur_link, next, N_JOIN_FOLLOWS, &sentinel, N_NORMAL); 2485 2486 2486 assert(next == &sentinel2487 ASSERT(next == &sentinel 2487 2488 || ((N_JOIN | N_JOIN_FOLLOWS) & get_mark(ret))); 2488 2489 … … 2505 2506 2506 2507 /* We must encounter a JF node before we reach the end of the bucket. 
*/ 2507 assert(wnd.cur && wnd.cur != &sentinel);2508 ASSERT(wnd.cur && wnd.cur != &sentinel); 2508 2509 cur_link = &wnd.cur->link; 2509 2510 } … … 2519 2520 static inline size_t calc_split_hash(size_t split_idx, size_t order) 2520 2521 { 2521 assert(1 <= order && order <= 8 * sizeof(size_t));2522 ASSERT(1 <= order && order <= 8 * sizeof(size_t)); 2522 2523 return split_idx << (8 * sizeof(size_t) - order); 2523 2524 } … … 2526 2527 static inline size_t calc_bucket_idx(size_t hash, size_t order) 2527 2528 { 2528 assert(1 <= order && order <= 8 * sizeof(size_t));2529 ASSERT(1 <= order && order <= 8 * sizeof(size_t)); 2529 2530 return hash >> (8 * sizeof(size_t) - order); 2530 2531 } … … 2558 2559 static inline size_t node_hash(cht_t *h, const cht_link_t *item) 2559 2560 { 2560 assert(item->hash == h->invalid_hash2561 ASSERT(item->hash == h->invalid_hash 2561 2562 || item->hash == sentinel.hash 2562 2563 || item->hash == calc_node_hash(h, item)); … … 2568 2569 static inline size_t calc_node_hash(cht_t *h, const cht_link_t *item) 2569 2570 { 2570 assert(item != &sentinel);2571 ASSERT(item != &sentinel); 2571 2572 /* 2572 2573 * Clear the lowest order bit in order for sentinel's node hash … … 2587 2588 marked_ptr_t ptr = (marked_ptr_t) next; 2588 2589 2589 assert(!(ptr & N_MARK_MASK));2590 assert(!((unsigned)mark & ~N_MARK_MASK));2590 ASSERT(!(ptr & N_MARK_MASK)); 2591 ASSERT(!((unsigned)mark & ~N_MARK_MASK)); 2591 2592 2592 2593 return ptr | mark; … … 2608 2609 static inline void next_wnd(wnd_t *wnd) 2609 2610 { 2610 assert(wnd);2611 assert(wnd->cur);2611 ASSERT(wnd); 2612 ASSERT(wnd->cur); 2612 2613 2613 2614 wnd->last = wnd->cur; … … 2635 2636 marked_ptr_t new) 2636 2637 { 2637 assert(link != &sentinel.link);2638 ASSERT(link != &sentinel.link); 2638 2639 /* 2639 2640 * cas(x) on the same location x on one cpu must be ordered, but do not
Note: See TracChangeset for help on using the changeset viewer.