Changes in kernel/generic/src/adt/cht.c [8f9c808:63e27ef] in mainline
Files: 1 edited
Legend:
- Unmodified
- Added
- Removed
kernel/generic/src/adt/cht.c
r8f9c808 r63e27ef 290 290 #include <adt/cht.h> 291 291 #include <adt/hash.h> 292 #include <debug.h> 293 #include <memstr.h> 292 #include <assert.h> 294 293 #include <mm/slab.h> 295 294 #include <arch/barrier.h> … … 523 522 bool can_block, cht_ops_t *op) 524 523 { 525 ASSERT(h);526 ASSERT(op && op->hash && op->key_hash && op->equal && op->key_equal);524 assert(h); 525 assert(op && op->hash && op->key_hash && op->equal && op->key_equal); 527 526 /* Memoized hashes are stored in the rcu_link.func function pointer. */ 528 STATIC_ASSERT(sizeof(size_t) == sizeof(rcu_func_t));529 ASSERT(sentinel.hash == (uintptr_t)sentinel.rcu_link.func);527 static_assert(sizeof(size_t) == sizeof(rcu_func_t), ""); 528 assert(sentinel.hash == (uintptr_t)sentinel.rcu_link.func); 530 529 531 530 /* All operations are compulsory. */ … … 626 625 627 626 /* You must clear the table of items. Otherwise cht_destroy will leak. */ 628 ASSERT(atomic_get(&h->item_cnt) == 0);627 assert(atomic_get(&h->item_cnt) == 0); 629 628 } 630 629 … … 686 685 static inline cht_link_t *find_lazy(cht_t *h, void *key) 687 686 { 688 ASSERT(h);687 assert(h); 689 688 /* See docs to cht_find() and cht_find_lazy(). 
*/ 690 ASSERT(rcu_read_locked());689 assert(rcu_read_locked()); 691 690 692 691 size_t hash = calc_key_hash(h, key); … … 732 731 cht_link_t *cht_find_next_lazy(cht_t *h, const cht_link_t *item) 733 732 { 734 ASSERT(h);735 ASSERT(rcu_read_locked());736 ASSERT(item);733 assert(h); 734 assert(rcu_read_locked()); 735 assert(item); 737 736 738 737 return find_duplicate(h, item, calc_node_hash(h, item), get_next(item->link)); … … 756 755 do { 757 756 cur = get_next(prev); 758 ASSERT(cur);757 assert(cur); 759 758 prev = cur->link; 760 759 } while (node_hash(h, cur) < search_hash); … … 771 770 772 771 cur = get_next(cur->link); 773 ASSERT(cur);772 assert(cur); 774 773 } 775 774 … … 791 790 marked_ptr_t old_head, size_t old_idx) 792 791 { 793 ASSERT(N_INVALID == get_mark(old_head));794 ASSERT(h->new_b);792 assert(N_INVALID == get_mark(old_head)); 793 assert(h->new_b); 795 794 796 795 size_t new_idx = calc_bucket_idx(hash, h->new_b->order); … … 904 903 * traversing search_head. 905 904 */ 906 ASSERT(N_JOIN & get_mark(get_next(old_head)->link));905 assert(N_JOIN & get_mark(get_next(old_head)->link)); 907 906 return search_bucket(h, old_head, key, hash); 908 907 } … … 914 913 * sure all cpus see that the new table replaced the old one. 915 914 */ 916 ASSERT(h->b->order == h->new_b->order);915 assert(h->b->order == h->new_b->order); 917 916 /* 918 917 * The resizer must ensure all new bucket heads are visible before 919 918 * replacing the old table. 
920 919 */ 921 ASSERT(N_NORMAL == get_mark(new_head));920 assert(N_NORMAL == get_mark(new_head)); 922 921 return search_bucket(h, new_head, key, hash); 923 922 } … … 962 961 bool cht_insert_unique(cht_t *h, cht_link_t *item, cht_link_t **dup_item) 963 962 { 964 ASSERT(rcu_read_locked());965 ASSERT(dup_item);963 assert(rcu_read_locked()); 964 assert(dup_item); 966 965 return insert_impl(h, item, dup_item); 967 966 } … … 1061 1060 return ret == make_link(wnd->cur, jf_mark); 1062 1061 } else { 1063 ASSERT(walk_mode == WM_LEAVE_JOIN);1062 assert(walk_mode == WM_LEAVE_JOIN); 1064 1063 1065 1064 item->link = make_link(wnd->cur, N_NORMAL); … … 1088 1087 cht_link_t *cur, cht_link_t **dup_item) 1089 1088 { 1090 ASSERT(cur);1091 ASSERT(cur == &sentinel || hash <= node_hash(h, cur)1089 assert(cur); 1090 assert(cur == &sentinel || hash <= node_hash(h, cur) 1092 1091 || node_hash(h, cur) == h->invalid_hash); 1093 1092 … … 1111 1110 cht_link_t *start) 1112 1111 { 1113 ASSERT(hash <= node_hash(h, start) || h->invalid_hash == node_hash(h, start));1112 assert(hash <= node_hash(h, start) || h->invalid_hash == node_hash(h, start)); 1114 1113 1115 1114 cht_link_t *cur = start; 1116 1115 1117 1116 try_again: 1118 ASSERT(cur);1117 assert(cur); 1119 1118 1120 1119 while (node_hash(h, cur) == hash) { 1121 ASSERT(cur != &sentinel);1120 assert(cur != &sentinel); 1122 1121 1123 1122 bool deleted = (N_DELETED & get_mark(cur->link)); … … 1128 1127 1129 1128 cur = get_next(cur->link); 1130 ASSERT(cur);1129 assert(cur); 1131 1130 } 1132 1131 … … 1143 1142 size_t cht_remove_key(cht_t *h, void *key) 1144 1143 { 1145 ASSERT(h);1144 assert(h); 1146 1145 1147 1146 size_t hash = calc_key_hash(h, key); … … 1164 1163 bool cht_remove_item(cht_t *h, cht_link_t *item) 1165 1164 { 1166 ASSERT(h);1167 ASSERT(item);1165 assert(h); 1166 assert(item); 1168 1167 /* Otherwise a concurrent cht_remove_key might free the item. 
*/ 1169 ASSERT(rcu_read_locked());1168 assert(rcu_read_locked()); 1170 1169 1171 1170 /* … … 1263 1262 bool *deleted_but_gc, bool *resizing) 1264 1263 { 1265 ASSERT(wnd->cur && wnd->cur != &sentinel);1264 assert(wnd->cur && wnd->cur != &sentinel); 1266 1265 1267 1266 *deleted_but_gc = false; … … 1293 1292 bool *resizing) 1294 1293 { 1295 ASSERT(cur && cur != &sentinel);1294 assert(cur && cur != &sentinel); 1296 1295 1297 1296 /* … … 1311 1310 } 1312 1311 } else { 1313 STATIC_ASSERT(N_JOIN == N_JOIN_FOLLOWS);1312 static_assert(N_JOIN == N_JOIN_FOLLOWS, ""); 1314 1313 1315 1314 /* Keep the N_JOIN/N_JOIN_FOLLOWS mark but strip N_DELETED. */ … … 1330 1329 bool *resizing) 1331 1330 { 1332 ASSERT(wnd->cur != &sentinel);1333 ASSERT(wnd->cur && (N_DELETED & get_mark(wnd->cur->link)));1331 assert(wnd->cur != &sentinel); 1332 assert(wnd->cur && (N_DELETED & get_mark(wnd->cur->link))); 1334 1333 1335 1334 cht_link_t *next = get_next(wnd->cur->link); … … 1337 1336 if (walk_mode == WM_LEAVE_JOIN) { 1338 1337 /* Never try to unlink join nodes. */ 1339 ASSERT(!(N_JOIN & get_mark(wnd->cur->link)));1338 assert(!(N_JOIN & get_mark(wnd->cur->link))); 1340 1339 1341 1340 mark_t pred_mark = get_mark(*wnd->ppred); … … 1349 1348 return false; 1350 1349 } else { 1351 ASSERT(walk_mode == WM_MOVE_JOIN_FOLLOWS || walk_mode == WM_NORMAL);1350 assert(walk_mode == WM_MOVE_JOIN_FOLLOWS || walk_mode == WM_NORMAL); 1352 1351 /* Move the JF mark if set. Clear DEL mark. */ 1353 1352 mark_t cur_mark = N_JOIN_FOLLOWS & get_mark(wnd->cur->link); … … 1399 1398 equal_pred_t pred, void *pred_arg, wnd_t *wnd, bool *resizing) 1400 1399 { 1401 ASSERT(wnd->cur);1400 assert(wnd->cur); 1402 1401 1403 1402 if (wnd->cur == &sentinel) … … 1416 1415 1417 1416 while (cur_hash <= hash) { 1418 ASSERT(wnd->cur && wnd->cur != &sentinel);1417 assert(wnd->cur && wnd->cur != &sentinel); 1419 1418 1420 1419 /* GC any deleted nodes on the way. 
*/ … … 1437 1436 if (cur_hash == h->invalid_hash) { 1438 1437 next_wnd(wnd); 1439 ASSERT(wnd->cur);1438 assert(wnd->cur); 1440 1439 goto try_again; 1441 1440 } … … 1470 1469 { 1471 1470 try_again: 1472 ASSERT(wnd->cur);1471 assert(wnd->cur); 1473 1472 1474 1473 while (node_hash(h, wnd->cur) < hash) { … … 1483 1482 } 1484 1483 1485 ASSERT(wnd->cur);1484 assert(wnd->cur); 1486 1485 } 1487 1486 … … 1499 1498 bool *resizing) 1500 1499 { 1501 ASSERT(N_DELETED & get_mark(wnd->cur->link));1500 assert(N_DELETED & get_mark(wnd->cur->link)); 1502 1501 1503 1502 /* Skip deleted JOIN nodes. */ … … 1506 1505 } else { 1507 1506 /* Ordinary deleted node or a deleted JOIN_FOLLOWS. */ 1508 ASSERT(walk_mode != WM_LEAVE_JOIN1507 assert(walk_mode != WM_LEAVE_JOIN 1509 1508 || !((N_JOIN | N_JOIN_FOLLOWS) & get_mark(wnd->cur->link))); 1510 1509 … … 1547 1546 * it 1548 1547 */ 1549 ASSERT(h->b->order > h->new_b->order);1550 ASSERT(wnd->cur);1548 assert(h->b->order > h->new_b->order); 1549 assert(wnd->cur); 1551 1550 1552 1551 /* Either we did not need the joining link or we have already followed it.*/ … … 1621 1620 /* The hash belongs to the moved bucket. */ 1622 1621 if (move_dest_idx == new_idx) { 1623 ASSERT(pmoved_head == pnew_head);1622 assert(pmoved_head == pnew_head); 1624 1623 /* 1625 1624 * move_head() makes the new head of the moved bucket visible. 1626 1625 * The new head may be marked with a JOIN_FOLLOWS 1627 1626 */ 1628 ASSERT(!(N_CONST & get_mark(*pmoved_head)));1627 assert(!(N_CONST & get_mark(*pmoved_head))); 1629 1628 *walk_mode = WM_MOVE_JOIN_FOLLOWS; 1630 1629 } else { 1631 ASSERT(pmoved_head != pnew_head);1630 assert(pmoved_head != pnew_head); 1632 1631 /* 1633 1632 * The hash belongs to the bucket that is the result of splitting … … 1644 1643 * JOIN_FOLLOWS in this part of split bucket. 
1645 1644 */ 1646 ASSERT(N_NORMAL == get_mark(*pnew_head));1645 assert(N_NORMAL == get_mark(*pnew_head)); 1647 1646 } 1648 1647 … … 1676 1675 1677 1676 /* move_head() or join_buckets() makes it so or makes the mark visible.*/ 1678 ASSERT(N_INVALID == get_mark(*pold_head));1677 assert(N_INVALID == get_mark(*pold_head)); 1679 1678 /* move_head() makes it visible. No JOIN_FOLLOWS used when shrinking. */ 1680 ASSERT(N_NORMAL == get_mark(*pnew_head));1679 assert(N_NORMAL == get_mark(*pnew_head)); 1681 1680 1682 1681 *walk_mode = WM_LEAVE_JOIN; … … 1686 1685 * readers to notice that the old table had been replaced. 1687 1686 */ 1688 ASSERT(b == h->new_b);1687 assert(b == h->new_b); 1689 1688 *walk_mode = WM_NORMAL; 1690 1689 } … … 1713 1712 { 1714 1713 /* Head move has to in progress already when calling this func. */ 1715 ASSERT(N_CONST & get_mark(*psrc_head));1714 assert(N_CONST & get_mark(*psrc_head)); 1716 1715 1717 1716 /* Head already moved. */ … … 1726 1725 } 1727 1726 1728 ASSERT(!(N_CONST & get_mark(*pdest_head)));1727 assert(!(N_CONST & get_mark(*pdest_head))); 1729 1728 } 1730 1729 … … 1757 1756 static void complete_head_move(marked_ptr_t *psrc_head, marked_ptr_t *pdest_head) 1758 1757 { 1759 ASSERT(N_JOIN_FOLLOWS != get_mark(*psrc_head));1760 ASSERT(N_CONST & get_mark(*psrc_head));1758 assert(N_JOIN_FOLLOWS != get_mark(*psrc_head)); 1759 assert(N_CONST & get_mark(*psrc_head)); 1761 1760 1762 1761 cht_link_t *next = get_next(*psrc_head); … … 1764 1763 DBG(marked_ptr_t ret = ) 1765 1764 cas_link(pdest_head, &sentinel, N_INVALID, next, N_NORMAL); 1766 ASSERT(ret == make_link(&sentinel, N_INVALID) || (N_NORMAL == get_mark(ret)));1765 assert(ret == make_link(&sentinel, N_INVALID) || (N_NORMAL == get_mark(ret))); 1767 1766 cas_order_barrier(); 1768 1767 1769 1768 DBG(ret = ) 1770 1769 cas_link(psrc_head, next, N_CONST, next, N_INVALID); 1771 ASSERT(ret == make_link(next, N_CONST) || (N_INVALID == get_mark(ret)));1770 assert(ret == make_link(next, N_CONST) || 
(N_INVALID == get_mark(ret))); 1772 1771 cas_order_barrier(); 1773 1772 } … … 1862 1861 DBG(marked_ptr_t ret = ) 1863 1862 cas_link(pdest_head, &sentinel, N_INVALID, wnd.cur, N_NORMAL); 1864 ASSERT(ret == make_link(&sentinel, N_INVALID) || (N_NORMAL == get_mark(ret)));1863 assert(ret == make_link(&sentinel, N_INVALID) || (N_NORMAL == get_mark(ret))); 1865 1864 cas_order_barrier(); 1866 1865 … … 1906 1905 1907 1906 /* Must not report that the table is resizing if WM_MOVE_JOIN_FOLLOWS.*/ 1908 ASSERT(!resizing);1907 assert(!resizing); 1909 1908 /* 1910 1909 * Mark the last node of the first half of the split bucket … … 2043 2042 DBG(marked_ptr_t ret = ) 2044 2043 cas_link(psrc_head, join_node, N_CONST, join_node, N_INVALID); 2045 ASSERT(ret == make_link(join_node, N_CONST) || (N_INVALID == get_mark(ret)));2044 assert(ret == make_link(join_node, N_CONST) || (N_INVALID == get_mark(ret))); 2046 2045 cas_order_barrier(); 2047 2046 … … 2074 2073 continue; 2075 2074 2076 ASSERT(!resizing);2075 assert(!resizing); 2077 2076 2078 2077 if (wnd.cur != &sentinel) { 2079 2078 /* Must be from the new appended bucket. */ 2080 ASSERT(split_hash <= node_hash(h, wnd.cur)2079 assert(split_hash <= node_hash(h, wnd.cur) 2081 2080 || h->invalid_hash == node_hash(h, wnd.cur)); 2082 2081 return; … … 2097 2096 static void free_later(cht_t *h, cht_link_t *item) 2098 2097 { 2099 ASSERT(item != &sentinel);2098 assert(item != &sentinel); 2100 2099 2101 2100 /* … … 2157 2156 2158 2157 #ifdef CONFIG_DEBUG 2159 ASSERT(h->b);2158 assert(h->b); 2160 2159 /* Make resize_reqs visible. */ 2161 2160 read_barrier(); 2162 ASSERT(0 < atomic_get(&h->resize_reqs));2161 assert(0 < atomic_get(&h->resize_reqs)); 2163 2162 #endif 2164 2163 … … 2337 2336 /* Set the invalid joinee head to NULL. 
*/ 2338 2337 if (old_idx != move_src_idx) { 2339 ASSERT(N_INVALID == get_mark(h->b->head[old_idx]));2338 assert(N_INVALID == get_mark(h->b->head[old_idx])); 2340 2339 2341 2340 if (&sentinel != get_next(h->b->head[old_idx])) … … 2393 2392 marked_ptr_t *new_head) 2394 2393 { 2395 ASSERT(join_node != &sentinel);2396 ASSERT(join_node && (N_JOIN & get_mark(join_node->link)));2394 assert(join_node != &sentinel); 2395 assert(join_node && (N_JOIN & get_mark(join_node->link))); 2397 2396 2398 2397 bool done; … … 2410 2409 /* Done if the mark was cleared. Retry if a new node was inserted. */ 2411 2410 done = (ret == jn_link); 2412 ASSERT(ret == jn_link || (get_mark(ret) & N_JOIN));2411 assert(ret == jn_link || (get_mark(ret) & N_JOIN)); 2413 2412 } while (!done); 2414 2413 … … 2433 2432 join_node, &wnd, &resizing); 2434 2433 2435 ASSERT(!resizing);2434 assert(!resizing); 2436 2435 } while (!done); 2437 2436 } … … 2440 2439 static void cleanup_join_follows(cht_t *h, marked_ptr_t *new_head) 2441 2440 { 2442 ASSERT(new_head);2441 assert(new_head); 2443 2442 2444 2443 rcu_read_lock(); … … 2465 2464 /* GC any deleted nodes on the way - even deleted JOIN_FOLLOWS. */ 2466 2465 if (N_DELETED & get_mark(*cur_link)) { 2467 ASSERT(cur_link != new_head);2468 ASSERT(wnd.ppred && wnd.cur && wnd.cur != &sentinel);2469 ASSERT(cur_link == &wnd.cur->link);2466 assert(cur_link != new_head); 2467 assert(wnd.ppred && wnd.cur && wnd.cur != &sentinel); 2468 assert(cur_link == &wnd.cur->link); 2470 2469 2471 2470 bool dummy; … … 2485 2484 cas_link(cur_link, next, N_JOIN_FOLLOWS, &sentinel, N_NORMAL); 2486 2485 2487 ASSERT(next == &sentinel2486 assert(next == &sentinel 2488 2487 || ((N_JOIN | N_JOIN_FOLLOWS) & get_mark(ret))); 2489 2488 … … 2506 2505 2507 2506 /* We must encounter a JF node before we reach the end of the bucket. 
*/ 2508 ASSERT(wnd.cur && wnd.cur != &sentinel);2507 assert(wnd.cur && wnd.cur != &sentinel); 2509 2508 cur_link = &wnd.cur->link; 2510 2509 } … … 2520 2519 static inline size_t calc_split_hash(size_t split_idx, size_t order) 2521 2520 { 2522 ASSERT(1 <= order && order <= 8 * sizeof(size_t));2521 assert(1 <= order && order <= 8 * sizeof(size_t)); 2523 2522 return split_idx << (8 * sizeof(size_t) - order); 2524 2523 } … … 2527 2526 static inline size_t calc_bucket_idx(size_t hash, size_t order) 2528 2527 { 2529 ASSERT(1 <= order && order <= 8 * sizeof(size_t));2528 assert(1 <= order && order <= 8 * sizeof(size_t)); 2530 2529 return hash >> (8 * sizeof(size_t) - order); 2531 2530 } … … 2559 2558 static inline size_t node_hash(cht_t *h, const cht_link_t *item) 2560 2559 { 2561 ASSERT(item->hash == h->invalid_hash2560 assert(item->hash == h->invalid_hash 2562 2561 || item->hash == sentinel.hash 2563 2562 || item->hash == calc_node_hash(h, item)); … … 2569 2568 static inline size_t calc_node_hash(cht_t *h, const cht_link_t *item) 2570 2569 { 2571 ASSERT(item != &sentinel);2570 assert(item != &sentinel); 2572 2571 /* 2573 2572 * Clear the lowest order bit in order for sentinel's node hash … … 2588 2587 marked_ptr_t ptr = (marked_ptr_t) next; 2589 2588 2590 ASSERT(!(ptr & N_MARK_MASK));2591 ASSERT(!((unsigned)mark & ~N_MARK_MASK));2589 assert(!(ptr & N_MARK_MASK)); 2590 assert(!((unsigned)mark & ~N_MARK_MASK)); 2592 2591 2593 2592 return ptr | mark; … … 2609 2608 static inline void next_wnd(wnd_t *wnd) 2610 2609 { 2611 ASSERT(wnd);2612 ASSERT(wnd->cur);2610 assert(wnd); 2611 assert(wnd->cur); 2613 2612 2614 2613 wnd->last = wnd->cur; … … 2636 2635 marked_ptr_t new) 2637 2636 { 2638 ASSERT(link != &sentinel.link);2637 assert(link != &sentinel.link); 2639 2638 /* 2640 2639 * cas(x) on the same location x on one cpu must be ordered, but do not
Note: See TracChangeset for help on using the changeset viewer.