Changeset 14c9aa6 in mainline
- Timestamp: 2012-07-27T13:40:19Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 0949b7a
- Parents: 4ec9ea41
- Location: kernel
- Files: 4 edited
Legend:
- Unmodified: shown as unprefixed context lines
- Added: prefixed with +
- Removed: prefixed with -
kernel/Makefile
r4ec9ea41 → r14c9aa6

     generic/src/adt/bitmap.c \
     generic/src/adt/btree.c \
+    generic/src/adt/cht.c \
     generic/src/adt/hash_table.c \
     generic/src/adt/list.c \
kernel/generic/include/adt/cht.h
r4ec9ea41 → r14c9aa6

 #include <synch/rcu.h>
 #include <macros.h>
+#include <synch/workqueue.h>

 typedef uintptr_t cht_ptr_t;
…
 /** Set of operations for a concurrent hash table. */
 typedef struct cht_ops {
-    size_t (*hash)(cht_link_t *node);
+    size_t (*hash)(const cht_link_t *item);
     size_t (*key_hash)(void *key);
     bool (*equal)(const cht_link_t *item1, const cht_link_t *item2);
…
     cht_ops_t *op;

+    size_t min_order;
     cht_buckets_t *b;
     cht_buckets_t *new_b;
+
+    work_t resize_work;
+    atomic_t resize_reqs;

-    atomic_t resize_reqs;
     atomic_t item_cnt;
 } cht_t;
…
 #define cht_read_unlock() rcu_read_unlock()

-extern void cht_create(cht_t *h, size_t init_size, cht_ops_t *op);
+extern bool cht_create(cht_t *h, size_t init_size, size_t min_size, cht_ops_t *op);
 extern void cht_destroy(cht_t *h);

 extern cht_link_t *cht_find(cht_t *h, void *key);
 extern cht_link_t *cht_find_lazy(cht_t *h, void *key);
+extern cht_link_t *cht_find_next(cht_t *h, const cht_link_t *item);
+extern cht_link_t *cht_find_next_lazy(cht_t *h, const cht_link_t *item);
+
 extern void cht_insert(cht_t *h, cht_link_t *item);
 extern bool cht_insert_unique(cht_t *h, cht_link_t *item);
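Taken together, the header changes switch cht_create() to report allocation failure, add a caller-controlled minimum size, hand resizing to a work queue (resize_work, resize_reqs), and constify the hash operation. A minimal caller-side sketch of the revised interface follows; the val_t type and its operations are hypothetical, and only the three operations visible in this hunk are filled in:

    typedef struct {
        uint32_t key;
        cht_link_t link;    /* Intrusive link embedded in each item. */
    } val_t;

    static size_t val_hash(const cht_link_t *item)
    {
        /* Mix the skewed key bits; hash_mix32() is from <adt/hash.h>. */
        return hash_mix32(member_to_inst(item, val_t, link)->key);
    }

    static size_t val_key_hash(void *key)
    {
        return hash_mix32(*(uint32_t *) key);
    }

    static bool val_equal(const cht_link_t *item1, const cht_link_t *item2)
    {
        return member_to_inst(item1, val_t, link)->key ==
            member_to_inst(item2, val_t, link)->key;
    }

    /* Operations elided from the hunk above are likewise left out here. */
    static cht_ops_t val_ops = {
        .hash = val_hash,
        .key_hash = val_key_hash,
        .equal = val_equal,
    };

    static cht_t val_table;

    static bool val_table_init(void)
    {
        /* New signature: returns false on failure. With init_size and
         * min_size both 0, size_to_order() in cht.c falls back to
         * CHT_MIN_ORDER. */
        return cht_create(&val_table, 0, 0, &val_ops);
    }

Since cht_link_t is intrusive, the operations recover the enclosing item with member_to_inst(), the same helper resize_table() uses below to get from a work_t back to its cht_t.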
kernel/generic/include/adt/hash.h
r4ec9ea41 → r14c9aa6

 #include <stdint.h>

-/** Produces a uniform hash affecting all output bits from the skewed input.
- */
+/** Produces a uniform hash affecting all output bits from the skewed input. */
 static inline uint32_t hash_mix32(uint32_t hash)
 {
…
 }

-/** Produces a uniform hash affecting all output bits from the skewed input.
- */
+/** Produces a uniform hash affecting all output bits from the skewed input. */
 static inline uint64_t hash_mix64(uint64_t hash)
 {
…
     hash = hash * 0x27d4eb2d;
     hash = hash ^ (hash >> 15);
+    return hash;
 }

-/** Produces a uniform hash affecting all output bits from the skewed input.
- */
+/** Produces a uniform hash affecting all output bits from the skewed input. */
 static inline size_t hash_mix(size_t hash)
 {
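Besides joining the doc comments onto single lines, this hunk fixes a genuine bug: hash_mix64() previously fell off the end without returning the mixed value, so callers received an indeterminate result. A small hypothetical helper showing the intended use of the mix functions, reducing a skewed 64-bit key to one of 2^order bucket indices:

    /* Hypothetical helper; assumes <adt/hash.h> (which pulls in stdint.h). */
    static inline size_t bucket_idx_of(uint64_t key, size_t order)
    {
        /* hash_mix64() now actually returns the mixed value. */
        uint64_t mixed = hash_mix64(key);
        /* After mixing, even the low-order bits are uniformly distributed. */
        return (size_t) (mixed & ((UINT64_C(1) << order) - 1));
    }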
kernel/generic/src/adt/cht.c
r4ec9ea41 → r14c9aa6

  */

+
 /** @addtogroup genericadt
  * @{
…
 /**
  * @file
- * @brief Concurrent resizable lock-free hash table.
+ * @brief Scalable resizable concurrent lock-free hash table.
  *
  */

 #include <adt/cht.h>
+#include <adt/hash.h>
 #include <debug.h>
 #include <memstr.h>
 #include <mm/slab.h>
-#include <barrier.h>
+#include <arch/barrier.h>
 #include <compiler/barrier.h>
 #include <atomic.h>
 #include <synch/rcu.h>

-/* Logarithm of the min bucket count. */
+
+/* Logarithm of the min bucket count. 2^6 == 64 buckets. */
 #define CHT_MIN_ORDER 6
 /* Logarithm of the max bucket count. */
…
     N_NORMAL = 0,
     N_DELETED = 1,
-    N_INVALID = 1,
-    N_CONST = 3,
+    N_CONST = 1,
+    N_INVALID = 3,
     N_JOIN = 2,
     N_JOIN_FOLLOWS = 2,
…
 } wnd_t;

-
-static size_t size_to_order(size_t bucket_cnt);
-static cht_buckets_t *alloc_buckets(size_t order);
-
-static marked_ptr_t make_link(cht_link_t *next, mark_t mark);
+/* Fwd decl. */
+static size_t size_to_order(size_t bucket_cnt, size_t min_order);
+static cht_buckets_t *alloc_buckets(size_t order, bool set_invalid);
+static cht_link_t *search_bucket(cht_t *h, marked_ptr_t head, void *key,
+    size_t search_hash);
+static cht_link_t *find_resizing(cht_t *h, void *key, size_t hash,
+    marked_ptr_t old_head, size_t old_idx);
+static bool insert_impl(cht_t *h, cht_link_t *item, bool unique);
+static bool insert_at(cht_link_t *item, const wnd_t *wnd, walk_mode_t walk_mode,
+    bool *resizing);
+static bool has_duplicates(cht_t *h, const cht_link_t *item, size_t hash,
+    const wnd_t *cwnd);
+static cht_link_t *find_duplicate(cht_t *h, const cht_link_t *item, size_t hash,
+    cht_link_t *start);
+static bool remove_pred(cht_t *h, size_t hash, equal_pred_t pred, void *pred_arg);
+static bool delete_at(cht_t *h, wnd_t *wnd, walk_mode_t walk_mode,
+    bool *deleted_but_gc, bool *resizing);
+static bool mark_deleted(cht_link_t *cur, walk_mode_t walk_mode, bool *resizing);
+static bool unlink_from_pred(wnd_t *wnd, walk_mode_t walk_mode, bool *resizing);
+static bool find_wnd_and_gc_pred(cht_t *h, size_t hash, walk_mode_t walk_mode,
+    equal_pred_t pred, void *pred_arg, wnd_t *wnd, bool *resizing);
+static bool find_wnd_and_gc(cht_t *h, size_t hash, walk_mode_t walk_mode,
+    wnd_t *wnd, bool *resizing);
+static bool gc_deleted_node(cht_t *h, walk_mode_t walk_mode, wnd_t *wnd,
+    bool *resizing);
+static bool join_completed(cht_t *h, const wnd_t *wnd);
+static void upd_resizing_head(cht_t *h, size_t hash, marked_ptr_t **phead,
+    bool *join_finishing, walk_mode_t *walk_mode);
+static void item_removed(cht_t *h);
+static void item_inserted(cht_t *h);
+static void free_later(cht_t *h, cht_link_t *item);
+static void help_head_move(marked_ptr_t *psrc_head, marked_ptr_t *pdest_head);
+static void start_head_move(marked_ptr_t *psrc_head);
+static void mark_const(marked_ptr_t *psrc_head);
+static void complete_head_move(marked_ptr_t *psrc_head, marked_ptr_t *pdest_head);
+static void split_bucket(cht_t *h, marked_ptr_t *psrc_head,
+    marked_ptr_t *pdest_head, size_t split_hash);
+static void mark_join_follows(cht_t *h, marked_ptr_t *psrc_head,
+    size_t split_hash, wnd_t *wnd);
+static void mark_join_node(cht_link_t *join_node);
+static void join_buckets(cht_t *h, marked_ptr_t *psrc_head,
+    marked_ptr_t *pdest_head, size_t split_hash);
+static void link_to_join_node(cht_t *h, marked_ptr_t *pdest_head,
+    cht_link_t *join_node, size_t split_hash);
+static void resize_table(work_t *arg);
+static void grow_table(cht_t *h);
+static void shrink_table(cht_t *h);
+static void cleanup_join_node(cht_t *h, marked_ptr_t *new_head);
+static void clear_join_and_gc(cht_t *h, cht_link_t *join_node,
+    marked_ptr_t *new_head);
+static void cleanup_join_follows(cht_t *h, marked_ptr_t *new_head);
+static marked_ptr_t make_link(const cht_link_t *next, mark_t mark);
 static cht_link_t *get_next(marked_ptr_t link);
 static mark_t get_mark(marked_ptr_t link);
 static void next_wnd(wnd_t *wnd);
+static bool same_node_pred(void *node, const cht_link_t *item2);
 static size_t key_hash(cht_t *h, void *key);
 static size_t node_hash(cht_t *h, const cht_link_t *item);
-
 static size_t calc_split_hash(size_t split_idx, size_t order);
 static size_t calc_bucket_idx(size_t hash, size_t order);
+static size_t grow_to_split_idx(size_t old_idx);
 static size_t grow_idx(size_t idx);
 static size_t shrink_idx(size_t idx);
-
-
-
-bool cht_create(cht_t *h, size_t init_size, cht_ops_t *op)
+static marked_ptr_t cas_link(marked_ptr_t *link, const cht_link_t *cur_next,
+    mark_t cur_mark, const cht_link_t *new_next, mark_t new_mark);
+static marked_ptr_t _cas_link(marked_ptr_t *link, marked_ptr_t cur,
+    marked_ptr_t new);
+static void cas_order_barrier(void);
+
+
+bool cht_create(cht_t *h, size_t init_size, size_t min_size, cht_ops_t *op)
 {
     ASSERT(h);
…
         return false;

-    size_t order = size_to_order(init_size);
-
-    h->b = alloc_buckets(order);
+    size_t min_order = size_to_order(min_size, CHT_MIN_ORDER);
+    size_t order = size_to_order(init_size, min_order);
+
+    h->b = alloc_buckets(order, false);

     if (!h->b)
         return false;

+    h->min_order = min_order;
     h->new_b = 0;
     h->op = op;
…
 {
     size_t bucket_cnt = (1 << order);
-    cht_buckets_t *b = malloc(
-        sizeof(cht_buckets_t) + (bucket_cnt - 1) * sizeof(marked_ptr_t));
+    size_t bytes =
+        sizeof(cht_buckets_t) + (bucket_cnt - 1) * sizeof(marked_ptr_t);
+    cht_buckets_t *b = malloc(bytes, FRAME_ATOMIC);

     if (!b)
…
 }

-static size_t size_to_order(size_t bucket_cnt)
-{
-    size_t order = CHT_MIN_ORDER;
+static size_t size_to_order(size_t bucket_cnt, size_t min_order)
+{
+    size_t order = min_order;

     /* Find a power of two such that bucket_cnt <= 2^order */
     do {
-        if (bucket_cnt <= (1 << order))
+        if (bucket_cnt <= ((size_t)1 << order))
             return order;
…
 void cht_destroy(cht_t *h)
 {
-    /* todo: impl */
+    /* Wait for resize to complete. */
+    while (0 < atomic_get(&h->resize_reqs)) {
+        rcu_barrier();
+    }
+
+    /* Wait for all remove_callback()s to complete. */
+    rcu_barrier();
+
+    free(h->b);
+    h->b = 0;
 }

 cht_link_t *cht_find(cht_t *h, void *key)
 {
-    /* Make the most recent changes of the table visible. */
+    /* Make the most recent changes to the table visible. */
     read_barrier();
     return cht_find_lazy(h, key);
…
 }

+cht_link_t *cht_find_next(cht_t *h, const cht_link_t *item)
+{
+    /* Make the most recent changes to the table visible. */
+    read_barrier();
+    return cht_find_next_lazy(h, item);
+}
+
+cht_link_t *cht_find_next_lazy(cht_t *h, const cht_link_t *item)
+{
+    ASSERT(h);
+    ASSERT(rcu_read_locked());
+    ASSERT(item);
+
+    return find_duplicate(h, item, node_hash(h, item), get_next(item->link));
+}

 static cht_link_t *search_bucket(cht_t *h, marked_ptr_t head, void *key,
…
      * may find by following the next pointers is allocated.
      */
-    size_t cur_hash = node_hash(cur);
+    size_t cur_hash = node_hash(h, cur);

     if (cur_hash >= search_hash) {
…
 void cht_insert(cht_t *h, cht_link_t *item)
 {
+    insert_impl(h, item, false);
+}
+
+bool cht_insert_unique(cht_t *h, cht_link_t *item)
+{
     return insert_impl(h, item, true);
 }

-bool cht_insert_unique(cht_t *h, cht_link_t *item)
-{
-    insert_impl(h, item, false);
-}
-
-bool insert_impl(cht_t *h, cht_link_t *item, bool unique)
+static bool insert_impl(cht_t *h, cht_link_t *item, bool unique)
 {
     rcu_read_lock();
…
         /* The table is resizing. Get the correct bucket head. */
         if (resizing) {
-            upd_resizing_head(hash, &phead, &join_finishing, &walk_mode);
+            upd_resizing_head(h, hash, &phead, &join_finishing, &walk_mode);
         }
…
         }

-        if (unique && has_duplicates(h, item, hash, wnd)) {
+        if (unique && has_duplicates(h, item, hash, &wnd)) {
             rcu_read_unlock();
             return false;
         }

-        inserted = insert_at(item, wnd, walk_mode, &resizing);
+        inserted = insert_at(item, &wnd, walk_mode, &resizing);
     } while (!inserted);

+    rcu_read_unlock();
+
     item_inserted(h);
-
-    rcu_read_unlock();
     return true;
 }
…
         return true;
     } else {
+        /* This includes an invalid head but not a const head. */
         *resizing = ((N_JOIN_FOLLOWS | N_JOIN) & get_mark(ret));
         return false;
     }
…
 }

-static bool has_duplicates(cht_t *h, cht_link_t *item, size_t hash,
-    const wnd_t *cwnd)
+static bool has_duplicates(cht_t *h, const cht_link_t *item, size_t hash,
+    const wnd_t *wnd)
 {
     ASSERT(0 == wnd->cur || hash <= node_hash(h, wnd->cur));
…
      */
     read_barrier();
-
-    cht_link_t *cur = wnd->cur;
-
-    do {
+    return 0 != find_duplicate(h, item, hash, wnd->cur);
+}
+
+static cht_link_t *find_duplicate(cht_t *h, const cht_link_t *item, size_t hash,
+    cht_link_t *start)
+{
+    ASSERT(0 == start || hash <= node_hash(h, start));
+
+    cht_link_t *cur = start;
+
+    while (cur && node_hash(h, cur) == hash) {
         bool deleted = (N_DELETED & get_mark(cur->link));

         /* Skip logically deleted nodes. */
         if (!deleted && h->op->equal(item, cur))
-            return true;
+            return cur;

         cur = get_next(cur->link);
-    } while (cur && node_hash(h, cur) == hash);
-
-    return false;
-}
+    }
+
+    return 0;
+}

 size_t cht_remove_key(cht_t *h, void *key)
…
         /* The table is resizing. Get the correct bucket head. */
         if (resizing) {
-            upd_resizing_head(hash, &phead, &join_finishing, &walk_mode);
+            upd_resizing_head(h, hash, &phead, &join_finishing, &walk_mode);
         }
…
         }

-        deleted = delete_at(wnd, walk_mode, &deleted_but_gc, &resizing);
+        deleted = delete_at(h, &wnd, walk_mode, &deleted_but_gc, &resizing);
     } while (!deleted || deleted_but_gc);
…
     if (walk_mode == WM_LEAVE_JOIN && (N_JOIN & get_mark(wnd->cur->link)))
         return true;
+
+    cas_order_barrier();

     if (unlink_from_pred(wnd, walk_mode, resizing)) {
…
     if (walk_mode == WM_NORMAL) {
         /* Only mark clean/normal nodes - JF/JN is used only during resize. */
-        marked_ptr_t normal_link = make_link(next, N_NORMAL);
-        marked_ptr_t del_link = make_link(next, N_DELETED);
-
-        marked_ptr_t ret = cas_link(&cur->link, normal_link, del_link);
-
-        if (normal_link != ret) {
-            *resizing = (N_JOIN | N_JOIN_FOLLOWS | N_INVALID) & get_mark(ret);
+        marked_ptr_t ret = cas_link(&cur->link, next, N_NORMAL, next, N_DELETED);
+
+        if (ret != make_link(next, N_NORMAL)) {
+            *resizing = (N_JOIN | N_JOIN_FOLLOWS) & get_mark(ret);
             return false;
         }
…
         mark_t cur_mark = get_mark(cur->link) & N_JOIN_FOLLOWS;

-        marked_ptr_t nondel_link = make_link(next, cur_mark);
-        marked_ptr_t del_link = make_link(next, cur_mark | N_DELETED);
-
-        if (nondel_link != cas_link(&cur->link, nondel_link, del_link))
+        marked_ptr_t ret =
+            cas_link(&cur->link, next, cur_mark, next, cur_mark | N_DELETED);
+
+        if (ret != make_link(next, cur_mark))
             return false;
     }
…

         mark_t pred_mark = get_mark(*wnd->ppred);
-        /* Succeed only of the predecessor is clean/normal or a join node. */
+        /* Succeed only if the predecessor is clean/normal or a join node. */
         mark_t exp_pred_mark = (N_JOIN & pred_mark) ? pred_mark : N_NORMAL;
…
         marked_ptr_t next_link = make_link(next, exp_pred_mark);

-        if (pred_link != cas_link(wnd->ppred, pred_link, next_link))
+        if (pred_link != _cas_link(wnd->ppred, pred_link, next_link))
             return false;
     } else {
…
         marked_ptr_t next_link = make_link(next, cur_mark);

-        marked_ptr_t ret = cas_link(wnd->ppred, pred_link, next_link);
+        marked_ptr_t ret = _cas_link(wnd->ppred, pred_link, next_link);

         if (pred_link != ret) {
…

 static void upd_resizing_head(cht_t *h, size_t hash, marked_ptr_t **phead,
-    bool *join_finishing, bool *walk_mode)
+    bool *join_finishing, walk_mode_t *walk_mode)
 {
     cht_buckets_t *b = rcu_access(h->b);
…

         /* Complete moving the bucket from the old to the new table. */
-        move_head(pold_head, pmoved_head);
+        help_head_move(pold_head, pmoved_head);

         /* The hash belongs to the moved bucket. */
         if (move_dest_idx == new_idx) {
+            ASSERT(pmoved_head == pnew_head);
             /*
              * move_head() makes the new head of the moved bucket visible.
              * The new head may be marked with a JOIN_FOLLOWS
              */
-            ASSERT(!(N_CONST & get_mark(*pnew_head)));
+            ASSERT(!(N_CONST & get_mark(*pmoved_head)));
             *walk_mode = WM_MOVE_JOIN_FOLLOWS;
         } else {
+            ASSERT(pmoved_head != pnew_head);
             /*
              * The hash belongs to the bucket that is the result of splitting
…
             if (N_NORMAL != get_mark(*pnew_head)) {
                 size_t split_hash = calc_split_hash(new_idx, h->new_b->order);
-                split_bucket(pmoved_head, pnew_head, split_hash);
+                split_bucket(h, pmoved_head, pnew_head, split_hash);
                 /*
                  * split_bucket() makes the new head visible. No
…
          * Makes a valid pnew_head visible if already moved.
          */
-        move_head(&b->head[move_src_idx], pnew_head);
+        help_head_move(&b->head[move_src_idx], pnew_head);

         /* Hash belongs to the bucket to be joined with the moved bucket. */
…
         if (N_INVALID != get_mark(*pold_head)) {
             size_t split_hash = calc_split_hash(old_idx, b->order);
-            join_buckets(pold_head, pnew_head, split_hash);
+            join_buckets(h, pold_head, pnew_head, split_hash);
         }
…
 {
     start_head_move(psrc_head);
+    cas_order_barrier();
     complete_head_move(psrc_head, pdest_head);
 }
…
             /* Make the move visible on this cpu. */
             read_barrier();
-            ASSERT(!(N_CONST & get_mark(*pdest_head)));
         }
     } else {
         complete_head_move(psrc_head, pdest_head);
     }
+
+    ASSERT(!(N_CONST & get_mark(*pdest_head)));
 }
…

     cht_link_t *next = get_next(*psrc_head);
-    /* todo: cas order barrier */
-    cas_link(pdest_head, 0, N_INVALID, next, N_NORMAL);
-    /* todo: cas order barrier */
-    cas_link(psrc_head, next, N_CONST, next, N_INVALID);
+    marked_ptr_t ret;
+
+    ret = cas_link(pdest_head, 0, N_INVALID, next, N_NORMAL);
+    ASSERT(ret == make_link(0, N_INVALID) || (N_NORMAL == get_mark(ret)));
+    cas_order_barrier();
+
+    ret = cas_link(psrc_head, next, N_CONST, next, N_INVALID);
+    ASSERT(ret == make_link(next, N_CONST) || (N_INVALID == get_mark(ret)));
+    cas_order_barrier();
 }
…
      */
     wnd_t wnd;
-    bool done;

     rcu_read_lock();
…
     /* Mark the last node of the first part of the split bucket as JF. */
     mark_join_follows(h, psrc_head, split_hash, &wnd);
-
-    /* todo: cas order barrier */
+    cas_order_barrier();

     /* There are nodes in the dest bucket, ie the second part of the split. */
…
          */
         mark_join_node(wnd.cur);
+        cas_order_barrier();
     } else {
         /*
…

     /* Link the dest head to the second part of the split. */
-    cas_link(pdest_head, 0, N_INVALID, wnd.cur, N_NORMAL);
+    marked_ptr_t ret = cas_link(pdest_head, 0, N_INVALID, wnd.cur, N_NORMAL);
+    ASSERT(ret == make_link(0, N_INVALID) || (N_NORMAL == get_mark(ret)));
+    cas_order_barrier();

     rcu_read_unlock();
…
     bool done;
     do {
-        bool dummy;
+        bool resizing = false;
         wnd->ppred = psrc_head;
         wnd->cur = get_next(*psrc_head);
…
          * the second part of the split bucket. Retry if GC failed.
          */
-        if (!find_wnd_and_gc(h, split_hash, WM_MOVE_JOIN_FOLLOWS, wnd, &dummy))
+        if (!find_wnd_and_gc(h, split_hash, WM_MOVE_JOIN_FOLLOWS, wnd, &resizing))
             continue;

+        /* Must not report that the table is resizing if WM_MOVE_JOIN_FOLLOWS.*/
+        ASSERT(!resizing);
         /*
          * Mark the last node of the first half of the split bucket
…
             = cas_link(wnd->ppred, wnd->cur, N_NORMAL, wnd->cur, N_JOIN_FOLLOWS);

-        /* Successfully marked as a JF node or already marked that way. */
+        /*
+         * Successfully marked as a JF node or already marked that way (even
+         * if also marked deleted - unlinking the node will move the JF mark).
+         */
         done = (ret == make_link(wnd->cur, N_NORMAL))
             || (N_JOIN_FOLLOWS & get_mark(ret));
…
     bool done;
     do {
-        cht_link_t *next = get_next(*join_node);
-        mark_t mark = get_mark(*join_node);
+        cht_link_t *next = get_next(join_node->link);
+        mark_t mark = get_mark(join_node->link);

         /*
…
     rcu_read_lock();

-    /* Mark src_head immutable - signals updaters bucket join started. */
+    /* Mark src_head immutable - signals updaters that bucket join started. */
     mark_const(psrc_head);
-    /* todo: cas order barrier */
+    cas_order_barrier();

     cht_link_t *join_node = get_next(*psrc_head);
…
     if (join_node) {
         mark_join_node(join_node);
-        /* todo: cas order barrier */
+        cas_order_barrier();

         link_to_join_node(h, pdest_head, join_node, split_hash);
-        /* todo: cas order barrier */
+        cas_order_barrier();
     }

-    cas_link(psrc_head, join_node, N_CONST, join_node, N_INVALID);
+    marked_ptr_t ret =
+        cas_link(psrc_head, join_node, N_CONST, join_node, N_INVALID);
+    ASSERT(ret == make_link(join_node, N_CONST) || (N_INVALID == get_mark(ret)));
+    cas_order_barrier();

     rcu_read_unlock();
…
         };

-        bool dummy;
-
-        if (!find_wnd_and_gc(h, split_hash, WM_LEAVE_JOIN, &wnd, &dummy))
+        bool resizing = false;
+
+        if (!find_wnd_and_gc(h, split_hash, WM_LEAVE_JOIN, &wnd, &resizing))
             continue;

+        ASSERT(!resizing);
+
         if (wnd.cur) {
             /* Must be from the new appended bucket. */
…
 static void item_removed(cht_t *h)
 {
-    /* todo: impl */
+    size_t items = (size_t) atomic_predec(&h->item_cnt);
+    size_t bucket_cnt = (1 << h->b->order);
+
+    bool need_shrink = (items == CHT_MAX_LOAD * bucket_cnt / 4);
+    bool missed_shrink = (items == CHT_MAX_LOAD * bucket_cnt / 8);
+
+    if ((need_shrink || missed_shrink) && h->b->order > h->min_order) {
+        atomic_count_t resize_reqs = atomic_preinc(&h->resize_reqs);
+        /* The first resize request. Start the resizer. */
+        if (1 == resize_reqs) {
+            workq_global_enqueue_noblock(&h->resize_work, resize_table);
+        }
+    }
 }

 static void item_inserted(cht_t *h)
 {
-    /* todo: impl */
-}
-
-static void resize_table(void *arg)
-{
-    cht_t *h = (cht_t *)arg;
-
+    size_t items = (size_t) atomic_preinc(&h->item_cnt);
+    size_t bucket_cnt = (1 << h->b->order);
+
+    bool need_grow = (items == CHT_MAX_LOAD * bucket_cnt);
+    bool missed_grow = (items == 2 * CHT_MAX_LOAD * bucket_cnt);
+
+    if ((need_grow || missed_grow) && h->b->order < CHT_MAX_ORDER) {
+        atomic_count_t resize_reqs = atomic_preinc(&h->resize_reqs);
+        /* The first resize request. Start the resizer. */
+        if (1 == resize_reqs) {
+            workq_global_enqueue_noblock(&h->resize_work, resize_table);
+        }
+    }
+}
+
+static void resize_table(work_t *arg)
+{
+    cht_t *h = member_to_inst(arg, cht_t, resize_work);
+
+#ifdef CONFIG_DEBUG
     ASSERT(h->b);
-    ASSERT(0 < (read_barrier(), atomic_get(&h->resize_reqs)));
-
-    /* Load the most current h->item_cnt. */
+    /* Make resize_reqs visible. */
     read_barrier();
+    ASSERT(0 < atomic_get(&h->resize_reqs));
+#endif
+
+    bool done;
     do {
-        size_t cur_items = h->item_cnt;
+        /* Load the most recent h->item_cnt. */
+        read_barrier();
+        size_t cur_items = (size_t) atomic_get(&h->item_cnt);
         size_t bucket_cnt = (1 << h->b->order);
-
-        if (cur_items >= CHT_MAX_LOAD * bucket_cnt) {
+        size_t max_items = CHT_MAX_LOAD * bucket_cnt;
+
+        if (cur_items >= max_items && h->b->order < CHT_MAX_ORDER) {
             grow_table(h);
-        } else if (cur_items <= CHT_MAX_LOAD * bucket_cnt / 4) {
+        } else if (cur_items <= max_items / 4 && h->b->order > h->min_order) {
             shrink_table(h);
-        }
-
-        /* Load the most current h->item_cnt and h->resize_reqs. */
-        read_barrier();
-    } while (0 < atomic_predec(&h->resize_reqs));
+        } else {
+            /* Table is just the right size. */
+            atomic_count_t reqs = atomic_predec(&h->resize_reqs);
+            done = (reqs == 0);
+        }
+    } while (!done);
 }
…
     /* Wait for all readers and updaters to see the initialized new table. */
     rcu_synchronize();
-
     size_t old_bucket_cnt = (1 << h->b->order);
…
     }

+    /* Order start_head_move() wrt complete_head_move(). */
+    cas_order_barrier();
+
     /* Complete moving heads and split any buckets not yet split by updaters. */
     for (size_t old_idx = 0; old_idx < old_bucket_cnt; ++old_idx) {
…
         size_t new_idx = grow_idx(old_idx);

-        cleanup_join_follows(h, &h->new_b[new_idx]);
+        cleanup_join_follows(h, &h->new_b->head[new_idx]);
     }

     /*
      * Wait for everyone to notice that buckets were split, ie link connecting
…
         size_t new_idx = grow_to_split_idx(old_idx);

-        cleanup_join_node(h, &h->new_b[new_idx]);
+        cleanup_join_node(h, &h->new_b->head[new_idx]);
     }

     /* Wait for everyone to see that the table is clear of any resize marks. */
     rcu_synchronize();
…
     cht_buckets_t *old_b = h->b;
     rcu_assign(h->b, h->new_b);

     /* Wait for everyone to start using the new table. */
     rcu_synchronize();
…
 static void shrink_table(cht_t *h)
 {
-    if (h->b->order <= CHT_MIN_ORDER)
+    if (h->b->order <= h->min_order)
         return;
…
     }

+    /* Order start_head_move() wrt to complete_head_move(). */
+    cas_order_barrier();
+
     /* Complete moving heads and join buckets with the moved buckets. */
     for (size_t old_idx = 0; old_idx < old_bucket_cnt; ++old_idx) {
         size_t new_idx = shrink_idx(old_idx);
+        size_t move_src_idx = grow_idx(new_idx);

         /* This bucket should be moved. */
-        if (grow_idx(new_idx) == old_idx) {
+        if (move_src_idx == old_idx) {
             /* Head move not yet completed. */
             if (N_INVALID != get_mark(h->b->head[old_idx])) {
…
         /* Set the invalid joinee head to NULL. */
         if (old_idx != move_src_idx) {
-            ASSERT(N_INVALID == h->b->head[old_idx]);
+            ASSERT(N_INVALID == get_mark(h->b->head[old_idx]));

             if (0 != get_next(h->b->head[old_idx]))
…
     /* Clear the JOIN mark and GC any deleted join nodes. */
     for (size_t new_idx = 0; new_idx < new_bucket_cnt; ++new_idx) {
-        cleanup_join_node(h, &h->new_b[new_idx]);
+        cleanup_join_node(h, &h->new_b->head[new_idx]);
     }
…
         /* Done if the mark was cleared. Retry if a new node was inserted. */
         done = (ret == jn_link);
+        ASSERT(ret == jn_link || (get_mark(ret) & N_JOIN));
     } while (!done);
…

     /* The join node had been marked as deleted - GC it. */
+
+    /* Clear the JOIN mark before trying to unlink the deleted join node.*/
+    cas_order_barrier();

     size_t jn_hash = node_hash(h, join_node);
     do {
-        bool resizing;
+        bool resizing = false;

         wnd_t wnd = {
…
         marked_ptr_t ret
             = cas_link(cur_link, next, N_JOIN_FOLLOWS, 0, N_NORMAL);
+
+        ASSERT(!next || ((N_JOIN | N_JOIN_FOLLOWS) & get_mark(ret)));

         /* Successfully cleared the JF mark of a non-deleted node. */
…
      * inserted in front of it, would require more than one grace period.
      */
-}
+    void *ret = atomic_cas_ptr((void**)link, (void *)cur, (void *)new);
+    return (marked_ptr_t) ret;
+}
+
+static void cas_order_barrier(void)
+{
+    /* Make sure CAS to different memory locations are ordered. */
+    write_barrier();
+}

 /** @}
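The newly implemented item_inserted()/item_removed() triggers cooperate with the resize_table() worker through the resize_reqs counter: the update that raises the count from zero schedules a single background work item, and the worker keeps growing or shrinking until the table is right-sized, then consumes one request at a time until none remain. The same hand-off, restated as a self-contained sketch in portable C11 atomics (the predicates and enqueue_resize_work() are hypothetical stand-ins for the HelenOS primitives):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Hypothetical stand-ins for the kernel primitives used above. */
    void enqueue_resize_work(void);
    bool table_too_full(void);
    bool table_too_empty(void);
    void grow(void);
    void shrink(void);

    static atomic_size_t resize_reqs;

    /* Insert/remove fast path: called when a load factor threshold is hit. */
    void request_resize(void)
    {
        /* Only the request that raises the count from 0 schedules the
         * worker; later requests merely bump the counter. */
        if (atomic_fetch_add(&resize_reqs, 1) == 0)
            enqueue_resize_work();
    }

    /* Background worker, analogous to resize_table() above. */
    void resize_worker(void)
    {
        bool done = false;
        do {
            if (table_too_full())
                grow();
            else if (table_too_empty())
                shrink();
            else
                /* Right-sized: consume one request; quit once the last
                 * outstanding request has been drained. */
                done = (atomic_fetch_sub(&resize_reqs, 1) == 1);
        } while (!done);
    }

Checking the thresholds with == rather than >= works because atomic_preinc()/atomic_predec() hand each intermediate count to exactly one updater, so every crossing fires exactly once; the additional "missed" thresholds at twice (or half) the distance presumably re-arm the trigger when the first crossing does not result in a resize.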