Changeset b9cb911 in mainline for kernel/generic/src/adt/cht.c
- Timestamp: 2012-08-20T18:40:19Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 7cfe5c0
- Parents: 85d31de9
- File: 1 edited
kernel/generic/src/adt/cht.c
r85d31de9 → rb9cb911

typedef bool (*equal_pred_t)(void *arg, const cht_link_t *item);

/** The following marks tag items and bucket heads.
 *
 * They are stored in the two low order bits of the next item pointers.
 * Some marks may be combined. Some marks share the same binary value and
 * are distinguished only by context (eg bucket head vs an ordinary item),
 * in particular by walk_mode_t.
 */
typedef enum mark {
    /** Normal non-deleted item or a valid bucket head. */
    N_NORMAL = 0,
    /** Logically deleted item that might have already been unlinked.
     *
     * May be combined with N_JOIN and N_JOIN_FOLLOWS. Applicable only
     * to items; never to bucket heads.
     *
     * Once marked deleted an item remains marked deleted.
     */
    N_DELETED = 1,
    /** Immutable bucket head.
     *
     * The bucket is being moved or joined with another and its (old) head
     * must not be modified.
     *
     * May be combined with N_INVALID. Applicable only to old bucket heads,
     * ie cht_t.b and not cht_t.new_b.
     */
    N_CONST = 1,
    /** Invalid bucket head. The bucket head must not be modified.
     *
     * Old bucket heads (ie cht_t.b) are marked invalid if they have
     * already been moved to cht_t.new_b or if the bucket had already
     * been merged with another when shrinking the table. New bucket
     * heads (ie cht_t.new_b) are marked invalid if the old bucket had
     * not yet been moved or if an old bucket had not yet been split
     * when growing the table.
     */
    N_INVALID = 3,
    /** The item is a join node, ie it joins two buckets.
     *
     * A join node is either the first node of the second part of
     * a bucket to be split; or it is the first node of the bucket
     * to be merged into/appended to/joined with another bucket.
     *
     * May be combined with N_DELETED. Applicable only to items, never
     * to bucket heads.
     *
     * Join nodes are referred to from two different buckets and may,
     * therefore, not be safely/atomically unlinked from both buckets.
     * As a result join nodes are not unlinked but rather just marked
     * deleted. Once resize completes, join nodes marked deleted are
     * garbage collected.
     */
    N_JOIN = 2,
    /** The next node is a join node and will soon be marked so.
     *
     * A join-follows node is the last node of the first part of a bucket
     * that is to be split, ie it is the last node that will remain
     * in the same bucket after splitting it.
     *
     * May be combined with N_DELETED. Applicable to items as well
     * as to bucket heads of the bucket to be split (but only in cht_t.new_b).
     */
    N_JOIN_FOLLOWS = 2,
    /** Bit mask used to strip the mark bits, ie to filter out the address
     * of the next item from the next ptr. */
    N_MARK_MASK = 3
} mark_t;

/** Determines how to interpret the node marks encountered during a bucket
 * chain walk. */
typedef enum walk_mode {
    /** The table is not resizing. */
    WM_NORMAL = 4,
    /** The table is undergoing a resize. Join nodes may be encountered. */
    WM_LEAVE_JOIN,
    /** The table is growing. A join-follows node may be encountered. */
    WM_MOVE_JOIN_FOLLOWS
} walk_mode_t;

/** Bucket position window. */
typedef struct wnd {
    /** Pointer to cur's predecessor. */
    marked_ptr_t *ppred;
    /** Current item. */
    cht_link_t *cur;
    /** Last encountered item. Deleted or not. */
    cht_link_t *last;
} wnd_t;

…
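Storing marks in the two low order bits of a next pointer works only because those bits are always zero in a real item address (ie items are at least 4-byte aligned). A minimal standalone sketch of that packing idea (plain C with hypothetical helper names; the changeset's own equivalents are make_link(), get_next() and get_mark(), documented further below):

    #include <assert.h>
    #include <stdint.h>

    enum { MARK_MASK = 3 };   /* the two low order bits, like N_MARK_MASK */

    /* Pack an (at least 4-byte aligned) pointer and a 2-bit mark into one word. */
    static inline uintptr_t pack_link(void *next, unsigned mark)
    {
        assert(((uintptr_t) next & MARK_MASK) == 0);   /* alignment frees the bits */
        return (uintptr_t) next | (mark & MARK_MASK);
    }

    /* Strip the mark bits and recover the address of the next item. */
    static inline void *link_ptr(uintptr_t link)
    {
        return (void *) (link & ~(uintptr_t) MARK_MASK);
    }

    /* Extract only the mark bits. */
    static inline unsigned link_mark(uintptr_t link)
    {
        return (unsigned) (link & MARK_MASK);
    }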
/** Creates a concurrent hash table.
 *
 * @param h         Valid pointer to a cht_t instance.
 * @param init_size The initial number of buckets the table should contain.
 *                  The table may be shrunk below this value if deemed necessary.
 *                  Uses the default value if 0.
 * @param min_size  Minimum number of buckets that the table should contain.
 *                  The number of buckets never drops below this value,
 *                  although it may be rounded up internally as appropriate.
 *                  Uses the default value if 0.
 * @param max_load  Maximum average number of items per bucket that is allowed
 *                  before the table grows.
 * @param op        Item specific operations. All operations are compulsory.
 * @return True if the table was successfully created. False otherwise.
 */
bool cht_create(cht_t *h, size_t init_size, size_t min_size, size_t max_load,
    cht_ops_t *op)

…

/** Allocates and initializes 2^order buckets.
 *
 * All bucket heads are initialized to point to the sentinel node.
 *
 * @param order       The number of buckets to allocate is 2^order.
 * @param set_invalid Bucket heads are marked invalid if true; otherwise
 *                    they are marked N_NORMAL.
 * @return Newly allocated and initialized buckets or NULL if not enough memory.
 */
static cht_buckets_t *alloc_buckets(size_t order, bool set_invalid)

…

/** Returns the smallest k such that bucket_cnt <= 2^k and min_order <= k. */
static size_t size_to_order(size_t bucket_cnt, size_t min_order)

…

/** Destroys a CHT successfully created via cht_create().
 *
 * Waits for all outstanding concurrent operations to complete and
 * frees internally allocated resources. The table is, however, not cleared
 * and items already present in the table (if any) are leaked.
 */
void cht_destroy(cht_t *h)
{
    …
    free(h->b);
    h->b = 0;

    /* You must clear the table of items. Otherwise cht_destroy will leak. */
    ASSERT(atomic_get(&h->item_cnt) == 0);
}

…

/** Returns the first item equal to the search key or NULL if not found.
 *
 * The call must be enclosed in a rcu_read_lock()/unlock() pair. The
 * returned item is guaranteed to be allocated until rcu_read_unlock(),
 * although the item may be concurrently removed from the table by another
 * cpu.
 *
 * Further items matching the key may be retrieved via cht_find_next().
 *
 * cht_find() sees the effects of any completed cht_remove(), cht_insert().
 * If a concurrent remove or insert had not yet completed, cht_find() may
 * or may not see the effects of it (eg it may find an item being removed).
 *
 * @param h   CHT to operate on.
 * @param key Search key as defined by cht_ops_t.key_equal() and .key_hash().
 * @return First item equal to the key or NULL if such an item does not exist.
 */
cht_link_t *cht_find(cht_t *h, void *key)

…

/** Returns the first item equal to the search key or NULL if not found.
 *
 * Unlike cht_find(), cht_find_lazy() may not see the effects of
 * cht_remove() or cht_insert() even though they have already completed.
 * It may take a couple of milliseconds for those changes to propagate
 * and become visible to cht_find_lazy(). On the other hand, cht_find_lazy()
 * operates a bit faster than cht_find().
 *
 * See cht_find() for more details.
 */
cht_link_t *cht_find_lazy(cht_t *h, void *key)
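A short caller-side illustration of the lookup contract described above (not part of the changeset; the table instance and key are hypothetical, and the key type is whatever the caller's cht_ops_t.key_hash()/key_equal() expect):

    static void lookup_example(cht_t *table, void *key)
    {
        /* cht_find() must be called under the RCU read lock. */
        rcu_read_lock();

        cht_link_t *link = cht_find(table, key);
        if (link != NULL) {
            /* Convert the embedded cht_link_t back to the enclosing item
             * (container-of style) and read it here. The item remains
             * allocated at least until rcu_read_unlock(), even if another
             * cpu removes it from the table concurrently. */
        }

        rcu_read_unlock();
    }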
…

/** Finds the first item equal to the search key. */
static inline cht_link_t *find_lazy(cht_t *h, void *key)
{
    ASSERT(h);
    /* See docs to cht_find() and cht_find_lazy(). */
    ASSERT(rcu_read_locked());
    …
    marked_ptr_t head = b->head[idx];

    /* Undergoing a resize - take the slow path. */
    if (N_INVALID == get_mark(head))
        return find_resizing(h, key, hash, head, idx);
    …
}

…

/** Returns the next item matching \a item.
 *
 * Must be enclosed in a rcu_read_lock()/unlock() pair. Effects of
 * completed cht_remove(), cht_insert() are guaranteed to be visible
 * to cht_find_next().
 *
 * See cht_find() for more details.
 */
cht_link_t *cht_find_next(cht_t *h, const cht_link_t *item)

…

/** Returns the next item matching \a item.
 *
 * Must be enclosed in a rcu_read_lock()/unlock() pair. Effects of
 * completed cht_remove(), cht_insert() may or may not be visible
 * to cht_find_next_lazy().
 *
 * See cht_find_lazy() for more details.
 */
cht_link_t *cht_find_next_lazy(cht_t *h, const cht_link_t *item)

…

/** Searches the bucket at head for key using search_hash. */
static inline cht_link_t *search_bucket(cht_t *h, marked_ptr_t head, void *key,
    size_t search_hash)

…

/** Searches for the key while the table is undergoing a resize. */
static cht_link_t *find_resizing(cht_t *h, void *key, size_t hash,
    marked_ptr_t old_head, size_t old_idx)

…

/** Inserts an item. Succeeds even if an equal item is already present. */
void cht_insert(cht_t *h, cht_link_t *item)

…

/** Inserts a unique item. Returns false if an equal item was already present.
 *
 * Use this function to atomically check if an equal/duplicate item had
 * not yet been inserted into the table and to insert this item into the
 * table.
 *
 * The following is NOT thread-safe, so do not use:
 * \code
 * if (!cht_find(h, key)) {
 *     // A concurrent insert here may go unnoticed by cht_find() above.
 *     item = malloc(..);
 *     cht_insert(h, item);
 *     // Now we may have two items with equal search keys.
 * }
 * \endcode
 *
 * Replace such code with:
 * \code
 * item = malloc(..);
 * if (!cht_insert_unique(h, item)) {
 *     // Whoops, someone beat us to it - an equal item had already been inserted.
 *     free(item);
 * } else {
 *     // Successfully inserted the item and we are guaranteed that
 *     // there are no other equal items.
 * }
 * \endcode
 */
bool cht_insert_unique(cht_t *h, cht_link_t *item)

…

/** Inserts the item into the table and checks for duplicates if unique is true. */
static bool insert_impl(cht_t *h, cht_link_t *item, bool unique)
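The cht_link_t passed to cht_insert()/cht_insert_unique() is intended to be embedded in the caller's own item, as the \code example above suggests. A hedged caller-side sketch (my_entry_t, add_entry() and the header path are hypothetical illustrations, not part of the changeset):

    /* #include <adt/cht.h>  -- header path assumed */
    #include <stdbool.h>
    #include <stdlib.h>

    /* Hypothetical item type embedding the table link. */
    typedef struct my_entry {
        cht_link_t link;   /* intrusive linkage used by the table */
        int key;
        int value;
    } my_entry_t;

    /* `table` is a cht_t created elsewhere with cht_create(). */
    static bool add_entry(cht_t *table, int key, int value)
    {
        my_entry_t *e = malloc(sizeof(*e));
        if (!e)
            return false;

        e->key = key;
        e->value = value;

        if (!cht_insert_unique(table, &e->link)) {
            /* An equal entry was already present - discard ours. */
            free(e);
            return false;
        }
        return true;
    }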
…

/** Inserts item between wnd.ppred and wnd.cur.
 *
 * @param item      Item to link to wnd.ppred and wnd.cur.
 * @param wnd       The item will be inserted before wnd.cur. wnd.ppred
 *                  must be N_NORMAL.
 * @param walk_mode Bucket chain walk mode.
 * @param resizing  Set to true only if the table is undergoing a resize
 *                  and it was not expected (ie walk_mode == WM_NORMAL).
 * @return True if the item was successfully linked to wnd.ppred. False
 *         if the whole insert operation must be retried because the
 *         predecessor of wnd.cur has changed.
 */
inline static bool insert_at(cht_link_t *item, const wnd_t *wnd,
    walk_mode_t walk_mode, bool *resizing)

…

/** Returns true if the chain starting at wnd has an item equal to \a item.
 *
 * @param h       CHT to operate on.
 * @param item    Item whose duplicates the function looks for.
 * @param hash    Hash of \a item.
 * @param[in] wnd wnd.cur is the first node with a hash greater than or equal
 *                to item's hash.
 * @return True if a non-deleted item equal to \a item exists in the table.
 */
static inline bool has_duplicates(cht_t *h, const cht_link_t *item, size_t hash,
    const wnd_t *wnd)

…

/** Returns an item that is equal to \a item starting in a chain at \a start. */
static cht_link_t *find_duplicate(cht_t *h, const cht_link_t *item, size_t hash,
    cht_link_t *start)
{
    …
    /* Skip logically deleted nodes with rcu_call() in progress. */
    if (h->invalid_hash == node_hash(h, cur)) {
        cur = get_next(cur->link);
    …
}

…

/** Removes all items matching the search key. Returns the number of items removed. */
size_t cht_remove_key(cht_t *h, void *key)

…

/** Removes a specific item from the table.
 *
 * The caller must hold the rcu read lock.
 *
 * @param item Item presumably present in the table and to be removed.
 * @return True if the item was removed successfully; or false if it had
 *         already been deleted.
 */
bool cht_remove_item(cht_t *h, cht_link_t *item)
{
    ASSERT(h);
    ASSERT(item);
    /* Otherwise a concurrent cht_remove_key might free the item. */
    ASSERT(rcu_read_locked());
    …
}

…

/** Removes an item equal to pred_arg according to the predicate pred. */
static bool remove_pred(cht_t *h, size_t hash, equal_pred_t pred, void *pred_arg)

…

/** Unlinks wnd.cur from wnd.ppred and schedules a deferred free for the item.
 *
 * Ignores nodes marked N_JOIN if walk mode is WM_LEAVE_JOIN.
 *
 * @param h              CHT to operate on.
 * @param wnd            Points to the item to delete and its N_NORMAL predecessor.
 * @param walk_mode      Bucket chain walk mode.
 * @param deleted_but_gc Set to true if the item had been logically deleted,
 *                       but a garbage collecting walk of the bucket is in
 *                       order for it to be fully unlinked.
 * @param resizing       Set to true if the table is undergoing an unexpected
 *                       resize (ie walk_mode == WM_NORMAL).
 * @return False if wnd.ppred changed in the meantime and the whole
 *         delete operation must be retried.
 */
static inline bool delete_at(cht_t *h, wnd_t *wnd, walk_mode_t walk_mode,
    bool *deleted_but_gc, bool *resizing)

…

/** Marks cur logically deleted. Returns false to request a retry. */
static inline bool mark_deleted(cht_link_t *cur, walk_mode_t walk_mode,
    bool *resizing)

…

/** Unlinks wnd.cur from wnd.ppred. Returns false if it should be retried. */
static inline bool unlink_from_pred(wnd_t *wnd, walk_mode_t walk_mode,
    bool *resizing)
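mark_deleted() and unlink_from_pred() above split removal into the classic two lock-free phases: first CAS the victim's own next pointer to set N_DELETED (logical deletion), then CAS the predecessor's next pointer past the victim (physical unlink), retrying the bucket walk if either CAS loses a race. A simplified standalone illustration of the same idea using C11 atomics (generic node types of my own, not the kernel's cas_link()/marked_ptr_t machinery; in the real table the unlinked node is then handed to RCU via free_later() rather than freed immediately):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define DELETED ((uintptr_t) 1)   /* low order mark bit, like N_DELETED */

    typedef struct node {
        _Atomic uintptr_t next;       /* successor address | mark bit */
        int key;
    } node_t;

    /* Phase 1: logically delete cur by setting the mark in its own next pointer. */
    static bool mark_node_deleted(node_t *cur)
    {
        uintptr_t next = atomic_load(&cur->next);
        while (!(next & DELETED)) {
            if (atomic_compare_exchange_weak(&cur->next, &next, next | DELETED))
                return true;          /* we won the race to delete it */
        }
        return false;                 /* someone else deleted it first */
    }

    /* Phase 2: physically unlink cur by swinging the predecessor's next pointer
     * past it. If the CAS fails the chain changed and the caller must re-walk
     * the bucket and retry, which is what unlink_from_pred() reports via its
     * return value. */
    static bool unlink_deleted(_Atomic uintptr_t *ppred, node_t *cur)
    {
        uintptr_t expected = (uintptr_t) cur;   /* pred still points at cur, unmarked */
        uintptr_t succ = atomic_load(&cur->next) & ~DELETED;
        return atomic_compare_exchange_strong(ppred, &expected, succ);
    }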
…

/** Finds the first non-deleted item equal to \a pred_arg according to \a pred.
 *
 * The function returns the candidate item in \a wnd. Logically deleted
 * nodes are garbage collected so the predecessor will most likely not
 * be marked as deleted.
 *
 * Unlike find_wnd_and_gc(), this function never returns a node that
 * is known to have already been marked N_DELETED.
 *
 * Any logically deleted nodes (ie those marked N_DELETED) are garbage
 * collected, ie freed in the background via rcu_call() (except for join
 * nodes if walk_mode == WM_LEAVE_JOIN).
 *
 * @param h         CHT to operate on.
 * @param hash      Hash to search for.
 * @param walk_mode Bucket chain walk mode.
 * @param pred      Predicate used to find an item equal to pred_arg.
 * @param pred_arg  Argument to pass to the equality predicate \a pred.
 * @param[in,out] wnd The search starts with wnd.cur. If the desired
 *                  item is found wnd.cur will point to it.
 * @param resizing  Set to true if the table is resizing but it was not
 *                  expected (ie walk_mode == WM_NORMAL).
 * @return False if the operation has to be retried. True otherwise
 *         (even if an equal item had not been found).
 */
static bool find_wnd_and_gc_pred(cht_t *h, size_t hash, walk_mode_t walk_mode,
    equal_pred_t pred, void *pred_arg, wnd_t *wnd, bool *resizing)

…

(The following comment replaces the old placeholder
 /* todo: comment different semantics (eg deleted JN first w/ specific hash) */ .)

/** Finds the first item (deleted or not) with a hash greater than or equal to \a hash.
 *
 * The function returns in \a wnd the first item with a hash that is greater
 * than or equal to \a hash. Moreover, it garbage collects logically
 * deleted nodes that have not yet been unlinked and freed. Therefore,
 * the returned node's predecessor will most likely be N_NORMAL.
 *
 * Unlike find_wnd_and_gc_pred(), this function may return a node
 * that is known to have been marked N_DELETED.
 *
 * @param h         CHT to operate on.
 * @param hash      Hash of the item to find.
 * @param walk_mode Bucket chain walk mode.
 * @param[in,out] wnd wnd.cur denotes the first node of the chain. If the
 *                  operation is successful, \a wnd points to the desired
 *                  item.
 * @param resizing  Set to true if a table resize was detected but walk_mode
 *                  suggested the table was not undergoing a resize.
 * @return False indicates the operation must be retried. True otherwise
 *         (even if an item with exactly the same hash was not found).
 */
static bool find_wnd_and_gc(cht_t *h, size_t hash, walk_mode_t walk_mode,
    wnd_t *wnd, bool *resizing)

…

/** Garbage collects the N_DELETED node at \a wnd, skipping join nodes. */
static bool gc_deleted_node(cht_t *h, walk_mode_t walk_mode, wnd_t *wnd,
    bool *resizing)
…

/** Returns true if a bucket join had already completed.
 *
 * May only be called if upd_resizing_head() indicates a bucket join
 * may be in progress.
 *
 * If it returns false, the search must be retried in order to guarantee
 * all items that should have been encountered have been seen.
 */
static bool join_completed(cht_t *h, const wnd_t *wnd)

…

/** When resizing, returns in \a phead the bucket head to start the search with.
 *
 * If a resize had been detected (eg cht_t.b.head[idx] is marked immutable),
 * upd_resizing_head() moves the bucket for \a hash from the old head
 * to the new head. Moreover, it splits or joins buckets as necessary.
 *
 * @param h    CHT to operate on.
 * @param hash Hash of an item whose chain we would like to traverse.
 * @param[out] phead Head of the bucket to search for \a hash.
 * @param[out] join_finishing Set to true if a bucket join might be
 *             in progress and the bucket may have to be traversed again,
 *             as indicated by join_completed().
 * @param[out] walk_mode Specifies how to interpret node marks.
 */
static void upd_resizing_head(cht_t *h, size_t hash, marked_ptr_t **phead,
    bool *join_finishing, walk_mode_t *walk_mode)

…

/** Moves an immutable head \a psrc_head of cht_t.b to \a pdest_head of cht_t.new_b.
 *
 * The function guarantees the move will be visible on this cpu once
 * it completes. In particular, *pdest_head will not be N_INVALID.
 *
 * Unlike complete_head_move(), help_head_move() checks if the head had already
 * been moved and tries to avoid moving the bucket heads if possible.
 */
static inline void help_head_move(marked_ptr_t *psrc_head,
    marked_ptr_t *pdest_head)

…

/** Initiates the move of the old head \a psrc_head.
 *
 * The move may be completed with help_head_move().
 */
static void start_head_move(marked_ptr_t *psrc_head)

…

/** Marks the head immutable. */
static void mark_const(marked_ptr_t *psrc_head)

…

/** Completes moving head psrc_head to pdest_head (started by start_head_move()). */
static void complete_head_move(marked_ptr_t *psrc_head, marked_ptr_t *pdest_head)

…

/** Splits the bucket at psrc_head and links to the remainder from pdest_head.
 *
 * Items with hashes greater than or equal to \a split_hash are moved to the
 * bucket with its head at \a pdest_head.
 *
 * @param h          CHT to operate on.
 * @param psrc_head  Head of the bucket to split (in cht_t.new_b).
 * @param pdest_head Head of the bucket (in cht_t.new_b) that will point to the
 *                   second part of the split bucket.
 * @param split_hash Hash of the first possible item in the remainder of
 *                   psrc_head, ie the smallest hash pdest_head is allowed
 *                   to point to.
 */
static void split_bucket(cht_t *h, marked_ptr_t *psrc_head,
    marked_ptr_t *pdest_head, size_t split_hash)

…

/** Finds and marks the last node of psrc_head w/ hash less than split_hash.
 *
 * Finds a node in psrc_head with the greatest hash that is strictly less
 * than split_hash and marks it with N_JOIN_FOLLOWS.
 *
 * Returns a window pointing to that node.
 *
 * Any logically deleted nodes along the way are garbage collected; therefore,
 * the predecessor node (if any) will most likely not be marked N_DELETED.
 *
 * @param h          CHT to operate on.
 * @param psrc_head  Bucket head.
 * @param split_hash The smallest hash a join node (ie the node following
 *                   the desired join-follows node) may have.
 * @param[out] wnd   Points to the node marked with N_JOIN_FOLLOWS.
 */
static void mark_join_follows(cht_t *h, marked_ptr_t *psrc_head,
    size_t split_hash, wnd_t *wnd)
…

/** Marks join_node with N_JOIN. */
static void mark_join_node(cht_link_t *join_node)

…

/** Appends the bucket at psrc_head to the bucket at pdest_head.
 *
 * @param h          CHT to operate on.
 * @param psrc_head  Bucket to merge with pdest_head.
 * @param pdest_head Bucket to be joined by psrc_head.
 * @param split_hash The smallest hash psrc_head may contain.
 */
static void join_buckets(cht_t *h, marked_ptr_t *psrc_head,
    marked_ptr_t *pdest_head, size_t split_hash)

…

/** Links the tail of pdest_head to join_node.
 *
 * @param h          CHT to operate on.
 * @param pdest_head Head of the bucket whose tail is to be linked to join_node.
 * @param join_node  A node marked N_JOIN with a hash greater than or equal to
 *                   split_hash.
 * @param split_hash The least hash that is greater than the hash of any items
 *                   (originally) in pdest_head.
 */
static void link_to_join_node(cht_t *h, marked_ptr_t *pdest_head,
    cht_link_t *join_node, size_t split_hash)

…

/** Instructs RCU to free the item once all preexisting references are dropped.
 *
 * The item is freed via op->remove_callback().
 */
static void free_later(cht_t *h, cht_link_t *item)

…

/** Notes that an item had been unlinked from the table and shrinks the table if needed.
 *
 * If the number of items in the table drops below 1/4 of the maximum
 * allowed load, the table is shrunk in the background.
 */
static inline void item_removed(cht_t *h)

…

/** Notes an item had been inserted and grows the table if needed.
 *
 * The table is resized in the background.
 */
static inline void item_inserted(cht_t *h)

…

/** Resize request handler. Invoked on the system work queue. */
static void resize_table(work_t *arg)

…

/** Increases the number of buckets two-fold. Blocks until done. */
static void grow_table(cht_t *h)

…

/** Halves the number of buckets. Blocks until done. */
static void shrink_table(cht_t *h)

…

/** Finds and clears the N_JOIN mark from a node in new_head (if present). */
static void cleanup_join_node(cht_t *h, marked_ptr_t *new_head)

…

/** Clears the join_node's N_JOIN mark and frees it if it is marked N_DELETED as well. */
static void clear_join_and_gc(cht_t *h, cht_link_t *join_node,
    marked_ptr_t *new_head)

…

/** Finds a non-deleted node with N_JOIN_FOLLOWS and clears the mark. */
static void cleanup_join_follows(cht_t *h, marked_ptr_t *new_head)
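item_removed() and item_inserted() above schedule a background resize based on the average bucket load: grow once the item count exceeds max_load items per bucket, shrink once it drops below a quarter of that allowed load. A tiny sketch of those thresholds (hypothetical helper names, assuming only the semantics stated in the comments; the real checks also respect the minimum bucket count given to cht_create()):

    #include <stdbool.h>
    #include <stddef.h>

    /* Grow once the average bucket length exceeds the allowed load. */
    static inline bool should_grow(size_t items, size_t bucket_cnt, size_t max_load)
    {
        return items > bucket_cnt * max_load;
    }

    /* Shrink once the item count drops below 1/4 of the allowed load,
     * but never below the minimum bucket count. */
    static inline bool should_shrink(size_t items, size_t bucket_cnt,
        size_t max_load, size_t min_bucket_cnt)
    {
        return bucket_cnt > min_bucket_cnt && items < (bucket_cnt * max_load) / 4;
    }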
…

/** Returns the first possible hash following a bucket split point.
 *
 * In other words, the returned hash is the smallest possible hash
 * the remainder of the split bucket may contain.
 */
static inline size_t calc_split_hash(size_t split_idx, size_t order)

…

/** Returns the bucket head index given the table size order and item hash. */
static inline size_t calc_bucket_idx(size_t hash, size_t order)

…

/** Returns the destination index of the second half of a bucket split while growing. */
static inline size_t grow_to_split_idx(size_t old_idx)

…

/** Returns the destination index of a bucket head when the table is growing. */
static inline size_t grow_idx(size_t idx)

…

/** Returns the destination index of a bucket head when the table is shrinking. */
static inline size_t shrink_idx(size_t idx)

…

/** Returns a mixed hash of the search key. */
static inline size_t calc_key_hash(cht_t *h, void *key)

…

/** Returns a memoized mixed hash of the item. */
static inline size_t node_hash(cht_t *h, const cht_link_t *item)

…

/** Calculates and mixes the hash of the item. */
static inline size_t calc_node_hash(cht_t *h, const cht_link_t *item)

…

/** Computes and memoizes the hash of the item. */
static inline void memoize_node_hash(cht_t *h, cht_link_t *item)

…

/** Packs the next pointer address and the mark into a single pointer. */
static inline marked_ptr_t make_link(const cht_link_t *next, mark_t mark)

…

/** Strips any marks from the next item link and returns the next item's address. */
static inline cht_link_t *get_next(marked_ptr_t link)

…

/** Returns the current node's mark stored in the next item link. */
static inline mark_t get_mark(marked_ptr_t link)

…

/** Moves the window by one item so that it points to the next item. */
static inline void next_wnd(wnd_t *wnd)

…

/** Predicate that matches only exactly the same node. */
static bool same_node_pred(void *node, const cht_link_t *item2)

…

/** Compare-and-swaps a next item link. */
static inline marked_ptr_t cas_link(marked_ptr_t *link, const cht_link_t *cur_next,
    mark_t cur_mark, const cht_link_t *new_next, mark_t new_mark)

…

/** Compare-and-swaps a next item link. */
static inline marked_ptr_t _cas_link(marked_ptr_t *link, marked_ptr_t cur,
    marked_ptr_t new)

…

/** Orders compare-and-swaps to different memory locations. */
static inline void cas_order_barrier(void)
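The index helpers above imply the usual power-of-two doubling scheme: buckets cover contiguous hash ranges, so when the table grows by one order the contents of old bucket idx end up in new buckets 2*idx (grow_idx) and 2*idx + 1 (grow_to_split_idx), and shrinking merges that pair back into bucket idx (shrink_idx). A small self-contained check of that index arithmetic (a sketch only; it does not reproduce the exact shift expressions in cht.c):

    #include <assert.h>
    #include <stddef.h>

    static size_t grow_idx_sketch(size_t idx)          { return idx << 1; }
    static size_t grow_to_split_idx_sketch(size_t idx) { return (idx << 1) | 1; }
    static size_t shrink_idx_sketch(size_t idx)        { return idx >> 1; }

    int main(void)
    {
        /* Growing: bucket 5 of a 2^3 bucket table splits into buckets 10 and 11
         * of the 2^4 bucket table. */
        assert(grow_idx_sketch(5) == 10);
        assert(grow_to_split_idx_sketch(5) == 11);

        /* Shrinking: both halves merge back into bucket 5. */
        assert(shrink_idx_sketch(10) == 5 && shrink_idx_sketch(11) == 5);
        return 0;
    }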