Changeset 3bb732b in mainline
- Timestamp: 2012-07-26T17:01:03Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 2bcf6c6
- Parents: 7ef2249
- Files: 1 edited
Legend:
-  removed in r3bb732b
+  added in r3bb732b
   (unprefixed lines are unchanged context; a lone … marks an elided span)
kernel/generic/src/adt/cht.c
--- r7ef2249
+++ r3bb732b

 #include <synch/rcu.h>

-/* Must be a power of 2. */
-#define CHT_MIN_BUCKET_CNT 128
+/* Logarithm of the min bucket count. */
+#define CHT_MIN_ORDER 6
+/* Logarithm of the max bucket count. */
+#define CHT_MAX_ORDER (8 * sizeof(size_t))
+/* Minimum number of hash table buckets. */
+#define CHT_MIN_BUCKET_CNT (1 << CHT_MIN_ORDER)
 /* Must be a power of 2. */
 #define CHT_MAX_LOAD 2
…
 typedef cht_ptr_t marked_ptr_t;
 typedef bool (*equal_pred_t)(void *arg, const cht_link_t *item);
-

 typedef enum mark {
…
 static size_t node_hash(cht_t *h, const cht_link_t *item);

+static size_t calc_split_hash(size_t split_idx, size_t order);
 static size_t calc_bucket_idx(size_t hash, size_t order);
 static size_t grow_idx(size_t idx);
…
     ASSERT(op && op->hash && op->key_hash && op->equal && op->key_equal);

+    /* All operations are compulsory. */
     if (!op || !op->hash || !op->key_hash || !op->equal || !op->key_equal)
         return false;
…
 }

+static cht_buckets_t *alloc_buckets(size_t order, bool set_invalid)
+{
+    size_t bucket_cnt = (1 << order);
+    cht_buckets_t *b = malloc(
+        sizeof(cht_buckets_t) + (bucket_cnt - 1) * sizeof(marked_ptr_t));
+
+    if (!b)
+        return 0;
+
+    b->order = order;
+
+    marked_ptr_t head_link
+        = set_invalid ? make_link(0, N_INVALID) : make_link(0, N_NORMAL);
+
+    for (size_t i = 0; i < bucket_cnt; ++i) {
+        b->head[i] = head_link;
+    }
+
+    return b;
+}
+
+static size_t size_to_order(size_t bucket_cnt)
+{
+    size_t order = CHT_MIN_ORDER;
+
+    /* Find a power of two such that bucket_cnt <= 2^order */
+    do {
+        if (bucket_cnt <= (1 << order))
+            return order;
+
+        ++order;
+    } while (order < CHT_MAX_ORDER);
+
+    return order;
+}
+
+
 void cht_destroy(cht_t *h)
 {
     /* todo: impl */
-    1 = 1;
-    rcu_synchronize();
 }

 cht_link_t *cht_find(cht_t *h, void *key)
 {
+    /* Make the most recent changes of the table visible. */
     read_barrier();
     return cht_find_lazy(h, key);
…
     cht_buckets_t *b = rcu_access(h->b);
     size_t idx = calc_bucket_idx(hash, b->order);
-    /* todo: access once? */
+    /*
+     * No need for access_once. b->head[idx] will point to an allocated node
+     * even if marked invalid until we exit rcu read section.
+     */
     marked_ptr_t head = b->head[idx];

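Aside: the new alloc_buckets() sizes the variable-length bucket array with the
one-element-array idiom (sizeof(cht_buckets_t) plus bucket_cnt - 1 extra
marked_ptr_t slots, which implies cht_buckets_t ends in a one-element head[]
array), and size_to_order() rounds a requested bucket count up to the nearest
power of two within [CHT_MIN_ORDER, CHT_MAX_ORDER]. A stand-alone sketch of the
rounding; the main() harness and printf output are illustrative only, not part
of the patch:

    #include <stdio.h>

    #define CHT_MIN_ORDER 6
    #define CHT_MAX_ORDER (8 * sizeof(size_t))

    /* Smallest order with bucket_cnt <= 2^order, clamped to CHT_MAX_ORDER. */
    static size_t size_to_order(size_t bucket_cnt)
    {
        size_t order = CHT_MIN_ORDER;

        do {
            if (bucket_cnt <= ((size_t)1 << order))
                return order;
            ++order;
        } while (order < CHT_MAX_ORDER);

        return order;
    }

    int main(void)
    {
        /* Any request below 2^6 = 64 buckets is clamped up to order 6. */
        printf("%zu %zu %zu\n", size_to_order(10),
            size_to_order(100), size_to_order(5000));  /* prints: 6 7 13 */
        return 0;
    }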
…
     rcu_read_lock();

+    cht_buckets_t *b = rcu_access(h->b);
+    size_t hash = node_hash(h, item);
+    size_t idx = calc_bucket_idx(hash, b->order);
+    marked_ptr_t *phead = &b->head[idx];
+
+    bool resizing = false;
+    bool inserted;
+
+    do {
+        walk_mode_t walk_mode = WM_NORMAL;
+        bool join_finishing;
+
+        resizing = resizing || (N_NORMAL != get_mark(*phead));
+
+        /* The table is resizing. Get the correct bucket head. */
+        if (resizing) {
+            upd_resizing_head(hash, &phead, &join_finishing, &walk_mode);
+        }
+
+        wnd_t wnd = {
+            .ppred = phead,
+            .cur = get_next(*phead),
+            .last = 0
+        };
+
+        if (!find_wnd_and_gc(h, hash, walk_mode, &wnd, &resizing)) {
+            /* Could not GC a node; or detected an unexpected resize. */
+            continue;
+        }
+
+        if (unique && has_duplicates(h, item, hash, &wnd)) {
+            rcu_read_unlock();
+            return false;
+        }
+
+        inserted = insert_at(item, &wnd, walk_mode, &resizing);
+    } while (!inserted);
+
+    item_inserted(h);

     rcu_read_unlock();
+    return true;
+}
+
+static bool insert_at(cht_link_t *item, const wnd_t *wnd, walk_mode_t walk_mode,
+    bool *resizing)
+{
+    marked_ptr_t ret;
+
+    if (walk_mode == WM_NORMAL) {
+        item->link = make_link(wnd->cur, N_NORMAL);
+        /* Initialize the item before adding it to a bucket. */
+        memory_barrier();
+
+        /* Link a clean/normal predecessor to the item. */
+        ret = cas_link(wnd->ppred, wnd->cur, N_NORMAL, item, N_NORMAL);
+
+        if (ret == make_link(wnd->cur, N_NORMAL)) {
+            return true;
+        } else {
+            *resizing = ((N_JOIN_FOLLOWS | N_JOIN) & get_mark(ret));
+            return false;
+        }
+    } else if (walk_mode == WM_MOVE_JOIN_FOLLOWS) {
+        /* Move JOIN_FOLLOWS mark but filter out the DELETED mark. */
+        mark_t jf_mark = get_mark(*wnd->ppred) & N_JOIN_FOLLOWS;
+        item->link = make_link(wnd->cur, jf_mark);
+        /* Initialize the item before adding it to a bucket. */
+        memory_barrier();
+
+        /* Link the not-deleted predecessor to the item. Move its JF mark. */
+        ret = cas_link(wnd->ppred, wnd->cur, jf_mark, item, N_NORMAL);
+
+        return ret == make_link(wnd->cur, jf_mark);
+    } else {
+        ASSERT(walk_mode == WM_LEAVE_JOIN);
+
+        item->link = make_link(wnd->cur, N_NORMAL);
+        /* Initialize the item before adding it to a bucket. */
+        memory_barrier();
+
+        mark_t pred_mark = get_mark(*wnd->ppred);
+        /* If the predecessor is a join node it may be marked deleted. */
+        mark_t exp_pred_mark = (N_JOIN & pred_mark) ? pred_mark : N_NORMAL;
+
+        ret = cas_link(wnd->ppred, wnd->cur, exp_pred_mark, item, exp_pred_mark);
+        return ret == make_link(wnd->cur, exp_pred_mark);
+    }
+}
+
+static bool has_duplicates(cht_t *h, cht_link_t *item, size_t hash,
+    const wnd_t *wnd)
+{
+    ASSERT(0 == wnd->cur || hash <= node_hash(h, wnd->cur));
+
+    if (0 == wnd->cur || hash < node_hash(h, wnd->cur))
+        return false;
+
+    /*
+     * Load the most recent node marks. Otherwise we might pronounce a
+     * logically deleted node for a duplicate of the item just because
+     * the deleted node's DEL mark had not yet propagated to this cpu.
+     */
+    read_barrier();
+
+    cht_link_t *cur = wnd->cur;
+
+    do {
+        bool deleted = (N_DELETED & get_mark(cur->link));
+
+        /* Skip logically deleted nodes. */
+        if (!deleted && h->op->equal(item, cur))
+            return true;
+
+        cur = get_next(cur->link);
+    } while (cur && node_hash(h, cur) == hash);
+
+    return false;
 }
…

     /* The searched for node is not in the current bucket. */
-    if (wnd->cur)
+    if (!wnd->cur)
         return true;
…
         return true;
 }
-
-
-static bool find_dup_node_to_del(cht_t *h, bool is_pos, void *key_or_pos,
-    size_t hash, wnd_t *wnd)
-{
-    ASSERT(!wnd->cur || hash <= node_hash(h, wnd->cur));
-
-    if (!wnd->cur || hash < node_hash(h, wnd->cur))
-        return false;
-
-
-    if (is_pos) {
-
-    } else {
-    }
-}
-

…
 static bool join_completed(cht_t *h, const wnd_t *wnd)
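Aside: the hunks above lean heavily on make_link()/get_mark()/get_next(), whose
definitions fall outside this changeset, and the body of the mark enum is not
shown either. The usual encoding for such marked pointers keeps the marks in
the low bits of a sufficiently aligned next-pointer; the sketch below assumes
that scheme, and the mask value and node_t stand-in are guesses, not the
patch's actual definitions:

    #include <stdint.h>
    #include <assert.h>

    typedef uintptr_t marked_ptr_t;
    typedef struct node node_t;     /* stand-in for cht_link_t */

    enum { MARK_MASK = 0x7 };       /* assumes >= 8-byte node alignment */

    static marked_ptr_t make_link(const node_t *next, uintptr_t mark)
    {
        /* The low pointer bits must be free to carry the mark. */
        assert(((uintptr_t)next & MARK_MASK) == 0);
        return (uintptr_t)next | mark;
    }

    static node_t *get_next(marked_ptr_t link)
    {
        return (node_t *)(link & ~(uintptr_t)MARK_MASK);
    }

    static uintptr_t get_mark(marked_ptr_t link)
    {
        return link & MARK_MASK;
    }

Packing the mark into the pointer is what lets a single CAS atomically update
a node's successor and its state, which the insert/delete protocols above
depend on.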
…
     /* The moved bucket has not yet been split. */
     if (N_NORMAL != get_mark(*pnew_head)) {
-        split_bucket(pmoved_head, pnew_head);
+        size_t split_hash = calc_split_hash(new_idx, h->new_b->order);
+        split_bucket(h, pmoved_head, pnew_head, split_hash);
         /*
          * split_bucket() makes the new head visible. No
…
     /* Bucket join not yet completed. */
     if (N_INVALID != get_mark(*pold_head)) {
-        join_buckets(pold_head, pnew_head);
+        size_t split_hash = calc_split_hash(old_idx, b->order);
+        join_buckets(h, pold_head, pnew_head, split_hash);
     }

…


+#if 0
 static void move_head(marked_ptr_t *psrc_head, marked_ptr_t *pdest_head)
 {
…
     complete_head_move(psrc_head, pdest_head);
 }
+#endif

 static void help_head_move(marked_ptr_t *psrc_head, marked_ptr_t *pdest_head)
…
     bool done;

+    rcu_read_lock();
+
     /* Mark the last node of the first part of the split bucket as JF. */
     mark_join_follows(h, psrc_head, split_hash, &wnd);
…
     /* Link the dest head to the second part of the split. */
     cas_link(pdest_head, 0, N_INVALID, wnd.cur, N_NORMAL);
+
+    rcu_read_unlock();
 }
…

 static void join_buckets(cht_t *h, marked_ptr_t *psrc_head,
-    marked_ptr_t *pdest_head)
+    marked_ptr_t *pdest_head, size_t split_hash)
 {
     /* Buckets already joined. */
…
      */

+    rcu_read_lock();
+
     /* Mark src_head immutable - signals updaters bucket join started. */
     mark_const(psrc_head);
…

     cas_link(psrc_head, join_node, N_CONST, join_node, N_INVALID);
+
+    rcu_read_unlock();
 }
…
     rcu_call(&item->rcu_link, (rcu_func_t)h->op->remove_callback);

-    /* todo: --items */
-}
-
-static size_t size_to_order(size_t bucket_cnt)
-{
-}
-
-static cht_buckets_t *alloc_buckets(size_t order, bool set_invalid)
-{
-    size_t bucket_cnt = (1 << order);
-    cht_buckets_t *b = malloc(
-        sizeof(cht_buckets_t) + (bucket_cnt - 1) * sizeof(uintptr_t));
-
-    if (!b)
-        return 0;
-
-    b->order = order;
-
-    marked_ptr_t head_link = set_invalid ? make_link(0, N_INVALID) : 0;
-
-    for (size_t i = 0; i < bucket_cnt; ++i) {
-        b->head[i] = head_link;
-    }
-
-    return b;
+    item_removed(h);
+}
+
+static void item_removed(cht_t *h)
+{
+    /* todo: impl */
+}
+
+static void item_inserted(cht_t *h)
+{
+    /* todo: impl */
+}
+
+static void resize_table(void *arg)
+{
+    cht_t *h = (cht_t *)arg;
+
+    ASSERT(h->b);
+    ASSERT(0 < (read_barrier(), atomic_get(&h->resize_reqs)));
+
+    /* Load the most current h->item_cnt. */
+    read_barrier();
+    do {
+        size_t cur_items = h->item_cnt;
+        size_t bucket_cnt = (1 << h->b->order);
+
+        if (cur_items >= CHT_MAX_LOAD * bucket_cnt) {
+            grow_table(h);
+        } else if (cur_items <= CHT_MAX_LOAD * bucket_cnt / 4) {
+            shrink_table(h);
+        }
+
+        /* Load the most current h->item_cnt and h->resize_reqs. */
+        read_barrier();
+    } while (0 < atomic_predec(&h->resize_reqs));
+}
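Aside: with CHT_MAX_LOAD == 2, resize_table() above grows at an average load of
2 items per bucket and shrinks at 0.5, leaving a factor-of-4 gap between the
thresholds. The gap is what prevents oscillation: a table that just grew from
2^7 to 2^8 buckets did so at 256 items, but will not shrink back until it drops
to 128. A stand-alone check of the numbers (the loop bounds are arbitrary):

    #include <stdio.h>

    #define CHT_MAX_LOAD 2

    int main(void)
    {
        for (size_t order = 6; order <= 8; ++order) {
            size_t buckets = (size_t)1 << order;
            printf("order %zu: grow at >= %zu items, shrink at <= %zu items\n",
                order, CHT_MAX_LOAD * buckets, CHT_MAX_LOAD * buckets / 4);
        }
        return 0;
    }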
+
+static void grow_table(cht_t *h)
+{
+    if (h->b->order >= CHT_MAX_ORDER)
+        return;
+
+    h->new_b = alloc_buckets(h->b->order + 1, true);
+
+    /* Failed to alloc a new table - try next time the resizer is run. */
+    if (!h->new_b)
+        return;
+
+    /* Wait for all readers and updaters to see the initialized new table. */
+    rcu_synchronize();
+
+    size_t old_bucket_cnt = (1 << h->b->order);
+
+    /*
+     * Give updaters a chance to help out with the resize. Do the minimum
+     * work needed to announce a resize is in progress, ie start moving heads.
+     */
+    for (size_t idx = 0; idx < old_bucket_cnt; ++idx) {
+        start_head_move(&h->b->head[idx]);
+    }
+
+    /* Complete moving heads and split any buckets not yet split by updaters. */
+    for (size_t old_idx = 0; old_idx < old_bucket_cnt; ++old_idx) {
+        marked_ptr_t *move_dest_head = &h->new_b->head[grow_idx(old_idx)];
+        marked_ptr_t *move_src_head = &h->b->head[old_idx];
+
+        /* Head move not yet completed. */
+        if (N_INVALID != get_mark(*move_src_head)) {
+            complete_head_move(move_src_head, move_dest_head);
+        }
+
+        size_t split_idx = grow_to_split_idx(old_idx);
+        size_t split_hash = calc_split_hash(split_idx, h->new_b->order);
+        marked_ptr_t *split_dest_head = &h->new_b->head[split_idx];
+
+        split_bucket(h, move_dest_head, split_dest_head, split_hash);
+    }
+
+    /*
+     * Wait for all updaters to notice the new heads. Once everyone sees
+     * the invalid old bucket heads they will know a resize is in progress
+     * and updaters will modify the correct new buckets.
+     */
+    rcu_synchronize();
+
+    /* Clear the JOIN_FOLLOWS mark and remove the link between the split buckets. */
+    for (size_t old_idx = 0; old_idx < old_bucket_cnt; ++old_idx) {
+        size_t new_idx = grow_idx(old_idx);
+
+        cleanup_join_follows(h, &h->new_b->head[new_idx]);
+    }
+
+    /*
+     * Wait for everyone to notice that buckets were split, ie link connecting
+     * the join follows and join node has been cut.
+     */
+    rcu_synchronize();
+
+    /* Clear the JOIN mark and GC any deleted join nodes. */
+    for (size_t old_idx = 0; old_idx < old_bucket_cnt; ++old_idx) {
+        size_t new_idx = grow_to_split_idx(old_idx);
+
+        cleanup_join_node(h, &h->new_b->head[new_idx]);
+    }
+
+    /* Wait for everyone to see that the table is clear of any resize marks. */
+    rcu_synchronize();
+
+    cht_buckets_t *old_b = h->b;
+    rcu_assign(h->b, h->new_b);
+
+    /* Wait for everyone to start using the new table. */
+    rcu_synchronize();
+
+    free(old_b);
+
+    /* Not needed; just for increased readability. */
+    h->new_b = 0;
+}
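Aside: the index arithmetic that grow_table() above and shrink_table() below
rely on is defined near the end of the patch (grow_idx(), grow_to_split_idx(),
shrink_idx(), calc_split_hash()). Since buckets are addressed by the top
`order` bits of the hash, doubling the table appends one extra hash bit to
every index: old bucket i is moved to new bucket 2i, and the entries whose
extra bit is 1 are split off into bucket 2i+1; shrinking joins the pair back.
A stand-alone check:

    #include <assert.h>
    #include <stddef.h>

    static size_t grow_idx(size_t idx) { return idx << 1; }
    static size_t grow_to_split_idx(size_t idx) { return grow_idx(idx) | 1; }
    static size_t shrink_idx(size_t idx) { return idx >> 1; }

    int main(void)
    {
        /* Growing: old bucket 2 becomes new buckets 4 (moved) and 5 (split). */
        assert(grow_idx(2) == 4 && grow_to_split_idx(2) == 5);
        /* Shrinking: new bucket 2 absorbs old buckets 4 (moved) and 5 (joined). */
        assert(shrink_idx(4) == 2 && shrink_idx(5) == 2);
        return 0;
    }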
+
+static void shrink_table(cht_t *h)
+{
+    if (h->b->order <= CHT_MIN_ORDER)
+        return;
+
+    h->new_b = alloc_buckets(h->b->order - 1, true);
+
+    /* Failed to alloc a new table - try next time the resizer is run. */
+    if (!h->new_b)
+        return;
+
+    /* Wait for all readers and updaters to see the initialized new table. */
+    rcu_synchronize();
+
+    size_t old_bucket_cnt = (1 << h->b->order);
+
+    /*
+     * Give updaters a chance to help out with the resize. Do the minimum
+     * work needed to announce a resize is in progress, ie start moving heads.
+     */
+    for (size_t old_idx = 0; old_idx < old_bucket_cnt; ++old_idx) {
+        size_t new_idx = shrink_idx(old_idx);
+
+        /* This bucket should be moved. */
+        if (grow_idx(new_idx) == old_idx) {
+            start_head_move(&h->b->head[old_idx]);
+        } else {
+            /* This bucket should join the moved bucket once the move is done. */
+        }
+    }
+
+    /* Complete moving heads and join buckets with the moved buckets. */
+    for (size_t old_idx = 0; old_idx < old_bucket_cnt; ++old_idx) {
+        size_t new_idx = shrink_idx(old_idx);
+
+        /* This bucket should be moved. */
+        if (grow_idx(new_idx) == old_idx) {
+            /* Head move not yet completed. */
+            if (N_INVALID != get_mark(h->b->head[old_idx])) {
+                complete_head_move(&h->b->head[old_idx], &h->new_b->head[new_idx]);
+            }
+        } else {
+            /* This bucket should join the moved bucket. */
+            size_t split_hash = calc_split_hash(old_idx, h->b->order);
+            join_buckets(h, &h->b->head[old_idx], &h->new_b->head[new_idx],
+                split_hash);
+        }
+    }
+
+    /*
+     * Wait for all updaters to notice the new heads. Once everyone sees
+     * the invalid old bucket heads they will know a resize is in progress
+     * and updaters will modify the correct new buckets.
+     */
+    rcu_synchronize();
+
+    /* Let everyone know joins are complete and fully visible. */
+    for (size_t old_idx = 0; old_idx < old_bucket_cnt; ++old_idx) {
+        size_t move_src_idx = grow_idx(shrink_idx(old_idx));
+
+        /* Set the invalid joinee head to NULL. */
+        if (old_idx != move_src_idx) {
+            ASSERT(N_INVALID == get_mark(h->b->head[old_idx]));
+
+            if (0 != get_next(h->b->head[old_idx]))
+                h->b->head[old_idx] = make_link(0, N_INVALID);
+        }
+    }
+
+    /* todo comment join node vs reset joinee head */
+    rcu_synchronize();
+
+    size_t new_bucket_cnt = (1 << h->new_b->order);
+
+    /* Clear the JOIN mark and GC any deleted join nodes. */
+    for (size_t new_idx = 0; new_idx < new_bucket_cnt; ++new_idx) {
+        cleanup_join_node(h, &h->new_b->head[new_idx]);
+    }
+
+    /* Wait for everyone to see that the table is clear of any resize marks. */
+    rcu_synchronize();
+
+    cht_buckets_t *old_b = h->b;
+    rcu_assign(h->b, h->new_b);
+
+    /* Wait for everyone to start using the new table. */
+    rcu_synchronize();
+
+    free(old_b);
+
+    /* Not needed; just for increased readability. */
+    h->new_b = 0;
+}
+
+static void cleanup_join_node(cht_t *h, marked_ptr_t *new_head)
+{
+    rcu_read_lock();
+
+    cht_link_t *cur = get_next(*new_head);
+
+    while (cur) {
+        /* Clear the join node's JN mark - even if it is marked as deleted. */
+        if (N_JOIN & get_mark(cur->link)) {
+            clear_join_and_gc(h, cur, new_head);
+            break;
+        }
+
+        cur = get_next(cur->link);
+    }
+
+    rcu_read_unlock();
+}
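Aside: clear_join_and_gc() below clears the JOIN bit with a retry loop because
the node's link can change between the load and the CAS (for instance when a
concurrent insert lands right after the join node). A stand-alone rendition of
that retry shape using C11 atomics; the kernel's _cas_link() is assumed to
behave like a compare-and-swap returning the previous value, and the N_JOIN bit
value is a placeholder:

    #include <stdatomic.h>
    #include <stdint.h>

    #define N_JOIN ((uintptr_t)0x2)  /* placeholder bit value */

    static void clear_join_mark(_Atomic uintptr_t *link)
    {
        uintptr_t old = atomic_load(link);

        /*
         * Keep every other bit (next pointer, DELETED) intact; retry if a
         * concurrent update changed the link between the load and the CAS.
         */
        while (!atomic_compare_exchange_weak(link, &old, old & ~N_JOIN)) {
            /* `old` now holds the freshly observed value; just retry. */
        }
    }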
+
+static void clear_join_and_gc(cht_t *h, cht_link_t *join_node,
+    marked_ptr_t *new_head)
+{
+    ASSERT(join_node && (N_JOIN & get_mark(join_node->link)));
+
+    bool done;
+
+    /* Clear the JN mark. */
+    do {
+        marked_ptr_t jn_link = join_node->link;
+        cht_link_t *next = get_next(jn_link);
+        /* Clear the JOIN mark but keep the DEL mark if present. */
+        mark_t cleared_mark = get_mark(jn_link) & N_DELETED;
+
+        marked_ptr_t ret =
+            _cas_link(&join_node->link, jn_link, make_link(next, cleared_mark));
+
+        /* Done if the mark was cleared. Retry if a new node was inserted. */
+        done = (ret == jn_link);
+    } while (!done);
+
+    if (!(N_DELETED & get_mark(join_node->link)))
+        return;
+
+    /* The join node had been marked as deleted - GC it. */
+
+    size_t jn_hash = node_hash(h, join_node);
+    do {
+        bool resizing;
+
+        wnd_t wnd = {
+            .ppred = new_head,
+            .cur = get_next(*new_head)
+        };
+
+        done = find_wnd_and_gc_pred(h, jn_hash, WM_NORMAL, same_node_pred,
+            join_node, &wnd, &resizing);
+
+        ASSERT(!resizing);
+    } while (!done);
+}
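Aside: the wnd_t "window" that keeps reappearing, here and in cht_insert()
earlier, is the standard traversal cursor of lock-free list algorithms: ppred
points at the predecessor's link *field* rather than at the predecessor node,
so the same CAS works whether the predecessor is a bucket head or an interior
node. Its shape, reconstructed from the initializers in this patch (the field
types are inferred, not quoted from the header):

    #include <stdint.h>

    typedef uintptr_t marked_ptr_t;        /* next pointer + mark bits */
    typedef struct cht_link cht_link_t;    /* opaque here */

    typedef struct wnd {
        marked_ptr_t *ppred;  /* address of the link pointing at cur */
        cht_link_t *cur;      /* node under inspection; NULL at bucket end */
        cht_link_t *last;     /* last node visited, used by some walks */
    } wnd_t;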
+
+static void cleanup_join_follows(cht_t *h, marked_ptr_t *new_head)
+{
+    ASSERT(new_head);
+
+    rcu_read_lock();
+
+    wnd_t wnd = {
+        .ppred = 0,
+        .cur = 0
+    };
+    marked_ptr_t *cur_link = new_head;
+
+    /*
+     * Find the non-deleted node with a JF mark and clear the JF mark.
+     * The JF node may be deleted and/or the mark moved to its neighbors
+     * at any time. Therefore, we GC deleted nodes until we find the JF
+     * node in order to remove stale/deleted JF nodes left behind eg by
+     * delayed threads that did not yet get a chance to unlink the deleted
+     * JF node and move its mark.
+     *
+     * Note that the head may be marked JF (but never DELETED).
+     */
+    while (true) {
+        bool is_jf_node = N_JOIN_FOLLOWS & get_mark(*cur_link);
+
+        /* GC any deleted nodes on the way - even deleted JOIN_FOLLOWS. */
+        if (N_DELETED & get_mark(*cur_link)) {
+            ASSERT(cur_link != new_head);
+            ASSERT(wnd.ppred && wnd.cur);
+            ASSERT(cur_link == &wnd.cur->link);
+
+            bool dummy;
+            bool deleted = gc_deleted_node(h, WM_MOVE_JOIN_FOLLOWS, &wnd, &dummy);
+
+            /* Failed to GC or collected a deleted JOIN_FOLLOWS. */
+            if (!deleted || is_jf_node) {
+                /* Retry from the head of the bucket. */
+                cur_link = new_head;
+                continue;
+            }
+        } else {
+            /* Found a non-deleted JF. Clear its JF mark. */
+            if (is_jf_node) {
+                cht_link_t *next = get_next(*cur_link);
+                marked_ptr_t ret
+                    = cas_link(cur_link, next, N_JOIN_FOLLOWS, 0, N_NORMAL);
+
+                /* Successfully cleared the JF mark of a non-deleted node. */
+                if (ret == make_link(next, N_JOIN_FOLLOWS)) {
+                    break;
+                } else {
+                    /*
+                     * The JF node had been deleted or a new node inserted
+                     * right after it. Retry from the head.
+                     */
+                    cur_link = new_head;
+                    continue;
+                }
+            } else {
+                wnd.ppred = cur_link;
+                wnd.cur = get_next(*cur_link);
+            }
+        }
+
+        /* We must encounter a JF node before we reach the end of the bucket. */
+        ASSERT(wnd.cur);
+        cur_link = &wnd.cur->link;
+    }
+
+    rcu_read_unlock();
+}
+
+
+static size_t calc_split_hash(size_t split_idx, size_t order)
+{
+    ASSERT(1 <= order && order <= 8 * sizeof(size_t));
+    return split_idx << (8 * sizeof(size_t) - order);
+}
+
+static size_t calc_bucket_idx(size_t hash, size_t order)
+{
+    ASSERT(1 <= order && order <= 8 * sizeof(size_t));
+    return hash >> (8 * sizeof(size_t) - order);
+}
+
+static size_t grow_to_split_idx(size_t old_idx)
+{
+    return grow_idx(old_idx) | 1;
+}
+
+static size_t grow_idx(size_t idx)
+{
+    return idx << 1;
+}
+
+static size_t shrink_idx(size_t idx)
+{
+    return idx >> 1;
 }
…


-static marked_ptr_t make_link(cht_link_t *next, mark_t mark)
+static marked_ptr_t make_link(const cht_link_t *next, mark_t mark)
 {
     marked_ptr_t ptr = (marked_ptr_t) next;
…
 }

-static marked_ptr_t cas_link(marked_ptr_t *link, cht_link_t *cur_next,
-    mark_t cur_mark, cht_link_t *new_next, mark_t new_mark)
+static marked_ptr_t cas_link(marked_ptr_t *link, const cht_link_t *cur_next,
+    mark_t cur_mark, const cht_link_t *new_next, mark_t new_mark)
 {
     return _cas_link(link, make_link(cur_next, cur_mark),
…
     marked_ptr_t new)
 {
+    /*
+     * cas(x) on the same location x on one cpu must be ordered, but do not
+     * have to be ordered wrt to other cas(y) to a different location y
+     * on the same cpu.
+     *
+     * cas(x) must act as a write barrier on x, ie if cas(x) succeeds
+     * and is observed by another cpu, then all cpus must be able to
+     * make the effects of cas(x) visible just by issuing a load barrier.
+     * For example:
+     *
+     * cpu1             cpu2             cpu3
+     *                                   cas(x, 0 -> 1), succeeds
+     *                  cas(x, 0 -> 1), fails
+     *                  MB
+     *                  y = 7
+     * sees y == 7
+     *
+     * loadMB must be enough to make cas(x) on cpu3 visible to cpu1, ie x == 1.
+     *
+     * If cas() did not work this way:
+     * - our head move protocol would not be correct.
+     * - freeing an item linked to a moved head after another item was
+     *   inserted in front of it, would require more than one grace period.
+     */
 }

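Aside: this changeset adds only the ordering contract of _cas_link() as a
comment; the function body shown here is empty. Under that contract a
sequentially consistent compare-exchange is sufficient (though possibly
stronger than strictly required). A plausible stand-alone shape, purely for
illustration and not the commit's implementation:

    #include <stdatomic.h>
    #include <stdint.h>

    typedef uintptr_t marked_ptr_t;

    static marked_ptr_t _cas_link(_Atomic marked_ptr_t *link, marked_ptr_t cur,
        marked_ptr_t new)
    {
        /* Returns the previous value: cur on success, the observed value on failure. */
        marked_ptr_t expected = cur;
        atomic_compare_exchange_strong(link, &expected, new);
        return expected;
    }

Returning the previously observed value rather than a bool is what lets the
call sites above both test for success (ret == expected old link) and inspect
the reason for failure, such as an unexpected JOIN or JOIN_FOLLOWS mark, in a
single operation.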