Changeset a35b458 in mainline for kernel/generic/src/synch/rcu.c
- Timestamp:
- 2018-03-02T20:10:49Z
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- f1380b7
- Parents:
- 3061bc1
- git-author:
- Jiří Zárevúcky <zarevucky.jiri@…> (2018-02-28 17:38:31)
- git-committer:
- Jiří Zárevúcky <zarevucky.jiri@…> (2018-03-02 20:10:49)
- File:
- kernel/generic/src/synch/rcu.c (1 edited)
kernel/generic/src/synch/rcu.c
r3061bc1 ra35b458

[Flattened diff of kernel/generic/src/synch/rcu.c; no textual code changes are visible in the hunks (whitespace-only edits). The context spans the whole file:]
- the license header and the @addtogroup sync block
- the global rcu state: completed_gp, preempt_lock, the cur_preempted and next_preempted lists, preempt_blocking_det, the RCU_PREEMPT_A and RCU_PREEMPT_PODZIMEK variant fields (remaining_readers, req_gp_changed), barrier_mtx, barrier_wq, detector_thr, and the statistics counters
- initialization of the global, per-cpu, and per-thread RCU data; startup and shutdown of the reclaimer ("rcu-rec/%u") and detector ("rcu-det") threads
- the reader primitives: rcu_read_locked and the unlock slow paths that signal remaining_readers and call rm_preempted_reader
- rcu_barrier() and its barrier_wait_cnt / add_barrier_cb / barrier_wq machinery; callback queuing onto arriving_cbs with the arrived_flag notification
- the reclaimer loop: wait_for_pending_cbs, exec_completed_cbs, exec_cbs, advance_cbs, wait_for_cur_cbs_gp_end, cv_wait_for_gp
- the detector loop and quiescent-state tracking: wait_for_detect_req, start_new_gp, end_cur_gp, gp_sleep, sample_cpus, rm_quiescent_cpus, interrupt_delaying_cpus, wait_for_delaying_cpus, wait_for_preempt_reader
- the scheduler hooks that save and restore a preempted thread's reader nesting count and note preempted readers
- statistics updates and the statistics printout (upd_missed_gp_in_wait, upd_max_cbs_in_slice, per-cpu callback counters)