Changeset e3306d04 in mainline
- Timestamp: 2018-09-07T15:54:32Z (6 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 5f1d850
- Parents: 036e97c
- Location: kernel
- Files: 19 edited
Legend:
- Unmodified context lines carry no marker
- Added lines are marked "+"
- Removed lines are marked "-"
- "…" marks unchanged code elided between hunks
kernel/arch/mips32/src/debugger.c
      * so this is a good idea
      */
-    atomic_set(&haltstate, 1);
+    atomic_store(&haltstate, 1);
     irq_spinlock_unlock(&bkpoint_lock, false);
…
     irq_spinlock_lock(&bkpoint_lock, false);
-    atomic_set(&haltstate, 0);
+    atomic_store(&haltstate, 0);
 #endif
 }
kernel/arch/sparc64/src/smp/sun4v/smp.c
     exec_units[i].exec_unit_id = exec_unit_id;
     exec_units[i].strand_count = 0;
-    atomic_set(&(exec_units[i].nrdy), 0);
+    atomic_store(&(exec_units[i].nrdy), 0);
     spinlock_initialize(&(exec_units[i].proposed_nrdy_lock), "exec_units[].proposed_nrdy_lock");
     exec_unit_count++;
…
     exec_units[0].exec_unit_id = 1;
     spinlock_initialize(&(exec_units[0].proposed_nrdy_lock), "exec_units[0].proposed_nrdy_lock");
-    atomic_set(&(exec_units[0].nrdy), 0);
+    atomic_store(&(exec_units[0].nrdy), 0);
     max_core_strands = cpu_count;
kernel/generic/include/atomic.h
 typedef atomic_size_t atomic_t;

-static inline void atomic_set(atomic_t *val, atomic_count_t i)
-{
-    atomic_store(val, i);
-}
-
 static inline size_t atomic_predec(atomic_t *val)
 {
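As the atomic.h hunk above shows, the removed atomic_set() wrapper merely forwarded to the C11-style atomic_store(), so the rest of the changeset is a mechanical rename at every call site. A minimal sketch of the pattern, assuming a plain C11 <stdatomic.h> environment instead of the kernel headers (the haltstate flag and the before/after helpers are illustrative only, and size_t stands in for the kernel's atomic_count_t):

    #include <stdatomic.h>
    #include <stddef.h>

    /* The kernel header aliases atomic_t to atomic_size_t (see the hunk above). */
    typedef atomic_size_t atomic_t;

    static atomic_t haltstate;

    /* The wrapper deleted by this changeset, reproduced from the hunk above. */
    static inline void atomic_set(atomic_t *val, size_t i)
    {
        atomic_store(val, i);
    }

    void before(void) { atomic_set(&haltstate, 1); }    /* old call site */
    void after(void)  { atomic_store(&haltstate, 1); }  /* new call site */

Dropping the wrapper leaves one fewer kernel-specific name; call sites read the same as standard C11 code.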
kernel/generic/src/adt/cht.c
     h->new_b = NULL;
     h->op = op;
-    atomic_set(&h->item_cnt, 0);
-    atomic_set(&h->resize_reqs, 0);
+    atomic_store(&h->item_cnt, 0);
+    atomic_store(&h->resize_reqs, 0);

     if (NULL == op->remove_callback) {
kernel/generic/src/cap/cap.c
     kobject_ops_t *ops)
 {
-    atomic_set(&kobj->refcnt, 1);
+    atomic_store(&kobj->refcnt, 1);
     kobj->type = type;
     kobj->raw = raw;
kernel/generic/src/console/console.c
     event_set_unmask_callback(EVENT_KIO, kio_update);
-    atomic_set(&kio_inited, true);
+    atomic_store(&kio_inited, true);
 }
kernel/generic/src/ipc/ipc.c
     list_initialize(&box->answers);
     list_initialize(&box->irq_notifs);
-    atomic_set(&box->active_calls, 0);
+    atomic_store(&box->active_calls, 0);
     box->task = task;
 }
…
     phone->callee = NULL;
     phone->state = IPC_PHONE_FREE;
-    atomic_set(&phone->active_calls, 0);
+    atomic_store(&phone->active_calls, 0);
     phone->kobject = NULL;
 }
kernel/generic/src/lib/halt.c
     if (!atomic_load(&haltstate)) {
-        atomic_set(&haltstate, 1);
+        atomic_store(&haltstate, 1);
         rundebugger = true;
     }
 #else
-    atomic_set(&haltstate, 1);
+    atomic_store(&haltstate, 1);
 #endif
kernel/generic/src/log/log.c
 {
     event_set_unmask_callback(EVENT_KLOG, log_update);
-    atomic_set(&log_inited, true);
+    atomic_store(&log_inited, true);
 }
kernel/generic/src/proc/task.c
         return rc;

-    atomic_set(&task->refcount, 0);
-    atomic_set(&task->lifecount, 0);
+    atomic_store(&task->refcount, 0);
+    atomic_store(&task->lifecount, 0);

     irq_spinlock_initialize(&task->lock, "task_t_lock");
kernel/generic/src/proc/thread.c
     THREAD = NULL;

-    atomic_set(&nrdy, 0);
+    atomic_store(&nrdy, 0);
     thread_cache = slab_cache_create("thread_t", sizeof(thread_t), 0,
         thr_constructor, thr_destructor, 0);
kernel/generic/src/smp/smp_call.c
      * messing up the preemption count).
      */
-    atomic_set(&call_info->pending, 1);
+    atomic_store(&call_info->pending, 1);

     /* Let initialization complete before continuing. */
…
      */
     memory_barrier();
-    atomic_set(&call_info->pending, 0);
+    atomic_store(&call_info->pending, 0);
 }
kernel/generic/src/synch/rcu.c
     mutex_initialize(&rcu.barrier_mtx, MUTEX_PASSIVE);
-    atomic_set(&rcu.barrier_wait_cnt, 0);
+    atomic_store(&rcu.barrier_wait_cnt, 0);
     waitq_initialize(&rcu.barrier_wq);
…
     rcu.req_gp_end_cnt = 0;
     rcu.req_expedited_cnt = 0;
-    atomic_set(&rcu.delaying_cpu_cnt, 0);
+    atomic_store(&rcu.delaying_cpu_cnt, 0);
 #endif
…
      * enqueued barrier callbacks start signaling completion.
      */
-    atomic_set(&rcu.barrier_wait_cnt, 1);
+    atomic_store(&rcu.barrier_wait_cnt, 1);

     DEFINE_CPU_MASK(cpu_mask);
…
 static void interrupt_delaying_cpus(cpu_mask_t *cpu_mask)
 {
-    atomic_set(&rcu.delaying_cpu_cnt, 0);
+    atomic_store(&rcu.delaying_cpu_cnt, 0);

     sample_cpus(cpu_mask, NULL);
kernel/test/atomic/atomic1.c
     atomic_t a;

-    atomic_set(&a, 10);
+    atomic_store(&a, 10);
     if (atomic_load(&a) != 10)
-        return "Failed atomic_set()/atomic_load()";
+        return "Failed atomic_store()/atomic_load()";

     if (atomic_postinc(&a) != 10)
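The updated test above exercises the store/load/post-increment trio. The same sequence can be checked outside the kernel with standard C11 atomics; in the sketch below, atomic_fetch_add() stands in for the kernel's atomic_postinc(), an assumed equivalence based on the test expecting the pre-increment value back:

    #include <assert.h>
    #include <stdatomic.h>
    #include <stddef.h>

    int main(void)
    {
        atomic_size_t a;

        atomic_store(&a, 10);            /* same call the test now uses */
        assert(atomic_load(&a) == 10);

        /* Kernel atomic_postinc() returns the old value; atomic_fetch_add()
         * is the standard C11 counterpart (assumed mapping). */
        size_t prev = atomic_fetch_add(&a, 1);
        assert(prev == 10);
        assert(atomic_load(&a) == 11);

        return 0;
    }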
kernel/test/mm/falloc2.c
 const char *test_falloc2(void)
 {
-    atomic_set(&thread_count, THREADS);
-    atomic_set(&thread_fail, 0);
+    atomic_store(&thread_count, THREADS);
+    atomic_store(&thread_fail, 0);

     for (unsigned int i = 0; i < THREADS; i++) {
kernel/test/synch/rcu1.c
 static bool do_nop_callbacks(void)
 {
-    atomic_set(&nop_callbacks_cnt, 0);
+    atomic_store(&nop_callbacks_cnt, 0);

     size_t exp_cnt = nop_updater_iters * get_thread_cnt();
…
     seq_test_result = EOK;
     max_upd_done_time = 0;
-    atomic_set(&cur_time, 1);
+    atomic_store(&cur_time, 1);

     const size_t iters = 100;
…
 {
     barrier_t *b = member_to_inst(item, barrier_t, rcu_item);
-    atomic_set(&b->done, 1);
+    atomic_store(&b->done, 1);
…
     }

-    atomic_set(&barrier->done, 0);
+    atomic_store(&barrier->done, 0);

     rcu_call(&barrier->rcu_item, barrier_callback);
kernel/test/synch/semaphore1.c
     thread_t *thrd;

-    atomic_set(&items_produced, 0);
-    atomic_set(&items_consumed, 0);
+    atomic_store(&items_produced, 0);
+    atomic_store(&items_consumed, 0);

     consumers = i * CONSUMERS;
kernel/test/synch/workq-test-core.h
 {
     for (int i = 0; i < WAVES; ++i) {
-        atomic_set(&call_cnt[i], 0);
+        atomic_store(&call_cnt[i], 0);
     }
kernel/test/thread/thread1.c
     atomic_count_t total = 0;

-    atomic_set(&finish, 1);
-    atomic_set(&threads_finished, 0);
+    atomic_store(&finish, 1);
+    atomic_store(&threads_finished, 0);

     for (i = 0; i < THREADS; i++) {
…
     thread_sleep(10);

-    atomic_set(&finish, 0);
+    atomic_store(&finish, 0);
     while (atomic_load(&threads_finished) < total) {
         TPRINTF("Threads left: %zu\n", total - atomic_load(&threads_finished));