Changeset efed95a3 in mainline
- Timestamp:
- 2024-01-20T17:09:00Z (12 months ago)
- Branches:
- master
- Children:
- 3d84734
- Parents:
- 286da52
- git-author:
- Jiří Zárevúcky <zarevucky.jiri@…> (2024-01-20 16:12:46)
- git-committer:
- Jiří Zárevúcky <zarevucky.jiri@…> (2024-01-20 17:09:00)
- Location:
- kernel/generic
- Files:
-
- 5 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/generic/include/atomic.h
--- kernel/generic/include/atomic.h (r286da52)
+++ kernel/generic/include/atomic.h (refed95a3)
@@ -39,4 +39,14 @@
 #include <typedefs.h>
 #include <stdatomic.h>
 
+/*
+ * Shorthand for relaxed atomic read/write, something that's needed to formally
+ * avoid undefined behavior in cases where we need to read a variable in
+ * different threads and we don't particularly care about ordering
+ * (e.g. statistic printouts). This is most likely translated into the same
+ * assembly instructions as regular read/writes.
+ */
+#define atomic_set_unordered(var, val) atomic_store_explicit((var), (val), memory_order_relaxed)
+#define atomic_get_unordered(var) atomic_load_explicit((var), memory_order_relaxed)
+
 #define atomic_predec(val) \
kernel/generic/include/proc/thread.h
--- kernel/generic/include/proc/thread.h (r286da52)
+++ kernel/generic/include/proc/thread.h (refed95a3)
@@ -138,5 +138,5 @@
 
 	/** Thread CPU. */
-	cpu_t *cpu;
+	_Atomic(cpu_t *) cpu;
 	/** Containing task. */
 	task_t *task;
kernel/generic/src/proc/scheduler.c
--- kernel/generic/src/proc/scheduler.c (r286da52)
+++ kernel/generic/src/proc/scheduler.c (refed95a3)
@@ -311,5 +311,5 @@
 
 	irq_spinlock_lock(&THREAD->lock, false);
-	assert(THREAD->cpu == CPU);
+	assert(atomic_get_unordered(&THREAD->cpu) == CPU);
 
 	THREAD->state = Running;
@@ -387,5 +387,5 @@
 
 	assert(thread->state == Running);
-	assert(thread->cpu == CPU);
+	assert(atomic_get_unordered(&thread->cpu) == CPU);
 
 	int i = (thread->priority < RQ_COUNT - 1) ?
@@ -411,8 +411,10 @@
 
 	/* Prefer the CPU on which the thread ran last */
-	if (!thread->cpu)
-		thread->cpu = CPU;
-
-	cpu_t *cpu = thread->cpu;
+	cpu_t *cpu = atomic_get_unordered(&thread->cpu);
+
+	if (!cpu) {
+		cpu = CPU;
+		atomic_set_unordered(&thread->cpu, CPU);
+	}
 
 	irq_spinlock_unlock(&thread->lock, false);
@@ -656,5 +658,5 @@
 
 	thread->stolen = true;
-	thread->cpu = CPU;
+	atomic_set_unordered(&thread->cpu, CPU);
 
 	irq_spinlock_unlock(&thread->lock, false);
kernel/generic/src/proc/thread.c
--- kernel/generic/src/proc/thread.c (r286da52)
+++ kernel/generic/src/proc/thread.c (refed95a3)
@@ -198,5 +198,5 @@
 {
 	irq_spinlock_lock(&thread->lock, true);
-	thread->cpu = cpu;
+	atomic_set_unordered(&thread->cpu, cpu);
 	thread->nomigrate++;
 	irq_spinlock_unlock(&thread->lock, true);
@@ -263,5 +263,5 @@
 	    ((flags & THREAD_FLAG_UNCOUNTED) == THREAD_FLAG_UNCOUNTED);
 	thread->priority = -1;          /* Start in rq[0] */
-	thread->cpu = NULL;
+	atomic_init(&thread->cpu, NULL);
 	thread->stolen = false;
 	thread->uspace =
@@ -343 +343 @@
 	/* Clear cpu->fpu_owner if set to this thread. */
 #ifdef CONFIG_FPU_LAZY
-	if (thread->cpu) {
+	cpu_t *cpu = atomic_get_unordered(&thread->cpu);
+	if (cpu) {
 		/*
 		 * We need to lock for this because the old CPU can concurrently try
 		 * ... (one comment line elided in the rendered changeset) ...
 		 * it to finish. An atomic compare-and-swap wouldn't be enough.
 		 */
-		irq_spinlock_lock(&thread->cpu->fpu_lock, false);
-
-		thread_t *owner = atomic_load_explicit(&thread->cpu->fpu_owner,
-		    memory_order_relaxed);
-
-		if (owner == thread) {
-			atomic_store_explicit(&thread->cpu->fpu_owner, NULL,
-			    memory_order_relaxed);
-		}
-
-		irq_spinlock_unlock(&thread->cpu->fpu_lock, false);
+		irq_spinlock_lock(&cpu->fpu_lock, false);
+
+		if (atomic_get_unordered(&cpu->fpu_owner) == thread)
+			atomic_set_unordered(&cpu->fpu_owner, NULL);
+
+		irq_spinlock_unlock(&cpu->fpu_lock, false);
 	}
 #endif
@@ -707,6 +703,7 @@
 
 	if (additional) {
-		if (thread->cpu)
-			printf("%-5u", thread->cpu->id);
+		cpu_t *cpu = atomic_get_unordered(&thread->cpu);
+		if (cpu)
+			printf("%-5u", cpu->id);
 		else
 			printf("none ");
kernel/generic/src/sysinfo/stats.c
--- kernel/generic/src/sysinfo/stats.c (r286da52)
+++ kernel/generic/src/sysinfo/stats.c (refed95a3)
@@ -308,7 +308,9 @@
 	stats_thread->kcycles = thread->kcycles;
 
-	if (thread->cpu != NULL) {
+	cpu_t *cpu = atomic_get_unordered(&thread->cpu);
+
+	if (cpu != NULL) {
 		stats_thread->on_cpu = true;
-		stats_thread->cpu = thread->cpu->id;
+		stats_thread->cpu = cpu->id;
 	} else
 		stats_thread->on_cpu = false;
Note:
See TracChangeset
for help on using the changeset viewer.