Changeset dfa4be62 in mainline
- Timestamp: 2024-01-21T16:23:19Z (10 months ago)
- Branches: master
- Children: d23712e
- Parents: a3d87b9
- git-author: Jiří Zárevúcky <zarevucky.jiri@…> (2023-03-28 17:40:43)
- git-committer: Jiří Zárevúcky <zarevucky.jiri@…> (2024-01-21 16:23:19)
- Location: kernel/generic
- Files: 5 edited
Legend:
- Lines prefixed with a space are unmodified.
- Lines prefixed with "-" were removed.
- Lines prefixed with "+" were added.
- Each hunk header "@@ -OLD +NEW @@" gives the line where the excerpt starts in the parent (a3d87b9) and in this changeset (dfa4be62).
kernel/generic/include/proc/thread.h
@@ -99 +99 @@
     atomic_time_stat_t kcycles;
 
-    /** Lock protecting thread structure.
-     *
-     * Protects the whole thread structure except fields listed above.
-     */
-    IRQ_SPINLOCK_DECLARE(lock);
-
     /** Architecture-specific data. */
     thread_arch_t arch;
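The removed member is the coarse lock that formerly serialized access to most of thread_t. The rest of this changeset drops its remaining users: fields such as state and cpu are touched through the unordered atomic helpers that already appear in the unchanged context lines of the hunks below, and short read-modify-write sections are bracketed by disabling interrupts instead. A hedged sketch of the resulting access pattern; the helper function itself is illustrative and not part of the commit:

    /* Illustrative only: how thread fields are accessed once no per-thread
     * spinlock exists. All accessors used here appear in this changeset. */
    static void mark_ready_on_current_cpu(thread_t *thread)
    {
        ipl_t ipl = interrupts_disable();   /* keep the update CPU-local */

        atomic_set_unordered(&thread->cpu, CPU);
        atomic_set_unordered(&thread->state, Ready);

        interrupts_restore(ipl);
    }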
kernel/generic/src/proc/scheduler.c
@@ -310 +310 @@
     switch_task(THREAD->task);
 
-    irq_spinlock_lock(&THREAD->lock, false);
     assert(atomic_get_unordered(&THREAD->cpu) == CPU);

@@ -364 +363 @@
     /* Save current CPU cycle */
     THREAD->last_cycle = get_cycle();
-
-    irq_spinlock_unlock(&THREAD->lock, false);
 }

@@ -386 +383 @@
 static void thread_requeue_preempted(thread_t *thread)
 {
-    irq_spinlock_lock(&thread->lock, false);
-
     assert(atomic_get_unordered(&thread->state) == Running);
     assert(atomic_get_unordered(&thread->cpu) == CPU);

@@ -400 +395 @@
     atomic_set_unordered(&thread->state, Ready);
 
-    irq_spinlock_unlock(&thread->lock, false);
-
     add_to_rq(thread, CPU, prio);
 }

@@ -408 +401 @@
 {
     ipl_t ipl = interrupts_disable();
-
-    irq_spinlock_lock(&thread->lock, false);
 
     assert(atomic_get_unordered(&thread->state) == Sleeping || atomic_get_unordered(&thread->state) == Entering);

@@ -423 +414 @@
         atomic_set_unordered(&thread->cpu, CPU);
     }
-
-    irq_spinlock_unlock(&thread->lock, false);
 
     add_to_rq(thread, cpu, 0);

@@ -500 +489 @@
     }
 
-    irq_spinlock_lock(&THREAD->lock, false);
-
     atomic_set_unordered(&THREAD->state, new_state);

@@ -514 +501 @@
      */
     after_thread_ran_arch();
-
-    irq_spinlock_unlock(&THREAD->lock, false);
 
     CPU_LOCAL->exiting_state = new_state;

@@ -650 +635 @@
     list_foreach_rev(old_rq->rq, rq_link, thread_t, thread) {
 
-        irq_spinlock_lock(&thread->lock, false);
-
         /*
          * Do not steal CPU-wired threads, threads

@@ -658 +641 @@
          * FPU context is still in the CPU.
          */
-        if (thread->stolen || thread->nomigrate ||
-            thread == fpu_owner) {
-            irq_spinlock_unlock(&thread->lock, false);
+        if (thread->stolen || thread->nomigrate || thread == fpu_owner) {
             continue;
         }

@@ -666 +647 @@
         thread->stolen = true;
         atomic_set_unordered(&thread->cpu, CPU);
-
-        irq_spinlock_unlock(&thread->lock, false);
 
         /*
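Every hunk above follows the same shape: the surrounding code already runs with interrupts disabled (the false argument of the former irq_spinlock_lock calls meant the interrupt state was left untouched), so dropping the per-thread lock leaves only the assertions, the unordered atomic updates, and the run-queue insertion. A condensed, illustrative stand-in for that requeue pattern; the real functions contain additional logic elided by the "…" regions above, and the function name and prio type here are made up for the example:

    /* Illustrative stand-in for the post-change requeue path: no per-thread
     * lock, just assertions on unordered atomics and the run-queue insert. */
    static void requeue_running_thread(thread_t *thread, int prio)
    {
        assert(atomic_get_unordered(&thread->state) == Running);
        assert(atomic_get_unordered(&thread->cpu) == CPU);

        atomic_set_unordered(&thread->state, Ready);
        add_to_rq(thread, CPU, prio);
    }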
kernel/generic/src/proc/thread.c
@@ -115 +115 @@
     thread_t *thread = (thread_t *) obj;
 
-    irq_spinlock_initialize(&thread->lock, "thread_t_lock");
     link_initialize(&thread->rq_link);
     link_initialize(&thread->wq_link);

@@ -197 +196 @@
 void thread_wire(thread_t *thread, cpu_t *cpu)
 {
-    irq_spinlock_lock(&thread->lock, true);
+    ipl_t ipl = interrupts_disable();
     atomic_set_unordered(&thread->cpu, cpu);
     thread->nomigrate++;
-    irq_spinlock_unlock(&thread->lock, true);
+    interrupts_restore(ipl);
 }

@@ -579 +578 @@
 void thread_migration_disable(void)
 {
+    ipl_t ipl = interrupts_disable();
+
     assert(THREAD);
 
     THREAD->nomigrate++;
+
+    interrupts_restore(ipl);
 }

@@ -587 +589 @@
 void thread_migration_enable(void)
 {
+    ipl_t ipl = interrupts_disable();
+
     assert(THREAD);
     assert(THREAD->nomigrate > 0);

@@ -592 +596 @@
     if (THREAD->nomigrate > 0)
         THREAD->nomigrate--;
+
+    interrupts_restore(ipl);
 }
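In thread.c the former uses of the lock with the true flag (take the lock and disable interrupts) become explicit interrupts_disable()/interrupts_restore() pairs around the nomigrate and cpu updates. A usage sketch of the modified migration API; the caller and its CPU-local helper are hypothetical, only thread_migration_disable()/enable() and CPU_LOCAL come from the changeset:

    /* Hypothetical caller: pin the current thread to its CPU while it works
     * with per-CPU data. update_cpu_local_stats() is made up for the example. */
    static void sample_cpu_local_stats(void)
    {
        thread_migration_disable();

        /* The scheduler will not migrate this thread while nomigrate > 0. */
        update_cpu_local_stats(CPU_LOCAL);

        thread_migration_enable();
    }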
kernel/generic/src/sysinfo/stats.c
@@ -362 +362 @@
     thread_t *thread = thread_first();
     while (thread != NULL) {
-        /* Interrupts are already disabled */
-        irq_spinlock_lock(&thread->lock, false);
-
         /* Record the statistics and increment the index */
         produce_stats_thread(thread, &stats_threads[i]);
         i++;
-
-        irq_spinlock_unlock(&thread->lock, false);
 
         thread = thread_next(thread);

@@ -625 +620 @@
     ret.data.size = sizeof(stats_thread_t);
 
-    /*
-     * Replaced hand-over-hand locking with regular nested sections
-     * to avoid weak reference leak issues.
-     */
-    irq_spinlock_lock(&thread->lock, false);
     produce_stats_thread(thread, stats_thread);
-    irq_spinlock_unlock(&thread->lock, false);
 
     irq_spinlock_unlock(&threads_lock, true);
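With the per-thread lock gone, the statistics walk relies only on threads_lock (still released with interrupts re-enabled at the tail of the second hunk) to keep the thread list stable while produce_stats_thread() reads each entry. The resulting loop, pieced together from the unchanged lines above as a sketch; threads_lock is assumed to be held by the caller, as in the original:

    /* Sketch of the post-change iteration; names follow the hunk above. */
    size_t i = 0;
    thread_t *thread = thread_first();
    while (thread != NULL) {
        /* Record the statistics and increment the index */
        produce_stats_thread(thread, &stats_threads[i]);
        i++;

        thread = thread_next(thread);
    }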
kernel/generic/src/udebug/udebug_ops.c
@@ -90 +90 @@
     }
 
-    irq_spinlock_lock(&thread->lock, true);
-
     /* Verify that 'thread' is a userspace thread. */
     if (!thread->uspace) {
-        /* It's not, deny its existence */
-        irq_spinlock_unlock(&thread->lock, true);
         mutex_unlock(&TASK->udebug.lock);
         return ENOENT;
     }
-
-    /* Verify debugging state. */
-    if (thread->udebug.active != true) {
-        /* Not in debugging session or undesired GO state */
-        irq_spinlock_unlock(&thread->lock, true);
-        mutex_unlock(&TASK->udebug.lock);
-        return ENOENT;
-    }
-
-    /* Now verify that the thread belongs to the current task. */
-    if (thread->task != TASK) {
-        /* No such thread belonging this task */
-        irq_spinlock_unlock(&thread->lock, true);
-        mutex_unlock(&TASK->udebug.lock);
-        return ENOENT;
-    }
-
-    irq_spinlock_unlock(&thread->lock, true);
-
-    /* Only mutex TASK->udebug.lock left. */
 
     /*

@@ -126 +102 @@
      */
     mutex_lock(&thread->udebug.lock);
+
+    /* Verify debugging state. */
+    if (thread->udebug.active != true) {
+        /* Not in debugging session or undesired GO state */
+        mutex_unlock(&thread->udebug.lock);
+        mutex_unlock(&TASK->udebug.lock);
+        return ENOENT;
+    }
+
+    /* Now verify that the thread belongs to the current task. */
+    if (thread->task != TASK) {
+        /* No such thread belonging this task */
+        mutex_unlock(&thread->udebug.lock);
+        mutex_unlock(&TASK->udebug.lock);
+        return ENOENT;
+    }
 
     /* The big task mutex is no longer needed. */

@@ -388 +380 @@
     /* FIXME: make sure the thread isn't past debug shutdown... */
     list_foreach(TASK->threads, th_link, thread_t, thread) {
-        irq_spinlock_lock(&thread->lock, false);
         bool uspace = thread->uspace;
-        irq_spinlock_unlock(&thread->lock, false);
 
         /* Not interested in kernel threads. */
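The verification sequence keeps the same three checks but changes which lock covers them: the userspace test still runs under TASK->udebug.lock alone, while the debugging-state and task-membership tests move after mutex_lock(&thread->udebug.lock) instead of running under the removed thread spinlock. A condensed view of that ordering, using only identifiers visible in the hunks; the enclosing function and the rest of its body are elided:

    /* Condensed check ordering after this changeset (sketch only). */
    if (!thread->uspace) {                    /* under TASK->udebug.lock only */
        mutex_unlock(&TASK->udebug.lock);
        return ENOENT;
    }

    mutex_lock(&thread->udebug.lock);

    if (thread->udebug.active != true) {      /* now under thread->udebug.lock */
        mutex_unlock(&thread->udebug.lock);
        mutex_unlock(&TASK->udebug.lock);
        return ENOENT;
    }

    if (thread->task != TASK) {
        mutex_unlock(&thread->udebug.lock);
        mutex_unlock(&TASK->udebug.lock);
        return ENOENT;
    }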