Changeset dfa4be62 in mainline


Timestamp: 2024-01-21T16:23:19Z
Author: Jiří Zárevúcky <zarevucky.jiri@…>
Branches: master
Children: d23712e
Parents: a3d87b9
git-author: Jiří Zárevúcky <zarevucky.jiri@…> (2023-03-28 17:40:43)
git-committer: Jiří Zárevúcky <zarevucky.jiri@…> (2024-01-21 16:23:19)
Message:

Thread lock is no longer necessary

Location: kernel/generic
Files: 5 edited

Legend:

    -   removed line
    +   added line
  • kernel/generic/include/proc/thread.h (ra3d87b9 → rdfa4be62)

              atomic_time_stat_t kcycles;

    -         /** Lock protecting thread structure.
    -          *
    -          * Protects the whole thread structure except fields listed above.
    -          */
    -         IRQ_SPINLOCK_DECLARE(lock);
    -
              /** Architecture-specific data. */
              thread_arch_t arch;
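
    For context, the mutable thread_t fields that remain are accessed through HelenOS's atomic_get_unordered()/atomic_set_unordered() wrappers rather than under the removed lock. A minimal standalone sketch of that pattern, assuming the wrappers amount to relaxed C11 atomic loads and stores (the thread_sketch_t type and its field set are hypothetical):

        #include <stdatomic.h>
        #include <stdint.h>

        /* Hypothetical stand-in for the per-thread fields touched in this changeset. */
        typedef struct {
                _Atomic int state;      /* e.g. Running, Ready, Sleeping */
                _Atomic uintptr_t cpu;  /* CPU the thread last ran on */
        } thread_sketch_t;

        /* Relaxed load, standing in for atomic_get_unordered(). */
        static inline int sketch_get_state(thread_sketch_t *t)
        {
                return atomic_load_explicit(&t->state, memory_order_relaxed);
        }

        /* Relaxed store, standing in for atomic_set_unordered(). */
        static inline void sketch_set_state(thread_sketch_t *t, int s)
        {
                atomic_store_explicit(&t->state, s, memory_order_relaxed);
        }

    Each field then stays consistent on its own; invariants spanning several fields are covered by other locks (run-queue locks, the udebug mutexes), as the hunks below suggest.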
  • kernel/generic/src/proc/scheduler.c (ra3d87b9 → rdfa4be62)

              switch_task(THREAD->task);

    -         irq_spinlock_lock(&THREAD->lock, false);
              assert(atomic_get_unordered(&THREAD->cpu) == CPU);
    …
              /* Save current CPU cycle */
              THREAD->last_cycle = get_cycle();
    -
    -         irq_spinlock_unlock(&THREAD->lock, false);
      }
    …
      static void thread_requeue_preempted(thread_t *thread)
      {
    -         irq_spinlock_lock(&thread->lock, false);
    -
              assert(atomic_get_unordered(&thread->state) == Running);
              assert(atomic_get_unordered(&thread->cpu) == CPU);
    …
              atomic_set_unordered(&thread->state, Ready);

    -         irq_spinlock_unlock(&thread->lock, false);
    -
              add_to_rq(thread, CPU, prio);
      }
    …
      {
              ipl_t ipl = interrupts_disable();
    -
    -         irq_spinlock_lock(&thread->lock, false);

              assert(atomic_get_unordered(&thread->state) == Sleeping || atomic_get_unordered(&thread->state) == Entering);
    …
                      atomic_set_unordered(&thread->cpu, CPU);
              }
    -
    -         irq_spinlock_unlock(&thread->lock, false);

              add_to_rq(thread, cpu, 0);
    …
              }

    -         irq_spinlock_lock(&THREAD->lock, false);
    -
              atomic_set_unordered(&THREAD->state, new_state);
    …
               */
              after_thread_ran_arch();
    -
    -         irq_spinlock_unlock(&THREAD->lock, false);

              CPU_LOCAL->exiting_state = new_state;
    …
              list_foreach_rev(old_rq->rq, rq_link, thread_t, thread) {

    -                 irq_spinlock_lock(&thread->lock, false);
    -
                      /*
                       * Do not steal CPU-wired threads, threads
    …
                       * FPU context is still in the CPU.
                       */
    -                 if (thread->stolen || thread->nomigrate ||
    -                     thread == fpu_owner) {
    -                         irq_spinlock_unlock(&thread->lock, false);
    +                 if (thread->stolen || thread->nomigrate || thread == fpu_owner) {
                              continue;
                      }
    …
                      thread->stolen = true;
                      atomic_set_unordered(&thread->cpu, CPU);
    -
    -                 irq_spinlock_unlock(&thread->lock, false);

                      /*
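
    Pieced together from the hunks above, the preemption requeue path reduces to roughly the following sketch; the priority computation sits in context the diff elides, so prio is shown as given:

        static void thread_requeue_preempted(thread_t *thread)
        {
                /* State and CPU affinity are read with unordered atomics;
                 * no per-thread lock is taken anymore. */
                assert(atomic_get_unordered(&thread->state) == Running);
                assert(atomic_get_unordered(&thread->cpu) == CPU);

                /* ... elided: computation of prio ... */

                atomic_set_unordered(&thread->state, Ready);
                add_to_rq(thread, CPU, prio);
        }

    Presumably it is the run-queue lock inside add_to_rq() that publishes the thread to other CPUs; the per-thread lock added nothing here.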
  • kernel/generic/src/proc/thread.c (ra3d87b9 → rdfa4be62)

              thread_t *thread = (thread_t *) obj;

    -         irq_spinlock_initialize(&thread->lock, "thread_t_lock");
              link_initialize(&thread->rq_link);
              link_initialize(&thread->wq_link);
    …
      void thread_wire(thread_t *thread, cpu_t *cpu)
      {
    -         irq_spinlock_lock(&thread->lock, true);
    +         ipl_t ipl = interrupts_disable();
              atomic_set_unordered(&thread->cpu, cpu);
              thread->nomigrate++;
    -         irq_spinlock_unlock(&thread->lock, true);
    +         interrupts_restore(ipl);
      }
    …
      void thread_migration_disable(void)
      {
    +         ipl_t ipl = interrupts_disable();
    +
              assert(THREAD);
    -
              THREAD->nomigrate++;
    +
    +         interrupts_restore(ipl);
      }
    …
      void thread_migration_enable(void)
      {
    +         ipl_t ipl = interrupts_disable();
    +
              assert(THREAD);
              assert(THREAD->nomigrate > 0);
    …
              if (THREAD->nomigrate > 0)
                      THREAD->nomigrate--;
    +
    +         interrupts_restore(ipl);
      }
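
    The recurring pattern in this file: where irq_spinlock_lock(&thread->lock, true) was taken purely to get an interrupts-disabled section, a plain IPL save/restore pair now suffices. Assembled verbatim from the hunks above, thread_wire() ends up as:

        void thread_wire(thread_t *thread, cpu_t *cpu)
        {
                /* Disabling interrupts replaces the lock; the updates only
                 * need to be atomic with respect to the local CPU. */
                ipl_t ipl = interrupts_disable();
                atomic_set_unordered(&thread->cpu, cpu);
                thread->nomigrate++;
                interrupts_restore(ipl);
        }

    thread_migration_disable() and thread_migration_enable() follow the same shape around their nomigrate updates.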
  • kernel/generic/src/sysinfo/stats.c (ra3d87b9 → rdfa4be62)

              thread_t *thread = thread_first();
              while (thread != NULL) {
    -                 /* Interrupts are already disabled */
    -                 irq_spinlock_lock(&thread->lock, false);
    -
                      /* Record the statistics and increment the index */
                      produce_stats_thread(thread, &stats_threads[i]);
                      i++;
    -
    -                 irq_spinlock_unlock(&thread->lock, false);

                      thread = thread_next(thread);
    …
                      ret.data.size = sizeof(stats_thread_t);

    -                 /*
    -                  * Replaced hand-over-hand locking with regular nested sections
    -                  * to avoid weak reference leak issues.
    -                  */
    -                 irq_spinlock_lock(&thread->lock, false);
                      produce_stats_thread(thread, stats_thread);
    -                 irq_spinlock_unlock(&thread->lock, false);

                      irq_spinlock_unlock(&threads_lock, true);
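
    After the change, the statistics walk samples each thread with no per-thread locking; per the first hunk, the loop body reduces to the following (the removed "Interrupts are already disabled" comment indicates the caller already holds the relevant global lock with interrupts off):

        thread_t *thread = thread_first();
        while (thread != NULL) {
                /* Record the statistics and increment the index */
                produce_stats_thread(thread, &stats_threads[i]);
                i++;

                thread = thread_next(thread);
        }

    produce_stats_thread() presumably relies on the same unordered atomic reads shown in the other files.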
  • kernel/generic/src/udebug/udebug_ops.c (ra3d87b9 → rdfa4be62)

              }

    -         irq_spinlock_lock(&thread->lock, true);
    -
              /* Verify that 'thread' is a userspace thread. */
              if (!thread->uspace) {
    -                 /* It's not, deny its existence */
    -                 irq_spinlock_unlock(&thread->lock, true);
                      mutex_unlock(&TASK->udebug.lock);
                      return ENOENT;
              }
    -
    -         /* Verify debugging state. */
    -         if (thread->udebug.active != true) {
    -                 /* Not in debugging session or undesired GO state */
    -                 irq_spinlock_unlock(&thread->lock, true);
    -                 mutex_unlock(&TASK->udebug.lock);
    -                 return ENOENT;
    -         }
    -
    -         /* Now verify that the thread belongs to the current task. */
    -         if (thread->task != TASK) {
    -                 /* No such thread belonging this task */
    -                 irq_spinlock_unlock(&thread->lock, true);
    -                 mutex_unlock(&TASK->udebug.lock);
    -                 return ENOENT;
    -         }
    -
    -         irq_spinlock_unlock(&thread->lock, true);
    -
    -         /* Only mutex TASK->udebug.lock left. */

              /*
    …
               */
              mutex_lock(&thread->udebug.lock);
    +
    +         /* Verify debugging state. */
    +         if (thread->udebug.active != true) {
    +                 /* Not in debugging session or undesired GO state */
    +                 mutex_unlock(&thread->udebug.lock);
    +                 mutex_unlock(&TASK->udebug.lock);
    +                 return ENOENT;
    +         }
    +
    +         /* Now verify that the thread belongs to the current task. */
    +         if (thread->task != TASK) {
    +                 /* No such thread belonging this task */
    +                 mutex_unlock(&thread->udebug.lock);
    +                 mutex_unlock(&TASK->udebug.lock);
    +                 return ENOENT;
    +         }

              /* The big task mutex is no longer needed. */
    …
              /* FIXME: make sure the thread isn't past debug shutdown... */
              list_foreach(TASK->threads, th_link, thread_t, thread) {
    -                 irq_spinlock_lock(&thread->lock, false);
                      bool uspace = thread->uspace;
    -                 irq_spinlock_unlock(&thread->lock, false);

                      /* Not interested in kernel threads. */
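
    Net effect of the udebug changes, condensed from the hunks above: the userspace check stays lock-free, and the debugging-state and task-ownership checks move under thread->udebug.lock, which already serializes them (the two separate ifs are merged here for brevity):

        /* Verify that 'thread' is a userspace thread. */
        if (!thread->uspace) {
                mutex_unlock(&TASK->udebug.lock);
                return ENOENT;
        }

        mutex_lock(&thread->udebug.lock);

        /* Both remaining checks now run under the udebug mutex. */
        if (thread->udebug.active != true || thread->task != TASK) {
                mutex_unlock(&thread->udebug.lock);
                mutex_unlock(&TASK->udebug.lock);
                return ENOENT;
        }

    The error paths shrink accordingly: only the mutexes still held need unlocking.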