Changeset 9f2f5ee in mainline


Timestamp: 2025-04-10T17:48:56Z (4 days ago)
Author: Jiří Zárevúcky <zarevucky.jiri@…>
Branches: master
Children: c55ab66
Parents: c626117
git-author: Jiří Zárevúcky <zarevucky.jiri@…> (2025-04-10 10:55:54)
git-committer: Jiří Zárevúcky <zarevucky.jiri@…> (2025-04-10 17:48:56)
Message:

Rewrite kernel mutex implementation a little

Removes MUTEX_ACTIVE, whose uses have already been replaced by
irq_spinlock_t, and fixes some issues with the old implementation.

  • A race in mtx->owner access is unavoidable, so make it explicitly atomic.
  • The THREAD == NULL case only occurs while there are no other threads yet, so factor it out as a special case. This also ensures that a recursive mutex works before threads are initialized, just as a normal mutex does (see the sketch after this list).
  • More and better asserts.
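
To make the second point concrete, here is a minimal sketch (hypothetical names; not part of the changeset) of what now works during early boot, while THREAD is still NULL:

    /* Hypothetical early-boot example: assumes HelenOS kernel context where
     * THREAD is NULL until the threading subsystem is initialized. */
    #include <assert.h>
    #include <synch/mutex.h>

    static mutex_t early_lock = MUTEX_INITIALIZER(early_lock, MUTEX_RECURSIVE);

    static void early_boot_example(void)
    {
            mutex_lock(&early_lock);           /* THREAD == NULL: only bumps nesting */
            mutex_lock(&early_lock);           /* recursive locking works this early too */
            assert(mutex_locked(&early_lock)); /* reports nesting > 0 when THREAD == NULL */
            mutex_unlock(&early_lock);
            mutex_unlock(&early_lock);         /* nesting drops back to 0 */
    }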
Location: kernel/generic
Files: 2 edited

Legend:

Unmodified lines have no prefix; added lines are prefixed with "+"; removed lines are prefixed with "-".
  • kernel/generic/include/synch/mutex.h

--- kernel/generic/include/synch/mutex.h (c626117)
+++ kernel/generic/include/synch/mutex.h (9f2f5ee)
         MUTEX_PASSIVE,
         MUTEX_RECURSIVE,
-        MUTEX_ACTIVE
 } mutex_type_t;

…

 typedef struct {
         mutex_type_t type;
+        int nesting;
         semaphore_t sem;
-        struct thread *owner;
-        unsigned nesting;
+        _Atomic(struct thread *) owner;
 } mutex_t;

 #define MUTEX_INITIALIZER(name, mtype) (mutex_t) { \
         .type = (mtype), \
+        .nesting = 0, \
         .sem = SEMAPHORE_INITIALIZER((name).sem, 1), \
         .owner = NULL, \
-        .nesting = 0, \
 }

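
As a hypothetical illustration of the header change (example name, not part of the changeset): existing declarations keep working, while any leftover user of the removed MUTEX_ACTIVE type now fails to compile:

    #include <synch/mutex.h>

    /* Still valid: MUTEX_PASSIVE and MUTEX_RECURSIVE remain. */
    static mutex_t example_lock = MUTEX_INITIALIZER(example_lock, MUTEX_RECURSIVE);

    /* No longer compiles: MUTEX_ACTIVE was removed from mutex_type_t. */
    /* static mutex_t old_lock = MUTEX_INITIALIZER(old_lock, MUTEX_ACTIVE); */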
  • kernel/generic/src/synch/mutex.c

--- kernel/generic/src/synch/mutex.c (c626117)
+++ kernel/generic/src/synch/mutex.c (9f2f5ee)
 /*
  * Copyright (c) 2001-2004 Jakub Jermar
- * Copyright (c) 2023 Jiří Zárevúcky
+ * Copyright (c) 2025 Jiří Zárevúcky
  * All rights reserved.
  *

…

 #include <assert.h>
 #include <errno.h>
+#include <proc/thread.h>
+#include <stdatomic.h>
 #include <synch/mutex.h>
 #include <synch/semaphore.h>
-#include <arch.h>
-#include <stacktrace.h>
-#include <cpu.h>
-#include <proc/thread.h>

 /** Initialize mutex.

…

 }

+/** A race in mtx->owner access is unavoidable, so we have to make
+ * access to it formally atomic. These are convenience functions to
+ * read/write the variable without memory barriers, since we don't need
+ * them and C11 atomics default to the strongest possible memory
+ * ordering, which is utterly ridiculous.
+ */
+static inline thread_t *_get_owner(mutex_t *mtx)
+{
+        return atomic_load_explicit(&mtx->owner, memory_order_relaxed);
+}
+
+/** Counterpart to _get_owner(). */
+static inline void _set_owner(mutex_t *mtx, thread_t *owner)
+{
+        atomic_store_explicit(&mtx->owner, owner, memory_order_relaxed);
+}
+
 /** Find out whether the mutex is currently locked.
  *

…

 bool mutex_locked(mutex_t *mtx)
 {
-        errno_t rc = semaphore_trydown(&mtx->sem);
-        if (rc == EOK) {
-                semaphore_up(&mtx->sem);
-        }
-        return rc != EOK;
-}
+        if (!THREAD)
+                return mtx->nesting > 0;

-static void mutex_lock_active(mutex_t *mtx)
-{
-        assert((mtx->type == MUTEX_ACTIVE) || !THREAD);
-
-        const unsigned deadlock_treshold = 100000000;
-        unsigned int cnt = 0;
-        bool deadlock_reported = false;
-
-        while (semaphore_trydown(&mtx->sem) != EOK) {
-                if (cnt++ > deadlock_treshold) {
-                        printf("cpu%u: looping on active mutex %p\n", CPU->id, mtx);
-                        stack_trace();
-                        cnt = 0;
-                        deadlock_reported = true;
-                }
-        }
-
-        if (deadlock_reported)
-                printf("cpu%u: not deadlocked\n", CPU->id);
+        return _get_owner(mtx) == THREAD;
 }

…

 void mutex_lock(mutex_t *mtx)
 {
-        if (mtx->type == MUTEX_RECURSIVE && mtx->owner == THREAD) {
-                assert(THREAD);
+        if (!THREAD) {
+                assert(mtx->type == MUTEX_RECURSIVE || mtx->nesting == 0);
                 mtx->nesting++;
                 return;
         }

-        if (mtx->type == MUTEX_ACTIVE || !THREAD) {
-                mutex_lock_active(mtx);
+        if (_get_owner(mtx) == THREAD) {
+                /* This will also detect nested locks on a non-recursive mutex. */
+                assert(mtx->type == MUTEX_RECURSIVE);
+                assert(mtx->nesting > 0);
+                mtx->nesting++;
                 return;
         }

         semaphore_down(&mtx->sem);
-        mtx->owner = THREAD;
+
+        _set_owner(mtx, THREAD);
+        assert(mtx->nesting == 0);
         mtx->nesting = 1;
 }

…
 errno_t mutex_lock_timeout(mutex_t *mtx, uint32_t usec)
 {
-        if (usec != 0) {
-                assert(mtx->type != MUTEX_ACTIVE);
-                assert(THREAD);
+        if (!THREAD) {
+                assert(mtx->type == MUTEX_RECURSIVE || mtx->nesting == 0);
+                mtx->nesting++;
+                return EOK;
         }

-        if (mtx->type == MUTEX_RECURSIVE && mtx->owner == THREAD) {
-                assert(THREAD);
+        if (_get_owner(mtx) == THREAD) {
+                assert(mtx->type == MUTEX_RECURSIVE);
+                assert(mtx->nesting > 0);
                 mtx->nesting++;
                 return EOK;

…

         errno_t rc = semaphore_down_timeout(&mtx->sem, usec);
-        if (rc == EOK) {
-                mtx->owner = THREAD;
-                mtx->nesting = 1;
-        }
-        return rc;
+        if (rc != EOK)
+                return rc;
+
+        _set_owner(mtx, THREAD);
+        assert(mtx->nesting == 0);
+        mtx->nesting = 1;
+        return EOK;
 }

…
 void mutex_unlock(mutex_t *mtx)
 {
-        if (mtx->type == MUTEX_RECURSIVE) {
-                assert(mtx->owner == THREAD);
-                if (--mtx->nesting > 0)
-                        return;
-                mtx->owner = NULL;
+        if (--mtx->nesting > 0) {
+                assert(mtx->type == MUTEX_RECURSIVE);
+                return;
         }
+
+        assert(mtx->nesting == 0);
+
+        if (!THREAD)
+                return;
+
+        assert(_get_owner(mtx) == THREAD);
+        _set_owner(mtx, NULL);
+
         semaphore_up(&mtx->sem);
 }
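
As a closing, hypothetical usage sketch (illustrative names, not part of the changeset): since mutex_locked() now compares the atomically stored owner against THREAD, it answers whether the current thread holds the mutex, which is the kind of check assertion-style callers want:

    #include <assert.h>
    #include <synch/mutex.h>

    static mutex_t data_lock = MUTEX_INITIALIZER(data_lock, MUTEX_PASSIVE);

    /* Caller contract: must be entered with data_lock held by this thread. */
    static void update_shared_data(void)
    {
            assert(mutex_locked(&data_lock));
            /* ... touch state protected by data_lock ... */
    }

    static void example_caller(void)
    {
            mutex_lock(&data_lock);
            update_shared_data();
            mutex_unlock(&data_lock);
    }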