Changeset 05e2a7ad in mainline


Ignore:
Timestamp:
2005-12-07T13:32:31Z (19 years ago)
Author:
Jakub Jermar <jakub@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
839470f
Parents:
253f8590
Message:

Add comments describing locking rules for some locks.
Cleanup.

Files:
8 edited

Legend:

Unmodified
Added
Removed
  • arch/ia32/include/atomic.h

    r253f8590 r05e2a7ad  
    5454        atomic_t r;
    5555        __asm__ volatile (
    56                 "movl $1,%0;"
    57                 "lock xaddl %0,%1;"
     56                "movl $1, %0\n"
     57                "lock xaddl %0, %1\n"
    5858                : "=r"(r), "=m" (*val)
    5959        );
     
    6767        atomic_t r;
    6868        __asm__ volatile (
    69                 "movl $-1,%0;"
    70                 "lock xaddl %0,%1;"
     69                "movl $-1, %0\n"
     70                "lock xaddl %0, %1\n"
    7171                : "=r"(r), "=m" (*val)
    7272        );
     
    7676#define atomic_inc_post(val) (atomic_inc_pre(val)+1)
    7777#define atomic_dec_post(val) (atomic_dec_pre(val)-1)
    78 
    79 
    8078
    8179static inline int test_and_set(volatile int *val) {
  • generic/include/proc/thread.h

    r253f8590 r05e2a7ad  
    6565        link_t threads_link;                    /**< Link to the list of all threads. */
    6666       
    67         /* items below are protected by lock */
     67        /** Lock protecting thread structure.
     68         *
     69         * Protects the whole thread structure except list links above.
     70         * Must be acquired before T.lock for each T of type task_t.
     71         *
     72         */
    6873        spinlock_t lock;
    6974
     
    110115};
    111116
    112 extern spinlock_t threads_lock;                 /**< Lock protecting threads_head list. */
     117/** Thread list lock.
     118 *
     119 * This lock protects all link_t structures chained in threads_head.
     120 * Must be acquired before T.lock for each T of type thread_t.
     121 *
     122 */
     123extern spinlock_t threads_lock;
     124
    113125extern link_t threads_head;                     /**< List of all threads in the system. */
    114126
  • generic/include/synch/waitq.h

    r253f8590 r05e2a7ad  
    3939#define WAKEUP_ALL      1
    4040
     41/** Wait queue structure. */
    4142struct waitq {
     43
     44        /** Lock protecting wait queue structure.
     45         *
     46         * Must be acquired before T.lock for each T of type thread_t.
     47         */
    4248        spinlock_t lock;
     49
    4350        int missed_wakeups;     /**< Number of waitq_wakeup() calls that didn't find a thread to wake up. */
    4451        link_t head;            /**< List of sleeping threads for which there was no missed_wakeup. */
     
    5259extern void waitq_initialize(waitq_t *wq);
    5360extern int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int nonblocking);
    54 extern void waitq_wakeup(waitq_t *wq, int all);
    55 extern void _waitq_wakeup_unsafe(waitq_t *wq, int all);
     61extern void waitq_wakeup(waitq_t *wq, bool all);
     62extern void _waitq_wakeup_unsafe(waitq_t *wq, bool all);
    5663
    5764#endif
  • generic/src/proc/scheduler.c

    r253f8590 r05e2a7ad  
    434434
    435435        /*
    436          * Through the 'THE' structure, we keep track of THREAD, TASK, CPU
     436         * Through the 'THE' structure, we keep track of THREAD, TASK, CPU, VM
    437437         * and preemption counter. At this point THE could be coming either
    438438         * from THREAD's or CPU's stack.
  • generic/src/proc/thread.c

    r253f8590 r05e2a7ad  
    5555char *thread_states[] = {"Invalid", "Running", "Sleeping", "Ready", "Entering", "Exiting"}; /**< Thread states */
    5656
    57 spinlock_t threads_lock;
    58 link_t threads_head;
     57spinlock_t threads_lock;        /**< Lock protecting threads_head list. For locking rules, see declaration thereof. */
     58link_t threads_head;            /**< List of all threads. */
    5959
    6060static spinlock_t tidlock;
  • generic/src/synch/rwlock.c

    r253f8590 r05e2a7ad  
    2727 */
    2828
    29 
    30 /*
    31  * Reader/Writer locks
     29/** Reader/Writer locks
     30 *
     31 * A reader/writer lock can be held by multiple readers at a time.
     32 * Or it can be exclusively held by a sole writer at a time.
    3233 */
    3334
     
    7677 */
    7778void rwlock_initialize(rwlock_t *rwl) {
    78         spinlock_initialize(&rwl->lock, "rwlock");
     79        spinlock_initialize(&rwl->lock, "rwlock_t");
    7980        mutex_initialize(&rwl->exclusive);
    8081        rwl->readers_in = 0;
     
    219220                                break;
    220221                        case ESYNCH_OK_ATOMIC:
    221                                 panic("_mutex_lock_timeout()==ESYNCH_OK_ATOMIC");
     222                                panic("_mutex_lock_timeout()==ESYNCH_OK_ATOMIC\n");
    222223                                break;
    223224                        default:
    224                                 panic("invalid ESYNCH");
     225                                panic("invalid ESYNCH\n");
    225226                                break;
    226227                }
     
    284285
    285286
    286 /** Direct handoff
     287/** Direct handoff of reader/writer lock ownership.
    287288 *
    288289 * Direct handoff of reader/writer lock ownership
     
    307308        rwlock_type_t type = RWLOCK_NONE;
    308309        thread_t *t = NULL;
    309         int one_more = 1;
     310        bool one_more = true;
    310311       
    311312        spinlock_lock(&rwl->exclusive.sem.wq.lock);
     
    353354                                spinlock_lock(&t->lock);
    354355                                if (t->rwlock_holder_type != RWLOCK_READER)
    355                                         one_more = 0;
     356                                        one_more = false;
    356357                                spinlock_unlock(&t->lock);     
    357358                        }
  • generic/src/synch/spinlock.c

    r253f8590 r05e2a7ad  
    6363void spinlock_lock(spinlock_t *sl)
    6464{
    65         int i = 0;
     65        count_t i = 0;
    6666        __address caller = ((__address *) &sl)[-1];
    6767        char *symbol;
     68        bool deadlock_reported = false;
    6869
    6970        preemption_disable();
     
    7778                        printf("\n");
    7879                        i = 0;
     80                        deadlock_reported = true;
    7981                }
    8082        }
     83
     84        if (deadlock_reported)
     85                printf("cpu%d: not deadlocked\n", CPU->id);
    8186
    8287        /*
  • generic/src/synch/waitq.c

    r253f8590 r05e2a7ad  
    3434#include <arch/asm.h>
    3535#include <arch/types.h>
     36#include <typedefs.h>
    3637#include <time/timeout.h>
    3738#include <arch.h>
     
    6869        thread_t *t = (thread_t *) data;
    6970        waitq_t *wq;
    70         int do_wakeup = 0;
     71        bool do_wakeup = false;
    7172
    7273        spinlock_lock(&threads_lock);
     
    7677grab_locks:
    7778        spinlock_lock(&t->lock);
    78         if (wq = t->sleep_queue) {
     79        if (wq = t->sleep_queue) {              /* assignment */
    7980                if (!spinlock_trylock(&wq->lock)) {
    8081                        spinlock_unlock(&t->lock);
    81                         goto grab_locks; /* avoid deadlock */
     82                        goto grab_locks;        /* avoid deadlock */
    8283                }
    8384
    8485                list_remove(&t->wq_link);
    8586                t->saved_context = t->sleep_timeout_context;
    86                 do_wakeup = 1;
     87                do_wakeup = true;
    8788               
    8889                spinlock_unlock(&wq->lock);
     
    9091        }
    9192       
    92         t->timeout_pending = 0;
     93        t->timeout_pending = false;
    9394        spinlock_unlock(&t->lock);
    9495       
    95         if (do_wakeup) thread_ready(t);
     96        if (do_wakeup)
     97                thread_ready(t);
    9698
    9799out:
     
    194196                        return ESYNCH_TIMEOUT;
    195197                }
    196                 THREAD->timeout_pending = 1;
     198                THREAD->timeout_pending = true;
    197199                timeout_register(&THREAD->sleep_timeout, (__u64) usec, waitq_interrupted_sleep, THREAD);
    198200        }
     
    228230 *        will be woken up and missed count will be zeroed.
    229231 */
    230 void waitq_wakeup(waitq_t *wq, int all)
     232void waitq_wakeup(waitq_t *wq, bool all)
    231233{
    232234        ipl_t ipl;
     
    251253 *        will be woken up and missed count will be zeroed.
    252254 */
    253 void _waitq_wakeup_unsafe(waitq_t *wq, int all)
     255void _waitq_wakeup_unsafe(waitq_t *wq, bool all)
    254256{
    255257        thread_t *t;
     
    258260        if (list_empty(&wq->head)) {
    259261                wq->missed_wakeups++;
    260                 if (all) wq->missed_wakeups = 0;
     262                if (all)
     263                        wq->missed_wakeups = 0;
    261264                return;
    262265        }
     
    267270        spinlock_lock(&t->lock);
    268271        if (t->timeout_pending && timeout_unregister(&t->sleep_timeout))
    269                 t->timeout_pending = 0;
     272                t->timeout_pending = false;
    270273        t->sleep_queue = NULL;
    271274        spinlock_unlock(&t->lock);
     
    273276        thread_ready(t);
    274277
    275         if (all) goto loop;
    276 }
     278        if (all)
     279                goto loop;
     280}
Note: See TracChangeset for help on using the changeset viewer.