Changeset 31d8e10 in mainline


Ignore:
Timestamp:
2007-04-05T16:09:49Z (18 years ago)
Author:
Jakub Jermar <jakub@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
547fa39
Parents:
879585a3
Message:

Continue to de-oversynchronize the kernel.

  • replace as->refcount with an atomic counter; accesses to this

reference counter are not to be done when the as->lock mutex is held;
this lets us get rid of mutex_lock_active();

Remove the possibility of a deadlock between TLB shootdown and asidlock.

  • get rid of mutex_lock_active() on as->lock
  • when locking the asidlock spinlock, always do it conditionally and with

preemption disabled; in the unsuccessful case, enable interrupts and try again

  • there should be no deadlock between TLB shootdown and the as->lock mutexes
  • PLEASE REVIEW !!!

Add DEADLOCK_PROBE's to places where we have spinlock_trylock() loops.

Location:
kernel/generic
Files:
12 edited

Legend:

Unmodified
Added
Removed
  • kernel/generic/include/mm/as.h

    r879585a3 r31d8e10  
    102102                asid_t asid;
    103103               
     104                /** Number of references (i.e tasks that reference this as). */
     105                atomic_t refcount;
     106
    104107                mutex_t lock;
    105                
    106                 /** Number of references (i.e tasks that reference this as). */
    107                 count_t refcount;
    108108               
    109109                /** B+tree of address space areas. */
     
    148148        asid_t asid;
    149149
     150        /** Number of references (i.e tasks that reference this as). */
     151        atomic_t refcount;
     152
    150153        mutex_t lock;
    151 
    152         /** Number of references (i.e tasks that reference this as). */
    153         count_t refcount;
    154154
    155155        /** B+tree of address space areas. */
  • kernel/generic/include/synch/mutex.h

    r879585a3 r31d8e10  
    4545
    4646#define mutex_lock(mtx) \
    47         _mutex_lock_timeout((mtx),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NONE)
     47        _mutex_lock_timeout((mtx), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE)
    4848#define mutex_trylock(mtx) \
    49         _mutex_lock_timeout((mtx),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NON_BLOCKING)
    50 #define mutex_lock_timeout(mtx,usec) \
    51         _mutex_lock_timeout((mtx),(usec),SYNCH_FLAGS_NON_BLOCKING)
    52 #define mutex_lock_active(mtx) \
    53         while (mutex_trylock((mtx)) != ESYNCH_OK_ATOMIC)
     49        _mutex_lock_timeout((mtx), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING)
     50#define mutex_lock_timeout(mtx, usec) \
     51        _mutex_lock_timeout((mtx), (usec), SYNCH_FLAGS_NON_BLOCKING)
    5452
    5553extern void mutex_initialize(mutex_t *mtx);
  • kernel/generic/include/synch/spinlock.h

    r879585a3 r31d8e10  
    102102}
    103103
     104#ifdef CONFIG_DEBUG_SPINLOCK
     105
     106extern int printf(const char *, ...);
     107
     108#define DEADLOCK_THRESHOLD              100000000
     109#define DEADLOCK_PROBE_INIT(pname)      count_t pname = 0
     110#define DEADLOCK_PROBE(pname, value)                                    \
     111        if ((pname)++ > (value)) {                                      \
     112                (pname) = 0;                                            \
     113                printf("Deadlock probe %s: exceeded threshold %d\n",    \
     114                    "cpu%d: function=%s, line=%d\n",                    \
     115                    #pname, (value), CPU->id, __FUNCTION__, __LINE__);  \
     116        }
     117#else
     118#define DEADLOCK_PROBE_INIT(pname)
     119#define DEADLOCK_PROBE(pname, value)
     120#endif
     121
    104122#else
    105123
     
    114132#define spinlock_unlock(x)              preemption_enable()
    115133
     134#define DEADLOCK_PROBE_INIT(pname)
     135#define DEADLOCK_PROBE(pname, value)
     136
    116137#endif
    117138
  • kernel/generic/src/ipc/ipc.c

    r879585a3 r31d8e10  
    375375        call_t *call;
    376376        phone_t *phone;
     377        DEADLOCK_PROBE_INIT(p_phonelck);
    377378
    378379        /* Disconnect all our phones ('ipc_phone_hangup') */
     
    388389        while (!list_empty(&TASK->answerbox.connected_phones)) {
    389390                phone = list_get_instance(TASK->answerbox.connected_phones.next,
    390                                           phone_t, link);
     391                    phone_t, link);
    391392                if (! spinlock_trylock(&phone->lock)) {
    392393                        spinlock_unlock(&TASK->answerbox.lock);
     394                        DEADLOCK_PROBE(p_phonelck, DEADLOCK_THRESHOLD);
    393395                        goto restart_phones;
    394396                }
  • kernel/generic/src/ipc/irq.c

    r879585a3 r31d8e10  
    337337                link_t *cur = box->irq_head.next;
    338338                irq_t *irq;
     339                DEADLOCK_PROBE_INIT(p_irqlock);
    339340               
    340341                irq = list_get_instance(cur, irq_t, notif_cfg.link);
     
    345346                        spinlock_unlock(&box->irq_lock);
    346347                        interrupts_restore(ipl);
     348                        DEADLOCK_PROBE(p_irqlock, DEADLOCK_THRESHOLD);
    347349                        goto loop;
    348350                }
  • kernel/generic/src/mm/as.c

    r879585a3 r31d8e10  
    5858#include <mm/asid.h>
    5959#include <arch/mm/asid.h>
     60#include <preemption.h>
    6061#include <synch/spinlock.h>
    6162#include <synch/mutex.h>
     
    182183                as->asid = ASID_INVALID;
    183184       
    184         as->refcount = 0;
     185        atomic_set(&as->refcount, 0);
    185186        as->cpu_refcount = 0;
    186187#ifdef AS_PAGE_TABLE
     
    197198 * When there are no tasks referencing this address space (i.e. its refcount is
    198199 * zero), the address space can be destroyed.
     200 *
     201 * We know that we don't hold any spinlock.
    199202 */
    200203void as_destroy(as_t *as)
     
    202205        ipl_t ipl;
    203206        bool cond;
    204 
    205         ASSERT(as->refcount == 0);
     207        DEADLOCK_PROBE_INIT(p_asidlock);
     208
     209        ASSERT(atomic_get(&as->refcount) == 0);
    206210       
    207211        /*
     
    210214         */
    211215
    212         ipl = interrupts_disable();
    213         spinlock_lock(&asidlock);
     216        /*
     217         * We need to avoid deadlock between TLB shootdown and asidlock.
     218         * We therefore try to take asid conditionally and if we don't succeed,
     219         * we enable interrupts and try again. This is done while preemption is
     220         * disabled to prevent nested context switches. We also depend on the
     221         * fact that so far no spinlocks are held.
     222         */
     223        preemption_disable();
     224        ipl = interrupts_read();
     225retry:
     226        interrupts_disable();
     227        if (!spinlock_trylock(&asidlock)) {
     228                interrupts_enable();
     229                DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
     230                goto retry;
     231        }
     232        preemption_enable();    /* Interrupts disabled, enable preemption */
    214233        if (as->asid != ASID_INVALID && as != AS_KERNEL) {
    215234                if (as != AS && as->cpu_refcount == 0)
     
    473492                 * Finish TLB shootdown sequence.
    474493                 */
     494
    475495                tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE,
    476496                    area->pages - pages);
     497                /*
     498                 * Invalidate software translation caches (e.g. TSB on sparc64).
     499                 */
     500                as_invalidate_translation_cache(as, area->base +
     501                    pages * PAGE_SIZE, area->pages - pages);
    477502                tlb_shootdown_finalize();
    478503               
    479                 /*
    480                  * Invalidate software translation caches (e.g. TSB on sparc64).
    481                  */
    482                 as_invalidate_translation_cache(as, area->base +
    483                     pages * PAGE_SIZE, area->pages - pages);
    484504        } else {
    485505                /*
     
    569589         * Finish TLB shootdown sequence.
    570590         */
     591
    571592        tlb_invalidate_pages(as->asid, area->base, area->pages);
    572         tlb_shootdown_finalize();
    573        
    574593        /*
    575594         * Invalidate potential software translation caches (e.g. TSB on
     
    577596         */
    578597        as_invalidate_translation_cache(as, area->base, area->pages);
     598        tlb_shootdown_finalize();
    579599       
    580600        btree_destroy(&area->used_space);
     
    868888 * thing which is forbidden in this context is locking the address space.
    869889 *
     890 * When this function is enetered, no spinlocks may be held.
     891 *
    870892 * @param old Old address space or NULL.
    871893 * @param new New address space.
     
    873895void as_switch(as_t *old_as, as_t *new_as)
    874896{
    875         spinlock_lock(&asidlock);
     897        DEADLOCK_PROBE_INIT(p_asidlock);
     898        preemption_disable();
     899retry:
     900        (void) interrupts_disable();
     901        if (!spinlock_trylock(&asidlock)) {
     902                /*
     903                 * Avoid deadlock with TLB shootdown.
     904                 * We can enable interrupts here because
     905                 * preemption is disabled. We should not be
     906                 * holding any other lock.
     907                 */
     908                (void) interrupts_enable();
     909                DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
     910                goto retry;
     911        }
     912        preemption_enable();
    876913
    877914        /*
  • kernel/generic/src/proc/scheduler.c

    r879585a3 r31d8e10  
    378378{
    379379        int priority;
    380        
     380        DEADLOCK_PROBE_INIT(p_joinwq);
     381
    381382        ASSERT(CPU != NULL);
    382383       
     
    407408                                        delay(10);
    408409                                        spinlock_lock(&THREAD->lock);
     410                                        DEADLOCK_PROBE(p_joinwq,
     411                                            DEADLOCK_THRESHOLD);
    409412                                        goto repeat;
    410413                                }
  • kernel/generic/src/proc/task.c

    r879585a3 r31d8e10  
    4242#include <mm/as.h>
    4343#include <mm/slab.h>
     44#include <atomic.h>
    4445#include <synch/spinlock.h>
    4546#include <synch/waitq.h>
     
    141142        /*
    142143         * Increment address space reference count.
    143          * TODO: Reconsider the locking scheme.
    144          */
    145         mutex_lock(&as->lock);
    146         as->refcount++;
    147         mutex_unlock(&as->lock);
     144         */
     145        atomic_inc(&as->refcount);
    148146
    149147        spinlock_lock(&tasks_lock);
     
    167165        btree_destroy(&t->futexes);
    168166
    169         mutex_lock_active(&t->as->lock);
    170         if (--t->as->refcount == 0) {
    171                 mutex_unlock(&t->as->lock);
     167        if (atomic_predec(&t->as->refcount) == 0)
    172168                as_destroy(t->as);
    173                 /*
    174                  * t->as is destroyed.
    175                  */
    176         } else
    177                 mutex_unlock(&t->as->lock);
    178169       
    179170        free(t);
  • kernel/generic/src/proc/thread.c

    r879585a3 r31d8e10  
    497497
    498498        /*
    499          * Since the thread is expected to not be already detached,
     499         * Since the thread is expected not to be already detached,
    500500         * pointer to it must be still valid.
    501501         */
  • kernel/generic/src/synch/spinlock.c

    r879585a3 r31d8e10  
    7474 */
    7575#ifdef CONFIG_DEBUG_SPINLOCK
    76 #define DEADLOCK_THRESHOLD      100000000
    7776void spinlock_lock_debug(spinlock_t *sl)
    7877{
  • kernel/generic/src/synch/waitq.c

    r879585a3 r31d8e10  
    8787        waitq_t *wq;
    8888        bool do_wakeup = false;
     89        DEADLOCK_PROBE_INIT(p_wqlock);
    8990
    9091        spinlock_lock(&threads_lock);
     
    9798                if (!spinlock_trylock(&wq->lock)) {
    9899                        spinlock_unlock(&t->lock);
     100                        DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD);
    99101                        goto grab_locks;        /* avoid deadlock */
    100102                }
     
    129131        bool do_wakeup = false;
    130132        ipl_t ipl;
     133        DEADLOCK_PROBE_INIT(p_wqlock);
    131134
    132135        ipl = interrupts_disable();
     
    148151                if (!spinlock_trylock(&wq->lock)) {
    149152                        spinlock_unlock(&t->lock);
     153                        DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD);
    150154                        goto grab_locks;        /* avoid deadlock */
    151155                }
  • kernel/generic/src/time/timeout.c

    r879585a3 r31d8e10  
    4545#include <arch/asm.h>
    4646#include <arch.h>
    47 
    4847
    4948/** Initialize timeouts
     
    176175        link_t *l;
    177176        ipl_t ipl;
     177        DEADLOCK_PROBE_INIT(p_tolock);
    178178
    179179grab_locks:
     
    187187        if (!spinlock_trylock(&t->cpu->timeoutlock)) {
    188188                spinlock_unlock(&t->lock);
    189                 interrupts_restore(ipl);               
     189                interrupts_restore(ipl);
     190                DEADLOCK_PROBE(p_tolock, DEADLOCK_THRESHOLD);
    190191                goto grab_locks;
    191192        }
Note: See TracChangeset for help on using the changeset viewer.