Changeset 879585a3 in mainline


Ignore:
Timestamp:
2007-03-31T22:22:50Z (18 years ago)
Author:
Jakub Jermar <jakub@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
31d8e10
Parents:
563c2dd
Message:

Simplify synchronization in as_switch().
The function was oversynchronized, which
was causing deadlocks on the address
space mutex.

Now, address spaces can only be switched
when the asidlock is held. This also protects
stealing of ASIDs. No other synchronization
is necessary.

Location:
kernel
Files:
8 edited

Legend:

Unmodified
Added
Removed
  • kernel/arch/ia64/src/mm/as.c

    r563c2dd r879585a3  
    3737#include <arch/mm/page.h>
    3838#include <genarch/mm/as_ht.h>
     39#include <genarch/mm/page_ht.h>
    3940#include <genarch/mm/asid_fifo.h>
    4041#include <mm/asid.h>
    41 #include <arch.h>
    4242#include <arch/barrier.h>
    43 #include <synch/spinlock.h>
    4443
    4544/** Architecture dependent address space init. */
     
    5655void as_install_arch(as_t *as)
    5756{
    58         ipl_t ipl;
    5957        region_register rr;
    6058        int i;
    61        
    62         ipl = interrupts_disable();
    63         spinlock_lock(&as->lock);
    6459       
    6560        ASSERT(as->asid != ASID_INVALID);
     
    8176        srlz_d();
    8277        srlz_i();
    83        
    84         spinlock_unlock(&as->lock);
    85         interrupts_restore(ipl);
    8678}
    8779
  • kernel/arch/mips32/src/mm/as.c

    r563c2dd r879585a3  
    3535#include <arch/mm/as.h>
    3636#include <genarch/mm/as_pt.h>
     37#include <genarch/mm/page_pt.h>
    3738#include <genarch/mm/asid_fifo.h>
    3839#include <arch/mm/tlb.h>
     
    4041#include <mm/as.h>
    4142#include <arch/cp0.h>
    42 #include <arch.h>
    4343
    4444/** Architecture dependent address space init. */
     
    5858{
    5959        entry_hi_t hi;
    60         ipl_t ipl;
    6160
    6261        /*
     
    6564        hi.value = cp0_entry_hi_read();
    6665
    67         ipl = interrupts_disable();
    68         spinlock_lock(&as->lock);
    6966        hi.asid = as->asid;
    7067        cp0_entry_hi_write(hi.value);   
    71         spinlock_unlock(&as->lock);
    72         interrupts_restore(ipl);
    7368}
    7469
  • kernel/arch/ppc32/src/mm/as.c

    r563c2dd r879585a3  
    5555{
    5656        asid_t asid;
    57         ipl_t ipl;
    5857        uint32_t sr;
    5958
    60         ipl = interrupts_disable();
    61         spinlock_lock(&as->lock);
    62        
    6359        asid = as->asid;
    6460       
     
    8076                );
    8177        }
    82        
    83         spinlock_unlock(&as->lock);
    84         interrupts_restore(ipl);
    8578}
    8679
  • kernel/arch/ppc64/src/mm/as.c

    r563c2dd r879585a3  
    2727 */
    2828
    29  /** @addtogroup ppc64mm
     29/** @addtogroup ppc64mm
    3030  * @{
    3131 */
     
    4242}
    4343
    44  /** @}
     44/** @}
    4545 */
    4646
  • kernel/arch/sparc64/src/mm/as.c

    r563c2dd r879585a3  
    4343#include <arch/mm/tsb.h>
    4444#include <arch/memstr.h>
    45 #include <synch/mutex.h>
    4645#include <arch/asm.h>
    4746#include <mm/frame.h>
     
    101100{
    102101#ifdef CONFIG_TSB
    103         ipl_t ipl;
    104 
    105         ipl = interrupts_disable();
    106         mutex_lock_active(&as->lock);   /* completely unnecessary, but polite */
    107102        tsb_invalidate(as, 0, (count_t) -1);
    108         mutex_unlock(&as->lock);
    109         interrupts_restore(ipl);
    110103#endif
    111104        return 0;
     
    124117       
    125118        /*
    126          * Note that we don't lock the address space.
    127          * That's correct - we can afford it here
    128          * because we only read members that are
    129          * currently read-only.
     119         * Note that we don't and may not lock the address space. That's ok
     120         * since we only read members that are currently read-only.
     121         *
     122         * Moreover, the as->asid is protected by asidlock, which is being held.
    130123         */
    131124       
    132125        /*
    133          * Write ASID to secondary context register.
    134          * The primary context register has to be set
    135          * from TL>0 so it will be filled from the
    136          * secondary context register from the TL=1
    137          * code just before switch to userspace.
     126         * Write ASID to secondary context register. The primary context
     127         * register has to be set from TL>0 so it will be filled from the
     128         * secondary context register from the TL=1 code just before switch to
     129         * userspace.
    138130         */
    139131        ctx.v = 0;
     
    185177
    186178        /*
    187          * Note that we don't lock the address space.
    188          * That's correct - we can afford it here
    189          * because we only read members that are
    190          * currently read-only.
     179         * Note that we don't and may not lock the address space. That's ok
     180         * since we only read members that are currently read-only.
     181         *
     182         * Moreover, the as->asid is protected by asidlock, which is being held.
    191183         */
    192184
  • kernel/genarch/src/mm/asid.c

    r563c2dd r879585a3  
    6363#include <synch/spinlock.h>
    6464#include <synch/mutex.h>
    65 #include <arch.h>
    6665#include <adt/list.h>
    6766#include <debug.h>
    68 
    69 /**
    70  * asidlock protects the asids_allocated counter.
    71  */
    72 SPINLOCK_INITIALIZE(asidlock);
    7367
    7468static count_t asids_allocated = 0;
     
    9185         */
    9286       
    93         spinlock_lock(&asidlock);
    9487        if (asids_allocated == ASIDS_ALLOCABLE) {
    9588
     
    109102               
    110103                as = list_get_instance(tmp, as_t, inactive_as_with_asid_link);
    111                 mutex_lock_active(&as->lock);
    112104
    113105                /*
     
    131123                as_invalidate_translation_cache(as, 0, (count_t) -1);
    132124               
    133                 mutex_unlock(&as->lock);
    134 
    135125                /*
    136126                 * Get the system rid of the stolen ASID.
     
    157147        }
    158148       
    159         spinlock_unlock(&asidlock);
    160        
    161149        return asid;
    162150}
     
    171159void asid_put(asid_t asid)
    172160{
    173         ipl_t ipl;
    174 
    175         ipl = interrupts_disable();
    176         spinlock_lock(&asidlock);
    177 
    178161        asids_allocated--;
    179162        asid_put_arch(asid);
    180        
    181         spinlock_unlock(&asidlock);
    182         interrupts_restore(ipl);
    183163}
    184164
  • kernel/generic/include/mm/as.h

    r563c2dd r879585a3  
    9090                /** Protected by asidlock. */
    9191                link_t inactive_as_with_asid_link;
     92                /**
      93                 * Number of processors on which this address space is active.
     94                 * Protected by asidlock.
     95                 */
     96                count_t cpu_refcount;
     97                /**
     98                 * Address space identifier.
     99                 * Constant on architectures that do not support ASIDs.
     100                 * Protected by asidlock. 
     101                 */
     102                asid_t asid;
    92103               
    93104                mutex_t lock;
     
    96107                count_t refcount;
    97108               
     98                 /** Number of processors on which this address space is active. */
    99                 count_t cpu_refcount;
    100                
    101109                /** B+tree of address space areas. */
    102110                btree_t as_area_btree;
    103                
    104                 /**
    105                  *  Address space identifier.
    106                  *  Constant on architectures that do not support ASIDs.
    107                  */
    108                 asid_t asid;
    109111               
    110112                /** Non-generic content. */
     
    134136        /** Protected by asidlock. */
    135137        link_t inactive_as_with_asid_link;
     138        /**
      139         * Number of processors on which this address space is active.
     140         * Protected by asidlock.
     141         */
     142        count_t cpu_refcount;
     143        /**
     144         * Address space identifier.
     145         * Constant on architectures that do not support ASIDs.
     146         * Protected by asidlock.
     147         */
     148        asid_t asid;
    136149
    137150        mutex_t lock;
     
    140153        count_t refcount;
    141154
     142         /** Number of processors on which this address space is active. */
    143         count_t cpu_refcount;
    144 
    145155        /** B+tree of address space areas. */
    146156        btree_t as_area_btree;
    147        
    148         /**
    149          *  Address space identifier.
    150          *  Constant on architectures that do not support ASIDs.
    151          */
    152         asid_t asid;
    153157       
    154158        /** Non-generic content. */
     
    250254#endif
    251255
    252 SPINLOCK_EXTERN(inactive_as_with_asid_lock);
    253256extern link_t inactive_as_with_asid_head;
    254257
  • kernel/generic/src/mm/as.c

    r563c2dd r879585a3  
    9696
    9797/**
    98  * This lock protects inactive_as_with_asid_head list. It must be acquired
    99  * before as_t mutex.
    100  */
    101 SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);
     98 * This lock serializes access to the ASID subsystem.
     99 * It protects:
     100 * - inactive_as_with_asid_head list
     101 * - as->asid for each as of the as_t type
     102 * - asids_allocated counter
     103 */
     104SPINLOCK_INITIALIZE(asidlock);
    102105
    103106/**
     
    206209         * it is safe not to lock its mutex.
    207210         */
     211
    208212        ipl = interrupts_disable();
    209         spinlock_lock(&inactive_as_with_asid_lock);
     213        spinlock_lock(&asidlock);
    210214        if (as->asid != ASID_INVALID && as != AS_KERNEL) {
    211215                if (as != AS && as->cpu_refcount == 0)
     
    213217                asid_put(as->asid);
    214218        }
    215         spinlock_unlock(&inactive_as_with_asid_lock);
     219        spinlock_unlock(&asidlock);
    216220
    217221        /*
     
    861865 *
    862866 * Note that this function cannot sleep as it is essentially a part of
    863  * scheduling. Sleeping here would lead to deadlock on wakeup.
     867 * scheduling. Sleeping here would lead to deadlock on wakeup. Another
     868 * thing which is forbidden in this context is locking the address space.
    864869 *
    865870 * @param old Old address space or NULL.
     
    868873void as_switch(as_t *old_as, as_t *new_as)
    869874{
    870         ipl_t ipl;
    871         bool needs_asid = false;
    872        
    873         ipl = interrupts_disable();
    874         spinlock_lock(&inactive_as_with_asid_lock);
     875        spinlock_lock(&asidlock);
    875876
    876877        /*
     
    878879         */     
    879880        if (old_as) {
    880                 mutex_lock_active(&old_as->lock);
    881881                ASSERT(old_as->cpu_refcount);
    882882                if((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
     
    891891                            &inactive_as_with_asid_head);
    892892                }
    893                 mutex_unlock(&old_as->lock);
    894893
    895894                /*
     
    903902         * Second, prepare the new address space.
    904903         */
    905         mutex_lock_active(&new_as->lock);
    906904        if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
    907                 if (new_as->asid != ASID_INVALID) {
     905                if (new_as->asid != ASID_INVALID)
    908906                        list_remove(&new_as->inactive_as_with_asid_link);
    909                 } else {
    910                         /*
    911                          * Defer call to asid_get() until new_as->lock is released.
    912                          */
    913                         needs_asid = true;
    914                 }
     907                else
     908                        new_as->asid = asid_get();
    915909        }
    916910#ifdef AS_PAGE_TABLE
    917911        SET_PTL0_ADDRESS(new_as->genarch.page_table);
    918912#endif
    919         mutex_unlock(&new_as->lock);
    920 
    921         if (needs_asid) {
    922                 /*
    923                  * Allocation of new ASID was deferred
    924                  * until now in order to avoid deadlock.
    925                  */
    926                 asid_t asid;
    927                
    928                 asid = asid_get();
    929                 mutex_lock_active(&new_as->lock);
    930                 new_as->asid = asid;
    931                 mutex_unlock(&new_as->lock);
    932         }
    933         spinlock_unlock(&inactive_as_with_asid_lock);
    934         interrupts_restore(ipl);
    935913       
    936914        /*
     
    939917         */
    940918        as_install_arch(new_as);
     919
     920        spinlock_unlock(&asidlock);
    941921       
    942922        AS = new_as;
Note: See TracChangeset for help on using the changeset viewer.