Changes in / [c2ab3f4:b8f7ea78] in mainline


Location: kernel/generic/src
Files: 6 edited
  • kernel/generic/src/ddi/ddi.c

    rc2ab3f4 → rb8f7ea78
     #include <mm/frame.h>
     #include <mm/as.h>
    -#include <synch/mutex.h>
    +#include <synch/spinlock.h>
     #include <syscall/copy.h>
     #include <adt/btree.h>
    …

     /** This lock protects the parea_btree. */
    -static mutex_t parea_lock;
    +SPINLOCK_INITIALIZE(parea_lock);

     /** B+tree with enabled physical memory areas. */
    …
     {
             btree_create(&parea_btree);
    -        mutex_initialize(&parea_lock, MUTEX_PASSIVE);
     }

    …
     void ddi_parea_register(parea_t *parea)
     {
    -        mutex_lock(&parea_lock);
    +        ipl_t ipl = interrupts_disable();
    +        spinlock_lock(&parea_lock);

             /*
    …
             btree_insert(&parea_btree, (btree_key_t) parea->pbase, parea, NULL);

    -        mutex_unlock(&parea_lock);
    +        spinlock_unlock(&parea_lock);
    +        interrupts_restore(ipl);
     }

    …
                     spinlock_unlock(&zones.lock);

    -                mutex_lock(&parea_lock);
    +                spinlock_lock(&parea_lock);
                     btree_node_t *nodep;
                     parea_t *parea = (parea_t *) btree_search(&parea_btree,
    …

                     if ((!parea) || (parea->frames < pages)) {
    -                        mutex_unlock(&parea_lock);
    +                        spinlock_unlock(&parea_lock);
                             goto err;
                     }

    -                mutex_unlock(&parea_lock);
    +                spinlock_unlock(&parea_lock);
                     goto map;
             }
    …

     map:
    -        interrupts_restore(ipl);
    -
    +        spinlock_lock(&TASK->lock);
    +
             if (!as_area_create(TASK->as, flags, pages * PAGE_SIZE, vp,
                 AS_AREA_ATTR_NONE, &phys_backend, &backend_data)) {
    …
                      * We report it using ENOMEM.
                      */
    +                spinlock_unlock(&TASK->lock);
    +                interrupts_restore(ipl);
                     return ENOMEM;
             }
    …
              * Mapping is created on-demand during page fault.
              */
    +
    +        spinlock_unlock(&TASK->lock);
    +        interrupts_restore(ipl);
             return 0;
     }
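
    A minimal sketch of the locking pattern this changeset adopts in place of the passive mutex, using only primitives that appear in the hunks above (demo_lock, demo_counter and demo_update() are hypothetical illustration names; the header providing interrupts_disable()/interrupts_restore() is assumed to be <arch/asm.h>):

        #include <synch/spinlock.h>     /* SPINLOCK_INITIALIZE, spinlock_lock/unlock */
        #include <arch/asm.h>           /* assumed home of interrupts_disable/restore */

        /* Hypothetical shared state, for illustration only. */
        SPINLOCK_INITIALIZE(demo_lock);
        static size_t demo_counter;

        static void demo_update(void)
        {
                /* Disable interrupts first so the lock holder can neither be
                 * preempted nor re-enter the lock from an interrupt handler. */
                ipl_t ipl = interrupts_disable();
                spinlock_lock(&demo_lock);

                demo_counter++;         /* critical section: must not block or sleep */

                spinlock_unlock(&demo_lock);
                interrupts_restore(ipl);
        }

    Unlike with the passive mutex it replaces, the code between lock and unlock must not sleep or block.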
  • kernel/generic/src/mm/frame.c

    rc2ab3f4 → rb8f7ea78
                     spinlock_unlock(&zones.lock);
                     interrupts_restore(ipl);
    -
    -                if (!THREAD)
    -                        panic("Cannot wait for memory to become available.");

                     /*
  • kernel/generic/src/mm/slab.c

    rc2ab3f4 → rb8f7ea78
      * Initialize mag_cache structure in slab cache
      */
    -static bool make_magcache(slab_cache_t *cache)
    +static void make_magcache(slab_cache_t *cache)
     {
             unsigned int i;
    …

             cache->mag_cache = malloc(sizeof(slab_mag_cache_t) * config.cpu_count,
    -            FRAME_ATOMIC);
    -        if (!cache->mag_cache)
    -                return false;
    -
    +            0);
             for (i = 0; i < config.cpu_count; i++) {
                     memsetb(&cache->mag_cache[i], sizeof(cache->mag_cache[i]), 0);
    …
                         "slab_maglock_cpu");
             }
    -        return true;
     }

    …
             spinlock_initialize(&cache->maglock, "slab_maglock");
             if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
    -                (void) make_magcache(cache);
    +                make_magcache(cache);

             /* Compute slab sizes, object counts in slabs etc. */
    …
                         SLAB_CACHE_MAGDEFERRED)
                             continue;
    -                (void) make_magcache(s);
    +                make_magcache(s);
                     s->flags &= ~SLAB_CACHE_MAGDEFERRED;
             }
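
    For context on the make_magcache() hunks above: switching the allocation flag from FRAME_ATOMIC to 0 means the malloc() call is allowed to wait for memory instead of returning NULL, so the bool result and the (void) casts at both call sites become meaningless and are removed. A condensed sketch of the resulting contract (comments are added here; the blocking semantics of the non-atomic flag is the assumption being illustrated):

        /* With flags == 0 the allocation is expected to block until memory is
         * available rather than fail, so there is no error path to report. */
        static void make_magcache(slab_cache_t *cache)
        {
                cache->mag_cache = malloc(sizeof(slab_mag_cache_t) * config.cpu_count, 0);
                /* ... per-CPU magazine initialization as in the hunk above ... */
        }

        /* Callers just invoke it; there is no return value left to check: */
        if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
                make_magcache(cache);

    This reads consistently with the frame.c hunk above, which drops the panic that used to fire when such a wait was attempted without a thread context.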
  • kernel/generic/src/synch/mutex.c

    rc2ab3f4 → rb8f7ea78
     #include <synch/synch.h>
     #include <debug.h>
    -#include <arch.h>

     /** Initialize mutex.
    …
             int rc;

    -        if (mtx->type == MUTEX_PASSIVE && THREAD) {
    +        if (mtx->type == MUTEX_PASSIVE) {
                     rc = _semaphore_down_timeout(&mtx->sem, usec, flags);
             } else {
    -                ASSERT(mtx->type == MUTEX_ACTIVE || !THREAD);
    +                ASSERT(mtx->type == MUTEX_ACTIVE);
                     ASSERT(usec == SYNCH_NO_TIMEOUT);
                     ASSERT(!(flags & SYNCH_FLAGS_INTERRUPTIBLE));
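
    With the THREAD tests gone, the mutex type alone selects the behaviour: a passive mutex always sleeps on its semaphore (and therefore needs a thread context), while an active mutex always busy-waits and accepts neither a timeout nor interruptible waiting. A short usage sketch under that reading (early_mtx and early_init() are hypothetical names; only mutex calls that already appear elsewhere in this changeset are used):

        #include <synch/mutex.h>

        static mutex_t early_mtx;

        void early_init(void)
        {
                mutex_initialize(&early_mtx, MUTEX_ACTIVE);

                /* An active mutex busy-waits, so it works even where sleeping
                 * is not an option, but it must never be given a timeout. */
                mutex_lock(&early_mtx);
                /* ... short, non-blocking work ... */
                mutex_unlock(&early_mtx);
        }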
  • kernel/generic/src/sysinfo/stats.c

    rc2ab3f4 → rb8f7ea78
     #include <sysinfo/stats.h>
     #include <sysinfo/sysinfo.h>
    -#include <synch/spinlock.h>
    -#include <synch/mutex.h>
     #include <time/clock.h>
     #include <mm/frame.h>
    …
     static load_t avenrdy[LOAD_STEPS] = {0, 0, 0};

    -/** Load calculation lock */
    -static mutex_t load_lock;
    +/** Load calculation spinlock */
    +SPINLOCK_STATIC_INITIALIZE_NAME(load_lock, "load_lock");

     /** Get system uptime
    …

             /* Interrupts are already disabled */
    -        spinlock_lock(&thread->lock);
    +        spinlock_lock(&(thread->lock));

             /* Record the statistics and increment the iterator */
    …
             (*iterator)++;

    -        spinlock_unlock(&thread->lock);
    +        spinlock_unlock(&(thread->lock));

             return true;
    …
             }

    -        /* To always get consistent values acquire the mutex */
    -        mutex_lock(&load_lock);
    +        /* To always get consistent values acquire the spinlock */
    +        ipl_t ipl = interrupts_disable();
    +        spinlock_lock(&load_lock);

             unsigned int i;
    …
                     stats_load[i] = avenrdy[i] << LOAD_FIXED_SHIFT;

    -        mutex_unlock(&load_lock);
    +        spinlock_unlock(&load_lock);
    +        interrupts_restore(ipl);

             return ((void *) stats_load);
    …

                     /* Mutually exclude with get_stats_load() */
    -                mutex_lock(&load_lock);
    +                ipl_t ipl = interrupts_disable();
    +                spinlock_lock(&load_lock);

                     unsigned int i;
    …
                             avenrdy[i] = load_calc(avenrdy[i], load_exp[i], ready);

    -                mutex_unlock(&load_lock);
    +                spinlock_unlock(&load_lock);
    +                interrupts_restore(ipl);

                     thread_sleep(LOAD_INTERVAL);
    …
     void stats_init(void)
     {
    -        mutex_initialize(&load_lock, MUTEX_PASSIVE);
    -
             sysinfo_set_item_fn_val("system.uptime", NULL, get_stats_uptime);
             sysinfo_set_item_fn_data("system.cpus", NULL, get_stats_cpus);
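
    In the load-calculation loop above, the recomputation of avenrdy[] now happens under load_lock with interrupts disabled, while thread_sleep() stays outside the critical section, since sleeping with a spinlock held (and interrupts off) is not permissible. A condensed sketch of one iteration, reusing the names from the hunk (the enclosing loop and the source of the ready count are assumptions, shown only as placeholders):

        while (true) {
                size_t ready = 0;       /* placeholder: the real loop reads the ready-thread count */

                ipl_t ipl = interrupts_disable();
                spinlock_lock(&load_lock);

                unsigned int i;
                for (i = 0; i < LOAD_STEPS; i++)
                        avenrdy[i] = load_calc(avenrdy[i], load_exp[i], ready);

                spinlock_unlock(&load_lock);
                interrupts_restore(ipl);

                /* Sleep only after the lock is released and interrupts are back on. */
                thread_sleep(LOAD_INTERVAL);
        }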
  • kernel/generic/src/sysinfo/sysinfo.c

    rc2ab3f4 → rb8f7ea78
     #include <print.h>
     #include <syscall/copy.h>
    -#include <synch/mutex.h>
    +#include <synch/spinlock.h>
     #include <arch/asm.h>
     #include <errno.h>
    …
     static slab_cache_t *sysinfo_item_slab;

    -/** Sysinfo lock */
    -static mutex_t sysinfo_lock;
    +/** Sysinfo spinlock */
    +SPINLOCK_STATIC_INITIALIZE_NAME(sysinfo_lock, "sysinfo_lock");

     /** Sysinfo item constructor
    …
                 sizeof(sysinfo_item_t), 0, sysinfo_item_constructor,
                 sysinfo_item_destructor, SLAB_CACHE_MAGDEFERRED);
    -
    -        mutex_initialize(&sysinfo_lock, MUTEX_ACTIVE);
     }

     /** Recursively find an item in sysinfo tree
      *
    - * Should be called with sysinfo_lock held.
    + * Should be called with interrupts disabled
    + * and sysinfo_lock held.
      *
      * @param name    Current sysinfo path suffix.
    …
     /** Recursively create items in sysinfo tree
      *
    - * Should be called with sysinfo_lock held.
    + * Should be called with interrupts disabled
    + * and sysinfo_lock held.
      *
      * @param name     Current sysinfo path suffix.
    …
     {
             /* Protect sysinfo tree consistency */
    -        mutex_lock(&sysinfo_lock);
    +        ipl_t ipl = interrupts_disable();
    +        spinlock_lock(&sysinfo_lock);

             if (root == NULL)
    …
             }

    -        mutex_unlock(&sysinfo_lock);
    +        spinlock_unlock(&sysinfo_lock);
    +        interrupts_restore(ipl);
     }

    …
     {
             /* Protect sysinfo tree consistency */
    -        mutex_lock(&sysinfo_lock);
    +        ipl_t ipl = interrupts_disable();
    +        spinlock_lock(&sysinfo_lock);

             if (root == NULL)
    …
             }

    -        mutex_unlock(&sysinfo_lock);
    +        spinlock_unlock(&sysinfo_lock);
    +        interrupts_restore(ipl);
     }

    …
     {
             /* Protect sysinfo tree consistency */
    -        mutex_lock(&sysinfo_lock);
    +        ipl_t ipl = interrupts_disable();
    +        spinlock_lock(&sysinfo_lock);

             if (root == NULL)
    …
             }

    -        mutex_unlock(&sysinfo_lock);
    +        spinlock_unlock(&sysinfo_lock);
    +        interrupts_restore(ipl);
     }

    …
     {
             /* Protect sysinfo tree consistency */
    -        mutex_lock(&sysinfo_lock);
    +        ipl_t ipl = interrupts_disable();
    +        spinlock_lock(&sysinfo_lock);

             if (root == NULL)
    …
             }

    -        mutex_unlock(&sysinfo_lock);
    +        spinlock_unlock(&sysinfo_lock);
    +        interrupts_restore(ipl);
     }

    …
     {
             /* Protect sysinfo tree consistency */
    -        mutex_lock(&sysinfo_lock);
    +        ipl_t ipl = interrupts_disable();
    +        spinlock_lock(&sysinfo_lock);

             if (root == NULL)
    …
                     item->val_type = SYSINFO_VAL_UNDEFINED;

    -        mutex_unlock(&sysinfo_lock);
    +        spinlock_unlock(&sysinfo_lock);
    +        interrupts_restore(ipl);
     }

    …
     {
             /* Protect sysinfo tree consistency */
    -        mutex_lock(&sysinfo_lock);
    +        ipl_t ipl = interrupts_disable();
    +        spinlock_lock(&sysinfo_lock);

             if (root == NULL)
    …
             }

    -        mutex_unlock(&sysinfo_lock);
    +        spinlock_unlock(&sysinfo_lock);
    +        interrupts_restore(ipl);
     }

    …
     /** Dump the structure of sysinfo tree
      *
    - * Should be called with sysinfo_lock held.
    + * Should be called with interrupts disabled
    + * and sysinfo_lock held. Because this routine
    + * might take a reasonable long time to proceed,
    + * having the spinlock held is not optimal, but
    + * there is no better simple solution.
      *
      * @param root  Root item of the current (sub)tree.
    …
             /* Avoid other functions to mess with sysinfo
                while we are dumping it */
    -        mutex_lock(&sysinfo_lock);
    +        ipl_t ipl = interrupts_disable();
    +        spinlock_lock(&sysinfo_lock);

             if (root == NULL)
    …
                     sysinfo_dump_internal(root, 0);

    -        mutex_unlock(&sysinfo_lock);
    +        spinlock_unlock(&sysinfo_lock);
    +        interrupts_restore(ipl);
     }

     /** Return sysinfo item value determined by name
      *
    - * Should be called with sysinfo_lock held.
    + * Should be called with interrupts disabled
    + * and sysinfo_lock held.
      *
      * @param name    Sysinfo path.
    …
                      * are reading it.
                      */
    -                mutex_lock(&sysinfo_lock);
    +                ipl_t ipl = interrupts_disable();
    +                spinlock_lock(&sysinfo_lock);
                     ret = sysinfo_get_item(path, NULL, dry_run);
    -                mutex_unlock(&sysinfo_lock);
    +                spinlock_unlock(&sysinfo_lock);
    +                interrupts_restore(ipl);
             }
             free(path);