Changeset fb10289b in mainline


Timestamp: 2006-02-03T02:25:16Z
Author: Ondrej Palkovsky <ondrap@…>
Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children: e1888f9
Parents: 086a600
Message:

The SLAB allocator now uses itself for all its internal structures.
Added a description of the allocator.
Removed messy_stack_trace from amd64, as it would scroll away the
important part of the exception output.

Files:
2 edited

  • arch/amd64/src/interrupt.c

--- arch/amd64/src/interrupt.c (r086a600)
+++ arch/amd64/src/interrupt.c (rfb10289b)
@@ -43,5 +43,5 @@
 #include <proc/thread.h>
 
-
+/*
 static void messy_stack_trace(__native *stack)
 {
@@ -58,5 +58,5 @@
         printf("\n");
 }
-
+*/
 static void print_info_errcode(int n, void *st)
 {
@@ -84,5 +84,5 @@
        printf("       %Q, %Q, %Q\n", x[20], x[21], x[22]);
        printf("       %Q, %Q, %Q\n", x[23], x[24], x[25]);
-       messy_stack_trace(&x[5]);
+//     messy_stack_trace(&x[5]);
 }
 
  • generic/src/mm/slab.c

--- generic/src/mm/slab.c (r086a600)
+++ generic/src/mm/slab.c (rfb10289b)
@@ -26,4 +26,58 @@
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
+
+/*
+ * The SLAB allocator is closely modelled after the OpenSolaris SLAB allocator:
+ * http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/
+ *
+ * with the following exceptions:
+ *   - empty SLABs are deallocated immediately
+ *     (in Linux they are kept in a linked list, in Solaris ???)
+ *   - empty magazines are deallocated when not needed
+ *     (in Solaris they are held in a linked list in the slab cache)
+ *
+ * The following features are not currently supported but would be easy to add:
+ *   - cache coloring
+ *   - dynamic magazine growing (different magazine sizes are already
+ *     supported, but we would need to adjust the allocation strategy)
+ *
+ * The SLAB allocator supports per-CPU caches ('magazines') to facilitate
+ * good SMP scaling.
+ *
+ * When a new object is being allocated, we first check whether it is
+ * available in the CPU-bound magazine. If it is not found there, it is
+ * allocated from a CPU-shared SLAB - if a partially full one is found,
+ * it is used, otherwise a new one is allocated.
+ *
+ * When an object is being deallocated, it is put into the CPU-bound
+ * magazine. If there is no such magazine, a new one is allocated (if
+ * that fails, the object is deallocated into the SLAB). If the magazine
+ * is full, it is put onto the CPU-shared list of magazines and a new
+ * one is allocated.
+ *
+ * The CPU-bound magazine is actually a pair of magazines, to avoid
+ * thrashing when somebody is allocating/deallocating one item right at
+ * the magazine size boundary. LIFO order is enforced, which should
+ * avoid fragmentation as much as possible.
+ *
+ * Every cache contains a list of full slabs and a list of partially
+ * full slabs. Empty SLABs are freed immediately (thrashing is avoided
+ * by the magazines).
+ *
+ * The SLAB information structure is kept inside the data area, if
+ * possible. A cache can be marked not to use magazines. This is used
+ * only for SLAB-related caches, to avoid deadlocks and infinite
+ * recursion (the SLAB allocator uses itself for allocating all its
+ * control structures).
+ *
+ * The SLAB allocator allocates a lot of space and does not free it.
+ * When the frame allocator fails to allocate a frame, it calls
+ * slab_reclaim(). That tries a 'light reclaim' first, then a brutal
+ * reclaim. The light reclaim releases slabs from the CPU-shared
+ * magazine list until at least one slab is deallocated in each cache
+ * (this algorithm should probably change). The brutal reclaim removes
+ * all cached objects, even from the CPU-bound magazines.
+ */
+
 
 #include <synch/spinlock.h>
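For readers new to the magazine scheme described in the comment above, the allocation fast path reduces to a few pointer operations on the CPU-bound pair. Here is a minimal sketch, with simplified stand-in types; mag_pair_get() and the fixed 8-slot capacity are illustrative, not the kernel's (the real logic is in get_full_current_mag(), shown later in this diff):

    #include <stddef.h>

    /* Illustrative stand-ins for the kernel's slab_magazine_t and the
     * per-CPU current/last pair. */
    typedef struct {
            size_t busy;            /* objects held; top of the LIFO stack */
            void *objs[8];          /* cached object pointers */
    } magazine_t;

    typedef struct {
            magazine_t *current;    /* magazine tried first */
            magazine_t *last;       /* second magazine; avoids thrashing at
                                       the magazine-size boundary */
    } mag_pair_t;

    /* Fast path: pop from 'current'; if it is empty but 'last' has objects,
     * swap the pair and pop. NULL means the caller must import a full
     * magazine from the CPU-shared list or fall back to the SLAB itself. */
    static void *mag_pair_get(mag_pair_t *pair)
    {
            if (pair->current && pair->current->busy)
                    return pair->current->objs[--pair->current->busy];

            if (pair->last && pair->last->busy) {
                    magazine_t *tmp = pair->current;
                    pair->current = pair->last;
                    pair->last = tmp;
                    return pair->current->objs[--pair->current->busy];
            }
            return NULL;
    }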
     
@@ -41,9 +95,20 @@
 
 SPINLOCK_INITIALIZE(slab_cache_lock);
-LIST_INITIALIZE(slab_cache_list);
-
-slab_cache_t mag_cache;
-
-
+static LIST_INITIALIZE(slab_cache_list);
+
+/** Magazine cache */
+static slab_cache_t mag_cache;
+/** Cache for cache descriptors */
+static slab_cache_t slab_cache_cache;
+
+/** Cache for external slab descriptors
+ * This time we want a per-CPU cache, so do not make it static
+ * - using SLAB for internal SLAB structures will not deadlock,
+ *   as all slab structures are 'small' - the control structures of
+ *   their caches do not require further allocation
+ */
+static slab_cache_t *slab_extern_cache;
+
+/** Slab descriptor */
 typedef struct {
         slab_cache_t *cache; /**< Pointer to parent cache */
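The declarations above rest on the distinction between on-slab and off-slab descriptors. A hypothetical sketch of the two layouts follows; PAGE_SIZE, the trimmed struct, and slab_desc_inside() are assumptions for illustration, and the tail placement is one possible choice, not necessarily the kernel's:

    #include <stdint.h>

    #define PAGE_SIZE 4096          /* assumed frame size for the sketch */

    typedef struct {                /* trimmed stand-in for slab_t */
            void *cache;            /* parent cache */
            void *start;            /* first object in the slab */
    } slab_t;

    /* SLAB_CACHE_SLINSIDE: the descriptor is carved out of the slab's own
     * page (here: its tail), so describing a slab costs no allocation.
     * The allocator's internal caches rely on this to stay non-recursive. */
    static slab_t *slab_desc_inside(void *page)
    {
            return (slab_t *) ((uint8_t *) page + PAGE_SIZE - sizeof(slab_t));
    }

    /* Without SLINSIDE, the descriptor is a separate small object taken
     * from slab_extern_cache (see slab_space_alloc() below); that cache is
     * itself SLINSIDE, so the recursion bottoms out after one level. */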
     
@@ -60,5 +125,4 @@
  * Allocate frames for slab space and initialize
  *
- * TODO: Change slab_t allocation to slab_alloc(????), malloc with flags!!
  */
 static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
@@ -77,5 +141,5 @@
         }
         if (! (cache->flags & SLAB_CACHE_SLINSIDE)) {
-                slab = malloc(sizeof(*slab)); // , flags);
+                slab = slab_alloc(slab_extern_cache, flags);
                 if (!slab) {
                         frame_free((__address)data);
@@ -115,5 +179,5 @@
         frame_free((__address)slab->start);
         if (! (cache->flags & SLAB_CACHE_SLINSIDE))
-                free(slab);
+                slab_free(slab_extern_cache, slab);
 
         atomic_dec(&cache->allocated_slabs);
     
@@ -244,4 +308,44 @@
 
 /**
+ * Find full magazine, set it as current and return it
+ *
+ * Assume cpu_magazine lock is held
+ */
+static slab_magazine_t * get_full_current_mag(slab_cache_t *cache)
+{
+        slab_magazine_t *cmag, *lastmag, *newmag;
+
+        cmag = cache->mag_cache[CPU->id].current;
+        lastmag = cache->mag_cache[CPU->id].last;
+        if (cmag) { /* First try local CPU magazines */
+                if (cmag->busy)
+                        return cmag;
+
+                if (lastmag && lastmag->busy) {
+                        cache->mag_cache[CPU->id].current = lastmag;
+                        cache->mag_cache[CPU->id].last = cmag;
+                        return lastmag;
+                }
+        }
+        /* Local magazines are empty, import one from magazine list */
+        spinlock_lock(&cache->lock);
+        if (list_empty(&cache->magazines)) {
+                spinlock_unlock(&cache->lock);
+                return NULL;
+        }
+        newmag = list_get_instance(cache->magazines.next,
+                                   slab_magazine_t,
+                                   link);
+        list_remove(&newmag->link);
+        spinlock_unlock(&cache->lock);
+
+        if (lastmag)
+                slab_free(&mag_cache, lastmag);
+        cache->mag_cache[CPU->id].last = cmag;
+        cache->mag_cache[CPU->id].current = newmag;
+        return newmag;
+}
+
+/**
  * Try to find object in CPU-cache magazines
  *
     
@@ -255,35 +359,9 @@
         spinlock_lock(&cache->mag_cache[CPU->id].lock);
 
-        mag = cache->mag_cache[CPU->id].current;
-        if (!mag)
-                goto out;
-
-        if (!mag->busy) {
-                /* If current is empty && last exists && not empty, exchange */
-                if (cache->mag_cache[CPU->id].last \
-                    && cache->mag_cache[CPU->id].last->busy) {
-                        cache->mag_cache[CPU->id].current = cache->mag_cache[CPU->id].last;
-                        cache->mag_cache[CPU->id].last = mag;
-                        mag = cache->mag_cache[CPU->id].current;
-                        goto gotit;
-                }
-                /* If still not busy, exchange current with some from
-                 * other full magazines */
-                spinlock_lock(&cache->lock);
-                if (list_empty(&cache->magazines)) {
-                        spinlock_unlock(&cache->lock);
-                        goto out;
-                }
-                /* Free current magazine and take one from list */
-                slab_free(&mag_cache, mag);
-
-                mag = list_get_instance(cache->magazines.next,
-                                        slab_magazine_t,
-                                        link);
-                list_remove(&mag->link);
-
-                spinlock_unlock(&cache->lock);
-        }
-gotit:
+        mag = get_full_current_mag(cache);
+        if (!mag) {
+                spinlock_unlock(&cache->mag_cache[CPU->id].lock);
+                return NULL;
+        }
         obj = mag->objs[--mag->busy];
         spinlock_unlock(&cache->mag_cache[CPU->id].lock);
@@ -291,7 +369,4 @@
 
         return obj;
-out:
-        spinlock_unlock(&cache->mag_cache[CPU->id].lock);
-        return NULL;
 }
 
@@ -298,5 +373,5 @@
 /**
  * Assure that the current magazine is empty, return pointer to it, or NULL if
- * no empty magazine available and cannot be allocated
+ * no empty magazine is available and cannot be allocated
  *
  * We have 2 magazines bound to processor.
     
@@ -355,6 +430,8 @@
 
         mag = make_empty_current_mag(cache);
-        if (!mag)
-                goto errout;
+        if (!mag) {
+                spinlock_unlock(&cache->mag_cache[CPU->id].lock);
+                return -1;
+        }
 
         mag->objs[mag->busy++] = obj;
@@ -363,7 +440,4 @@
         atomic_inc(&cache->cached_objs);
         return 0;
-errout:
-        spinlock_unlock(&cache->mag_cache[CPU->id].lock);
-        return -1;
 }
 
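The deallocation side being restructured above is symmetric to the allocation fast path. Reusing the illustrative types from the earlier sketch, the put operation looks roughly like this (mag_pair_put() is ours, not the kernel's):

    /* Push a freed object into the CPU-bound magazine (LIFO: the object
     * freed last is allocated first, which is cache-friendly). A -1
     * return tells the caller to retire the full magazine to the
     * CPU-shared list or, failing that, free the object into the SLAB. */
    static int mag_pair_put(mag_pair_t *pair, void *obj)
    {
            magazine_t *mag = pair->current;

            if (!mag)
                    return -1;      /* no magazine bound to this CPU */
            if (mag->busy >= sizeof(mag->objs) / sizeof(mag->objs[0]))
                    return -1;      /* full: must be swapped out first */
            mag->objs[mag->busy++] = obj;
            return 0;
    }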
     
@@ -461,5 +535,5 @@
         slab_cache_t *cache;
 
-        cache = malloc(sizeof(*cache) + config.cpu_count*sizeof(cache->mag_cache[0]));
+        cache = slab_alloc(&slab_cache_cache, 0);
         _slab_cache_create(cache, name, size, align, constructor, destructor,
                            flags);
@@ -484,6 +558,8 @@
 
         /* First lock all cpu caches, then the complete cache lock */
-        for (i=0; i < config.cpu_count; i++)
-                spinlock_lock(&cache->mag_cache[i].lock);
+        if (flags & SLAB_RECLAIM_ALL) {
+                for (i=0; i < config.cpu_count; i++)
+                        spinlock_lock(&cache->mag_cache[i].lock);
+        }
         spinlock_lock(&cache->lock);
 
     
@@ -519,6 +595,8 @@
 
         spinlock_unlock(&cache->lock);
-        for (i=0; i < config.cpu_count; i++)
-                spinlock_unlock(&cache->mag_cache[i].lock);
+        if (flags & SLAB_RECLAIM_ALL) {
+                for (i=0; i < config.cpu_count; i++)
+                        spinlock_unlock(&cache->mag_cache[i].lock);
+        }
 
         return frames;
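These two hunks gate the per-CPU magazine locks on SLAB_RECLAIM_ALL, matching the light/brutal reclaim split described in the header comment. A rough per-cache sketch of that policy; only the SLAB_RECLAIM_ALL name comes from the diff, and both drain helpers are hypothetical:

    #include <stddef.h>

    #define SLAB_RECLAIM_ALL 0x1    /* flag name from the diff; value assumed */

    struct cache;                   /* opaque for the sketch */
    size_t drain_shared_magazines(struct cache *c);  /* hypothetical helper */
    size_t drain_cpu_magazines(struct cache *c);     /* hypothetical helper */

    /* Light reclaim only drains the CPU-shared magazine list; brutal
     * reclaim (SLAB_RECLAIM_ALL) also flushes the CPU-bound magazines,
     * which is why only that mode needs every mag_cache[i].lock. */
    static size_t cache_reclaim(struct cache *c, int flags)
    {
            size_t frames = drain_shared_magazines(c);

            if (flags & SLAB_RECLAIM_ALL)
                    frames += drain_cpu_magazines(c);

            return frames;
    }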
     
@@ -543,5 +621,5 @@
         spinlock_unlock(&slab_cache_lock);
 
-        free(cache);
+        slab_free(&slab_cache_cache, cache);
 }
 
@@ -565,9 +643,8 @@
         }
 
+        interrupts_restore(ipl);
+
         if (result)
                 atomic_inc(&cache->allocated_objs);
-
-        interrupts_restore(ipl);
-
 
         return result;
@@ -588,6 +665,6 @@
                 spinlock_unlock(&cache->lock);
         }
+        interrupts_restore(ipl);
         atomic_dec(&cache->allocated_objs);
-        interrupts_restore(ipl);
 }
 
     
@@ -640,5 +717,17 @@
                            sizeof(__address),
                            NULL, NULL,
-                           SLAB_CACHE_NOMAGAZINE);
+                           SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
+        /* Initialize slab_cache cache */
+        _slab_cache_create(&slab_cache_cache,
+                           "slab_cache",
+                           sizeof(slab_cache_cache) + config.cpu_count*sizeof(slab_cache_cache.mag_cache[0]),
+                           sizeof(__address),
+                           NULL, NULL,
+                           SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
+        /* Initialize external slab cache */
+        slab_extern_cache = slab_cache_create("slab_extern",
+                                              sizeof(slab_t),
+                                              0, NULL, NULL,
+                                              SLAB_CACHE_SLINSIDE);
 
         /* Initialize structures for malloc */
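After this bootstrap, ordinary kernel code can create fully magazine-backed caches through the API exercised in this diff. A usage sketch, with foo_t and the cache name invented for illustration:

    #include <mm/slab.h>            /* assumed to declare the API below */

    /* Assumes the slab API as it appears in this diff:
     * slab_cache_create(name, size, align, ctor, dtor, flags),
     * slab_alloc(cache, flags) and slab_free(cache, obj). */
    typedef struct {
            int state;
    } foo_t;                        /* illustrative object type */

    static slab_cache_t *foo_cache;

    void foo_init(void)
    {
            foo_cache = slab_cache_create("foo", sizeof(foo_t),
                                          0,          /* default alignment */
                                          NULL, NULL, /* no ctor/dtor */
                                          0);         /* magazines enabled */
    }

    void *foo_new(void)
    {
            return slab_alloc(foo_cache, 0);
    }

    void foo_delete(void *obj)
    {
            slab_free(foo_cache, obj);
    }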