#include <synch/spinlock.h>
#include <mm/slab.h>
#include <adt/list.h>
#include <memstr.h>
#include <align.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>
#include <bitops.h>

SPINLOCK_INITIALIZE(slab_cache_lock);
static LIST_INITIALIZE(slab_cache_list);

/** Cache for the magazine structures themselves. */
static slab_cache_t mag_cache;
/** Cache for slab cache descriptors. */
static slab_cache_t slab_cache_cache;
/** Cache for external slab descriptors, used when the descriptor
 *  does not fit inside the slab itself. */
static slab_cache_t *slab_extern_cache;
/** Caches backing malloc(), one per power-of-two size class. */
static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1];
char *malloc_names[] = {
	"malloc-16", "malloc-32", "malloc-64", "malloc-128",
	"malloc-256", "malloc-512", "malloc-1K", "malloc-2K",
	"malloc-4K", "malloc-8K", "malloc-16K", "malloc-32K",
	"malloc-64K", "malloc-128K", "malloc-256K"
};

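/** Slab descriptor. */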
typedef struct {
	slab_cache_t *cache;	/**< Parent cache. */
	link_t link;		/**< Link in the cache's full/partial slab list. */
	void *start;		/**< Start address of the slab space. */
	count_t available;	/**< Count of available objects in this slab. */
	index_t nextavail;	/**< Index of the next available object. */
} slab_t;

#ifdef CONFIG_DEBUG
static int _slab_initialized = 0;
#endif

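/**
 * Allocate frames for a new slab and chain its objects into a free list.
 *
 * @return New slab descriptor, or NULL if the frame allocation failed.
 */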
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
	void *data;
	slab_t *slab;
	size_t fsize;
	int i;
	int status;
	pfn_t pfn;
	int zone = 0;

	pfn = frame_alloc_rc_zone(cache->order, FRAME_KA | flags, &status, &zone);
	if (status != FRAME_OK)
		return NULL;
	data = (void *) PA2KA(PFN2ADDR(pfn));

	if (!(cache->flags & SLAB_CACHE_SLINSIDE)) {
		slab = slab_alloc(slab_extern_cache, flags);
		if (!slab) {
			frame_free(ADDR2PFN(KA2PA(data)));
			return NULL;
		}
	} else {
		/* The slab descriptor lives at the end of the slab space. */
		fsize = (PAGE_SIZE << cache->order);
		slab = data + fsize - sizeof(*slab);
	}

	/* Let every frame of the slab point back to the slab descriptor. */
	for (i = 0; i < (1 << cache->order); i++)
		frame_set_parent(pfn + i, slab, zone);

	slab->start = data;
	slab->available = cache->objects;
	slab->nextavail = 0;
	slab->cache = cache;

	/* Chain the free objects: each free slot stores the index of the next. */
	for (i = 0; i < cache->objects; i++)
		*((int *) (slab->start + i * cache->size)) = i + 1;

	atomic_inc(&cache->allocated_slabs);
	return slab;
}

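/**
 * Free the frames backing a slab, and its external descriptor if any.
 *
 * @return Number of freed frames.
 */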
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
	frame_free(ADDR2PFN(KA2PA(slab->start)));
	if (!(cache->flags & SLAB_CACHE_SLINSIDE))
		slab_free(slab_extern_cache, slab);

	atomic_dec(&cache->allocated_slabs);

	return 1 << cache->order;
}

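/** Map an object to the slab that owns it, via the frame parent pointer. */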
static slab_t * obj2slab(void *obj)
{
	return (slab_t *) frame_get_parent(ADDR2PFN(KA2PA(obj)), 0);
}

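/**
 * Return an object to its slab, running the cache destructor if set.
 * A slab that becomes completely free is released.
 *
 * @return Number of freed frames.
 */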
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
				slab_t *slab)
{
	int freed = 0;

	if (!slab)
		slab = obj2slab(obj);

	ASSERT(slab->cache == cache);

	if (cache->destructor)
		freed = cache->destructor(obj);

	spinlock_lock(&cache->slablock);
	ASSERT(slab->available < cache->objects);

	/* Push the object back onto the slab's free list. */
	*((int *) obj) = slab->nextavail;
	slab->nextavail = (obj - slab->start) / cache->size;
	slab->available++;

	/* Move the slab to the correct list. */
	if (slab->available == cache->objects) {
		/* The slab is completely free, release its memory. */
		list_remove(&slab->link);
		spinlock_unlock(&cache->slablock);

		return freed + slab_space_free(cache, slab);
	} else if (slab->available == 1) {
		/* It was in the full list, move it to partial. */
		list_remove(&slab->link);
		list_prepend(&slab->link, &cache->partial_slabs);
	}
	spinlock_unlock(&cache->slablock);
	return freed;
}

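/**
 * Take an object from a partial slab, allocating a new slab if needed.
 *
 * @return Object address, or NULL on failure.
 */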
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
	slab_t *slab;
	void *obj;

	spinlock_lock(&cache->slablock);

	if (list_empty(&cache->partial_slabs)) {
		/*
		 * Drop the lock while allocating slab space; the allocation
		 * may recurse into the slab allocator (for the external
		 * descriptor) or trigger reclaim, and must not do so with
		 * slablock held.
		 */
		spinlock_unlock(&cache->slablock);
		slab = slab_space_alloc(cache, flags);
		if (!slab)
			return NULL;
		spinlock_lock(&cache->slablock);
	} else {
		slab = list_get_instance(cache->partial_slabs.next,
					 slab_t,
					 link);
		list_remove(&slab->link);
	}
	obj = slab->start + slab->nextavail * cache->size;
	slab->nextavail = *((int *) obj);
	slab->available--;

	if (!slab->available)
		list_prepend(&slab->link, &cache->full_slabs);
	else
		list_prepend(&slab->link, &cache->partial_slabs);

	spinlock_unlock(&cache->slablock);

	if (cache->constructor && cache->constructor(obj, flags)) {
		/* The constructor failed, return the object to the slab. */
		slab_obj_destroy(cache, obj, slab);
		return NULL;
	}
	return obj;
}


/* CPU-cache (magazine) layer */

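/**
 * Remove a magazine from the cache's shared magazine list and return it.
 *
 * @param first Take the head of the list when nonzero, the tail otherwise.
 * @return Magazine, or NULL if the list is empty.
 */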
static slab_magazine_t * get_mag_from_cache(slab_cache_t *cache,
					    int first)
{
	slab_magazine_t *mag = NULL;
	link_t *cur;

	spinlock_lock(&cache->maglock);
	if (!list_empty(&cache->magazines)) {
		if (first)
			cur = cache->magazines.next;
		else
			cur = cache->magazines.prev;
		mag = list_get_instance(cur, slab_magazine_t, link);
		list_remove(&mag->link);
		atomic_dec(&cache->magazine_counter);
	}
	spinlock_unlock(&cache->maglock);
	return mag;
}

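/** Prepend a magazine to the cache's shared magazine list. */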
static void put_mag_to_cache(slab_cache_t *cache, slab_magazine_t *mag)
{
	spinlock_lock(&cache->maglock);

	list_prepend(&mag->link, &cache->magazines);
	atomic_inc(&cache->magazine_counter);

	spinlock_unlock(&cache->maglock);
}

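/**
 * Destroy a magazine: return all objects it holds to their slabs,
 * then free the magazine structure itself.
 *
 * @return Number of freed frames.
 */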
static count_t magazine_destroy(slab_cache_t *cache,
				slab_magazine_t *mag)
{
	int i;
	count_t frames = 0;

	for (i = 0; i < mag->busy; i++) {
		frames += slab_obj_destroy(cache, mag->objs[i], NULL);
		atomic_dec(&cache->cached_objs);
	}

	slab_free(&mag_cache, mag);

	return frames;
}

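/**
 * Find a full magazine for the current CPU, make it current and return it.
 *
 * Expects the CPU magazine lock to be held.
 *
 * @return Full magazine, or NULL if none is available.
 */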
static slab_magazine_t * get_full_current_mag(slab_cache_t *cache)
{
	slab_magazine_t *cmag, *lastmag, *newmag;

	cmag = cache->mag_cache[CPU->id].current;
	lastmag = cache->mag_cache[CPU->id].last;
	if (cmag) {
		if (cmag->busy)
			return cmag;
		/* Current is empty; if the last magazine has objects, swap. */
		if (lastmag && lastmag->busy) {
			cache->mag_cache[CPU->id].current = lastmag;
			cache->mag_cache[CPU->id].last = cmag;
			return lastmag;
		}
	}
	/* Both per-CPU magazines are empty; refill from the shared list. */
	newmag = get_mag_from_cache(cache, 1);
	if (!newmag)
		return NULL;

	/* Retire the old last magazine (any remaining objects are freed). */
	if (lastmag)
		magazine_destroy(cache, lastmag);

	cache->mag_cache[CPU->id].last = cmag;
	cache->mag_cache[CPU->id].current = newmag;
	return newmag;
}

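/**
 * Try to take an object from the per-CPU magazines.
 *
 * @return Object, or NULL if there is no CPU cache or no full magazine.
 */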
static void * magazine_obj_get(slab_cache_t *cache)
{
	slab_magazine_t *mag;
	void *obj;

	if (!CPU)
		return NULL;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = get_full_current_mag(cache);
	if (!mag) {
		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
		return NULL;
	}
	obj = mag->objs[--mag->busy];
	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	atomic_dec(&cache->cached_objs);

	return obj;
}

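/**
 * Provide a magazine with free space for the current CPU and make it
 * current; a fresh magazine is allocated if both per-CPU ones are full.
 *
 * Expects the CPU magazine lock to be held.
 *
 * @return Magazine with free space, or NULL on failure.
 */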
static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache)
{
	slab_magazine_t *cmag, *lastmag, *newmag;

	cmag = cache->mag_cache[CPU->id].current;
	lastmag = cache->mag_cache[CPU->id].last;

	if (cmag) {
		if (cmag->busy < cmag->size)
			return cmag;
		if (lastmag && lastmag->busy < lastmag->size) {
			cache->mag_cache[CPU->id].last = cmag;
			cache->mag_cache[CPU->id].current = lastmag;
			return lastmag;
		}
	}
	/*
	 * Both magazines are full; allocate a fresh one. The allocation is
	 * atomic and non-reclaiming, so that freeing an object never blocks.
	 */
	newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
	if (!newmag)
		return NULL;
	newmag->size = SLAB_MAG_SIZE;
	newmag->busy = 0;

	/* Flush the last (full) magazine to the shared list. */
	if (lastmag)
		put_mag_to_cache(cache, lastmag);

	/* The old current magazine becomes last, the new one current. */
	cache->mag_cache[CPU->id].last = cmag;
	cache->mag_cache[CPU->id].current = newmag;

	return newmag;
}

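/**
 * Put an object into the per-CPU magazine layer.
 *
 * @return 0 on success, -1 if no magazine slot could be provided.
 */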
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
	slab_magazine_t *mag;

	if (!CPU)
		return -1;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = make_empty_current_mag(cache);
	if (!mag) {
		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
		return -1;
	}

	mag->objs[mag->busy++] = obj;

	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	atomic_inc(&cache->cached_objs);
	return 0;
}


/* Slab cache functions */

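/** Return the number of objects that fit into one slab of the cache. */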
static int comp_objects(slab_cache_t *cache)
{
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
	else
		return (PAGE_SIZE << cache->order) / cache->size;
}

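/** Return the space wasted, in bytes, by one slab of the cache. */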
static int badness(slab_cache_t *cache)
{
	int objects;
	int ssize;

	objects = comp_objects(cache);
	ssize = PAGE_SIZE << cache->order;
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		ssize -= sizeof(slab_t);
	return ssize - objects * cache->size;
}

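/** Allocate and initialize the per-CPU magazine array of a cache. */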
static void make_magcache(slab_cache_t *cache)
{
	int i;

	ASSERT(_slab_initialized >= 2);

	cache->mag_cache = malloc(sizeof(slab_mag_cache_t) * config.cpu_count, 0);
	for (i = 0; i < config.cpu_count; i++) {
		memsetb((__address) &cache->mag_cache[i],
			sizeof(cache->mag_cache[i]), 0);
		spinlock_initialize(&cache->mag_cache[i].lock,
				    "slab_maglock_cpu");
	}
}

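/** Initialize previously allocated memory as a slab cache. */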
static void
_slab_cache_create(slab_cache_t *cache,
		   char *name,
		   size_t size,
		   size_t align,
		   int (*constructor)(void *obj, int kmflag),
		   int (*destructor)(void *obj),
		   int flags)
{
	int pages;
	ipl_t ipl;

	memsetb((__address) cache, sizeof(*cache), 0);
	cache->name = name;

	if (align < sizeof(__native))
		align = sizeof(__native);
	size = ALIGN_UP(size, align);

	cache->size = size;

	cache->constructor = constructor;
	cache->destructor = destructor;
	cache->flags = flags;

	list_initialize(&cache->full_slabs);
	list_initialize(&cache->partial_slabs);
	list_initialize(&cache->magazines);
	spinlock_initialize(&cache->slablock, "slab_lock");
	spinlock_initialize(&cache->maglock, "slab_maglock");
	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
		make_magcache(cache);

	/* Small objects keep the slab descriptor inside the slab. */
	if (cache->size < SLAB_INSIDE_SIZE)
		cache->flags |= SLAB_CACHE_SLINSIDE;

	/* Minimum slab order: we need 2^order pages >= the object size. */
	pages = SIZE2FRAMES(cache->size);

	if (pages == 1)
		cache->order = 0;
	else
		cache->order = fnzb(pages - 1) + 1;

	/* Grow the slab until the wasted space is acceptable. */
	while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
		cache->order += 1;
	}
	cache->objects = comp_objects(cache);
	/* If the slab descriptor fits into the wasted space, keep it inside. */
	if (badness(cache) > sizeof(slab_t))
		cache->flags |= SLAB_CACHE_SLINSIDE;

	/* Add the cache to the cache list. */
	ipl = interrupts_disable();
	spinlock_lock(&slab_cache_lock);

	list_append(&cache->link, &slab_cache_list);

	spinlock_unlock(&slab_cache_lock);
	interrupts_restore(ipl);
}

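/** Allocate and initialize a new slab cache. */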
slab_cache_t * slab_cache_create(char *name,
				 size_t size,
				 size_t align,
				 int (*constructor)(void *obj, int kmflag),
				 int (*destructor)(void *obj),
				 int flags)
{
	slab_cache_t *cache;

	cache = slab_alloc(&slab_cache_cache, 0);
	_slab_cache_create(cache, name, size, align, constructor, destructor,
			   flags);
	return cache;
}

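/**
 * Reclaim space occupied by free objects held in magazines.
 *
 * With SLAB_RECLAIM_ALL, all shared and per-CPU magazines are flushed;
 * otherwise reclaiming stops as soon as some frames are freed.
 *
 * @return Number of freed frames.
 */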
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
	int i;
	slab_magazine_t *mag;
	count_t frames = 0;
	int magcount;

	if (cache->flags & SLAB_CACHE_NOMAGAZINE)
		return 0; /* Nothing to do */

	/*
	 * Snapshot the magazine count, so that concurrent additions
	 * to the list cannot turn this into an endless loop.
	 */
	magcount = atomic_get(&cache->magazine_counter);
	while (magcount-- && (mag = get_mag_from_cache(cache, 0))) {
		frames += magazine_destroy(cache, mag);
		if (!(flags & SLAB_RECLAIM_ALL) && frames)
			break;
	}

	if (flags & SLAB_RECLAIM_ALL) {
		/* Aggressive reclaim: flush the per-CPU magazines as well. */
		for (i = 0; i < config.cpu_count; i++) {
			spinlock_lock(&cache->mag_cache[i].lock);

			mag = cache->mag_cache[i].current;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].current = NULL;

			mag = cache->mag_cache[i].last;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].last = NULL;

			spinlock_unlock(&cache->mag_cache[i].lock);
		}
	}

	return frames;
}

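/**
 * Destroy a cache: reclaim everything and remove it from the system.
 *
 * Panics if the cache still contains allocated objects.
 */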
void slab_cache_destroy(slab_cache_t *cache)
{
	ipl_t ipl;

	/*
	 * First remove the cache from the cache list, so that nobody else
	 * can find it while it is being torn down.
	 */
	ipl = interrupts_disable();
	spinlock_lock(&slab_cache_lock);

	list_remove(&cache->link);

	spinlock_unlock(&slab_cache_lock);
	interrupts_restore(ipl);

	/*
	 * From this point on no locking is needed; we assume no one else
	 * touches a cache that is being destroyed.
	 */
	_slab_reclaim(cache, SLAB_RECLAIM_ALL);

	/* All slabs must be empty by now. */
	if (!list_empty(&cache->full_slabs) ||
	    !list_empty(&cache->partial_slabs))
		panic("Destroying cache that is not empty.");

	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
		free(cache->mag_cache);
	slab_free(&slab_cache_cache, cache);
}

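/** Allocate a new object from the cache, trying the per-CPU magazines first. */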
void * slab_alloc(slab_cache_t *cache, int flags)
{
	ipl_t ipl;
	void *result = NULL;

	/* Disable interrupts to avoid deadlocks with interrupt handlers. */
	ipl = interrupts_disable();

	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
		result = magazine_obj_get(cache);
	}
	if (!result)
		result = slab_obj_create(cache, flags);

	interrupts_restore(ipl);

	if (result)
		atomic_inc(&cache->allocated_objs);

	return result;
}

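/** Return an object to the cache, using a known slab descriptor if given. */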
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
	ipl_t ipl;

	ipl = interrupts_disable();

	if ((cache->flags & SLAB_CACHE_NOMAGAZINE) ||
	    magazine_obj_put(cache, obj)) {
		/* No magazine slot available, free directly to the slab. */
		slab_obj_destroy(cache, obj, slab);
	}
	interrupts_restore(ipl);
	atomic_dec(&cache->allocated_objs);
}

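/** Return an object to the cache. */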
void slab_free(slab_cache_t *cache, void *obj)
{
	_slab_free(cache, obj, NULL);
}

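/**
 * Go through all caches and reclaim what is possible.
 *
 * @return Number of freed frames.
 */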
count_t slab_reclaim(int flags)
{
	slab_cache_t *cache;
	link_t *cur;
	count_t frames = 0;

	spinlock_lock(&slab_cache_lock);

	/* Walk all caches and reclaim whatever is possible. */
	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);
		frames += _slab_reclaim(cache, flags);
	}

	spinlock_unlock(&slab_cache_lock);

	return frames;
}

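/** Print the list of all slab caches and their usage statistics. */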
void slab_print_list(void)
{
	slab_cache_t *cache;
	link_t *cur;
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&slab_cache_lock);
	printf("slab name\t Osize\t Pages\t Obj/pg\t Slabs\t Cached\tAllocobjs\tCtl\n");
	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);
		printf("%s\t%7zd\t%7zd\t%7zd\t%7zd\t%7zd\t%7zd\t\t%s\n",
		       cache->name, cache->size,
		       (1 << cache->order), cache->objects,
		       atomic_get(&cache->allocated_slabs),
		       atomic_get(&cache->cached_objs),
		       atomic_get(&cache->allocated_objs),
		       cache->flags & SLAB_CACHE_SLINSIDE ? "In" : "Out");
	}
	spinlock_unlock(&slab_cache_lock);
	interrupts_restore(ipl);
}

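/** Initialize the slab allocator subsystem and its bootstrap caches. */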
void slab_cache_init(void)
{
	int i, size;

	/* Initialize the magazine cache. */
	_slab_cache_create(&mag_cache,
			   "slab_magazine",
			   sizeof(slab_magazine_t) + SLAB_MAG_SIZE * sizeof(void *),
			   sizeof(__address),
			   NULL, NULL,
			   SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
	/* Initialize the cache for slab cache descriptors. */
	_slab_cache_create(&slab_cache_cache,
			   "slab_cache",
			   sizeof(slab_cache_cache),
			   sizeof(__address),
			   NULL, NULL,
			   SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
	/* Initialize the cache for external slab descriptors. */
	slab_extern_cache = slab_cache_create("slab_extern",
					      sizeof(slab_t),
					      0, NULL, NULL,
					      SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);

	/* Initialize the power-of-two caches backing malloc(). */
	for (i = 0, size = (1 << SLAB_MIN_MALLOC_W);
	     i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
	     i++, size <<= 1) {
		malloc_caches[i] = slab_cache_create(malloc_names[i],
						     size, 0,
						     NULL, NULL, SLAB_CACHE_MAGDEFERRED);
	}
#ifdef CONFIG_DEBUG
	_slab_initialized = 1;
#endif
}

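/**
 * Enable per-CPU magazines on all caches that deferred their creation
 * (SLAB_CACHE_MAGDEFERRED); meant to be called once the real number
 * of CPUs is known and the allocator is fully initialized.
 */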
void slab_enable_cpucache(void)
{
	link_t *cur;
	slab_cache_t *s;

#ifdef CONFIG_DEBUG
	_slab_initialized = 2;
#endif

	spinlock_lock(&slab_cache_lock);

	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		s = list_get_instance(cur, slab_cache_t, link);
		if ((s->flags & SLAB_CACHE_MAGDEFERRED) != SLAB_CACHE_MAGDEFERRED)
			continue;
		make_magcache(s);
		s->flags &= ~SLAB_CACHE_MAGDEFERRED;
	}

	spinlock_unlock(&slab_cache_lock);
}

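/** Allocate memory from the power-of-two malloc caches. */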
void * malloc(unsigned int size, int flags)
{
	int idx;

	ASSERT(_slab_initialized);
	ASSERT(size && size <= (1 << SLAB_MAX_MALLOC_W));

	if (size < (1 << SLAB_MIN_MALLOC_W))
		size = (1 << SLAB_MIN_MALLOC_W);

	/* Index of the smallest size class that fits the request. */
	idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;

	return slab_alloc(malloc_caches[idx], flags);
}

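/** Free memory allocated by malloc(). */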
void free(void *obj)
{
	slab_t *slab;

	if (!obj)
		return;

	slab = obj2slab(obj);
	_slab_free(slab->cache, obj, slab);
}