Changeset a294ad0 in mainline for generic/src/mm/slab.c
- Timestamp: 2006-02-02T14:00:32Z (19 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 2d43f3e
- Parents: 758e065
- File: 1 edited (generic/src/mm/slab.c)
Legend:
- Lines prefixed with '+' were added
- Lines prefixed with '-' were removed
- Unprefixed lines are unchanged context
- '…' marks runs of unchanged lines omitted from the view
generic/src/mm/slab.c
--- generic/src/mm/slab.c	(r758e065)
+++ generic/src/mm/slab.c	(ra294ad0)

  #include <align.h>
  #include <mm/heap.h>
+ #include <mm/frame.h>
  #include <config.h>
  #include <print.h>
  #include <arch.h>
  #include <panic.h>
+ #include <debug.h>
  
  SPINLOCK_INITIALIZE(slab_cache_lock);
…
  slab_cache_t mag_cache;
  
+ 
+ typedef struct {
+ 	slab_cache_t *cache; /**< Pointer to parent cache */
+ 	link_t link;       /* List of full/partial slabs */
+ 	void *start;       /**< Start address of first available item */
+ 	count_t available; /**< Count of available items in this slab */
+ 	index_t nextavail; /**< The index of next available item */
+ }slab_t;
+ 
  /**************************************/
- /* SLAB low level functions */
+ /* SLAB allocation functions */
+ 
+ /**
+  * Allocate frames for slab space and initialize
+  *
+  * TODO: Change slab_t allocation to slab_alloc(????), malloc with flags!!
+  */
+ static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
+ {
+ 	void *data;
+ 	slab_t *slab;
+ 	size_t fsize;
+ 	int i;
+ 	zone_t *zone = NULL;
+ 	int status;
+ 
+ 	data = (void *)frame_alloc(FRAME_KA | flags, cache->order, &status, &zone);
+ 	if (status != FRAME_OK)
+ 		return NULL;
+ 
+ 	if (! cache->flags & SLAB_CACHE_SLINSIDE) {
+ 		slab = malloc(sizeof(*slab)); // , flags);
+ 		if (!slab) {
+ 			frame_free((__address)data);
+ 			return NULL;
+ 		}
+ 	} else {
+ 		fsize = (PAGE_SIZE << cache->order);
+ 		slab = data + fsize - sizeof(*slab);
+ 	}
+ 
+ 	/* Fill in slab structures */
+ 	/* TODO: some better way of accessing the frame, although
+ 	 * the optimizer might optimize the division out :-/ */
+ 	for (i=0; i< (1<<cache->order); i++) {
+ 		ADDR2FRAME(zone, (__address)(data+i*PAGE_SIZE))->parent = slab;
+ 	}
+ 
+ 	slab->start = data;
+ 	slab->available = cache->objects;
+ 	slab->nextavail = 0;
+ 
+ 	for (i=0; i<cache->objects;i++)
+ 		*((int *) (slab->start + i*cache->size)) = i+1;
+ 	return slab;
+ }
+ 
+ /**
+  * Free space associated with SLAB
+  *
+  * @return number of freed frames
+  */
+ static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
+ {
+ 	frame_free((__address)slab->start);
+ 	if (! cache->flags & SLAB_CACHE_SLINSIDE)
+ 		free(slab);
+ 	return 1 << cache->order;
+ }
+ 
+ /** Map object to slab structure */
+ static slab_t * obj2slab(void *obj)
+ {
+ 	frame_t *frame;
+ 
+ 	frame = frame_addr2frame((__address)obj);
+ 	return (slab_t *)frame->parent;
+ }
+ 
+ /**************************************/
+ /* SLAB functions */
  
  
…
   * Return object to slab and call a destructor
   *
+  * Assume the cache->lock is held;
+  *
+  * @param slab If the caller knows directly slab of the object, otherwise NULL
+  *
   * @return Number of freed pages
   */
- static count_t slab_obj_destroy(slab_cache_t *cache, void *obj)
- {
- 	return 0;
- }
- 
+ static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
+ 				slab_t *slab)
+ {
+ 	count_t frames = 0;
+ 
+ 	if (!slab)
+ 		slab = obj2slab(obj);
+ 
+ 	spinlock_lock(cache->lock);
+ 
+ 	*((int *)obj) = slab->nextavail;
+ 	slab->nextavail = (obj - slab->start)/cache->size;
+ 	slab->available++;
+ 
+ 	/* Move it to correct list */
+ 	if (slab->available == 1) {
+ 		/* It was in full, move to partial */
+ 		list_remove(&slab->link);
+ 		list_prepend(&cache->partial_slabs, &slab->link);
+ 	}
+ 	if (slab->available == cache->objects) {
+ 		/* Free associated memory */
+ 		list_remove(&slab->link);
+ 		/* Avoid deadlock */
+ 		spinlock_unlock(&cache->lock);
+ 		frames = slab_space_free(cache, slab);
+ 		spinlock_lock(&cache->lock);
+ 	}
+ 
+ 	spinlock_unlock(cache->lock);
+ 
+ 	return frames;
+ }
  
  /**
   * Take new object from slab or create new if needed
   *
+  * Assume cache->lock is held.
+  *
   * @return Object address or null
   */
  static void * slab_obj_create(slab_cache_t *cache, int flags)
  {
- 	return NULL;
+ 	slab_t *slab;
+ 	void *obj;
+ 
+ 	if (list_empty(&cache->partial_slabs)) {
+ 		/* Allow recursion and reclaiming
+ 		 * - this should work, as the SLAB control structures
+ 		 *   are small and do not need to allocte with anything
+ 		 *   other ten frame_alloc when they are allocating,
+ 		 *   that's why we should get recursion at most 1-level deep
+ 		 */
+ 		spinlock_unlock(&cache->lock);
+ 		slab = slab_space_alloc(cache, flags);
+ 		spinlock_lock(&cache->lock);
+ 		if (!slab)
+ 			return NULL;
+ 	} else {
+ 		slab = list_get_instance(cache->partial_slabs.next,
+ 					 slab_t,
+ 					 link);
+ 		list_remove(&slab->link);
+ 	}
+ 	obj = slab->start + slab->nextavail * cache->size;
+ 	slab->nextavail = *((int *)obj);
+ 	slab->available--;
+ 	if (!slab->available)
+ 		list_prepend(&cache->full_slabs, &slab->link);
+ 	else
+ 		list_prepend(&cache->partial_slabs, &slab->link);
+ 	return obj;
  }
  
…
   * Free all objects in magazine and free memory associated with magazine
   *
-  * Assume cpu->lock is locked
+  * Assume mag_cache[cpu].lock is locked
  
   * @return Number of freed pages
…
  
  	for (i=0;i < mag->busy; i++)
- 		frames += slab_obj_destroy(cache, mag->objs[i]);
+ 		frames += slab_obj_destroy(cache, mag->objs[i], NULL);
  
  	slab_free(&mag_cache, mag);
…
  		goto gotit;
  	}
- 	/* If still not busy, exchange current with some from e
+ 	/* If still not busy, exchange current with some from
  	 * other full magazines */
  	spinlock_lock(&cache->lock);
…
  	/* Especially we do not want reclaiming to start, as
  	 * this would deadlock */
- 	mag = slab_alloc(&mag_cache, SLAB_ATOMIC | SLAB_NO_RECLAIM);
+ 	mag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
  	if (!mag) /* Allocation failed, give up on caching */
  		goto errout;
…
  	list_prepend(&cache->magazines, &mag->link);
  
- 	mag = slab_alloc(&mag_cache, SLAB_ATOMIC | SLAB_NO_RECLAIM);
+ 	mag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
  	if (!mag)
  		goto errout;
…
  
  /**************************************/
- /* Top level SLAB functions */
+ /* SLAB CACHE functions */
+ 
+ /** Return number of objects that fit in certain cache size */
+ static int comp_objects(slab_cache_t *cache)
+ {
+ 	if (cache->flags & SLAB_CACHE_SLINSIDE)
+ 		return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
+ 	else
+ 		return (PAGE_SIZE << cache->order) / cache->size;
+ }
+ 
+ /** Return wasted space in slab */
+ static int badness(slab_cache_t *cache)
+ {
+ 	int objects;
+ 	int ssize;
+ 
+ 	objects = comp_objects(cache);
+ 	ssize = PAGE_SIZE << cache->order;
+ 	if (cache->flags & SLAB_CACHE_SLINSIDE)
+ 		ssize -= sizeof(slab_t);
+ 	return ssize - objects*cache->size;
+ }
  
  /** Initialize allocated memory as a slab cache */
…
  	memsetb((__address)cache, sizeof(*cache), 0);
  	cache->name = name;
- 	cache->align = align;
- 
- 	cache->size = ALIGN_UP(size, align);
+ 
+ 	if (align)
+ 		size = ALIGN_UP(size, align);
+ 	cache->size = size;
  
  	cache->constructor = constructor;
…
  		cache->flags |= SLAB_CACHE_SLINSIDE;
  
- 
+ 	/* Minimum slab order */
+ 	cache->order = (cache->size / PAGE_SIZE) + 1;
+ 
+ 	while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
+ 		cache->order += 1;
+ 	}
+ 
+ 	cache->objects = comp_objects(cache);
  
  	spinlock_lock(&slab_cache_lock);
…
   * @param flags If contains SLAB_RECLAIM_ALL, do aggressive freeing
   * @return Number of freed pages
+  *
+  * TODO: Add light reclaim
   */
  static count_t _slab_reclaim(slab_cache_t *cache, int flags)
…
  
  	if (flags & SLAB_RECLAIM_ALL) {
+ 		/* Aggressive memfree */
+ 
  		/* Destroy CPU magazines */
  		for (i=0; i<config.cpu_count; i++) {
…
  			cache->mag_cache[i].last = NULL;
  		}
- 		/* Destroy full magazines */
- 		cur=cache->magazines.next;
- 		while (cur!=&cache->magazines) {
- 			mag = list_get_instance(cur, slab_magazine_t, link);
- 
- 			cur = cur->next;
- 			list_remove(cur->prev);
- 			frames += magazine_destroy(cache,mag);
- 		}
+ 	}
+ 	/* Destroy full magazines */
+ 	cur=cache->magazines.prev;
+ 	while (cur!=&cache->magazines) {
+ 		mag = list_get_instance(cur, slab_magazine_t, link);
+ 
+ 		cur = cur->prev;
+ 		list_remove(cur->next);
+ 		frames += magazine_destroy(cache,mag);
+ 		/* If we do not do full reclaim, break
+ 		 * as soon as something is freed */
+ 		if (!(flags & SLAB_RECLAIM_ALL) && frames)
+ 			break;
  	}
  
…
  	result = magazine_obj_get(cache);
  
- 	if (!result)
+ 	if (!result) {
+ 		spinlock_lock(&cache->lock);
  		result = slab_obj_create(cache, flags);
+ 		spinlock_unlock(&cache->lock);
+ 	}
  
  	interrupts_restore(ipl);
…
  	ipl = interrupts_disable();
  
- 	if (cache->flags & SLAB_CACHE_NOMAGAZINE)
- 		slab_obj_destroy(cache, obj);
- 	else {
- 		if (magazine_obj_put(cache, obj)) /* If magazine put failed */
- 			slab_obj_destroy(cache, obj);
+ 	if ((cache->flags & SLAB_CACHE_NOMAGAZINE) \
+ 	    || magazine_obj_put(cache, obj)) {
+ 
+ 		spinlock_lock(&cache->lock);
+ 		slab_obj_destroy(cache, obj, NULL);
+ 		spinlock_unlock(&cache->lock);
  	}
  	interrupts_restore(ipl);
…
  
  	spinlock_lock(&slab_cache_lock);
- 	printf("SLAB name\tObj size\n");
+ 	printf("SLAB name\tOsize\tOrder\n");
  	for (cur = slab_cache_list.next;cur!=&slab_cache_list; cur=cur->next) {
  		cache = list_get_instance(cur, slab_cache_t, link);
- 		printf("%s\t%d\n", cache->name, cache->size);
+ 		printf("%s\t%d\t%d\n", cache->name, cache->size, cache->order);
  	}
  	spinlock_unlock(&slab_cache_lock);
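
The heart of the change is the per-slab free list: slab_space_alloc() threads an index chain through the free objects themselves (object i initially stores i+1 in its first word), slab_obj_create() pops the head of that chain via nextavail, and slab_obj_destroy() pushes a freed object back and moves the slab between the full and partial lists. The standalone sketch below is not part of the changeset; it uses made-up names (toy_slab_t, toy_slab_*), plain malloc() instead of frame_alloc(), and omits locking and the slab lists, purely to illustrate the index-chain technique.

/* Standalone illustration of the index-chain free list used by the new
 * slab code: each free object stores the index of the next free object
 * in its first word, and nextavail points at the head of the chain. */
#include <stdio.h>
#include <stdlib.h>

typedef struct {
	void *start;      /* first object */
	size_t size;      /* object size (must be >= sizeof(int)) */
	size_t objects;   /* number of objects in the slab */
	size_t available; /* free objects */
	size_t nextavail; /* index of first free object */
} toy_slab_t;

static void toy_slab_init(toy_slab_t *s, size_t size, size_t objects)
{
	size_t i;

	s->start = malloc(size * objects);
	s->size = size;
	s->objects = objects;
	s->available = objects;
	s->nextavail = 0;
	/* Thread the chain: object i points at object i+1 */
	for (i = 0; i < objects; i++)
		*((int *)((char *)s->start + i * size)) = (int)(i + 1);
}

static void *toy_slab_alloc(toy_slab_t *s)
{
	void *obj;

	if (!s->available)
		return NULL;
	obj = (char *)s->start + s->nextavail * s->size;
	s->nextavail = *((int *)obj); /* pop head of the chain */
	s->available--;
	return obj;
}

static void toy_slab_free(toy_slab_t *s, void *obj)
{
	/* Push the object back as the new chain head */
	*((int *)obj) = (int)s->nextavail;
	s->nextavail = ((char *)obj - (char *)s->start) / s->size;
	s->available++;
}

int main(void)
{
	toy_slab_t s;
	void *a, *b;

	toy_slab_init(&s, 32, 8);
	a = toy_slab_alloc(&s);
	b = toy_slab_alloc(&s);
	printf("allocated %p %p, %zu left\n", a, b, s.available);
	toy_slab_free(&s, a);
	printf("freed one, %zu left, next index %zu\n", s.available, s.nextavail);
	free(s.start);
	return 0;
}

With object size 32 and 8 objects the sketch hands out indices 0 and 1, and after freeing the first object the chain head returns to index 0, matching the LIFO behaviour of nextavail in the patch.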
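The changeset also adds slab sizing at cache creation time: cache->order starts at the minimum number of pages that can hold one object and is bumped until badness() (the wasted bytes per slab) drops below SLAB_MAX_BADNESS(cache). The rough sketch below mirrors that loop; PAGE_SIZE, the assumed sizeof(slab_t), and the MAX_BADNESS threshold are illustrative assumptions, not the kernel's actual values.

/* Rough sketch of the slab sizing logic (comp_objects/badness): grow the
 * order until the wasted space in a slab drops below a threshold.
 * All constants here are illustrative assumptions. */
#include <stdio.h>

#define PAGE_SIZE      4096
#define SLAB_T_SIZE    40                         /* assumed sizeof(slab_t) */
#define MAX_BADNESS(o) ((PAGE_SIZE << (o)) >> 2)  /* assumed: 1/4 of the slab */

static int comp_objects(size_t size, int order, int slinside)
{
	size_t space = (size_t)PAGE_SIZE << order;
	if (slinside)
		space -= SLAB_T_SIZE;
	return (int)(space / size);
}

static int badness(size_t size, int order, int slinside)
{
	size_t space = (size_t)PAGE_SIZE << order;
	if (slinside)
		space -= SLAB_T_SIZE;
	return (int)(space - comp_objects(size, order, slinside) * size);
}

int main(void)
{
	size_t size = 1536;                     /* example object size */
	int slinside = 1;                       /* slab_t kept inside the slab */
	int order = (int)(size / PAGE_SIZE) + 1; /* minimum order, as in the changeset */

	while (badness(size, order, slinside) > MAX_BADNESS(order))
		order++;

	printf("order %d, %d objects, %d bytes wasted\n",
	       order, comp_objects(size, order, slinside),
	       badness(size, order, slinside));
	return 0;
}

With these assumed numbers an object size of 1536 settles on order 1 with 5 objects and 472 wasted bytes; the real figures depend on the kernel's actual sizeof(slab_t) and SLAB_MAX_BADNESS definition.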