Changeset fb10289b in mainline
- Timestamp: 2006-02-03T02:25:16Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: e1888f9
- Parents: 086a600
- Files: 2 edited
arch/amd64/src/interrupt.c
--- r086a600
+++ rfb10289b
 #include <proc/thread.h>

 /*
 static void messy_stack_trace(__native *stack)
 {
 …
     printf("\n");
 }
 */
 static void print_info_errcode(int n, void *st)
 {
 …
     printf(" %Q, %Q, %Q\n", x[20], x[21], x[22]);
     printf(" %Q, %Q, %Q\n", x[23], x[24], x[25]);
-    messy_stack_trace(&x[5]);
+    // messy_stack_trace(&x[5]);
 }
generic/src/mm/slab.c
--- r086a600
+++ rfb10289b
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */

+/*
+ * The SLAB allocator is closely modelled after Opensolaris SLAB allocator
+ * http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/
+ *
+ * with the following exceptions:
+ *   - empty SLABS are deallocated immediately
+ *     (in Linux they are kept in linked list, in Solaris ???)
+ *   - empty magazines are deallocated when not needed
+ *     (in Solaris they are held in linked list in slab cache)
+ *
+ * Following features are not currently supported but would be easy to do:
+ *   - cache coloring
+ *   - dynamic magazine growing (different magazine sizes are already
+ *     supported, but we would need to adjust allocating strategy)
+ *
+ * The SLAB allocator supports per-CPU caches ('magazines') to facilitate
+ * good SMP scaling.
+ *
+ * When a new object is being allocated, it is first checked, if it is
+ * available in CPU-bound magazine. If it is not found there, it is
+ * allocated from CPU-shared SLAB - if partial full is found, it is used,
+ * otherwise a new one is allocated.
+ *
+ * When an object is being deallocated, it is put to CPU-bound magazine.
+ * If there is no such magazine, new one is allocated (if it fails,
+ * the object is deallocated into SLAB). If the magazine is full, it is
+ * put into cpu-shared list of magazines and new one is allocated.
+ *
+ * The CPU-bound magazine is actually a pair of magazine to avoid
+ * thrashing when somebody is allocating/deallocating 1 item at the magazine
+ * size boundary. LIFO order is enforced, which should avoid fragmentation
+ * as much as possible.
+ *
+ * Every cache contains list of full slabs and list of partialy full slabs.
+ * Empty SLABS are immediately freed (thrashing will be avoided because
+ * of magazines).
+ *
+ * The SLAB information structure is kept inside the data area, if possible.
+ * The cache can be marked that it should not use magazines. This is used
+ * only for SLAB related caches to avoid deadlocks and infinite recursion
+ * (the SLAB allocator uses itself for allocating all it's control structures).
+ *
+ * The SLAB allocator allocates lot of space and does not free it. When
+ * frame allocator fails to allocate the frame, it calls slab_reclaim().
+ * It tries 'light reclaim' first, then brutal reclaim. The light reclaim
+ * releases slabs from cpu-shared magazine-list, until at least 1 slab
+ * is deallocated in each cache (this algorithm should probably change).
+ * The brutal reclaim removes all cached objects, even from CPU-bound
+ * magazines.
+ *
+ */

 #include <synch/spinlock.h>
 …
 SPINLOCK_INITIALIZE(slab_cache_lock);
-LIST_INITIALIZE(slab_cache_list);
-
-slab_cache_t mag_cache;
-
+static LIST_INITIALIZE(slab_cache_list);
+
+/** Magazine cache */
+static slab_cache_t mag_cache;
+/** Cache for cache descriptors */
+static slab_cache_t slab_cache_cache;
+
+/** Cache for external slab descriptors
+ * This time we want per-cpu cache, so do not make it static
+ * - using SLAB for internal SLAB structures will not deadlock,
+ *   as all slab structures are 'small' - control structures of
+ *   their caches do not require further allocation
+ */
+static slab_cache_t *slab_extern_cache;
+
+/** Slab descriptor */
 typedef struct {
     slab_cache_t *cache; /**< Pointer to parent cache */
 …
  * Allocate frames for slab space and initialize
  *
- * TODO: Change slab_t allocation to slab_alloc(????), malloc with flags!!
  */
 static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
 …
     }
     if (! (cache->flags & SLAB_CACHE_SLINSIDE)) {
-        slab = malloc(sizeof(*slab)); //, flags);
+        slab = slab_alloc(slab_extern_cache, flags);
         if (!slab) {
             frame_free((__address)data);
 …
     frame_free((__address)slab->start);
     if (! (cache->flags & SLAB_CACHE_SLINSIDE))
-        free(slab);
+        slab_free(slab_extern_cache, slab);

     atomic_dec(&cache->allocated_slabs);
 …

 /**
+ * Find full magazine, set it as current and return it
+ *
+ * Assume cpu_magazine lock is held
+ */
+static slab_magazine_t * get_full_current_mag(slab_cache_t *cache)
+{
+    slab_magazine_t *cmag, *lastmag, *newmag;
+
+    cmag = cache->mag_cache[CPU->id].current;
+    lastmag = cache->mag_cache[CPU->id].last;
+    if (cmag) { /* First try local CPU magazines */
+        if (cmag->busy)
+            return cmag;
+
+        if (lastmag && lastmag->busy) {
+            cache->mag_cache[CPU->id].current = lastmag;
+            cache->mag_cache[CPU->id].last = cmag;
+            return lastmag;
+        }
+    }
+    /* Local magazines are empty, import one from magazine list */
+    spinlock_lock(&cache->lock);
+    if (list_empty(&cache->magazines)) {
+        spinlock_unlock(&cache->lock);
+        return NULL;
+    }
+    newmag = list_get_instance(cache->magazines.next,
+                               slab_magazine_t,
+                               link);
+    list_remove(&newmag->link);
+    spinlock_unlock(&cache->lock);
+
+    if (lastmag)
+        slab_free(&mag_cache, lastmag);
+    cache->mag_cache[CPU->id].last = cmag;
+    cache->mag_cache[CPU->id].current = newmag;
+    return newmag;
+}
+
+/**
  * Try to find object in CPU-cache magazines
  *
 …
     spinlock_lock(&cache->mag_cache[CPU->id].lock);

-    mag = cache->mag_cache[CPU->id].current;
-    if (!mag)
-        goto out;
-
-    if (!mag->busy) {
-        /* If current is empty && last exists && not empty, exchange */
-        if (cache->mag_cache[CPU->id].last \
-            && cache->mag_cache[CPU->id].last->busy) {
-            cache->mag_cache[CPU->id].current = cache->mag_cache[CPU->id].last;
-            cache->mag_cache[CPU->id].last = mag;
-            mag = cache->mag_cache[CPU->id].current;
-            goto gotit;
-        }
-        /* If still not busy, exchange current with some from
-         * other full magazines */
-        spinlock_lock(&cache->lock);
-        if (list_empty(&cache->magazines)) {
-            spinlock_unlock(&cache->lock);
-            goto out;
-        }
-        /* Free current magazine and take one from list */
-        slab_free(&mag_cache, mag);
-
-        mag = list_get_instance(cache->magazines.next,
-                                slab_magazine_t,
-                                link);
-        list_remove(&mag->link);
-
-        spinlock_unlock(&cache->lock);
-    }
-gotit:
+    mag = get_full_current_mag(cache);
+    if (!mag) {
+        spinlock_unlock(&cache->mag_cache[CPU->id].lock);
+        return NULL;
+    }
     obj = mag->objs[--mag->busy];
     spinlock_unlock(&cache->mag_cache[CPU->id].lock);
 …

     return obj;
-out:
-    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
-    return NULL;
 }

 /**
  * Assure that the current magazine is empty, return pointer to it, or NULL if
- * no empty magazine available and cannot be allocated
+ * no empty magazine is available and cannot be allocated
  *
  * We have 2 magazines bound to processor.
 …

     mag = make_empty_current_mag(cache);
-    if (!mag)
-        goto errout;
+    if (!mag) {
+        spinlock_unlock(&cache->mag_cache[CPU->id].lock);
+        return -1;
+    }

     mag->objs[mag->busy++] = obj;
 …
     atomic_inc(&cache->cached_objs);
     return 0;
-errout:
-    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
-    return -1;
 }
 …
     slab_cache_t *cache;

-    cache = malloc(sizeof(*cache) + config.cpu_count*sizeof(cache->mag_cache[0]));
+    cache = slab_alloc(&slab_cache_cache, 0);
     _slab_cache_create(cache, name, size, align, constructor, destructor,
                        flags);
 …

     /* First lock all cpu caches, then the complete cache lock */
-    for (i=0; i < config.cpu_count; i++)
-        spinlock_lock(&cache->mag_cache[i].lock);
+    if (flags & SLAB_RECLAIM_ALL) {
+        for (i=0; i < config.cpu_count; i++)
+            spinlock_lock(&cache->mag_cache[i].lock);
+    }
     spinlock_lock(&cache->lock);
 …

     spinlock_unlock(&cache->lock);
-    for (i=0; i < config.cpu_count; i++)
-        spinlock_unlock(&cache->mag_cache[i].lock);
+    if (flags & SLAB_RECLAIM_ALL) {
+        for (i=0; i < config.cpu_count; i++)
+            spinlock_unlock(&cache->mag_cache[i].lock);
+    }

     return frames;
 …
     spinlock_unlock(&slab_cache_lock);

-    free(cache);
+    slab_free(&slab_cache_cache, cache);
 }
 …
     }

+    interrupts_restore(ipl);
+
     if (result)
         atomic_inc(&cache->allocated_objs);
-
-    interrupts_restore(ipl);
-

     return result;
 …
         spinlock_unlock(&cache->lock);
     }
+    interrupts_restore(ipl);
     atomic_dec(&cache->allocated_objs);
-    interrupts_restore(ipl);
 }
 …
                        sizeof(__address),
                        NULL, NULL,
-                       SLAB_CACHE_NOMAGAZINE);
+                       SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
+    /* Initialize slab_cache cache */
+    _slab_cache_create(&slab_cache_cache,
+                       "slab_cache",
+                       sizeof(slab_cache_cache) + config.cpu_count*sizeof(slab_cache_cache.mag_cache[0]),
+                       sizeof(__address),
+                       NULL, NULL,
+                       SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
+    /* Initialize external slab cache */
+    slab_extern_cache = slab_cache_create("slab_extern",
+                                          sizeof(slab_t),
+                                          0, NULL, NULL,
+                                          SLAB_CACHE_SLINSIDE);

     /* Initialize structures for malloc */
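The documentation comment introduced by this changeset describes the per-CPU "pair of magazines" in prose. Below is a minimal stand-alone sketch, not part of the changeset, of how such a current/last magazine pair behaves on the allocation and deallocation fast paths; the type and function names (mag_t, cpu_mag_pair_t, mag_obj_get, mag_obj_put, MAG_SIZE) are invented for illustration and do not appear in slab.c.

/* Sketch only: current/last magazine pair as described in the comment above. */
#include <stddef.h>

#define MAG_SIZE 8

typedef struct {
    size_t busy;              /* number of objects currently cached */
    void *objs[MAG_SIZE];
} mag_t;

typedef struct {
    mag_t *current;           /* magazine used for the next alloc/free */
    mag_t *last;              /* second magazine, swapped in when useful */
} cpu_mag_pair_t;

/* Allocation fast path: pop from current; if current is empty but last has
 * objects, swap the two and pop. Returns NULL when both are empty, in which
 * case the caller would fall back to the shared magazine list or the slabs. */
void *mag_obj_get(cpu_mag_pair_t *p)
{
    if (p->current && p->current->busy)
        return p->current->objs[--p->current->busy];
    if (p->last && p->last->busy) {
        mag_t *tmp = p->current;
        p->current = p->last;
        p->last = tmp;
        return p->current->objs[--p->current->busy];
    }
    return NULL;
}

/* Deallocation fast path: push into current; if current is full but last has
 * room, swap the two and push. Returns -1 when both are full, in which case
 * the caller would hand a full magazine to the shared list and start a new one. */
int mag_obj_put(cpu_mag_pair_t *p, void *obj)
{
    if (p->current && p->current->busy < MAG_SIZE) {
        p->current->objs[p->current->busy++] = obj;
        return 0;
    }
    if (p->last && p->last->busy < MAG_SIZE) {
        mag_t *tmp = p->current;
        p->current = p->last;
        p->last = tmp;
        p->current->objs[p->current->busy++] = obj;
        return 0;
    }
    return -1;
}

Swapping the two magazines, instead of immediately returning a full or empty one to the shared list, is what avoids thrashing when a workload allocates and frees a single object right at the magazine-size boundary, as the added comment explains.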