Changes in kernel/generic/src/mm/slab.c [55821eea:2d3ddad] in mainline
File: kernel/generic/src/mm/slab.c (1 edited)
Legend (for the excerpts below):
-  line removed (present only in r55821eea)
+  line added (present only in r2d3ddad)
Unprefixed lines and the surrounding notes describe unchanged or purely cosmetic context.
kernel/generic/src/mm/slab.c
The changes below go from revision r55821eea (old) to r2d3ddad (new).

File header comment and global declarations: the empty @brief line is filled in ("@brief Slab allocator."), trailing whitespace is stripped from several comment lines, and the @todo note is reworded ("It might be good to add granularity of locks even to slab level, we could then try_spinlock over all partial slabs and thus improve scalability even on slab level.").

The global cache-list lock becomes an interrupt-aware spinlock:

- SPINLOCK_INITIALIZE(slab_cache_lock);
+ IRQ_SPINLOCK_STATIC_INITIALIZE(slab_cache_lock);

Blank lines are added between the static cache declarations (mag_cache, slab_cache_cache, slab_extern_cache, malloc_caches, malloc_names), and the members of the slab descriptor gain documentation:

-     slab_cache_t *cache;
-     link_t link;
-     void *start;
-     size_t available;
-     size_t nextavail;
+     slab_cache_t *cache;     /**< Pointer to parent cache. */
+     link_t link;             /**< List of full/partial slabs. */
+     void *start;             /**< Start address of first available item. */
+     size_t available;        /**< Count of available items in this slab. */
+     size_t nextavail;        /**< The index of next available item. */
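The file header comment documented above describes per-CPU "magazines": small fixed-size LIFO stacks of ready-to-use objects that absorb most allocations and frees before the shared slab lists are touched. The following is a minimal standalone sketch of that idea in plain C, not HelenOS code; mag_t, mag_put, mag_get and MAG_SIZE are invented for the example, with MAG_SIZE standing in for SLAB_MAG_SIZE.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define MAG_SIZE 4   /* stand-in for SLAB_MAG_SIZE */

    /* A magazine: a small LIFO stack of object pointers. */
    typedef struct {
        size_t busy;              /* number of cached objects */
        void *objs[MAG_SIZE];     /* cached object pointers */
    } mag_t;

    /* Try to cache an object; fails when the magazine is full. */
    static bool mag_put(mag_t *mag, void *obj)
    {
        if (mag->busy == MAG_SIZE)
            return false;         /* caller falls back to the slab layer */
        mag->objs[mag->busy++] = obj;
        return true;
    }

    /* Try to reuse a cached object; fails when the magazine is empty. */
    static void *mag_get(mag_t *mag)
    {
        if (mag->busy == 0)
            return NULL;          /* caller falls back to the slab layer */
        return mag->objs[--mag->busy];
    }

    int main(void)
    {
        int a, b;
        mag_t mag = { 0 };

        mag_put(&mag, &a);
        mag_put(&mag, &b);

        /* LIFO order: the most recently freed object is reused first. */
        printf("%s\n", mag_get(&mag) == &b ? "b reused first" : "unexpected");
        printf("%s\n", mag_get(&mag) == &a ? "then a" : "unexpected");
        return 0;
    }

The LIFO order is exactly what the header comment argues for: it keeps the hottest objects at the top and avoids thrashing when a caller allocates and frees a single item right at the magazine size boundary.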
The debug flag changes type:

- static int _slab_initialized = 0;
+ static unsigned int _slab_initialized = 0;

In slab_space_alloc() the doc comment is reflowed, the flags parameter becomes unsigned int, and locals are declared where first used: the frame allocation and its failure check move to the top of the function,

+     size_t zone = 0;
+
+     void *data = frame_alloc_generic(cache->order, FRAME_KA | flags, &zone);
+     if (!data) {
+         return NULL;
+     }

while the separate void *data;, unsigned int i; and size_t zone = 0; declarations further down are dropped. The loop index becomes a size_t declared at the loop, and the free-list seeding stores a size_t instead of an int:

-     for (i = 0; i < ((unsigned int) 1 << cache->order); i++)
+     size_t i;
+     for (i = 0; i < ((size_t) 1 << cache->order); i++)
...
-         *((int *) (slab->start + i*cache->size)) = i + 1;
+         *((size_t *) (slab->start + i * cache->size)) = i + 1;

The slab_space_free() doc comment is reformatted to the common block style.
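The seeding loop above builds a free list that lives inside the objects themselves: while an object is free, its first bytes hold the index of the next free object, and the slab's nextavail field points at the head of that chain. Changing the cast from (int *) to (size_t *) keeps the stored index the same type as nextavail. The sketch below is a standalone illustration of the technique, not kernel code; demo_slab_t and the fixed OBJ_SIZE/OBJ_COUNT are invented for the demo.

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define OBJ_SIZE  64   /* must be >= sizeof(size_t) */
    #define OBJ_COUNT 8    /* objects per "slab" in this demo */

    typedef struct {
        void *start;       /* start of the object area */
        size_t available;  /* number of free objects */
        size_t nextavail;  /* index of the first free object */
    } demo_slab_t;

    /* Seed the in-object free list: free object i points to object i + 1. */
    static void demo_slab_init(demo_slab_t *slab, void *buf)
    {
        slab->start = buf;
        slab->available = OBJ_COUNT;
        slab->nextavail = 0;

        for (size_t i = 0; i < OBJ_COUNT; i++)
            *((size_t *) ((char *) buf + i * OBJ_SIZE)) = i + 1;
    }

    /* Pop the head of the free list. */
    static void *demo_obj_alloc(demo_slab_t *slab)
    {
        if (slab->available == 0)
            return NULL;

        void *obj = (char *) slab->start + slab->nextavail * OBJ_SIZE;
        slab->nextavail = *((size_t *) obj);   /* follow the chain */
        slab->available--;
        return obj;
    }

    /* Push a freed object back as the new head. */
    static void demo_obj_free(demo_slab_t *slab, void *obj)
    {
        *((size_t *) obj) = slab->nextavail;
        slab->nextavail = ((char *) obj - (char *) slab->start) / OBJ_SIZE;
        slab->available++;
    }

    int main(void)
    {
        void *buf = malloc(OBJ_SIZE * OBJ_COUNT);
        demo_slab_t slab;
        demo_slab_init(&slab, buf);

        void *first = demo_obj_alloc(&slab);   /* object 0 */
        void *second = demo_obj_alloc(&slab);  /* object 1 */
        demo_obj_free(&slab, first);           /* object 0 is the head again */

        printf("next free index: %zu, available: %zu\n",
            slab.nextavail, slab.available);   /* prints 0 and 7 */
        (void) second;
        free(buf);
        return 0;
    }

The pop and push halves of this sketch correspond to the reads and writes of nextavail in slab_obj_create() and slab_obj_destroy() in the next hunks.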
slab_space_free() now parenthesizes its return value (return (1 << cache->order);), obj2slab() is joined onto one line, and the section separators around the slab functions are redrawn.

In slab_obj_destroy() the freed counter becomes a size_t declared after the ASSERT, and the object's free-list link is stored as a size_t:

-     *((int *)obj) = slab->nextavail;
+     *((size_t *) obj) = slab->nextavail;

In slab_obj_create() the locals are declared at first use, the comment on recursion during slab allocation is reflowed, the free-list link is read as a size_t, and the constructor check gains explicit parentheses:

-     obj = slab->start + slab->nextavail * cache->size;
-     slab->nextavail = *((int *)obj);
+     void *obj = slab->start + slab->nextavail * cache->size;
+     slab->nextavail = *((size_t *) obj);
...
-     if ( cache->constructor && cache->constructor(obj, flags)) {
+     if ((cache->constructor) && (cache->constructor(obj, flags))) {
On constructor failure the object is still destroyed and NULL returned (unchanged). The CPU-cache section banner is redrawn, and get_mag_from_cache() takes a bool instead of an int for its first parameter:

- static slab_magazine_t *get_mag_from_cache(slab_cache_t *cache, int first)
+ static slab_magazine_t *get_mag_from_cache(slab_cache_t *cache, bool first)

magazine_destroy() counts with a size_t. In get_full_current_mag() the "Assume cpu_magazine lock is held" wording is replaced by a runtime check and the locals are initialized at declaration:

-     slab_magazine_t *cmag, *lastmag, *newmag;
-     cmag = cache->mag_cache[CPU->id].current;
-     lastmag = cache->mag_cache[CPU->id].last;
+     slab_magazine_t *cmag = cache->mag_cache[CPU->id].current;
+     slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last;
+
+     ASSERT(spinlock_locked(&cache->mag_cache[CPU->id].lock));
...
-     newmag = get_mag_from_cache(cache, 1);
+     slab_magazine_t *newmag = get_mag_from_cache(cache, 1);
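The notable change here is that a locking precondition stated only in a comment ("Assume ... lock is held") becomes a checked assertion, ASSERT(spinlock_locked(...)). A minimal standalone illustration of that pattern follows; the lock type is a stand-in with invented names, not the HelenOS spinlock API.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in lock that only records whether it is held. */
    typedef struct {
        bool held;
    } demo_lock_t;

    static void demo_lock(demo_lock_t *l)   { l->held = true; }
    static void demo_unlock(demo_lock_t *l) { l->held = false; }
    static bool demo_locked(demo_lock_t *l) { return l->held; }

    static int counter;

    /* The precondition "caller holds the lock" is checked at run time,
     * not merely stated in a comment. */
    static void counter_bump(demo_lock_t *l)
    {
        assert(demo_locked(l));
        counter++;
    }

    int main(void)
    {
        demo_lock_t lock = { false };

        demo_lock(&lock);
        counter_bump(&lock);    /* fine: the precondition holds */
        demo_unlock(&lock);

        printf("counter = %d\n", counter);
        /* Calling counter_bump(&lock) here would trip the assertion. */
        return 0;
    }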
magazine_obj_get() declares its magazine and object at the point of use:

-     mag = get_full_current_mag(cache);
+     slab_magazine_t *mag = get_full_current_mag(cache);
...
-     obj = mag->objs[--mag->busy];
+     void *obj = mag->objs[--mag->busy];

In make_empty_current_mag() the doc comment is trimmed (the "Assume mag_cache[CPU->id].lock is held" note becomes an assertion, as above), the locals are initialized at declaration, the test on the last magazine gains parentheses, the "We do not want to sleep just because of caching" remarks are merged into one block comment, and the fallback allocation is declared at its use:

+     ASSERT(spinlock_locked(&cache->mag_cache[CPU->id].lock));
...
-     if (lastmag && lastmag->busy < lastmag->size) {
+     if ((lastmag) && (lastmag->busy < lastmag->size)) {
...
-     newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
+     slab_magazine_t *newmag = slab_alloc(&mag_cache,
+         FRAME_ATOMIC | FRAME_NO_RECLAIM);
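make_empty_current_mag() keeps two magazines per CPU and only allocates a fresh one when both the current and the last magazine are full; get_full_current_mag() mirrors the same policy on the allocation side. The sketch below illustrates just that exchange policy for the free path, in plain C with invented names; in the kernel the full "last" magazine would be pushed to cache->magazines via put_mag_to_cache(), here it is simply leaked to keep the demo short.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAG_SIZE 2

    typedef struct {
        size_t busy;
        void *objs[MAG_SIZE];
    } mag_t;

    typedef struct {
        mag_t *current;   /* magazine being filled right now */
        mag_t *last;      /* previously used magazine, kept as a spare */
    } cpu_mags_t;

    /* Return a magazine with room for one more object, or NULL.
     * Policy: try current, then swap in last, then allocate fresh. */
    static mag_t *make_room(cpu_mags_t *cpu)
    {
        if (cpu->current && cpu->current->busy < MAG_SIZE)
            return cpu->current;

        if (cpu->last && cpu->last->busy < MAG_SIZE) {
            mag_t *tmp = cpu->current;   /* swap current and last */
            cpu->current = cpu->last;
            cpu->last = tmp;
            return cpu->current;
        }

        /* Both full (or missing): the kernel would hand the full 'last'
         * to the cache-wide magazine list; this demo just drops it. */
        mag_t *fresh = calloc(1, sizeof(mag_t));
        if (!fresh)
            return NULL;

        cpu->last = cpu->current;
        cpu->current = fresh;
        return fresh;
    }

    static bool cpu_obj_put(cpu_mags_t *cpu, void *obj)
    {
        mag_t *mag = make_room(cpu);
        if (!mag)
            return false;
        mag->objs[mag->busy++] = obj;
        return true;
    }

    int main(void)
    {
        cpu_mags_t cpu = { NULL, NULL };
        int objs[5];

        for (int i = 0; i < 5; i++)
            cpu_obj_put(&cpu, &objs[i]);

        printf("current holds %zu, last holds %zu\n",
            cpu.current->busy, cpu.last->busy);   /* prints 1 and 2 */
        return 0;
    }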
The tail of make_empty_current_mag() only changes whitespace. magazine_obj_put() rewords its return documentation to "0 on success, -1 on no memory" and declares its magazine at the point of use. comp_objects() and badness() now return size_t and initialize their locals at declaration:

- static unsigned int comp_objects(slab_cache_t *cache)
+ static size_t comp_objects(slab_cache_t *cache)
...
- static unsigned int badness(slab_cache_t *cache)
+ static size_t badness(slab_cache_t *cache)
...
-     unsigned int objects;
-     unsigned int ssize;
-     objects = comp_objects(cache);
-     ssize = PAGE_SIZE << cache->order;
+     size_t objects = comp_objects(cache);
+     size_t ssize = PAGE_SIZE << cache->order;

make_magcache() uses a size_t loop index and renames the per-CPU magazine lock:

-         spinlock_initialize(&cache->mag_cache[i].lock, "slab_maglock_cpu");
+         spinlock_initialize(&cache->mag_cache[i].lock,
+             "slab.cache.mag_cache[].lock");
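The arithmetic in comp_objects() and badness() is simple enough to check by hand: with the slab_t descriptor kept inside the slab, a slab of (PAGE_SIZE << order) bytes holds ((PAGE_SIZE << order) - sizeof(slab_t)) / size objects and wastes whatever remains. A standalone check of those two formulas, assuming a 4 KiB page and a 40-byte descriptor purely for the demo:

    #include <stddef.h>
    #include <stdio.h>

    #define PAGE_SIZE   4096u   /* example page size */
    #define SLAB_T_SIZE 40u     /* assumed sizeof(slab_t) for this demo */

    /* Objects that fit into one slab of 2^order pages, descriptor inside. */
    static size_t comp_objects(size_t size, unsigned int order)
    {
        return ((PAGE_SIZE << order) - SLAB_T_SIZE) / size;
    }

    /* Bytes wasted at the end of such a slab. */
    static size_t badness(size_t size, unsigned int order)
    {
        size_t ssize = (PAGE_SIZE << order) - SLAB_T_SIZE;
        return ssize - comp_objects(size, order) * size;
    }

    int main(void)
    {
        size_t sizes[] = { 64, 96, 192, 768 };

        for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
            printf("size %4zu: %3zu objects per page, %3zu bytes wasted\n",
                sizes[i], comp_objects(sizes[i], 0), badness(sizes[i], 0));
        return 0;
    }

_slab_cache_create() below grows the order until this badness figure drops under SLAB_MAX_BADNESS(cache).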
_slab_cache_create() and slab_cache_create() get updated prototypes: the constructor's kmflag and the cache flags become unsigned int, and the destructor now returns size_t:

- static void _slab_cache_create(slab_cache_t *cache, const char *name,
-     size_t size, size_t align, int (*constructor)(void *obj, int kmflag),
-     int (*destructor)(void *obj), int flags)
+ static void _slab_cache_create(slab_cache_t *cache, const char *name,
+     size_t size, size_t align, int (*constructor)(void *obj,
+     unsigned int kmflag), size_t (*destructor)(void *obj), unsigned int flags)

Inside, the int pages and ipl_t ipl locals are dropped (pages becomes a size_t declared at its computation), the cache locks are renamed to "slab.cache.slablock" and "slab.cache.maglock", the badness loop loses its braces, and the insertion into the global cache list switches to the interrupt-aware lock:

-     ipl = interrupts_disable();
-     spinlock_lock(&slab_cache_lock);
-     list_append(&cache->link, &slab_cache_list);
-     spinlock_unlock(&slab_cache_lock);
-     interrupts_restore(ipl);
+     irq_spinlock_lock(&slab_cache_lock, true);
+     list_append(&cache->link, &slab_cache_list);
+     irq_spinlock_unlock(&slab_cache_lock, true);

slab_cache_create() allocates and initializes its cache in one step. _slab_reclaim() takes unsigned int flags, moves the SLAB_CACHE_NOMAGAZINE early return to the top, and counts magazines with an atomic_count_t:

-     magcount = atomic_get(&cache->magazine_counter);
+     atomic_count_t magcount = atomic_get(&cache->magazine_counter);
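Throughout this changeset, explicit interrupts_disable()/spinlock_lock() ... spinlock_unlock()/interrupts_restore() sequences around slab_cache_lock collapse into a single irq_spinlock_lock()/irq_spinlock_unlock() pair, whose boolean argument appears to say whether the lock should handle the interrupt state itself (true here, false in slab_reclaim() where interrupts are already disabled). The sketch below only emulates that calling convention in plain, single-threaded C; a boolean flag stands in for the real interrupt state and none of the names are the HelenOS API.

    #include <stdbool.h>
    #include <stdio.h>

    /* Pretend interrupt state for this single-threaded demo. */
    static bool interrupts_on = true;

    typedef struct {
        bool held;        /* the "spinlock" itself (no real atomics here) */
        bool saved_ipl;   /* interrupt state to restore on unlock */
    } demo_irq_lock_t;

    /* Lock; if irq_dis is true, also disable "interrupts" and remember
     * whether they were enabled, so unlock can restore them. */
    static void demo_irq_lock(demo_irq_lock_t *l, bool irq_dis)
    {
        if (irq_dis) {
            l->saved_ipl = interrupts_on;
            interrupts_on = false;
        }
        l->held = true;
    }

    static void demo_irq_unlock(demo_irq_lock_t *l, bool irq_res)
    {
        l->held = false;
        if (irq_res)
            interrupts_on = l->saved_ipl;
    }

    int main(void)
    {
        demo_irq_lock_t lock = { false, false };

        /* One call replaces interrupts_disable() + spinlock_lock(). */
        demo_irq_lock(&lock, true);
        printf("in critical section, interrupts %s\n",
            interrupts_on ? "on" : "off");
        demo_irq_unlock(&lock, true);

        printf("after unlock, interrupts %s\n", interrupts_on ? "on" : "off");
        return 0;
    }

The design win is that the saved interrupt level travels with the lock instead of with a local ipl_t variable in every caller, which is exactly what lets the ipl_t locals be deleted in the hunks below.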
The _slab_reclaim() loop gains parentheses and a size_t index for the per-CPU pass:

-     while (magcount-- && (mag=get_mag_from_cache(cache, 0))) {
-         frames += magazine_destroy(cache,mag);
-         if (!(flags & SLAB_RECLAIM_ALL) && frames)
+     while ((magcount--) && (mag = get_mag_from_cache(cache, 0))) {
+         frames += magazine_destroy(cache, mag);
+         if ((!(flags & SLAB_RECLAIM_ALL)) && (frames))

slab_cache_destroy() drops its ipl_t local and removes the cache from the list under the interrupt-aware lock:

-     ipl = interrupts_disable();
-     spinlock_lock(&slab_cache_lock);
-     list_remove(&cache->link);
-     spinlock_unlock(&slab_cache_lock);
-     interrupts_restore(ipl);
+     irq_spinlock_lock(&slab_cache_lock, true);
+     list_remove(&cache->link);
+     irq_spinlock_unlock(&slab_cache_lock, true);

The emptiness check on full_slabs/partial_slabs only gains parentheses. slab_alloc() takes unsigned int flags, initializes ipl at declaration, and drops redundant braces:

- void *slab_alloc(slab_cache_t *cache, int flags)
+ void *slab_alloc(slab_cache_t *cache, unsigned int flags)
...
-     ipl = interrupts_disable();
+     ipl_t ipl = interrupts_disable();
_slab_free() gets the same treatment (ipl initialized at declaration, extra parentheses, redundant braces dropped). slab_reclaim() is re-documented: the old TODO about asserting that interrupts are disabled becomes part of the doc comment ("Interrupts must be disabled before calling this function, otherwise memory allocation from interrupts can deadlock."), and the cache list is walked under the interrupt-aware lock without touching the interrupt state:

- size_t slab_reclaim(int flags)
+ size_t slab_reclaim(unsigned int flags)
...
-     spinlock_lock(&slab_cache_lock);
+     irq_spinlock_lock(&slab_cache_lock, false);
...
-         cache = list_get_instance(cur, slab_cache_t, link);
+         slab_cache_t *cache = list_get_instance(cur, slab_cache_t, link);
...
-     spinlock_unlock(&slab_cache_lock);
+     irq_spinlock_unlock(&slab_cache_lock, false);

slab_print_list() uses a size_t skip counter (declared after the header is printed) and widens the obj/pg column of its table; the header row spacing is adjusted accordingly and the separator row changes:

-     printf("---------------- -------- ------ ------ ------ ------ ---------"
-         " ---\n");
+     printf("---------------- -------- ------ -------- ------ ------ ---------"
+         " ---\n");
Inside the printing loop, the interrupt-aware lock replaces the manual interrupts_disable()/spinlock pair, the skip-walk locals and the cache pointer are declared at their use, and the copied statistics keep unsigned types:

-     ipl = interrupts_disable();
-     spinlock_lock(&slab_cache_lock);
+     irq_spinlock_lock(&slab_cache_lock, true);
...
-         unsigned int objects = cache->objects;
+         size_t objects = cache->objects;
...
-         int flags = cache->flags;
+         unsigned int flags = cache->flags;
...
-         spinlock_unlock(&slab_cache_lock);
-         interrupts_restore(ipl);
+         irq_spinlock_unlock(&slab_cache_lock, true);

and the row format follows the wider obj/pg column:

-         printf("%-16s %8" PRIs " %6d %6u %6ld %6ld %9ld %-3s\n",
+         printf("%-16s %8" PRIs " %6u %8" PRIs " %6ld %6ld %9ld %-3s\n",

slab_cache_init() replaces its int i, size; declarations with two size_t variables declared just before the malloc-cache loop, and blank lines now separate the individual cache initializations. The doc comment of slab_enable_cpucache() is reflowed.
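slab_print_list() keeps its drop-the-lock-while-printing scheme: for every row it re-takes slab_cache_lock, walks skip entries forward from the list head, copies the statistics of the next cache, releases the lock and only then prints. The standalone sketch below shows that iteration pattern over an ordinary linked list; the node type and names are invented, and comments mark where the lock would be taken and dropped.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    typedef struct node {
        struct node *next;
        const char *name;
        int value;
    } node_t;

    int main(void)
    {
        node_t c = { NULL, "slab_extern",   3 };
        node_t b = { &c,   "slab_cache",    2 };
        node_t a = { &b,   "slab_magazine", 1 };
        node_t *head = &a;

        size_t skip = 0;
        while (true) {
            /* lock(list) would go here */
            node_t *cur = head;
            for (size_t i = 0; (i < skip) && (cur != NULL); i++)
                cur = cur->next;

            if (cur == NULL) {
                /* unlock(list); nothing left to print */
                break;
            }

            skip++;
            const char *name = cur->name;   /* copy what we need ... */
            int value = cur->value;
            /* ... then unlock(list) before doing slow output */

            printf("%-16s %d\n", name, value);
        }
        return 0;
    }

The price of not holding the lock across the slow printf is that entries added or removed concurrently may be skipped or printed twice, which is acceptable for a diagnostic listing.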
slab_enable_cpucache() declares its locals at the point of use, renames s to slab, and takes the cache-list lock without touching interrupts:

-     spinlock_lock(&slab_cache_lock);
+     irq_spinlock_lock(&slab_cache_lock, false);
...
-         s = list_get_instance(cur, slab_cache_t, link);
-         if ((s->flags & SLAB_CACHE_MAGDEFERRED) != SLAB_CACHE_MAGDEFERRED)
              continue;
-         (void) make_magcache(s);
-         s->flags &= ~SLAB_CACHE_MAGDEFERRED;
+         slab_cache_t *slab = list_get_instance(cur, slab_cache_t, link);
+         if ((slab->flags & SLAB_CACHE_MAGDEFERRED) != SLAB_CACHE_MAGDEFERRED)
              continue;
+         (void) make_magcache(slab);
+         slab->flags &= ~SLAB_CACHE_MAGDEFERRED;
...
-     spinlock_unlock(&slab_cache_lock);
+     irq_spinlock_unlock(&slab_cache_lock, false);

The "/* kalloc/kfree functions */" section banner is removed, and malloc() and realloc() take size_t sizes and unsigned int flags, with the cache index held in a uint8_t:

- void *malloc(unsigned int size, int flags)
+ void *malloc(size_t size, unsigned int flags)
...
- void *realloc(void *ptr, unsigned int size, int flags)
+ void *realloc(void *ptr, size_t size, unsigned int flags)
...
-     int idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;
+     uint8_t idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;

free() is unchanged apart from whitespace.
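The index computation above maps a request to one of the power-of-two malloc caches: the size is clamped to the smallest class and fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1 selects the cache whose object size is the next power of two at or above the request. A standalone check of that mapping follows; the portable msb_index() helper merely stands in for fnzb (highest set bit), and SLAB_MIN_MALLOC_W = 4 is assumed here only because the smallest cache in this file is "malloc-16".

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SLAB_MIN_MALLOC_W 4   /* smallest cache is malloc-16 (1 << 4) */

    /* Portable stand-in for fnzb(): index of the highest set bit. */
    static unsigned int msb_index(size_t v)
    {
        unsigned int i = 0;
        while (v >>= 1)
            i++;
        return i;
    }

    /* Map a request size to its power-of-two malloc cache index. */
    static uint8_t malloc_idx(size_t size)
    {
        if (size < ((size_t) 1 << SLAB_MIN_MALLOC_W))
            size = (size_t) 1 << SLAB_MIN_MALLOC_W;

        return msb_index(size - 1) - SLAB_MIN_MALLOC_W + 1;
    }

    int main(void)
    {
        size_t sizes[] = { 1, 16, 17, 32, 33, 100, 4096 };

        for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
            printf("request %4zu -> cache index %u (malloc-%u)\n",
                sizes[i], malloc_idx(sizes[i]),
                16u << malloc_idx(sizes[i]));
        return 0;
    }

For example, requests of 16 bytes land in malloc-16 (index 0), 17 to 32 bytes in malloc-32 (index 1), and so on; the index always fits comfortably in the new uint8_t type.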