Changes in kernel/generic/src/mm/slab.c [2d3ddad:55821eea] in mainline
- Files: kernel/generic/src/mm/slab.c (1 edited)
Legend:
- Unmodified (no prefix)
- Added (+)
- Removed (-)
- Hunk headers (@@ -N +M @@) give the starting line in r2d3ddad and r55821eea respectively.
kernel/generic/src/mm/slab.c
--- kernel/generic/src/mm/slab.c (r2d3ddad)
+++ kernel/generic/src/mm/slab.c (r55821eea)
@@ -33 +33 @@
 /**
  * @file
- * @brief
+ * @brief Slab allocator.
  *
  * The slab allocator is closely modelled after OpenSolaris slab allocator.
@@ -50 +50 @@
  *
  * The slab allocator supports per-CPU caches ('magazines') to facilitate
- * good SMP scaling.
+ * good SMP scaling. 
  *
  * When a new object is being allocated, it is first checked, if it is
@@ -65 +65 @@
  * thrashing when somebody is allocating/deallocating 1 item at the magazine
  * size boundary. LIFO order is enforced, which should avoid fragmentation
- * as much as possible.
- *
+ * as much as possible. 
+ * 
  * Every cache contains list of full slabs and list of partially full slabs.
  * Empty slabs are immediately freed (thrashing will be avoided because
- * of magazines).
+ * of magazines). 
  *
  * The slab information structure is kept inside the data area, if possible.
@@ -95 +95 @@
  *
  * @todo
- * It might be good to add granularity of locks even to slab level,
+ * it might be good to add granularity of locks even to slab level,
  * we could then try_spinlock over all partial slabs and thus improve
- * scalability even on slab level.
- *
+ * scalability even on slab level
  */

@@ -115 +114 @@
 #include <macros.h>

-IRQ_SPINLOCK_STATIC_INITIALIZE(slab_cache_lock);
+SPINLOCK_INITIALIZE(slab_cache_lock);
 static LIST_INITIALIZE(slab_cache_list);

 /** Magazine cache */
 static slab_cache_t mag_cache;
-
 /** Cache for cache descriptors */
 static slab_cache_t slab_cache_cache;
-
 /** Cache for external slab descriptors
  * This time we want per-cpu cache, so do not make it static
@@ -131 +128 @@
  */
 static slab_cache_t *slab_extern_cache;
-
 /** Caches for malloc */
 static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1];
-
 static const char *malloc_names[] = {
     "malloc-16",
@@ -159 +154 @@
 /** Slab descriptor */
 typedef struct {
-    slab_cache_t *cache;
-    link_t link;
-    void *start;
-    size_t available;
-    size_t nextavail;
+    slab_cache_t *cache;  /**< Pointer to parent cache. */
+    link_t link;          /**< List of full/partial slabs. */
+    void *start;          /**< Start address of first available item. */
+    size_t available;     /**< Count of available items in this slab. */
+    size_t nextavail;     /**< The index of next available item. */
 } slab_t;

 #ifdef CONFIG_DEBUG
-static unsigned int _slab_initialized = 0;
+static int _slab_initialized = 0;
 #endif

 /**************************************/
 /* Slab allocation functions */
-/**************************************/
-
-/** Allocate frames for slab space and initialize
- *
- */
-static slab_t *slab_space_alloc(slab_cache_t *cache, unsigned int flags)
-{
-
-
+
+/**
+ * Allocate frames for slab space and initialize
+ *
+ */
+static slab_t *slab_space_alloc(slab_cache_t *cache, int flags)
+{
+    void *data;
+    slab_t *slab;
+    size_t fsize;
+    unsigned int i;
     size_t zone = 0;

-    void *data = frame_alloc_generic(cache->order, FRAME_KA | flags, &zone);
+    data = frame_alloc_generic(cache->order, FRAME_KA | flags, &zone);
     if (!data) {
         return NULL;
     }
-
-    slab_t *slab;
-    size_t fsize;
-
     if (!(cache->flags & SLAB_CACHE_SLINSIDE)) {
         slab = slab_alloc(slab_extern_cache, flags);
@@ -203 +196 @@

     /* Fill in slab structures */
-    size_t i;
-    for (i = 0; i < ((size_t) 1 << cache->order); i++)
+    for (i = 0; i < ((unsigned int) 1 << cache->order); i++)
         frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab, zone);

     slab->start = data;
     slab->available = cache->objects;
     slab->nextavail = 0;
     slab->cache = cache;

     for (i = 0; i < cache->objects; i++)
-        *((size_t *) (slab->start + i * cache->size)) = i + 1;
-
+        *((int *) (slab->start + i * cache->size)) = i + 1;
+
     atomic_inc(&cache->allocated_slabs);
     return slab;
 }

-/** Deallocate space associated with slab
+/**
+ * Deallocate space associated with slab
  *
  * @return number of freed frames
- *
  */
 static size_t slab_space_free(slab_cache_t *cache, slab_t *slab)
 {
     frame_free(KA2PA(slab->start));
-    if (!(cache->flags & SLAB_CACHE_SLINSIDE))
+    if (! (cache->flags & SLAB_CACHE_SLINSIDE))
         slab_free(slab_extern_cache, slab);

     atomic_dec(&cache->allocated_slabs);

-    return (1 << cache->order);
+    return 1 << cache->order;
 }

 /** Map object to slab structure */
-static slab_t *obj2slab(void *obj)
+static slab_t * obj2slab(void *obj)
 {
     return (slab_t *) frame_get_parent(ADDR2PFN(KA2PA(obj)), 0);
 }

-/******************/
+/**************************************/
 /* Slab functions */
-/******************/
-
-/** Return object to slab and call a destructor
+
+
+/**
+ * Return object to slab and call a destructor
  *
  * @param slab If the caller knows directly slab of the object, otherwise NULL
  *
  * @return Number of freed pages
- *
  */
 static size_t slab_obj_destroy(slab_cache_t *cache, void *obj, slab_t *slab)
 {
+    int freed = 0;
+
     if (!slab)
         slab = obj2slab(obj);

     ASSERT(slab->cache == cache);
-
-    size_t freed = 0;

     if (cache->destructor)
         freed = cache->destructor(obj);
@@ -266 +258 @@
     spinlock_lock(&cache->slablock);
     ASSERT(slab->available < cache->objects);
-
-    *((size_t *) obj) = slab->nextavail;
+
+    *((int *) obj) = slab->nextavail;
     slab->nextavail = (obj - slab->start) / cache->size;
     slab->available++;

     /* Move it to correct list */
     if (slab->available == cache->objects) {
@@ -276 +268 @@
         list_remove(&slab->link);
         spinlock_unlock(&cache->slablock);

         return freed + slab_space_free(cache, slab);
+
     } else if (slab->available == 1) {
         /* It was in full, move to partial */
@@ -283 +276 @@
         list_prepend(&slab->link, &cache->partial_slabs);
     }
-
     spinlock_unlock(&cache->slablock);
     return freed;
 }

-/** Take new object from slab or create new if needed
+/**
+ * Take new object from slab or create new if needed
  *
  * @return Object address or null
- *
  */
 static void *slab_obj_create(slab_cache_t *cache, int flags)
 {
+    slab_t *slab;
+    void *obj;
+
     spinlock_lock(&cache->slablock);
-
-    slab_t *slab;

     if (list_empty(&cache->partial_slabs)) {
-        /*
-         * Allow recursion and reclaiming
+        /* Allow recursion and reclaiming
          * - this should work, as the slab control structures
          * are small and do not need to allocate with anything
          * other than frame_alloc when they are allocating,
          * that's why we should get recursion at most 1-level deep
-         *
          */
         spinlock_unlock(&cache->slablock);
@@ -312 +303 @@
         if (!slab)
             return NULL;
-
         spinlock_lock(&cache->slablock);
     } else {
@@ -319 +309 @@
         list_remove(&slab->link);
     }
-
-    void *obj = slab->start + slab->nextavail * cache->size;
-    slab->nextavail = *((size_t *) obj);
+    obj = slab->start + slab->nextavail * cache->size;
+    slab->nextavail = *((int *) obj);
     slab->available--;

     if (!slab->available)
         list_prepend(&slab->link, &cache->full_slabs);
     else
         list_prepend(&slab->link, &cache->partial_slabs);

     spinlock_unlock(&cache->slablock);
-
-    if ((cache->constructor) && (cache->constructor(obj, flags))) {
+
+    if (cache->constructor && cache->constructor(obj, flags)) {
         /* Bad, bad, construction failed */
         slab_obj_destroy(cache, obj, slab);
         return NULL;
     }
-
     return obj;
 }

-/****************************/
+/**************************************/
 /* CPU-Cache slab functions */
-/****************************/
-
-/** Find a full magazine in cache, take it from list and return it
- *
- * @param first If true, return first, else last mag.
- *
- */
-static slab_magazine_t *get_mag_from_cache(slab_cache_t *cache, bool first)
+
+/**
+ * Finds a full magazine in cache, takes it from list
+ * and returns it
+ *
+ * @param first If true, return first, else last mag
+ */
+static slab_magazine_t *get_mag_from_cache(slab_cache_t *cache, int first)
 {
     slab_magazine_t *mag = NULL;
     link_t *cur;

     spinlock_lock(&cache->maglock);
     if (!list_empty(&cache->magazines)) {
@@ -360 +348 @@
         else
             cur = cache->magazines.prev;
-
         mag = list_get_instance(cur, slab_magazine_t, link);
         list_remove(&mag->link);
         atomic_dec(&cache->magazine_counter);
     }
-
     spinlock_unlock(&cache->maglock);
     return mag;
 }

-/** Prepend magazine to magazine list in cache
- *
- */
+/** Prepend magazine to magazine list in cache */
 static void put_mag_to_cache(slab_cache_t *cache, slab_magazine_t *mag)
 {
     spinlock_lock(&cache->maglock);

     list_prepend(&mag->link, &cache->magazines);
     atomic_inc(&cache->magazine_counter);
@@ -383 +367 @@
 }

-/** Free all objects in magazine and free memory associated with magazine
+/**
+ * Free all objects in magazine and free memory associated with magazine
  *
  * @return Number of freed pages
- *
  */
 static size_t magazine_destroy(slab_cache_t *cache, slab_magazine_t *mag)
 {
-    size_t i;
+    unsigned int i;
     size_t frames = 0;

     for (i = 0; i < mag->busy; i++) {
         frames += slab_obj_destroy(cache, mag->objs[i], NULL);
@@ -399 +383 @@

     slab_free(&mag_cache, mag);

     return frames;
 }

-/** Find full magazine, set it as current and return it
- *
+/**
+ * Find full magazine, set it as current and return it
+ *
+ * Assume cpu_magazine lock is held
  */
 static slab_magazine_t *get_full_current_mag(slab_cache_t *cache)
 {
-    slab_magazine_t *cmag = cache->mag_cache[CPU->id].current;
-    slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last;
-
-    ASSERT(spinlock_locked(&cache->mag_cache[CPU->id].lock));
-
+    slab_magazine_t *cmag, *lastmag, *newmag;
+
+    cmag = cache->mag_cache[CPU->id].current;
+    lastmag = cache->mag_cache[CPU->id].last;
     if (cmag) { /* First try local CPU magazines */
         if (cmag->busy)
             return cmag;
-
-        if ((lastmag) && (lastmag->busy)) {
+
+        if (lastmag && lastmag->busy) {
             cache->mag_cache[CPU->id].current = lastmag;
             cache->mag_cache[CPU->id].last = cmag;
@@ -423 +408 @@
         }
     }
-
     /* Local magazines are empty, import one from magazine list */
-    slab_magazine_t *newmag = get_mag_from_cache(cache, 1);
+    newmag = get_mag_from_cache(cache, 1);
     if (!newmag)
         return NULL;

     if (lastmag)
         magazine_destroy(cache, lastmag);

     cache->mag_cache[CPU->id].last = cmag;
     cache->mag_cache[CPU->id].current = newmag;
-
     return newmag;
 }

-/** Try to find object in CPU-cache magazines
+/**
+ * Try to find object in CPU-cache magazines
  *
  * @return Pointer to object or NULL if not available
- *
  */
 static void *magazine_obj_get(slab_cache_t *cache)
 {
+    slab_magazine_t *mag;
+    void *obj;
+
     if (!CPU)
         return NULL;

     spinlock_lock(&cache->mag_cache[CPU->id].lock);
-
-    slab_magazine_t *mag = get_full_current_mag(cache);
+
+    mag = get_full_current_mag(cache);
     if (!mag) {
         spinlock_unlock(&cache->mag_cache[CPU->id].lock);
         return NULL;
     }
-
-    void *obj = mag->objs[--mag->busy];
+    obj = mag->objs[--mag->busy];
     spinlock_unlock(&cache->mag_cache[CPU->id].lock);
-
     atomic_dec(&cache->cached_objs);

@@ -464 +448 @@
 }

-/** Assure that the current magazine is empty, return pointer to it,
- * or NULL if no empty magazine is available and cannot be allocated
- *
- * We have 2 magazines bound to processor.
- * First try the current.
- * If full, try the last.
- * If full, put to magazines list.
+/**
+ * Assure that the current magazine is empty, return pointer to it, or NULL if
+ * no empty magazine is available and cannot be allocated
+ *
+ * Assume mag_cache[CPU->id].lock is held
+ *
+ * We have 2 magazines bound to processor.
+ * First try the current.
+ * If full, try the last.
+ * If full, put to magazines list.
+ * allocate new, exchange last & current
  *
  */
 static slab_magazine_t *make_empty_current_mag(slab_cache_t *cache)
 {
-    slab_magazine_t *cmag = cache->mag_cache[CPU->id].current;
-    slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last;
-
-    ASSERT(spinlock_locked(&cache->mag_cache[CPU->id].lock));
+    slab_magazine_t *cmag, *lastmag, *newmag;
+
+    cmag = cache->mag_cache[CPU->id].current;
+    lastmag = cache->mag_cache[CPU->id].last;

     if (cmag) {
         if (cmag->busy < cmag->size)
             return cmag;
-
-        if ((lastmag) && (lastmag->busy < lastmag->size)) {
+        if (lastmag && lastmag->busy < lastmag->size) {
             cache->mag_cache[CPU->id].last = cmag;
             cache->mag_cache[CPU->id].current = lastmag;
@@ -490 +477 @@
         }
     }
-
     /* current | last are full | nonexistent, allocate new */
-
-    /*
-     * We do not want to sleep just because of caching,
-     * especially we do not want reclaiming to start, as
-     * this would deadlock.
-     *
-     */
-    slab_magazine_t *newmag = slab_alloc(&mag_cache,
-        FRAME_ATOMIC | FRAME_NO_RECLAIM);
+    /* We do not want to sleep just because of caching */
+    /* Especially we do not want reclaiming to start, as
+     * this would deadlock */
+    newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
     if (!newmag)
         return NULL;
-
     newmag->size = SLAB_MAG_SIZE;
     newmag->busy = 0;

     /* Flush last to magazine list */
     if (lastmag)
         put_mag_to_cache(cache, lastmag);

     /* Move current as last, save new as current */
-    cache->mag_cache[CPU->id].last = cmag;
-    cache->mag_cache[CPU->id].current = newmag;
-
+    cache->mag_cache[CPU->id].last = cmag;
+    cache->mag_cache[CPU->id].current = newmag;
+
     return newmag;
 }

-/** Put object into CPU-cache magazine
- *
- * @return 0 on success, -1 on no memory
- *
+/**
+ * Put object into CPU-cache magazine
+ *
+ * @return 0 - success, -1 - could not get memory
  */
 static int magazine_obj_put(slab_cache_t *cache, void *obj)
 {
+    slab_magazine_t *mag;
+
     if (!CPU)
         return -1;

     spinlock_lock(&cache->mag_cache[CPU->id].lock);
-
-    slab_magazine_t *mag = make_empty_current_mag(cache);
+
+    mag = make_empty_current_mag(cache);
     if (!mag) {
         spinlock_unlock(&cache->mag_cache[CPU->id].lock);
@@ -537 +519 @@

     mag->objs[mag->busy++] = obj;

     spinlock_unlock(&cache->mag_cache[CPU->id].lock);
-
     atomic_inc(&cache->cached_objs);
-
     return 0;
 }

-/************************/
+
+/**************************************/
 /* Slab cache functions */
-/************************/
-
-/** Return number of objects that fit in certain cache size
- *
- */
-static size_t comp_objects(slab_cache_t *cache)
+
+/** Return number of objects that fit in certain cache size */
+static unsigned int comp_objects(slab_cache_t *cache)
 {
     if (cache->flags & SLAB_CACHE_SLINSIDE)
-        return ((PAGE_SIZE << cache->order)
-            - sizeof(slab_t)) / cache->size;
-    else
+        return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) /
+            cache->size;
+    else
         return (PAGE_SIZE << cache->order) / cache->size;
 }

-/** Return wasted space in slab
- *
- */
-static size_t badness(slab_cache_t *cache)
-{
-    size_t objects = comp_objects(cache);
-    size_t ssize = PAGE_SIZE << cache->order;
-
+/** Return wasted space in slab */
+static unsigned int badness(slab_cache_t *cache)
+{
+    unsigned int objects;
+    unsigned int ssize;
+
+    objects = comp_objects(cache);
+    ssize = PAGE_SIZE << cache->order;
     if (cache->flags & SLAB_CACHE_SLINSIDE)
         ssize -= sizeof(slab_t);
-
     return ssize - objects * cache->size;
 }

-/** Initialize mag_cache structure in slab cache
- *
+/**
+ * Initialize mag_cache structure in slab cache
  */
 static bool make_magcache(slab_cache_t *cache)
 {
+    unsigned int i;
+
     ASSERT(_slab_initialized >= 2);

     cache->mag_cache = malloc(sizeof(slab_mag_cache_t) * config.cpu_count,
         FRAME_ATOMIC);
     if (!cache->mag_cache)
         return false;
-
-    size_t i;
+
     for (i = 0; i < config.cpu_count; i++) {
         memsetb(&cache->mag_cache[i], sizeof(cache->mag_cache[i]), 0);
         spinlock_initialize(&cache->mag_cache[i].lock,
-            "slab.cache.mag_cache[].lock");
-    }
-
+            "slab_maglock_cpu");
+    }
     return true;
 }

-/** Initialize allocated memory as a slab cache
- *
- */
+/** Initialize allocated memory as a slab cache */
 static void _slab_cache_create(slab_cache_t *cache, const char *name,
-    size_t size, size_t align, int (*constructor)(void *obj,
-    unsigned int kmflag), size_t (*destructor)(void *obj), unsigned int flags)
-{
+    size_t size, size_t align, int (*constructor)(void *obj, int kmflag),
+    int (*destructor)(void *obj), int flags)
+{
+    int pages;
+    ipl_t ipl;
+
     memsetb(cache, sizeof(*cache), 0);
     cache->name = name;

     if (align < sizeof(unative_t))
         align = sizeof(unative_t);
-
     size = ALIGN_UP(size, align);

     cache->size = size;
+
     cache->constructor = constructor;
     cache->destructor = destructor;
     cache->flags = flags;

     list_initialize(&cache->full_slabs);
     list_initialize(&cache->partial_slabs);
     list_initialize(&cache->magazines);
-
-    spinlock_initialize(&cache->slablock, "slab.cache.slablock");
-    spinlock_initialize(&cache->maglock, "slab.cache.maglock");
-
+    spinlock_initialize(&cache->slablock, "slab_lock");
+    spinlock_initialize(&cache->maglock, "slab_maglock");
     if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
         (void) make_magcache(cache);

     /* Compute slab sizes, object counts in slabs etc. */
     if (cache->size < SLAB_INSIDE_SIZE)
         cache->flags |= SLAB_CACHE_SLINSIDE;

     /* Minimum slab order */
-    size_t pages = SIZE2FRAMES(cache->size);
-
+    pages = SIZE2FRAMES(cache->size);
     /* We need the 2^order >= pages */
     if (pages == 1)
@@ -639 +614 @@
     else
         cache->order = fnzb(pages - 1) + 1;
-
-    while (badness(cache) > SLAB_MAX_BADNESS(cache))
+
+    while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
         cache->order += 1;
-
+    }
     cache->objects = comp_objects(cache);
-
     /* If info fits in, put it inside */
     if (badness(cache) > sizeof(slab_t))
         cache->flags |= SLAB_CACHE_SLINSIDE;

     /* Add cache to cache list */
-    irq_spinlock_lock(&slab_cache_lock, true);
+    ipl = interrupts_disable();
+    spinlock_lock(&slab_cache_lock);
+
     list_append(&cache->link, &slab_cache_list);
-    irq_spinlock_unlock(&slab_cache_lock, true);
-}
-
-/** Create slab cache
- *
- */
+
+    spinlock_unlock(&slab_cache_lock);
+    interrupts_restore(ipl);
+}
+
+/** Create slab cache */
 slab_cache_t *slab_cache_create(const char *name, size_t size, size_t align,
-    int (*constructor)(void *obj, unsigned int kmflag),
-    size_t (*destructor)(void *obj), unsigned int flags)
-{
-    slab_cache_t *cache = slab_alloc(&slab_cache_cache, 0);
+    int (*constructor)(void *obj, int kmflag), int (*destructor)(void *obj),
+    int flags)
+{
+    slab_cache_t *cache;
+
+    cache = slab_alloc(&slab_cache_cache, 0);
     _slab_cache_create(cache, name, size, align, constructor, destructor,
         flags);
-
     return cache;
 }

-/** Reclaim space occupied by objects that are already free
+/**
+ * Reclaim space occupied by objects that are already free
  *
  * @param flags If contains SLAB_RECLAIM_ALL, do aggressive freeing
- *
  * @return Number of freed pages
- *
- */
-static size_t _slab_reclaim(slab_cache_t *cache, unsigned int flags)
-{
+ */
+static size_t _slab_reclaim(slab_cache_t *cache, int flags)
+{
+    unsigned int i;
+    slab_magazine_t *mag;
+    size_t frames = 0;
+    int magcount;
+
     if (cache->flags & SLAB_CACHE_NOMAGAZINE)
         return 0; /* Nothing to do */
-
-    /*
-     * We count up to original magazine count to avoid
-     * endless loop
+
+    /* We count up to original magazine count to avoid
+     * endless loop
      */
-    atomic_count_t magcount = atomic_get(&cache->magazine_counter);
-
-    slab_magazine_t *mag;
-    size_t frames = 0;
-
-    while ((magcount--) && (mag = get_mag_from_cache(cache, 0))) {
-        frames += magazine_destroy(cache, mag);
-        if ((!(flags & SLAB_RECLAIM_ALL)) && (frames))
+    magcount = atomic_get(&cache->magazine_counter);
+    while (magcount-- && (mag=get_mag_from_cache(cache, 0))) {
+        frames += magazine_destroy(cache,mag);
+        if (!(flags & SLAB_RECLAIM_ALL) && frames)
             break;
     }
@@ -699 +675 @@
         /* Free cpu-bound magazines */
         /* Destroy CPU magazines */
-        size_t i;
         for (i = 0; i < config.cpu_count; i++) {
             spinlock_lock(&cache->mag_cache[i].lock);

             mag = cache->mag_cache[i].current;
             if (mag)
@@ -712 +687 @@
                 frames += magazine_destroy(cache, mag);
             cache->mag_cache[i].last = NULL;

             spinlock_unlock(&cache->mag_cache[i].lock);
         }
     }

     return frames;
 }

-/** Check that there are no slabs and remove cache from system
- *
- */
+/** Check that there are no slabs and remove cache from system */
 void slab_cache_destroy(slab_cache_t *cache)
 {
-    /*
-     * First remove cache from link, so that we don't need
+    ipl_t ipl;
+
+    /* First remove cache from link, so that we don't need
      * to disable interrupts later
-     *
      */
-    irq_spinlock_lock(&slab_cache_lock, true);
+
+    ipl = interrupts_disable();
+    spinlock_lock(&slab_cache_lock);
+
     list_remove(&cache->link);
-    irq_spinlock_unlock(&slab_cache_lock, true);
-
-    /*
-     * Do not lock anything, we assume the software is correct and
-     * does not touch the cache when it decides to destroy it
-     *
-     */
+
+    spinlock_unlock(&slab_cache_lock);
+    interrupts_restore(ipl);
+
+    /* Do not lock anything, we assume the software is correct and
+     * does not touch the cache when it decides to destroy it */

     /* Destroy all magazines */
     _slab_reclaim(cache, SLAB_RECLAIM_ALL);

     /* All slabs must be empty */
-    if ((!list_empty(&cache->full_slabs)) ||
-        (!list_empty(&cache->partial_slabs)))
+    if (!list_empty(&cache->full_slabs) ||
+        !list_empty(&cache->partial_slabs))
         panic("Destroying cache that is not empty.");

     if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
         free(cache->mag_cache);
-
     slab_free(&slab_cache_cache, cache);
 }

-/** Allocate new object from cache - if no flags given, always returns memory
- *
- */
-void *slab_alloc(slab_cache_t *cache, unsigned int flags)
-{
+/** Allocate new object from cache - if no flags given, always returns memory */
+void *slab_alloc(slab_cache_t *cache, int flags)
+{
+    ipl_t ipl;
+    void *result = NULL;
+
     /* Disable interrupts to avoid deadlocks with interrupt handlers */
-    ipl_t ipl = interrupts_disable();
-
-    void *result = NULL;
-
-    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
+    ipl = interrupts_disable();
+
+    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
         result = magazine_obj_get(cache);
-
+    }
     if (!result)
         result = slab_obj_create(cache, flags);

     interrupts_restore(ipl);

     if (result)
         atomic_inc(&cache->allocated_objs);

     return result;
 }

-/** Return object to cache, use slab if known
- *
- */
+/** Return object to cache, use slab if known */
 static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
 {
-    ipl_t ipl = interrupts_disable();
-
+    ipl_t ipl;
+
+    ipl = interrupts_disable();
+
     if ((cache->flags & SLAB_CACHE_NOMAGAZINE) ||
-        (magazine_obj_put(cache, obj)))
+        magazine_obj_put(cache, obj)) {
         slab_obj_destroy(cache, obj, slab);
-
+
+    }
     interrupts_restore(ipl);
     atomic_dec(&cache->allocated_objs);
 }

-/** Return slab object to cache
- *
- */
+/** Return slab object to cache */
 void slab_free(slab_cache_t *cache, void *obj)
 {
@@ -801 +773 @@
 }

-/** Go through all caches and reclaim what is possible
- *
- * Interrupts must be disabled before calling this function,
- * otherwise memory allocation from interrupts can deadlock.
- *
- */
-size_t slab_reclaim(unsigned int flags)
-{
-    irq_spinlock_lock(&slab_cache_lock, false);
-
+/* Go through all caches and reclaim what is possible */
+size_t slab_reclaim(int flags)
+{
+    slab_cache_t *cache;
+    link_t *cur;
     size_t frames = 0;
-    link_t *cur;
+
+    spinlock_lock(&slab_cache_lock);
+
+    /* TODO: Add assert, that interrupts are disabled, otherwise
+     * memory allocation from interrupts can deadlock.
+     */
+
     for (cur = slab_cache_list.next; cur != &slab_cache_list;
         cur = cur->next) {
-        slab_cache_t *cache = list_get_instance(cur, slab_cache_t, link);
+        cache = list_get_instance(cur, slab_cache_t, link);
         frames += _slab_reclaim(cache, flags);
     }
-
-    irq_spinlock_unlock(&slab_cache_lock, false);
-
+
+    spinlock_unlock(&slab_cache_lock);
+
     return frames;
 }

-/* Print list of slabs
- *
- */
+
+/* Print list of slabs */
 void slab_print_list(void)
 {
-    printf("slab name size pages obj/pg slabs cached allocated"
+    int skip = 0;
+
+    printf("slab name size pages obj/pg slabs cached allocated"
         " ctl\n");
-    printf("---------------- -------- ------ ------ -------- ------ ---------"
+    printf("---------------- -------- ------ ------ ------ ------ ---------"
         " ---\n");
-
-    size_t skip = 0;
+
     while (true) {
+        slab_cache_t *cache;
+        link_t *cur;
+        ipl_t ipl;
+        int i;
+
         /*
          * We must not hold the slab_cache_lock spinlock when printing
@@ -858 +836 @@
          * statistics.
          */
-
-        irq_spinlock_lock(&slab_cache_lock, true);
-
-        link_t *cur;
-        size_t i;
+
+        ipl = interrupts_disable();
+        spinlock_lock(&slab_cache_lock);
+
         for (i = 0, cur = slab_cache_list.next;
-            (i < skip) && (cur != &slab_cache_list);
-            i++, cur = cur->next);
-
+            i < skip && cur != &slab_cache_list;
+            i++, cur = cur->next)
+            ;
+
         if (cur == &slab_cache_list) {
-            irq_spinlock_unlock(&slab_cache_lock, true);
+            spinlock_unlock(&slab_cache_lock);
+            interrupts_restore(ipl);
             break;
         }

         skip++;
-
-        slab_cache_t *cache = list_get_instance(cur, slab_cache_t, link);
-
+
+        cache = list_get_instance(cur, slab_cache_t, link);
+
         const char *name = cache->name;
         uint8_t order = cache->order;
         size_t size = cache->size;
-        size_t objects = cache->objects;
+        unsigned int objects = cache->objects;
         long allocated_slabs = atomic_get(&cache->allocated_slabs);
         long cached_objs = atomic_get(&cache->cached_objs);
         long allocated_objs = atomic_get(&cache->allocated_objs);
-        unsigned int flags = cache->flags;
+        int flags = cache->flags;

-        irq_spinlock_unlock(&slab_cache_lock, true);
+        spinlock_unlock(&slab_cache_lock);
+        interrupts_restore(ipl);

-        printf("%-16s %8" PRIs " %6u %8" PRIs " %6ld %6ld %9ld %-3s\n",
+        printf("%-16s %8" PRIs " %6d %6u %6ld %6ld %9ld %-3s\n",
            name, size, (1 << order), objects, allocated_slabs,
            cached_objs, allocated_objs,
@@ -896 +876 @@
 void slab_cache_init(void)
 {
+    int i, size;
+
     /* Initialize magazine cache */
     _slab_cache_create(&mag_cache, "slab_magazine",
@@ -901 +883 @@
         sizeof(uintptr_t), NULL, NULL, SLAB_CACHE_NOMAGAZINE |
         SLAB_CACHE_SLINSIDE);
-
     /* Initialize slab_cache cache */
     _slab_cache_create(&slab_cache_cache, "slab_cache",
         sizeof(slab_cache_cache), sizeof(uintptr_t), NULL, NULL,
         SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
-
     /* Initialize external slab cache */
     slab_extern_cache = slab_cache_create("slab_extern", sizeof(slab_t), 0,
         NULL, NULL, SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);

     /* Initialize structures for malloc */
-    size_t i;
-    size_t size;
-
     for (i = 0, size = (1 << SLAB_MIN_MALLOC_W);
         i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
@@ -921 +898 @@
             NULL, NULL, SLAB_CACHE_MAGDEFERRED);
     }
-
 #ifdef CONFIG_DEBUG
     _slab_initialized = 1;
@@ -930 +906 @@
  *
  * Kernel calls this function, when it knows the real number of
- * processors. Allocate slab for cpucache and enable it on all
- * existing slabs that are SLAB_CACHE_MAGDEFERRED
- *
+ * processors.
+ * Allocate slab for cpucache and enable it on all existing
+ * slabs that are SLAB_CACHE_MAGDEFERRED
  */
 void slab_enable_cpucache(void)
 {
+    link_t *cur;
+    slab_cache_t *s;
+
 #ifdef CONFIG_DEBUG
     _slab_initialized = 2;
 #endif
-
-    irq_spinlock_lock(&slab_cache_lock, false);
-
-    link_t *cur;
+
+    spinlock_lock(&slab_cache_lock);
+
     for (cur = slab_cache_list.next; cur != &slab_cache_list;
-        cur = cur->next) {
-        slab_cache_t *slab = list_get_instance(cur, slab_cache_t, link);
-        if ((slab->flags & SLAB_CACHE_MAGDEFERRED) !=
+        cur = cur->next){
+        s = list_get_instance(cur, slab_cache_t, link);
+        if ((s->flags & SLAB_CACHE_MAGDEFERRED) !=
             SLAB_CACHE_MAGDEFERRED)
             continue;
-
-        (void) make_magcache(slab);
-        slab->flags &= ~SLAB_CACHE_MAGDEFERRED;
-    }
-
-    irq_spinlock_unlock(&slab_cache_lock, false);
-}
-
-void *malloc(size_t size, unsigned int flags)
+        (void) make_magcache(s);
+        s->flags &= ~SLAB_CACHE_MAGDEFERRED;
+    }
+
+    spinlock_unlock(&slab_cache_lock);
+}
+
+/**************************************/
+/* kalloc/kfree functions */
+void *malloc(unsigned int size, int flags)
 {
     ASSERT(_slab_initialized);
@@ -964 +943 @@
     if (size < (1 << SLAB_MIN_MALLOC_W))
         size = (1 << SLAB_MIN_MALLOC_W);
-
-    uint8_t idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;
-
+
+    int idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;
+
     return slab_alloc(malloc_caches[idx], flags);
 }

-void *realloc(void *ptr, size_t size, unsigned int flags)
+void *realloc(void *ptr, unsigned int size, int flags)
 {
     ASSERT(_slab_initialized);
@@ -980 +959 @@
     if (size < (1 << SLAB_MIN_MALLOC_W))
         size = (1 << SLAB_MIN_MALLOC_W);
-    uint8_t idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;
+    int idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;

     new_ptr = slab_alloc(malloc_caches[idx], flags);
@@ -1001 +980 @@
     if (!ptr)
         return;

     slab_t *slab = obj2slab(ptr);
     _slab_free(slab->cache, ptr, slab);
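
For orientation, the sketch below shows how the public cache API looks when called with the signatures on the r55821eea side of this changeset (slab_cache_create, slab_alloc, slab_free). The example_t type, its constructor/destructor, and the call sites are hypothetical illustrations and are not part of the changeset.

/* Hypothetical example type; only the slab_* calls mirror this file. */
typedef struct {
    int id;
    void *payload;
} example_t;

/* Constructor: return 0 on success; a nonzero result makes the
 * allocation fail (the object is destroyed and NULL is returned). */
static int example_ctor(void *obj, int kmflag)
{
    ((example_t *) obj)->payload = NULL;
    return 0;
}

/* Destructor: returns the number of frames it freed (none here). */
static int example_dtor(void *obj)
{
    return 0;
}

static slab_cache_t *example_cache;

static void example_init(void)
{
    /* One cache per object type; flags = 0 keeps per-CPU magazines enabled. */
    example_cache = slab_cache_create("example_t", sizeof(example_t), 0,
        example_ctor, example_dtor, 0);
}

static void example_use(void)
{
    /* With no special flags, slab_alloc() always returns memory
     * (per the comment above slab_alloc() in this file). */
    example_t *obj = slab_alloc(example_cache, 0);
    obj->id = 1;
    slab_free(example_cache, obj);  /* object goes back to a CPU magazine */
}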
Note: See TracChangeset for help on using the changeset viewer.