Changes in kernel/generic/src/mm/slab.c [7a0359b:55821eea] in mainline
File: kernel/generic/src/mm/slab.c (1 edited)
kernel/generic/src/mm/slab.c
Differences from r7a0359b to r55821eea:

File header comment
- The empty "@brief" tag is filled in as "@brief Slab allocator."
- Several comment lines ("good SMP scaling.", "as much as possible.", "of magazines).") differ only in trailing whitespace.
- The @todo note about adding lock granularity at the slab level is reworded (lowercase "it", trailing period and closing blank comment line dropped).

Global declarations
- IRQ_SPINLOCK_STATIC_INITIALIZE(slab_cache_lock); becomes SPINLOCK_INITIALIZE(slab_cache_lock);
- The blank lines separating mag_cache, slab_cache_cache, slab_extern_cache, malloc_caches and malloc_names are removed.

Slab descriptor and debug flag
- The slab_t members (cache, link, start, available, nextavail) gain inline doxygen comments: pointer to parent cache, list of full/partial slabs, start address of first available item, count of available items, index of next available item.
- static unsigned int _slab_initialized becomes static int _slab_initialized.

Slab allocation functions (slab_space_alloc, slab_space_free, obj2slab, slab_obj_destroy, slab_obj_create)
- The NO_TRACE attribute is dropped from all static helpers.
- unsigned int flags parameters become int flags; index and count locals change from size_t to int/unsigned int, and the free-list links stored inside free objects are written through int * instead of size_t * pointers.
- Local variables (data, slab, fsize, i, freed, obj) are declared at the top of each function rather than at first use.
- return (1 << cache->order); loses its parentheses; doc comments and the section banners are reflowed to the older style.

CPU-cache (magazine) functions (get_mag_from_cache, put_mag_to_cache, magazine_destroy, get_full_current_mag, magazine_obj_get, make_empty_current_mag, magazine_obj_put)
- NO_TRACE is dropped and the bool first parameter of get_mag_from_cache becomes int first.
- The ASSERT(spinlock_locked(&cache->mag_cache[CPU->id].lock)) checks in get_full_current_mag and make_empty_current_mag are replaced by "Assume cpu_magazine lock is held" / "Assume mag_cache[CPU->id].lock is held" comments.
- Locals (cmag, lastmag, newmag, mag, obj) move to the top of the functions; redundant parentheses such as if ((lastmag) && (lastmag->busy)) are removed.

Slab cache functions (comp_objects, badness, make_magcache, _slab_cache_create, slab_cache_create, _slab_reclaim, slab_cache_destroy, slab_alloc, _slab_free, slab_reclaim, slab_print_list)
- Return types and counters change from size_t to unsigned int/int; the constructor and destructor callback types change from int (*)(void *, unsigned int) and size_t (*)(void *) to int (*)(void *, int) and int (*)(void *); flags parameters become plain int; atomic_count_t magcount becomes int magcount.
- Spinlock names change from "slab.cache.mag_cache[].lock", "slab.cache.slablock" and "slab.cache.maglock" to "slab_maglock_cpu", "slab_lock" and "slab_maglock".
- irq_spinlock_lock()/irq_spinlock_unlock() on slab_cache_lock is replaced by explicit interrupts_disable()/spinlock_lock() and spinlock_unlock()/interrupts_restore() pairs.
- slab_reclaim() no longer documents that interrupts must be disabled by the caller; instead it carries a TODO to add such an assert.
- slab_print_list() switches from the bracketed column header with the "%-18s %8" PRIs " %8u %8" PRIs " %8ld %8ld %8ld %-5s\n" format to a dashed two-line header with "%-16s %8" PRIs " %6d %6u %6ld %6ld %9ld %-3s\n"; skip, objects and flags become int/unsigned int.

Initialization and malloc wrappers (slab_cache_init, slab_enable_cpucache, malloc, realloc, free)
- slab_cache_init declares int i, size; up front instead of size_t locals before the malloc-cache loop.
- slab_enable_cpucache iterates with a top-declared slab_cache_t *s instead of a loop-local slab_cache_t *slab, again under the plain spinlock.
- A /* kalloc/kfree functions */ banner is added; malloc and realloc take (unsigned int size, int flags) instead of (size_t size, unsigned int flags), and the cache index becomes int rather than uint8_t. free is unchanged.
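Both revisions keep the free list for a slab inside the objects themselves: slab_space_alloc() seeds every free slot with the index of the next free slot, slab_obj_create() pops the slot named by nextavail, and slab_obj_destroy() pushes a freed slot back as the new head. The standalone sketch below (ordinary user-space C, not HelenOS kernel code; the object size and count are made up for illustration) mirrors that technique:

#include <stdio.h>
#include <stdlib.h>

#define OBJECTS  4
#define OBJ_SIZE 32   /* large enough to hold a size_t index in a free slot */

int main(void)
{
	char *start = malloc(OBJECTS * OBJ_SIZE);   /* stands in for the slab data area */
	if (!start)
		return 1;

	size_t nextavail = 0;          /* index of the first free slot */
	size_t available = OBJECTS;    /* number of free slots */

	/* Initialization, as in slab_space_alloc(): free slot i points to slot i + 1. */
	for (size_t i = 0; i < OBJECTS; i++)
		*((size_t *) (start + i * OBJ_SIZE)) = i + 1;

	/* Allocation, as in slab_obj_create(): take the head of the free list. */
	void *obj = start + nextavail * OBJ_SIZE;
	nextavail = *((size_t *) obj);
	available--;
	printf("allocated slot 0; next free slot is %zu, %zu slots left\n",
	    nextavail, available);

	/* Deallocation, as in slab_obj_destroy(): link the slot back in as the head. */
	*((size_t *) obj) = nextavail;
	nextavail = (size_t) ((char *) obj - start) / OBJ_SIZE;
	available++;
	printf("freed it; head of the free list is slot %zu again, %zu slots free\n",
	    nextavail, available);

	free(start);
	return 0;
}

Both revisions guarantee that a free slot is big enough to hold the link, because _slab_cache_create() rounds the object size up to at least sizeof(unative_t).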
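For orientation, here is a minimal sketch of how a kernel subsystem might use the cache API whose signatures appear on the r55821eea side of the diff (slab_cache_create(), slab_alloc(), slab_free()). The example_t type, its constructor and destructor, the example_* functions and the include paths are illustrative assumptions, not part of the changeset:

#include <mm/slab.h>    /* assumed header for slab_cache_create()/slab_alloc()/slab_free() */
#include <memstr.h>     /* assumed header for memsetb() */

/* Hypothetical object type, used only for this illustration. */
typedef struct {
	int id;
	void *payload;
} example_t;

static slab_cache_t *example_cache;

/* Constructor: called when an object is first carved out of a slab.
 * Returning non-zero aborts the allocation (see slab_obj_create()). */
static int example_ctor(void *obj, int kmflag)
{
	memsetb(obj, sizeof(example_t), 0);
	return 0;
}

/* Destructor: may release secondary resources; returns the number of
 * pages it freed (added to the total in slab_obj_destroy()). */
static int example_dtor(void *obj)
{
	return 0;
}

static void example_init(void)
{
	/* Per-CPU magazines are enabled unless SLAB_CACHE_NOMAGAZINE is given. */
	example_cache = slab_cache_create("example_t", sizeof(example_t), 0,
	    example_ctor, example_dtor, 0);
}

static void example_use(void)
{
	/* With no special flags, slab_alloc() always returns memory. */
	example_t *e = slab_alloc(example_cache, 0);

	e->id = 1;
	slab_free(example_cache, e);
}

slab_cache_destroy(example_cache) would tear the cache down again, but only after every object has been returned, since the destroy path panics on caches that are not empty.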