Changes in kernel/generic/src/mm/slab.c [ab6f2507:98000fb] in mainline
File: kernel/generic/src/mm/slab.c (1 edited)
Legend:
  (no prefix)  unmodified
  -            removed (present only in ab6f2507)
  +            added (present only in 98000fb)
kernel/generic/src/mm/slab.c
--- kernel/generic/src/mm/slab.c  (rab6f2507)
+++ kernel/generic/src/mm/slab.c  (r98000fb)
@@ -33 +33 @@
 /**
  * @file
- * @brief
+ * @brief Slab allocator.
  *
  * The slab allocator is closely modelled after OpenSolaris slab allocator.
@@ -50 +50 @@
  *
  * The slab allocator supports per-CPU caches ('magazines') to facilitate
- * good SMP scaling.
+ * good SMP scaling.
  *
  * When a new object is being allocated, it is first checked, if it is
@@ -65 +65 @@
  * thrashing when somebody is allocating/deallocating 1 item at the magazine
  * size boundary. LIFO order is enforced, which should avoid fragmentation
- * as much as possible.
- *
+ * as much as possible.
+ *
  * Every cache contains list of full slabs and list of partially full slabs.
  * Empty slabs are immediately freed (thrashing will be avoided because
- * of magazines).
+ * of magazines).
  *
  * The slab information structure is kept inside the data area, if possible.
@@ -95 +95 @@
  *
  * @todo
- * It might be good to add granularity of locks even to slab level,
+ * it might be good to add granularity of locks even to slab level,
  * we could then try_spinlock over all partial slabs and thus improve
- * scalability even on slab level.
- *
+ * scalability even on slab level
  */
@@ -115 +114 @@
 #include <macros.h>
 
-IRQ_SPINLOCK_STATIC_INITIALIZE(slab_cache_lock);
+SPINLOCK_INITIALIZE(slab_cache_lock);
 static LIST_INITIALIZE(slab_cache_list);
 
 /** Magazine cache */
 static slab_cache_t mag_cache;
-
 /** Cache for cache descriptors */
 static slab_cache_t slab_cache_cache;
-
 /** Cache for external slab descriptors
  * This time we want per-cpu cache, so do not make it static
@@ -131 +128 @@
  */
 static slab_cache_t *slab_extern_cache;
-
 /** Caches for malloc */
 static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1];
-
-static const char *malloc_names[] = {
+static char *malloc_names[] = {
     "malloc-16",
     "malloc-32",
@@ -159 +154 @@
 /** Slab descriptor */
 typedef struct {
-    slab_cache_t *cache;
-    link_t link;
-    void *start;
-    size_t available;
-    size_t nextavail;
+    slab_cache_t *cache;  /**< Pointer to parent cache. */
+    link_t link;          /**< List of full/partial slabs. */
+    void *start;          /**< Start address of first available item. */
+    size_t available;     /**< Count of available items in this slab. */
+    size_t nextavail;     /**< The index of next available item. */
 } slab_t;
 
 #ifdef CONFIG_DEBUG
-static unsigned int _slab_initialized = 0;
+static int _slab_initialized = 0;
 #endif
 
 /**************************************/
 /* Slab allocation functions */
-/**************************************/
-
-/** Allocate frames for slab space and initialize
- *
- */
-NO_TRACE static slab_t *slab_space_alloc(slab_cache_t *cache,
-    unsigned int flags)
-{
-
-
+
+/**
+ * Allocate frames for slab space and initialize
+ *
+ */
+static slab_t *slab_space_alloc(slab_cache_t *cache, int flags)
+{
+    void *data;
+    slab_t *slab;
+    size_t fsize;
+    unsigned int i;
     size_t zone = 0;
 
-    void *data = frame_alloc_generic(cache->order, FRAME_KA | flags, &zone);
+    data = frame_alloc_generic(cache->order, FRAME_KA | flags, &zone);
     if (!data) {
         return NULL;
     }
-
-    slab_t *slab;
-    size_t fsize;
-
     if (!(cache->flags & SLAB_CACHE_SLINSIDE)) {
         slab = slab_alloc(slab_extern_cache, flags);
@@ -204 +196 @@
 
     /* Fill in slab structures */
-    size_t i;
-    for (i = 0; i < ((size_t) 1 << cache->order); i++)
+    for (i = 0; i < ((unsigned int) 1 << cache->order); i++)
         frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab, zone);
 
     slab->start = data;
     slab->available = cache->objects;
     slab->nextavail = 0;
     slab->cache = cache;
 
     for (i = 0; i < cache->objects; i++)
-        *((size_t *) (slab->start + i * cache->size)) = i + 1;
-
+        *((int *) (slab->start + i * cache->size)) = i + 1;
+
     atomic_inc(&cache->allocated_slabs);
     return slab;
 }
 
-/** Deallocate space associated with slab
+/**
+ * Deallocate space associated with slab
  *
  * @return number of freed frames
- *
- */
-NO_TRACE static size_t slab_space_free(slab_cache_t *cache, slab_t *slab)
+ */
+static size_t slab_space_free(slab_cache_t *cache, slab_t *slab)
 {
     frame_free(KA2PA(slab->start));
-    if (!(cache->flags & SLAB_CACHE_SLINSIDE))
+    if (! (cache->flags & SLAB_CACHE_SLINSIDE))
         slab_free(slab_extern_cache, slab);
 
     atomic_dec(&cache->allocated_slabs);
 
-    return (1 << cache->order);
+    return 1 << cache->order;
 }
 
 /** Map object to slab structure */
-NO_TRACE static slab_t *obj2slab(void *obj)
+static slab_t * obj2slab(void *obj)
 {
     return (slab_t *) frame_get_parent(ADDR2PFN(KA2PA(obj)), 0);
 }
 
-/******************/
+/**************************************/
 /* Slab functions */
-/******************/
-
-/** Return object to slab and call a destructor
+
+
+/**
+ * Return object to slab and call a destructor
  *
  * @param slab If the caller knows directly slab of the object, otherwise NULL
  *
  * @return Number of freed pages
- *
- */
-NO_TRACE static size_t slab_obj_destroy(slab_cache_t *cache, void *obj,
-    slab_t *slab)
-{
+ */
+static size_t slab_obj_destroy(slab_cache_t *cache, void *obj, slab_t *slab)
+{
+    int freed = 0;
+
     if (!slab)
         slab = obj2slab(obj);
 
     ASSERT(slab->cache == cache);
-
-    size_t freed = 0;
-
+
     if (cache->destructor)
         freed = cache->destructor(obj);
@@ -268 +258 @@
     spinlock_lock(&cache->slablock);
     ASSERT(slab->available < cache->objects);
-
-    *((size_t *) obj) = slab->nextavail;
+
+    *((int *) obj) = slab->nextavail;
     slab->nextavail = (obj - slab->start) / cache->size;
     slab->available++;
 
     /* Move it to correct list */
     if (slab->available == cache->objects) {
@@ -278 +268 @@
         list_remove(&slab->link);
         spinlock_unlock(&cache->slablock);
 
         return freed + slab_space_free(cache, slab);
+
     } else if (slab->available == 1) {
         /* It was in full, move to partial */
@@ -285 +276 @@
         list_prepend(&slab->link, &cache->partial_slabs);
     }
-
     spinlock_unlock(&cache->slablock);
     return freed;
 }
 
-/** Take new object from slab or create new if needed
+/**
+ * Take new object from slab or create new if needed
  *
  * @return Object address or null
- *
- */
-NO_TRACE static void *slab_obj_create(slab_cache_t *cache, unsigned int flags)
-{
+ */
+static void *slab_obj_create(slab_cache_t *cache, int flags)
+{
+    slab_t *slab;
+    void *obj;
+
     spinlock_lock(&cache->slablock);
-
-    slab_t *slab;
 
     if (list_empty(&cache->partial_slabs)) {
-        /*
-         * Allow recursion and reclaiming
+        /* Allow recursion and reclaiming
          * - this should work, as the slab control structures
          * are small and do not need to allocate with anything
          * other than frame_alloc when they are allocating,
          * that's why we should get recursion at most 1-level deep
-         *
          */
         spinlock_unlock(&cache->slablock);
@@ -314 +303 @@
         if (!slab)
             return NULL;
-
         spinlock_lock(&cache->slablock);
     } else {
@@ -321 +309 @@
         list_remove(&slab->link);
     }
-
-    void *obj = slab->start + slab->nextavail * cache->size;
-    slab->nextavail = *((size_t *) obj);
+    obj = slab->start + slab->nextavail * cache->size;
+    slab->nextavail = *((int *) obj);
     slab->available--;
 
     if (!slab->available)
         list_prepend(&slab->link, &cache->full_slabs);
     else
         list_prepend(&slab->link, &cache->partial_slabs);
 
     spinlock_unlock(&cache->slablock);
-
-    if ((cache->constructor) && (cache->constructor(obj, flags))) {
+
+    if (cache->constructor && cache->constructor(obj, flags)) {
         /* Bad, bad, construction failed */
         slab_obj_destroy(cache, obj, slab);
         return NULL;
     }
-
     return obj;
 }
 
-/****************************/
+/**************************************/
 /* CPU-Cache slab functions */
-/****************************/
-
-/** Find a full magazine in cache, take it from list and return it
- *
- * @param first If true, return first, else last mag.
- *
- */
-NO_TRACE static slab_magazine_t *get_mag_from_cache(slab_cache_t *cache,
-    bool first)
+
+/**
+ * Finds a full magazine in cache, takes it from list
+ * and returns it
+ *
+ * @param first If true, return first, else last mag
+ */
+static slab_magazine_t *get_mag_from_cache(slab_cache_t *cache, int first)
 {
     slab_magazine_t *mag = NULL;
     link_t *cur;
 
     spinlock_lock(&cache->maglock);
     if (!list_empty(&cache->magazines)) {
@@ -363 +348 @@
         else
             cur = cache->magazines.prev;
-
         mag = list_get_instance(cur, slab_magazine_t, link);
         list_remove(&mag->link);
         atomic_dec(&cache->magazine_counter);
     }
-
     spinlock_unlock(&cache->maglock);
     return mag;
 }
 
-/** Prepend magazine to magazine list in cache
- *
- */
-NO_TRACE static void put_mag_to_cache(slab_cache_t *cache,
-    slab_magazine_t *mag)
+/** Prepend magazine to magazine list in cache */
+static void put_mag_to_cache(slab_cache_t *cache, slab_magazine_t *mag)
 {
     spinlock_lock(&cache->maglock);
 
     list_prepend(&mag->link, &cache->magazines);
     atomic_inc(&cache->magazine_counter);
@@ -387 +367 @@
 }
 
-/** Free all objects in magazine and free memory associated with magazine
+/**
+ * Free all objects in magazine and free memory associated with magazine
  *
  * @return Number of freed pages
- *
- */
-NO_TRACE static size_t magazine_destroy(slab_cache_t *cache,
-    slab_magazine_t *mag)
-{
-    size_t i;
+ */
+static size_t magazine_destroy(slab_cache_t *cache, slab_magazine_t *mag)
+{
+    unsigned int i;
     size_t frames = 0;
 
     for (i = 0; i < mag->busy; i++) {
         frames += slab_obj_destroy(cache, mag->objs[i], NULL);
@@ -404 +383 @@
 
     slab_free(&mag_cache, mag);
 
     return frames;
 }
 
-/** Find full magazine, set it as current and return it
- *
- */
-NO_TRACE static slab_magazine_t *get_full_current_mag(slab_cache_t *cache)
-{
-    slab_magazine_t *cmag = cache->mag_cache[CPU->id].current;
-    slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last;
-
-    ASSERT(spinlock_locked(&cache->mag_cache[CPU->id].lock));
-
+/**
+ * Find full magazine, set it as current and return it
+ *
+ * Assume cpu_magazine lock is held
+ */
+static slab_magazine_t *get_full_current_mag(slab_cache_t *cache)
+{
+    slab_magazine_t *cmag, *lastmag, *newmag;
+
+    cmag = cache->mag_cache[CPU->id].current;
+    lastmag = cache->mag_cache[CPU->id].last;
     if (cmag) { /* First try local CPU magazines */
         if (cmag->busy)
             return cmag;
-
-        if ((lastmag) && (lastmag->busy)) {
+
+        if (lastmag && lastmag->busy) {
             cache->mag_cache[CPU->id].current = lastmag;
             cache->mag_cache[CPU->id].last = cmag;
@@ -428 +408 @@
         }
     }
-
     /* Local magazines are empty, import one from magazine list */
-    slab_magazine_t *newmag = get_mag_from_cache(cache, 1);
+    newmag = get_mag_from_cache(cache, 1);
     if (!newmag)
         return NULL;
 
     if (lastmag)
         magazine_destroy(cache, lastmag);
 
     cache->mag_cache[CPU->id].last = cmag;
     cache->mag_cache[CPU->id].current = newmag;
-
     return newmag;
 }
 
-/** Try to find object in CPU-cache magazines
+/**
+ * Try to find object in CPU-cache magazines
  *
  * @return Pointer to object or NULL if not available
- *
- */
-NO_TRACE static void *magazine_obj_get(slab_cache_t *cache)
-{
+ */
+static void *magazine_obj_get(slab_cache_t *cache)
+{
+    slab_magazine_t *mag;
+    void *obj;
+
     if (!CPU)
         return NULL;
 
     spinlock_lock(&cache->mag_cache[CPU->id].lock);
-
-    slab_magazine_t *mag = get_full_current_mag(cache);
+
+    mag = get_full_current_mag(cache);
     if (!mag) {
         spinlock_unlock(&cache->mag_cache[CPU->id].lock);
         return NULL;
     }
-
-    void *obj = mag->objs[--mag->busy];
+    obj = mag->objs[--mag->busy];
     spinlock_unlock(&cache->mag_cache[CPU->id].lock);
-
     atomic_dec(&cache->cached_objs);
 
@@ -469 +448 @@
 }
 
-/** Assure that the current magazine is empty, return pointer to it,
- * or NULL if no empty magazine is available and cannot be allocated
- *
- * We have 2 magazines bound to processor.
- * First try the current.
- * If full, try the last.
- * If full, put to magazines list.
- *
- */
-NO_TRACE static slab_magazine_t *make_empty_current_mag(slab_cache_t *cache)
-{
-    slab_magazine_t *cmag = cache->mag_cache[CPU->id].current;
-    slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last;
-
-    ASSERT(spinlock_locked(&cache->mag_cache[CPU->id].lock));
-
+/**
+ * Assure that the current magazine is empty, return pointer to it, or NULL if
+ * no empty magazine is available and cannot be allocated
+ *
+ * Assume mag_cache[CPU->id].lock is held
+ *
+ * We have 2 magazines bound to processor.
+ * First try the current.
+ * If full, try the last.
+ * If full, put to magazines list.
+ * allocate new, exchange last & current
+ *
+ */
+static slab_magazine_t *make_empty_current_mag(slab_cache_t *cache)
+{
+    slab_magazine_t *cmag, *lastmag, *newmag;
+
+    cmag = cache->mag_cache[CPU->id].current;
+    lastmag = cache->mag_cache[CPU->id].last;
+
     if (cmag) {
         if (cmag->busy < cmag->size)
             return cmag;
-
-        if ((lastmag) && (lastmag->busy < lastmag->size)) {
+        if (lastmag && lastmag->busy < lastmag->size) {
             cache->mag_cache[CPU->id].last = cmag;
             cache->mag_cache[CPU->id].current = lastmag;
@@ -495 +477 @@
         }
     }
-
     /* current | last are full | nonexistent, allocate new */
-
-    /*
-     * We do not want to sleep just because of caching,
-     * especially we do not want reclaiming to start, as
-     * this would deadlock.
-     *
-     */
-    slab_magazine_t *newmag = slab_alloc(&mag_cache,
-        FRAME_ATOMIC | FRAME_NO_RECLAIM);
+    /* We do not want to sleep just because of caching */
+    /* Especially we do not want reclaiming to start, as
+     * this would deadlock */
+    newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
     if (!newmag)
         return NULL;
-
     newmag->size = SLAB_MAG_SIZE;
     newmag->busy = 0;
 
     /* Flush last to magazine list */
     if (lastmag)
         put_mag_to_cache(cache, lastmag);
 
     /* Move current as last, save new as current */
-    cache->mag_cache[CPU->id].last = cmag;
-    cache->mag_cache[CPU->id].current = newmag;
-
+    cache->mag_cache[CPU->id].last = cmag;
+    cache->mag_cache[CPU->id].current = newmag;
+
     return newmag;
 }
 
-/** Put object into CPU-cache magazine
- *
- * @return 0 on success, -1 on no memory
- *
- */
-NO_TRACE static int magazine_obj_put(slab_cache_t *cache, void *obj)
-{
+/**
+ * Put object into CPU-cache magazine
+ *
+ * @return 0 - success, -1 - could not get memory
+ */
+static int magazine_obj_put(slab_cache_t *cache, void *obj)
+{
+    slab_magazine_t *mag;
+
     if (!CPU)
         return -1;
 
     spinlock_lock(&cache->mag_cache[CPU->id].lock);
-
-    slab_magazine_t *mag = make_empty_current_mag(cache);
+
+    mag = make_empty_current_mag(cache);
     if (!mag) {
         spinlock_unlock(&cache->mag_cache[CPU->id].lock);
@@ -542 +519 @@
     }
 
     mag->objs[mag->busy++] = obj;
 
     spinlock_unlock(&cache->mag_cache[CPU->id].lock);
-
     atomic_inc(&cache->cached_objs);
-
     return 0;
 }
 
-/************************/
+
+/**************************************/
 /* Slab cache functions */
-/************************/
-
-/** Return number of objects that fit in certain cache size
- *
- */
-NO_TRACE static size_t comp_objects(slab_cache_t *cache)
+
+/** Return number of objects that fit in certain cache size */
+static unsigned int comp_objects(slab_cache_t *cache)
 {
     if (cache->flags & SLAB_CACHE_SLINSIDE)
-        return ((PAGE_SIZE << cache->order)
-            - sizeof(slab_t)) / cache->size;
-    else
+        return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) /
+            cache->size;
+    else
         return (PAGE_SIZE << cache->order) / cache->size;
 }
 
-/** Return wasted space in slab
- *
- */
-NO_TRACE static size_t badness(slab_cache_t *cache)
-{
-    size_t objects = comp_objects(cache);
-    size_t ssize = PAGE_SIZE << cache->order;
-
+/** Return wasted space in slab */
+static unsigned int badness(slab_cache_t *cache)
+{
+    unsigned int objects;
+    unsigned int ssize;
+
+    objects = comp_objects(cache);
+    ssize = PAGE_SIZE << cache->order;
     if (cache->flags & SLAB_CACHE_SLINSIDE)
         ssize -= sizeof(slab_t);
-
     return ssize - objects * cache->size;
 }
 
-/** Initialize mag_cache structure in slab cache
- *
- */
-NO_TRACE static bool make_magcache(slab_cache_t *cache)
-{
+/**
+ * Initialize mag_cache structure in slab cache
+ */
+static void make_magcache(slab_cache_t *cache)
+{
+    unsigned int i;
+
     ASSERT(_slab_initialized >= 2);
 
     cache->mag_cache = malloc(sizeof(slab_mag_cache_t) * config.cpu_count,
-        FRAME_ATOMIC);
-    if (!cache->mag_cache)
-        return false;
-
-    size_t i;
+        0);
     for (i = 0; i < config.cpu_count; i++) {
         memsetb(&cache->mag_cache[i], sizeof(cache->mag_cache[i]), 0);
         spinlock_initialize(&cache->mag_cache[i].lock,
-            "slab.cache.mag_cache[].lock");
-    }
-
-    return true;
-}
-
-/** Initialize allocated memory as a slab cache
- *
- */
-NO_TRACE static void _slab_cache_create(slab_cache_t *cache, const char *name,
-    size_t size, size_t align, int (*constructor)(void *obj,
-    unsigned int kmflag), size_t (*destructor)(void *obj), unsigned int flags)
-{
+            "slab_maglock_cpu");
+    }
+}
+
+/** Initialize allocated memory as a slab cache */
+static void
+_slab_cache_create(slab_cache_t *cache, char *name, size_t size, size_t align,
+    int (*constructor)(void *obj, int kmflag), int (*destructor)(void *obj),
+    int flags)
+{
+    int pages;
+    ipl_t ipl;
+
     memsetb(cache, sizeof(*cache), 0);
     cache->name = name;
-
-    if (align < sizeof(sysarg_t))
-        align = sizeof(sysarg_t);
-
+
+    if (align < sizeof(unative_t))
+        align = sizeof(unative_t);
     size = ALIGN_UP(size, align);
 
     cache->size = size;
+
     cache->constructor = constructor;
     cache->destructor = destructor;
     cache->flags = flags;
 
     list_initialize(&cache->full_slabs);
     list_initialize(&cache->partial_slabs);
     list_initialize(&cache->magazines);
-
-    spinlock_initialize(&cache->slablock, "slab.cache.slablock");
-    spinlock_initialize(&cache->maglock, "slab.cache.maglock");
-
+    spinlock_initialize(&cache->slablock, "slab_lock");
+    spinlock_initialize(&cache->maglock, "slab_maglock");
     if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
-        (void) make_magcache(cache);
-
+        make_magcache(cache);
+
     /* Compute slab sizes, object counts in slabs etc. */
     if (cache->size < SLAB_INSIDE_SIZE)
         cache->flags |= SLAB_CACHE_SLINSIDE;
 
     /* Minimum slab order */
-    size_t pages = SIZE2FRAMES(cache->size);
-
+    pages = SIZE2FRAMES(cache->size);
     /* We need the 2^order >= pages */
     if (pages == 1)
@@ -644 +611 @@
     else
         cache->order = fnzb(pages - 1) + 1;
-
-    while (badness(cache) > SLAB_MAX_BADNESS(cache))
+
+    while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
         cache->order += 1;
-
+    }
     cache->objects = comp_objects(cache);
-
     /* If info fits in, put it inside */
     if (badness(cache) > sizeof(slab_t))
         cache->flags |= SLAB_CACHE_SLINSIDE;
 
     /* Add cache to cache list */
-    irq_spinlock_lock(&slab_cache_lock, true);
+    ipl = interrupts_disable();
+    spinlock_lock(&slab_cache_lock);
+
     list_append(&cache->link, &slab_cache_list);
-    irq_spinlock_unlock(&slab_cache_lock, true);
-}
-
-/** Create slab cache
- *
- */
-slab_cache_t *slab_cache_create(const char *name, size_t size, size_t align,
-    int (*constructor)(void *obj, unsigned int kmflag),
-    size_t (*destructor)(void *obj), unsigned int flags)
-{
-    slab_cache_t *cache = slab_alloc(&slab_cache_cache, 0);
+
+    spinlock_unlock(&slab_cache_lock);
+    interrupts_restore(ipl);
+}
+
+/** Create slab cache */
+slab_cache_t *
+slab_cache_create(char *name, size_t size, size_t align,
+    int (*constructor)(void *obj, int kmflag), int (*destructor)(void *obj),
+    int flags)
+{
+    slab_cache_t *cache;
+
+    cache = slab_alloc(&slab_cache_cache, 0);
     _slab_cache_create(cache, name, size, align, constructor, destructor,
         flags);
-
     return cache;
 }
 
-/** Reclaim space occupied by objects that are already free
+/**
+ * Reclaim space occupied by objects that are already free
  *
  * @param flags If contains SLAB_RECLAIM_ALL, do aggressive freeing
- *
  * @return Number of freed pages
- *
- */
-NO_TRACE static size_t _slab_reclaim(slab_cache_t *cache, unsigned int flags)
-{
+ */
+static size_t _slab_reclaim(slab_cache_t *cache, int flags)
+{
+    unsigned int i;
+    slab_magazine_t *mag;
+    size_t frames = 0;
+    int magcount;
+
     if (cache->flags & SLAB_CACHE_NOMAGAZINE)
         return 0; /* Nothing to do */
-
-    /*
-     * We count up to original magazine count to avoid
-     * endless loop
+
+    /* We count up to original magazine count to avoid
+     * endless loop
      */
-    atomic_count_t magcount = atomic_get(&cache->magazine_counter);
-
-    slab_magazine_t *mag;
-    size_t frames = 0;
-
-    while ((magcount--) && (mag = get_mag_from_cache(cache, 0))) {
-        frames += magazine_destroy(cache, mag);
-        if ((!(flags & SLAB_RECLAIM_ALL)) && (frames))
+    magcount = atomic_get(&cache->magazine_counter);
+    while (magcount-- && (mag=get_mag_from_cache(cache, 0))) {
+        frames += magazine_destroy(cache,mag);
+        if (!(flags & SLAB_RECLAIM_ALL) && frames)
             break;
@@ -704 +673 @@
         /* Free cpu-bound magazines */
         /* Destroy CPU magazines */
-        size_t i;
         for (i = 0; i < config.cpu_count; i++) {
             spinlock_lock(&cache->mag_cache[i].lock);
 
             mag = cache->mag_cache[i].current;
             if (mag)
@@ -717 +685 @@
                 frames += magazine_destroy(cache, mag);
             cache->mag_cache[i].last = NULL;
 
             spinlock_unlock(&cache->mag_cache[i].lock);
         }
     }
 
     return frames;
 }
 
-/** Check that there are no slabs and remove cache from system
- *
- */
+/** Check that there are no slabs and remove cache from system */
 void slab_cache_destroy(slab_cache_t *cache)
 {
-    /*
-     * First remove cache from link, so that we don't need
+    ipl_t ipl;
+
+    /* First remove cache from link, so that we don't need
      * to disable interrupts later
-     *
      */
-    irq_spinlock_lock(&slab_cache_lock, true);
+
+    ipl = interrupts_disable();
+    spinlock_lock(&slab_cache_lock);
+
     list_remove(&cache->link);
-    irq_spinlock_unlock(&slab_cache_lock, true);
-
-    /*
-     * Do not lock anything, we assume the software is correct and
-     * does not touch the cache when it decides to destroy it
-     *
-     */
+
+    spinlock_unlock(&slab_cache_lock);
+    interrupts_restore(ipl);
+
+    /* Do not lock anything, we assume the software is correct and
+     * does not touch the cache when it decides to destroy it */
 
     /* Destroy all magazines */
     _slab_reclaim(cache, SLAB_RECLAIM_ALL);
 
     /* All slabs must be empty */
-    if ((!list_empty(&cache->full_slabs)) ||
-        (!list_empty(&cache->partial_slabs)))
+    if (!list_empty(&cache->full_slabs) ||
+        !list_empty(&cache->partial_slabs))
         panic("Destroying cache that is not empty.");
 
     if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
         free(cache->mag_cache);
-
     slab_free(&slab_cache_cache, cache);
 }
 
-/** Allocate new object from cache - if no flags given, always returns memory
- *
- */
-void *slab_alloc(slab_cache_t *cache, unsigned int flags)
-{
+/** Allocate new object from cache - if no flags given, always returns memory */
+void *slab_alloc(slab_cache_t *cache, int flags)
+{
+    ipl_t ipl;
+    void *result = NULL;
+
     /* Disable interrupts to avoid deadlocks with interrupt handlers */
-    ipl_t ipl = interrupts_disable();
-
-    void *result = NULL;
-
-    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
+    ipl = interrupts_disable();
+
+    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
         result = magazine_obj_get(cache);
-
+    }
     if (!result)
         result = slab_obj_create(cache, flags);
 
     interrupts_restore(ipl);
 
     if (result)
         atomic_inc(&cache->allocated_objs);
 
     return result;
 }
 
-/** Return object to cache, use slab if known
- *
- */
-NO_TRACE static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
-{
-    ipl_t ipl = interrupts_disable();
-
+/** Return object to cache, use slab if known */
+static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
+{
+    ipl_t ipl;
+
+    ipl = interrupts_disable();
+
     if ((cache->flags & SLAB_CACHE_NOMAGAZINE) ||
-        (magazine_obj_put(cache, obj)))
+        magazine_obj_put(cache, obj)) {
         slab_obj_destroy(cache, obj, slab);
 
+    }
     interrupts_restore(ipl);
     atomic_dec(&cache->allocated_objs);
 }
 
-/** Return slab object to cache
- *
- */
+/** Return slab object to cache */
 void slab_free(slab_cache_t *cache, void *obj)
 {
@@ -806 +771 @@
 }
 
-/** Go through all caches and reclaim what is possible */
-size_t slab_reclaim(unsigned int flags)
-{
-    irq_spinlock_lock(&slab_cache_lock, true);
-
+/* Go through all caches and reclaim what is possible */
+size_t slab_reclaim(int flags)
+{
+    slab_cache_t *cache;
+    link_t *cur;
     size_t frames = 0;
-    link_t *cur;
+
+    spinlock_lock(&slab_cache_lock);
+
+    /* TODO: Add assert, that interrupts are disabled, otherwise
+     * memory allocation from interrupts can deadlock.
+     */
+
     for (cur = slab_cache_list.next; cur != &slab_cache_list;
         cur = cur->next) {
-        slab_cache_t *cache = list_get_instance(cur, slab_cache_t, link);
+        cache = list_get_instance(cur, slab_cache_t, link);
         frames += _slab_reclaim(cache, flags);
     }
-
-    irq_spinlock_unlock(&slab_cache_lock, true);
-
+
+    spinlock_unlock(&slab_cache_lock);
+
     return frames;
 }
 
-/* Print list of slabs
- *
- */
+
+/* Print list of slabs */
 void slab_print_list(void)
 {
-    printf("[slab name ] [size ] [pages ] [obj/pg] [slabs ]"
-        " [cached] [alloc ] [ctl]\n");
-
-    size_t skip = 0;
+    int skip = 0;
+
+    printf("slab name size pages obj/pg slabs cached allocated"
+        " ctl\n");
+    printf("---------------- -------- ------ ------ ------ ------ ---------"
+        " ---\n");
+
     while (true) {
+        slab_cache_t *cache;
+        link_t *cur;
+        ipl_t ipl;
+        int i;
+
         /*
          * We must not hold the slab_cache_lock spinlock when printing
@@ -856 +834 @@
          * statistics.
          */
-
-        irq_spinlock_lock(&slab_cache_lock, true);
-
-        link_t *cur;
-        size_t i;
+
+        ipl = interrupts_disable();
+        spinlock_lock(&slab_cache_lock);
+
         for (i = 0, cur = slab_cache_list.next;
-            (i < skip) && (cur != &slab_cache_list);
-            i++, cur = cur->next);
-
+            i < skip && cur != &slab_cache_list;
+            i++, cur = cur->next)
+            ;
+
         if (cur == &slab_cache_list) {
-            irq_spinlock_unlock(&slab_cache_lock, true);
+            spinlock_unlock(&slab_cache_lock);
+            interrupts_restore(ipl);
             break;
         }
 
         skip++;
-
-        slab_cache_t *cache = list_get_instance(cur, slab_cache_t, link);
-
-        const char *name = cache->name;
+
+        cache = list_get_instance(cur, slab_cache_t, link);
+
+        char *name = cache->name;
         uint8_t order = cache->order;
         size_t size = cache->size;
-        size_t objects = cache->objects;
+        unsigned int objects = cache->objects;
         long allocated_slabs = atomic_get(&cache->allocated_slabs);
         long cached_objs = atomic_get(&cache->cached_objs);
         long allocated_objs = atomic_get(&cache->allocated_objs);
-        unsigned int flags = cache->flags;
+        int flags = cache->flags;
 
-        irq_spinlock_unlock(&slab_cache_lock, true);
+        spinlock_unlock(&slab_cache_lock);
+        interrupts_restore(ipl);
 
-        printf("%-18s %8zu %8u %8zu %8ld %8ld %8ld %-5s\n",
+        printf("%-16s %8" PRIs " %6d %6u %6ld %6ld %9ld %-3s\n",
            name, size, (1 << order), objects, allocated_slabs,
            cached_objs, allocated_objs,
@@ -894 +874 @@
 void slab_cache_init(void)
 {
+    int i, size;
+
     /* Initialize magazine cache */
     _slab_cache_create(&mag_cache, "slab_magazine",
@@ -899 +881 @@
         sizeof(uintptr_t), NULL, NULL, SLAB_CACHE_NOMAGAZINE |
         SLAB_CACHE_SLINSIDE);
-
     /* Initialize slab_cache cache */
     _slab_cache_create(&slab_cache_cache, "slab_cache",
         sizeof(slab_cache_cache), sizeof(uintptr_t), NULL, NULL,
         SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
-
     /* Initialize external slab cache */
     slab_extern_cache = slab_cache_create("slab_extern", sizeof(slab_t), 0,
         NULL, NULL, SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);
 
     /* Initialize structures for malloc */
-    size_t i;
-    size_t size;
-
     for (i = 0, size = (1 << SLAB_MIN_MALLOC_W);
@@ -919 +896 @@
             NULL, NULL, SLAB_CACHE_MAGDEFERRED);
     }
-
-#ifdef CONFIG_DEBUG
+#ifdef CONFIG_DEBUG
     _slab_initialized = 1;
 #endif
@@ -928 +904 @@
  *
  * Kernel calls this function, when it knows the real number of
- * processors. Allocate slab for cpucache and enable it on all
- * existing slabs that are SLAB_CACHE_MAGDEFERRED
- *
+ * processors.
+ * Allocate slab for cpucache and enable it on all existing
+ * slabs that are SLAB_CACHE_MAGDEFERRED
  */
 void slab_enable_cpucache(void)
 {
+    link_t *cur;
+    slab_cache_t *s;
+
 #ifdef CONFIG_DEBUG
     _slab_initialized = 2;
 #endif
-
-    irq_spinlock_lock(&slab_cache_lock, false);
-
-    link_t *cur;
+
+    spinlock_lock(&slab_cache_lock);
+
     for (cur = slab_cache_list.next; cur != &slab_cache_list;
-        cur = cur->next) {
-        slab_cache_t *slab = list_get_instance(cur, slab_cache_t, link);
-        if ((slab->flags & SLAB_CACHE_MAGDEFERRED) !=
+        cur = cur->next){
+        s = list_get_instance(cur, slab_cache_t, link);
+        if ((s->flags & SLAB_CACHE_MAGDEFERRED) !=
             SLAB_CACHE_MAGDEFERRED)
             continue;
-
-        (void) make_magcache(slab);
-        slab->flags &= ~SLAB_CACHE_MAGDEFERRED;
-    }
-
-    irq_spinlock_unlock(&slab_cache_lock, false);
-}
-
-void *malloc(size_t size, unsigned int flags)
+        make_magcache(s);
+        s->flags &= ~SLAB_CACHE_MAGDEFERRED;
+    }
+
+    spinlock_unlock(&slab_cache_lock);
+}
+
+/**************************************/
+/* kalloc/kfree functions */
+void *malloc(unsigned int size, int flags)
 {
     ASSERT(_slab_initialized);
@@ -962 +941 @@
     if (size < (1 << SLAB_MIN_MALLOC_W))
         size = (1 << SLAB_MIN_MALLOC_W);
-
-    uint8_t idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;
-
+
+    int idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;
+
     return slab_alloc(malloc_caches[idx], flags);
 }
 
-void *realloc(void *ptr, size_t size, unsigned int flags)
+void *realloc(void *ptr, unsigned int size, int flags)
 {
     ASSERT(_slab_initialized);
@@ -978 +957 @@
     if (size < (1 << SLAB_MIN_MALLOC_W))
         size = (1 << SLAB_MIN_MALLOC_W);
-    uint8_t idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;
+    int idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;
 
     new_ptr = slab_alloc(malloc_caches[idx], flags);
@@ -999 +978 @@
     if (!ptr)
         return;
 
     slab_t *slab = obj2slab(ptr);
     _slab_free(slab->cache, ptr, slab);
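The hunks in slab_space_alloc(), slab_obj_create() and slab_obj_destroy() only change the integer type used for the in-slab free list (int vs. size_t); the mechanism is the same on both sides: every free object stores the index of the next free object in its own first word, so the slab needs no separate bookkeeping array. Below is a small stand-alone sketch of that index-threaded free list. The names (toy_slab, toy_init, toy_alloc, toy_free) and the sizes are invented for illustration and are not part of the kernel source.

/* Stand-alone sketch of an index-threaded free list inside one "slab". */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

#define OBJ_SIZE   32   /* bytes per object; must hold at least a size_t */
#define OBJ_COUNT  8    /* objects per toy slab */

typedef struct {
    char   *start;      /* start of the object area */
    size_t  available;  /* number of free objects */
    size_t  nextavail;  /* index of the first free object */
} toy_slab;

static void toy_init(toy_slab *s, void *buf)
{
    s->start = buf;
    s->available = OBJ_COUNT;
    s->nextavail = 0;
    /* Thread the free list: free object i records index i + 1. */
    for (size_t i = 0; i < OBJ_COUNT; i++)
        *((size_t *) (s->start + i * OBJ_SIZE)) = i + 1;
}

static void *toy_alloc(toy_slab *s)
{
    if (!s->available)
        return NULL;
    void *obj = s->start + s->nextavail * OBJ_SIZE;
    s->nextavail = *((size_t *) obj);   /* pop the head of the free list */
    s->available--;
    return obj;
}

static void toy_free(toy_slab *s, void *obj)
{
    /* Push the object back: it records the old head, its index becomes
     * the new head (LIFO, as the file header comment describes). */
    *((size_t *) obj) = s->nextavail;
    s->nextavail = ((char *) obj - s->start) / OBJ_SIZE;
    s->available++;
}

int main(void)
{
    void *buf = malloc(OBJ_SIZE * OBJ_COUNT);
    toy_slab s;
    toy_init(&s, buf);

    void *a = toy_alloc(&s);
    void *b = toy_alloc(&s);
    printf("allocated objects at offsets %td and %td\n",
        (char *) a - s.start, (char *) b - s.start);

    toy_free(&s, a);
    void *c = toy_alloc(&s);    /* LIFO: reuses the slot just freed */
    printf("reused offset %td, %zu objects still free\n",
        (char *) c - s.start, s.available);

    free(buf);
    return 0;
}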
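The CPU-cache hunks (get_full_current_mag(), make_empty_current_mag(), magazine_obj_get()) all build on the two-magazines-per-CPU scheme described in the file header: take from the current magazine, swap current and last if the last one still holds objects, and only then import a full magazine from the shared list. The sketch below mimics just that allocation path in user space; cpu_cache, depot_pop() and the other names are invented, and locking as well as the fall-back to the slab layer are deliberately left out.

#include <stdio.h>
#include <stddef.h>

#define MAG_SIZE 4

typedef struct {
    void  *objs[MAG_SIZE];
    size_t busy;                /* number of objects currently stored */
} magazine;

typedef struct {
    magazine *current;          /* magazine served first */
    magazine *last;             /* previously used magazine */
} cpu_cache;

/* Stand-in for taking a full magazine from the shared magazine list;
 * returns NULL when that list is empty. */
static magazine *depot_pop(void)
{
    return NULL;
}

/* Prefer the current magazine, else swap it with the last one,
 * else import a full magazine from the depot. */
static magazine *full_current_mag(cpu_cache *cc)
{
    if (cc->current && cc->current->busy)
        return cc->current;

    if (cc->last && cc->last->busy) {
        magazine *tmp = cc->current;
        cc->current = cc->last;
        cc->last = tmp;
        return cc->current;
    }

    magazine *fresh = depot_pop();
    if (!fresh)
        return NULL;

    cc->last = cc->current;
    cc->current = fresh;
    return fresh;
}

static void *cpu_cache_get(cpu_cache *cc)
{
    magazine *mag = full_current_mag(cc);
    if (!mag)
        return NULL;            /* caller would fall back to the slab layer */
    return mag->objs[--mag->busy];
}

int main(void)
{
    int a = 1, b = 2;
    magazine cur = { .objs = { &a }, .busy = 1 };
    magazine last = { .objs = { &b }, .busy = 1 };
    cpu_cache cc = { .current = &cur, .last = &last };

    printf("%d\n", *(int *) cpu_cache_get(&cc));  /* 1: from current */
    printf("%d\n", *(int *) cpu_cache_get(&cc));  /* 2: after swap with last */
    if (cpu_cache_get(&cc) == NULL)
        printf("both magazines empty, depot empty -> fall back to slab\n");
    return 0;
}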
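In malloc() and realloc(), both revisions map a request size to one of the malloc-16, malloc-32, ... caches with idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1. The following sketch reproduces that mapping; the value of SLAB_MIN_MALLOC_W and the loop-based stand-in for the kernel's fnzb() (index of the most significant set bit) are assumptions made only for this example.

#include <stdio.h>
#include <stddef.h>

/* Smallest cache in the diff is "malloc-16", so assume 1 << 4. */
#define SLAB_MIN_MALLOC_W 4

/* Rough stand-in for the kernel's fnzb(): index of the most significant
 * set bit of a non-zero value. */
static int fnzb(unsigned int v)
{
    int bit = -1;
    while (v) {
        v >>= 1;
        bit++;
    }
    return bit;
}

int main(void)
{
    unsigned int sizes[] = { 1, 16, 17, 32, 33, 100, 128, 129 };

    for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
        unsigned int size = sizes[i];
        if (size < (1u << SLAB_MIN_MALLOC_W))
            size = (1u << SLAB_MIN_MALLOC_W);
        int idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;
        printf("request %3u -> cache index %d (malloc-%u)\n",
            sizes[i], idx, 1u << (idx + SLAB_MIN_MALLOC_W));
    }
    return 0;
}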