Changes in kernel/generic/src/mm/slab.c [55821eea:7a0359b] in mainline
- Files: 1 edited
Legend:
- Unmodified
- Added
- Removed
kernel/generic/src/mm/slab.c
r55821eea r7a0359b 33 33 /** 34 34 * @file 35 * @brief 35 * @brief Slab allocator. 36 36 * 37 37 * The slab allocator is closely modelled after OpenSolaris slab allocator. … … 50 50 * 51 51 * The slab allocator supports per-CPU caches ('magazines') to facilitate 52 * good SMP scaling. 52 * good SMP scaling. 53 53 * 54 54 * When a new object is being allocated, it is first checked, if it is … … 65 65 * thrashing when somebody is allocating/deallocating 1 item at the magazine 66 66 * size boundary. LIFO order is enforced, which should avoid fragmentation 67 * as much as possible. 68 * 67 * as much as possible. 68 * 69 69 * Every cache contains list of full slabs and list of partially full slabs. 70 70 * Empty slabs are immediately freed (thrashing will be avoided because 71 * of magazines). 71 * of magazines). 72 72 * 73 73 * The slab information structure is kept inside the data area, if possible. … … 95 95 * 96 96 * @todo 97 * it might be good to add granularity of locks even to slab level,97 * It might be good to add granularity of locks even to slab level, 98 98 * we could then try_spinlock over all partial slabs and thus improve 99 * scalability even on slab level 99 * scalability even on slab level. 100 * 100 101 */ 101 102 … … 114 115 #include <macros.h> 115 116 116 SPINLOCK_INITIALIZE(slab_cache_lock);117 IRQ_SPINLOCK_STATIC_INITIALIZE(slab_cache_lock); 117 118 static LIST_INITIALIZE(slab_cache_list); 118 119 119 120 /** Magazine cache */ 120 121 static slab_cache_t mag_cache; 122 121 123 /** Cache for cache descriptors */ 122 124 static slab_cache_t slab_cache_cache; 125 123 126 /** Cache for external slab descriptors 124 127 * This time we want per-cpu cache, so do not make it static … … 128 131 */ 129 132 static slab_cache_t *slab_extern_cache; 133 130 134 /** Caches for malloc */ 131 135 static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1]; 136 132 137 static const char *malloc_names[] = { 133 138 "malloc-16", … … 154 159 /** Slab descriptor */ 155 160 typedef struct { 156 slab_cache_t *cache; 157 link_t link; 158 void *start; 159 size_t available; 160 size_t nextavail; 161 slab_cache_t *cache; /**< Pointer to parent cache. */ 162 link_t link; /**< List of full/partial slabs. */ 163 void *start; /**< Start address of first available item. */ 164 size_t available; /**< Count of available items in this slab. */ 165 size_t nextavail; /**< The index of next available item. 
*/ 161 166 } slab_t; 162 167 163 168 #ifdef CONFIG_DEBUG 164 static int _slab_initialized = 0;169 static unsigned int _slab_initialized = 0; 165 170 #endif 166 171 167 172 /**************************************/ 168 173 /* Slab allocation functions */ 169 170 /** 171 * Allocate frames for slab space and initialize 172 * 173 */ 174 static slab_t *slab_space_alloc(slab_cache_t *cache, int flags) 175 { 176 void *data; 174 /**************************************/ 175 176 /** Allocate frames for slab space and initialize 177 * 178 */ 179 NO_TRACE static slab_t *slab_space_alloc(slab_cache_t *cache, 180 unsigned int flags) 181 { 182 183 184 size_t zone = 0; 185 186 void *data = frame_alloc_generic(cache->order, FRAME_KA | flags, &zone); 187 if (!data) { 188 return NULL; 189 } 190 177 191 slab_t *slab; 178 192 size_t fsize; 179 unsigned int i; 180 size_t zone = 0; 181 182 data = frame_alloc_generic(cache->order, FRAME_KA | flags, &zone); 183 if (!data) { 184 return NULL; 185 } 193 186 194 if (!(cache->flags & SLAB_CACHE_SLINSIDE)) { 187 195 slab = slab_alloc(slab_extern_cache, flags); … … 196 204 197 205 /* Fill in slab structures */ 198 for (i = 0; i < ((unsigned int) 1 << cache->order); i++) 206 size_t i; 207 for (i = 0; i < ((size_t) 1 << cache->order); i++) 199 208 frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab, zone); 200 209 201 210 slab->start = data; 202 211 slab->available = cache->objects; 203 212 slab->nextavail = 0; 204 213 slab->cache = cache; 205 214 206 215 for (i = 0; i < cache->objects; i++) 207 *(( int *) (slab->start + i*cache->size)) = i + 1;208 216 *((size_t *) (slab->start + i * cache->size)) = i + 1; 217 209 218 atomic_inc(&cache->allocated_slabs); 210 219 return slab; 211 220 } 212 221 213 /** 214 * Deallocate space associated with slab 222 /** Deallocate space associated with slab 215 223 * 216 224 * @return number of freed frames 217 */ 218 static size_t slab_space_free(slab_cache_t *cache, slab_t *slab) 225 * 226 */ 227 NO_TRACE static size_t slab_space_free(slab_cache_t *cache, slab_t *slab) 219 228 { 220 229 frame_free(KA2PA(slab->start)); 221 if (! 
230 if (!(cache->flags & SLAB_CACHE_SLINSIDE)) 222 231 slab_free(slab_extern_cache, slab); 223 232 224 233 atomic_dec(&cache->allocated_slabs); 225 234 226 return 1 << cache->order;235 return (1 << cache->order); 227 236 } 228 237 229 238 /** Map object to slab structure */ 230 static slab_t *obj2slab(void *obj)239 NO_TRACE static slab_t *obj2slab(void *obj) 231 240 { 232 241 return (slab_t *) frame_get_parent(ADDR2PFN(KA2PA(obj)), 0); 233 242 } 234 243 235 /****************** ********************/244 /******************/ 236 245 /* Slab functions */ 237 238 239 /** 240 * Return object to slab and call a destructor 246 /******************/ 247 248 /** Return object to slab and call a destructor 241 249 * 242 250 * @param slab If the caller knows directly slab of the object, otherwise NULL 243 251 * 244 252 * @return Number of freed pages 245 * /246 static size_t slab_obj_destroy(slab_cache_t *cache, void *obj, slab_t *slab) 247 { 248 int freed = 0; 249 253 * 254 */ 255 NO_TRACE static size_t slab_obj_destroy(slab_cache_t *cache, void *obj, 256 slab_t *slab) 257 { 250 258 if (!slab) 251 259 slab = obj2slab(obj); 252 260 253 261 ASSERT(slab->cache == cache); 254 262 263 size_t freed = 0; 264 255 265 if (cache->destructor) 256 266 freed = cache->destructor(obj); … … 258 268 spinlock_lock(&cache->slablock); 259 269 ASSERT(slab->available < cache->objects); 260 261 *(( int *)obj) = slab->nextavail;270 271 *((size_t *) obj) = slab->nextavail; 262 272 slab->nextavail = (obj - slab->start) / cache->size; 263 273 slab->available++; 264 274 265 275 /* Move it to correct list */ 266 276 if (slab->available == cache->objects) { … … 268 278 list_remove(&slab->link); 269 279 spinlock_unlock(&cache->slablock); 270 280 271 281 return freed + slab_space_free(cache, slab); 272 273 282 } else if (slab->available == 1) { 274 283 /* It was in full, move to partial */ … … 276 285 list_prepend(&slab->link, &cache->partial_slabs); 277 286 } 287 278 288 spinlock_unlock(&cache->slablock); 279 289 return freed; 280 290 } 281 291 282 /** 283 * Take new object from slab or create new if needed 292 /** Take new object from slab or create new if needed 284 293 * 285 294 * @return Object address or null 286 */ 287 static void *slab_obj_create(slab_cache_t *cache, int flags) 288 { 295 * 296 */ 297 NO_TRACE static void *slab_obj_create(slab_cache_t *cache, unsigned int flags) 298 { 299 spinlock_lock(&cache->slablock); 300 289 301 slab_t *slab; 290 void *obj; 291 292 spinlock_lock(&cache->slablock); 293 302 294 303 if (list_empty(&cache->partial_slabs)) { 295 /* Allow recursion and reclaiming 304 /* 305 * Allow recursion and reclaiming 296 306 * - this should work, as the slab control structures 297 307 * are small and do not need to allocate with anything 298 308 * other than frame_alloc when they are allocating, 299 309 * that's why we should get recursion at most 1-level deep 310 * 300 311 */ 301 312 spinlock_unlock(&cache->slablock); … … 303 314 if (!slab) 304 315 return NULL; 316 305 317 spinlock_lock(&cache->slablock); 306 318 } else { … … 309 321 list_remove(&slab->link); 310 322 } 311 obj = slab->start + slab->nextavail * cache->size; 312 slab->nextavail = *((int *)obj); 323 324 void *obj = slab->start + slab->nextavail * cache->size; 325 slab->nextavail = *((size_t *) obj); 313 326 slab->available--; 314 327 315 328 if (!slab->available) 316 329 list_prepend(&slab->link, &cache->full_slabs); 317 330 else 318 331 list_prepend(&slab->link, &cache->partial_slabs); 319 332 320 333 spinlock_unlock(&cache->slablock); 321 
322 if ( cache->constructor && cache->constructor(obj, flags)) {334 335 if ((cache->constructor) && (cache->constructor(obj, flags))) { 323 336 /* Bad, bad, construction failed */ 324 337 slab_obj_destroy(cache, obj, slab); 325 338 return NULL; 326 339 } 340 327 341 return obj; 328 342 } 329 343 330 /**************************** **********/344 /****************************/ 331 345 /* CPU-Cache slab functions */ 332 333 /** 334 * Finds a full magazine in cache, takes it from list 335 * and returns it 336 * 337 * @param first If true, return first, else last mag 338 */ 339 static slab_magazine_t *get_mag_from_cache(slab_cache_t *cache, int first) 346 /****************************/ 347 348 /** Find a full magazine in cache, take it from list and return it 349 * 350 * @param first If true, return first, else last mag. 351 * 352 */ 353 NO_TRACE static slab_magazine_t *get_mag_from_cache(slab_cache_t *cache, 354 bool first) 340 355 { 341 356 slab_magazine_t *mag = NULL; 342 357 link_t *cur; 343 358 344 359 spinlock_lock(&cache->maglock); 345 360 if (!list_empty(&cache->magazines)) { … … 348 363 else 349 364 cur = cache->magazines.prev; 365 350 366 mag = list_get_instance(cur, slab_magazine_t, link); 351 367 list_remove(&mag->link); 352 368 atomic_dec(&cache->magazine_counter); 353 369 } 370 354 371 spinlock_unlock(&cache->maglock); 355 372 return mag; 356 373 } 357 374 358 /** Prepend magazine to magazine list in cache */ 359 static void put_mag_to_cache(slab_cache_t *cache, slab_magazine_t *mag) 375 /** Prepend magazine to magazine list in cache 376 * 377 */ 378 NO_TRACE static void put_mag_to_cache(slab_cache_t *cache, 379 slab_magazine_t *mag) 360 380 { 361 381 spinlock_lock(&cache->maglock); 362 382 363 383 list_prepend(&mag->link, &cache->magazines); 364 384 atomic_inc(&cache->magazine_counter); … … 367 387 } 368 388 369 /** 370 * Free all objects in magazine and free memory associated with magazine 389 /** Free all objects in magazine and free memory associated with magazine 371 390 * 372 391 * @return Number of freed pages 373 */ 374 static size_t magazine_destroy(slab_cache_t *cache, slab_magazine_t *mag) 375 { 376 unsigned int i; 392 * 393 */ 394 NO_TRACE static size_t magazine_destroy(slab_cache_t *cache, 395 slab_magazine_t *mag) 396 { 397 size_t i; 377 398 size_t frames = 0; 378 399 379 400 for (i = 0; i < mag->busy; i++) { 380 401 frames += slab_obj_destroy(cache, mag->objs[i], NULL); … … 383 404 384 405 slab_free(&mag_cache, mag); 385 406 386 407 return frames; 387 408 } 388 409 389 /** 390 * Find full magazine, set it as current and return it 391 * 392 * Assume cpu_magazine lock is held 393 */ 394 static slab_magazine_t *get_full_current_mag(slab_cache_t *cache) 395 { 396 slab_magazine_t *cmag, *lastmag, *newmag; 397 398 cmag = cache->mag_cache[CPU->id].current; 399 lastmag = cache->mag_cache[CPU->id].last; 410 /** Find full magazine, set it as current and return it 411 * 412 */ 413 NO_TRACE static slab_magazine_t *get_full_current_mag(slab_cache_t *cache) 414 { 415 slab_magazine_t *cmag = cache->mag_cache[CPU->id].current; 416 slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last; 417 418 ASSERT(spinlock_locked(&cache->mag_cache[CPU->id].lock)); 419 400 420 if (cmag) { /* First try local CPU magazines */ 401 421 if (cmag->busy) 402 422 return cmag; 403 404 if ( lastmag && lastmag->busy) {423 424 if ((lastmag) && (lastmag->busy)) { 405 425 cache->mag_cache[CPU->id].current = lastmag; 406 426 cache->mag_cache[CPU->id].last = cmag; … … 408 428 } 409 429 } 430 410 431 /* Local 
magazines are empty, import one from magazine list */ 411 newmag = get_mag_from_cache(cache, 1);432 slab_magazine_t *newmag = get_mag_from_cache(cache, 1); 412 433 if (!newmag) 413 434 return NULL; 414 435 415 436 if (lastmag) 416 437 magazine_destroy(cache, lastmag); 417 438 418 439 cache->mag_cache[CPU->id].last = cmag; 419 440 cache->mag_cache[CPU->id].current = newmag; 441 420 442 return newmag; 421 443 } 422 444 423 /** 424 * Try to find object in CPU-cache magazines 445 /** Try to find object in CPU-cache magazines 425 446 * 426 447 * @return Pointer to object or NULL if not available 427 */ 428 static void *magazine_obj_get(slab_cache_t *cache) 429 { 430 slab_magazine_t *mag; 431 void *obj; 432 448 * 449 */ 450 NO_TRACE static void *magazine_obj_get(slab_cache_t *cache) 451 { 433 452 if (!CPU) 434 453 return NULL; 435 454 436 455 spinlock_lock(&cache->mag_cache[CPU->id].lock); 437 438 mag = get_full_current_mag(cache);456 457 slab_magazine_t *mag = get_full_current_mag(cache); 439 458 if (!mag) { 440 459 spinlock_unlock(&cache->mag_cache[CPU->id].lock); 441 460 return NULL; 442 461 } 443 obj = mag->objs[--mag->busy]; 462 463 void *obj = mag->objs[--mag->busy]; 444 464 spinlock_unlock(&cache->mag_cache[CPU->id].lock); 465 445 466 atomic_dec(&cache->cached_objs); 446 467 … … 448 469 } 449 470 450 /** 451 * Assure that the current magazine is empty, return pointer to it, or NULL if 452 * no empty magazine is available and cannot be allocated 453 * 454 * Assume mag_cache[CPU->id].lock is held 455 * 456 * We have 2 magazines bound to processor. 457 * First try the current. 458 * If full, try the last. 459 * If full, put to magazines list. 460 * allocate new, exchange last & current 461 * 462 */ 463 static slab_magazine_t *make_empty_current_mag(slab_cache_t *cache) 464 { 465 slab_magazine_t *cmag,*lastmag,*newmag; 466 467 cmag = cache->mag_cache[CPU->id].current; 468 lastmag = cache->mag_cache[CPU->id].last; 469 471 /** Assure that the current magazine is empty, return pointer to it, 472 * or NULL if no empty magazine is available and cannot be allocated 473 * 474 * We have 2 magazines bound to processor. 475 * First try the current. 476 * If full, try the last. 477 * If full, put to magazines list. 478 * 479 */ 480 NO_TRACE static slab_magazine_t *make_empty_current_mag(slab_cache_t *cache) 481 { 482 slab_magazine_t *cmag = cache->mag_cache[CPU->id].current; 483 slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last; 484 485 ASSERT(spinlock_locked(&cache->mag_cache[CPU->id].lock)); 486 470 487 if (cmag) { 471 488 if (cmag->busy < cmag->size) 472 489 return cmag; 473 if (lastmag && lastmag->busy < lastmag->size) { 490 491 if ((lastmag) && (lastmag->busy < lastmag->size)) { 474 492 cache->mag_cache[CPU->id].last = cmag; 475 493 cache->mag_cache[CPU->id].current = lastmag; … … 477 495 } 478 496 } 497 479 498 /* current | last are full | nonexistent, allocate new */ 480 /* We do not want to sleep just because of caching */ 481 /* Especially we do not want reclaiming to start, as 482 * this would deadlock */ 483 newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM); 499 500 /* 501 * We do not want to sleep just because of caching, 502 * especially we do not want reclaiming to start, as 503 * this would deadlock. 
504 * 505 */ 506 slab_magazine_t *newmag = slab_alloc(&mag_cache, 507 FRAME_ATOMIC | FRAME_NO_RECLAIM); 484 508 if (!newmag) 485 509 return NULL; 510 486 511 newmag->size = SLAB_MAG_SIZE; 487 512 newmag->busy = 0; 488 513 489 514 /* Flush last to magazine list */ 490 515 if (lastmag) 491 516 put_mag_to_cache(cache, lastmag); 492 517 493 518 /* Move current as last, save new as current */ 494 cache->mag_cache[CPU->id].last = cmag; 495 cache->mag_cache[CPU->id].current = newmag; 496 519 cache->mag_cache[CPU->id].last = cmag; 520 cache->mag_cache[CPU->id].current = newmag; 521 497 522 return newmag; 498 523 } 499 524 500 /** 501 * Put object into CPU-cache magazine 502 * 503 * @return 0 - success, -1 - could not get memory 504 */ 505 static int magazine_obj_put(slab_cache_t *cache, void *obj) 506 { 507 slab_magazine_t *mag; 508 525 /** Put object into CPU-cache magazine 526 * 527 * @return 0 on success, -1 on no memory 528 * 529 */ 530 NO_TRACE static int magazine_obj_put(slab_cache_t *cache, void *obj) 531 { 509 532 if (!CPU) 510 533 return -1; 511 534 512 535 spinlock_lock(&cache->mag_cache[CPU->id].lock); 513 514 mag = make_empty_current_mag(cache);536 537 slab_magazine_t *mag = make_empty_current_mag(cache); 515 538 if (!mag) { 516 539 spinlock_unlock(&cache->mag_cache[CPU->id].lock); … … 519 542 520 543 mag->objs[mag->busy++] = obj; 521 544 522 545 spinlock_unlock(&cache->mag_cache[CPU->id].lock); 546 523 547 atomic_inc(&cache->cached_objs); 548 524 549 return 0; 525 550 } 526 551 527 528 /**************************************/ 552 /************************/ 529 553 /* Slab cache functions */ 530 531 /** Return number of objects that fit in certain cache size */ 532 static unsigned int comp_objects(slab_cache_t *cache) 554 /************************/ 555 556 /** Return number of objects that fit in certain cache size 557 * 558 */ 559 NO_TRACE static size_t comp_objects(slab_cache_t *cache) 533 560 { 534 561 if (cache->flags & SLAB_CACHE_SLINSIDE) 535 return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) /536 cache->size;537 else 562 return ((PAGE_SIZE << cache->order) 563 - sizeof(slab_t)) / cache->size; 564 else 538 565 return (PAGE_SIZE << cache->order) / cache->size; 539 566 } 540 567 541 /** Return wasted space in slab */542 static unsigned int badness(slab_cache_t *cache) 543 { 544 unsigned int objects; 545 unsigned int ssize; 546 547 objects = comp_objects(cache);548 ssize = PAGE_SIZE << cache->order;568 /** Return wasted space in slab 569 * 570 */ 571 NO_TRACE static size_t badness(slab_cache_t *cache) 572 { 573 size_t objects = comp_objects(cache); 574 size_t ssize = PAGE_SIZE << cache->order; 575 549 576 if (cache->flags & SLAB_CACHE_SLINSIDE) 550 577 ssize -= sizeof(slab_t); 578 551 579 return ssize - objects * cache->size; 552 580 } 553 581 554 /** 555 * Initialize mag_cache structure in slab cache 556 */ 557 static bool make_magcache(slab_cache_t *cache) 558 { 559 unsigned int i; 560 582 /** Initialize mag_cache structure in slab cache 583 * 584 */ 585 NO_TRACE static bool make_magcache(slab_cache_t *cache) 586 { 561 587 ASSERT(_slab_initialized >= 2); 562 588 563 589 cache->mag_cache = malloc(sizeof(slab_mag_cache_t) * config.cpu_count, 564 590 FRAME_ATOMIC); 565 591 if (!cache->mag_cache) 566 592 return false; 567 593 594 size_t i; 568 595 for (i = 0; i < config.cpu_count; i++) { 569 596 memsetb(&cache->mag_cache[i], sizeof(cache->mag_cache[i]), 0); 570 597 spinlock_initialize(&cache->mag_cache[i].lock, 571 "slab_maglock_cpu"); 572 } 598 "slab.cache.mag_cache[].lock"); 599 
} 600 573 601 return true; 574 602 } 575 603 576 /** Initialize allocated memory as a slab cache */ 577 static void _slab_cache_create(slab_cache_t *cache, const char *name, 578 size_t size, size_t align, int (*constructor)(void *obj, int kmflag), 579 int (*destructor)(void *obj), int flags) 580 { 581 int pages; 582 ipl_t ipl; 583 604 /** Initialize allocated memory as a slab cache 605 * 606 */ 607 NO_TRACE static void _slab_cache_create(slab_cache_t *cache, const char *name, 608 size_t size, size_t align, int (*constructor)(void *obj, 609 unsigned int kmflag), size_t (*destructor)(void *obj), unsigned int flags) 610 { 584 611 memsetb(cache, sizeof(*cache), 0); 585 612 cache->name = name; 586 613 587 614 if (align < sizeof(unative_t)) 588 615 align = sizeof(unative_t); 616 589 617 size = ALIGN_UP(size, align); 590 618 591 619 cache->size = size; 592 593 620 cache->constructor = constructor; 594 621 cache->destructor = destructor; 595 622 cache->flags = flags; 596 623 597 624 list_initialize(&cache->full_slabs); 598 625 list_initialize(&cache->partial_slabs); 599 626 list_initialize(&cache->magazines); 600 spinlock_initialize(&cache->slablock, "slab_lock"); 601 spinlock_initialize(&cache->maglock, "slab_maglock"); 627 628 spinlock_initialize(&cache->slablock, "slab.cache.slablock"); 629 spinlock_initialize(&cache->maglock, "slab.cache.maglock"); 630 602 631 if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) 603 632 (void) make_magcache(cache); 604 633 605 634 /* Compute slab sizes, object counts in slabs etc. */ 606 635 if (cache->size < SLAB_INSIDE_SIZE) 607 636 cache->flags |= SLAB_CACHE_SLINSIDE; 608 637 609 638 /* Minimum slab order */ 610 pages = SIZE2FRAMES(cache->size); 639 size_t pages = SIZE2FRAMES(cache->size); 640 611 641 /* We need the 2^order >= pages */ 612 642 if (pages == 1) … … 614 644 else 615 645 cache->order = fnzb(pages - 1) + 1; 616 617 while (badness(cache) > SLAB_MAX_BADNESS(cache)) {646 647 while (badness(cache) > SLAB_MAX_BADNESS(cache)) 618 648 cache->order += 1; 619 }649 620 650 cache->objects = comp_objects(cache); 651 621 652 /* If info fits in, put it inside */ 622 653 if (badness(cache) > sizeof(slab_t)) 623 654 cache->flags |= SLAB_CACHE_SLINSIDE; 624 655 625 656 /* Add cache to cache list */ 626 ipl = interrupts_disable(); 627 spinlock_lock(&slab_cache_lock); 628 657 irq_spinlock_lock(&slab_cache_lock, true); 629 658 list_append(&cache->link, &slab_cache_list); 630 631 spinlock_unlock(&slab_cache_lock); 632 interrupts_restore(ipl); 633 } 634 635 /** Create slab cache*/659 irq_spinlock_unlock(&slab_cache_lock, true); 660 } 661 662 /** Create slab cache 663 * 664 */ 636 665 slab_cache_t *slab_cache_create(const char *name, size_t size, size_t align, 637 int (*constructor)(void *obj, int kmflag), int (*destructor)(void *obj), 638 int flags) 639 { 640 slab_cache_t *cache; 641 642 cache = slab_alloc(&slab_cache_cache, 0); 666 int (*constructor)(void *obj, unsigned int kmflag), 667 size_t (*destructor)(void *obj), unsigned int flags) 668 { 669 slab_cache_t *cache = slab_alloc(&slab_cache_cache, 0); 643 670 _slab_cache_create(cache, name, size, align, constructor, destructor, 644 671 flags); 672 645 673 return cache; 646 674 } 647 675 648 /** 649 * Reclaim space occupied by objects that are already free 676 /** Reclaim space occupied by objects that are already free 650 677 * 651 678 * @param flags If contains SLAB_RECLAIM_ALL, do aggressive freeing 679 * 652 680 * @return Number of freed pages 653 */ 654 static size_t _slab_reclaim(slab_cache_t *cache, int flags) 655 { 
656 unsigned int i; 681 * 682 */ 683 NO_TRACE static size_t _slab_reclaim(slab_cache_t *cache, unsigned int flags) 684 { 685 if (cache->flags & SLAB_CACHE_NOMAGAZINE) 686 return 0; /* Nothing to do */ 687 688 /* 689 * We count up to original magazine count to avoid 690 * endless loop 691 */ 692 atomic_count_t magcount = atomic_get(&cache->magazine_counter); 693 657 694 slab_magazine_t *mag; 658 695 size_t frames = 0; 659 int magcount; 660 661 if (cache->flags & SLAB_CACHE_NOMAGAZINE) 662 return 0; /* Nothing to do */ 663 664 /* We count up to original magazine count to avoid 665 * endless loop 666 */ 667 magcount = atomic_get(&cache->magazine_counter); 668 while (magcount-- && (mag=get_mag_from_cache(cache, 0))) { 669 frames += magazine_destroy(cache,mag); 670 if (!(flags & SLAB_RECLAIM_ALL) && frames) 696 697 while ((magcount--) && (mag = get_mag_from_cache(cache, 0))) { 698 frames += magazine_destroy(cache, mag); 699 if ((!(flags & SLAB_RECLAIM_ALL)) && (frames)) 671 700 break; 672 701 } … … 675 704 /* Free cpu-bound magazines */ 676 705 /* Destroy CPU magazines */ 706 size_t i; 677 707 for (i = 0; i < config.cpu_count; i++) { 678 708 spinlock_lock(&cache->mag_cache[i].lock); 679 709 680 710 mag = cache->mag_cache[i].current; 681 711 if (mag) … … 687 717 frames += magazine_destroy(cache, mag); 688 718 cache->mag_cache[i].last = NULL; 689 719 690 720 spinlock_unlock(&cache->mag_cache[i].lock); 691 721 } 692 722 } 693 723 694 724 return frames; 695 725 } 696 726 697 /** Check that there are no slabs and remove cache from system */ 727 /** Check that there are no slabs and remove cache from system 728 * 729 */ 698 730 void slab_cache_destroy(slab_cache_t *cache) 699 731 { 700 ipl_t ipl; 701 702 /* First remove cache from link, so that we don't need 732 /* 733 * First remove cache from link, so that we don't need 703 734 * to disable interrupts later 735 * 704 736 */ 705 706 ipl = interrupts_disable(); 707 spinlock_lock(&slab_cache_lock); 708 737 irq_spinlock_lock(&slab_cache_lock, true); 709 738 list_remove(&cache->link); 710 711 spinlock_unlock(&slab_cache_lock); 712 interrupts_restore(ipl); 713 714 /* Do not lock anything, we assume the software is correct and 715 * does not touch the cache when it decides to destroy it */ 739 irq_spinlock_unlock(&slab_cache_lock, true); 740 741 /* 742 * Do not lock anything, we assume the software is correct and 743 * does not touch the cache when it decides to destroy it 744 * 745 */ 716 746 717 747 /* Destroy all magazines */ 718 748 _slab_reclaim(cache, SLAB_RECLAIM_ALL); 719 749 720 750 /* All slabs must be empty */ 721 if ( !list_empty(&cache->full_slabs) ||722 !list_empty(&cache->partial_slabs))751 if ((!list_empty(&cache->full_slabs)) || 752 (!list_empty(&cache->partial_slabs))) 723 753 panic("Destroying cache that is not empty."); 724 754 725 755 if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) 726 756 free(cache->mag_cache); 757 727 758 slab_free(&slab_cache_cache, cache); 728 759 } 729 760 730 /** Allocate new object from cache - if no flags given, always returns memory */ 731 void *slab_alloc(slab_cache_t *cache, int flags) 732 { 733 ipl_t ipl; 761 /** Allocate new object from cache - if no flags given, always returns memory 762 * 763 */ 764 void *slab_alloc(slab_cache_t *cache, unsigned int flags) 765 { 766 /* Disable interrupts to avoid deadlocks with interrupt handlers */ 767 ipl_t ipl = interrupts_disable(); 768 734 769 void *result = NULL; 735 770 736 /* Disable interrupts to avoid deadlocks with interrupt handlers */ 737 ipl = 
interrupts_disable(); 738 739 if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) { 771 if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) 740 772 result = magazine_obj_get(cache); 741 }773 742 774 if (!result) 743 775 result = slab_obj_create(cache, flags); 744 776 745 777 interrupts_restore(ipl); 746 778 747 779 if (result) 748 780 atomic_inc(&cache->allocated_objs); 749 781 750 782 return result; 751 783 } 752 784 753 /** Return object to cache, use slab if known */754 static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab) 755 { 756 ipl_t ipl; 757 758 ipl = interrupts_disable();759 785 /** Return object to cache, use slab if known 786 * 787 */ 788 NO_TRACE static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab) 789 { 790 ipl_t ipl = interrupts_disable(); 791 760 792 if ((cache->flags & SLAB_CACHE_NOMAGAZINE) || 761 magazine_obj_put(cache, obj)) {793 (magazine_obj_put(cache, obj))) 762 794 slab_obj_destroy(cache, obj, slab); 763 764 } 795 765 796 interrupts_restore(ipl); 766 797 atomic_dec(&cache->allocated_objs); 767 798 } 768 799 769 /** Return slab object to cache */ 800 /** Return slab object to cache 801 * 802 */ 770 803 void slab_free(slab_cache_t *cache, void *obj) 771 804 { … … 773 806 } 774 807 775 /* Go through all caches and reclaim what is possible */ 776 size_t slab_reclaim(int flags) 777 { 778 slab_cache_t *cache; 808 /** Go through all caches and reclaim what is possible 809 * 810 * Interrupts must be disabled before calling this function, 811 * otherwise memory allocation from interrupts can deadlock. 812 * 813 */ 814 size_t slab_reclaim(unsigned int flags) 815 { 816 irq_spinlock_lock(&slab_cache_lock, false); 817 818 size_t frames = 0; 779 819 link_t *cur; 780 size_t frames = 0;781 782 spinlock_lock(&slab_cache_lock);783 784 /* TODO: Add assert, that interrupts are disabled, otherwise785 * memory allocation from interrupts can deadlock.786 */787 788 820 for (cur = slab_cache_list.next; cur != &slab_cache_list; 789 821 cur = cur->next) { 790 cache = list_get_instance(cur, slab_cache_t, link);822 slab_cache_t *cache = list_get_instance(cur, slab_cache_t, link); 791 823 frames += _slab_reclaim(cache, flags); 792 824 } 793 794 spinlock_unlock(&slab_cache_lock);795 825 826 irq_spinlock_unlock(&slab_cache_lock, false); 827 796 828 return frames; 797 829 } 798 830 799 800 /* Print list of slabs */ 831 /* Print list of slabs 832 * 833 */ 801 834 void slab_print_list(void) 802 835 { 803 int skip = 0; 804 805 printf("slab name size pages obj/pg slabs cached allocated" 806 " ctl\n"); 807 printf("---------------- -------- ------ ------ ------ ------ ---------" 808 " ---\n"); 809 836 printf("[slab name ] [size ] [pages ] [obj/pg] [slabs ]" 837 " [cached] [alloc ] [ctl]\n"); 838 839 size_t skip = 0; 810 840 while (true) { 811 slab_cache_t *cache;812 link_t *cur;813 ipl_t ipl;814 int i;815 816 841 /* 817 842 * We must not hold the slab_cache_lock spinlock when printing … … 836 861 * statistics. 
837 862 */ 838 839 ipl = interrupts_disable(); 840 spinlock_lock(&slab_cache_lock); 841 863 864 irq_spinlock_lock(&slab_cache_lock, true); 865 866 link_t *cur; 867 size_t i; 842 868 for (i = 0, cur = slab_cache_list.next; 843 i < skip && cur != &slab_cache_list; 844 i++, cur = cur->next) 845 ; 846 869 (i < skip) && (cur != &slab_cache_list); 870 i++, cur = cur->next); 871 847 872 if (cur == &slab_cache_list) { 848 spinlock_unlock(&slab_cache_lock); 849 interrupts_restore(ipl); 873 irq_spinlock_unlock(&slab_cache_lock, true); 850 874 break; 851 875 } 852 876 853 877 skip++; 854 855 cache = list_get_instance(cur, slab_cache_t, link);856 878 879 slab_cache_t *cache = list_get_instance(cur, slab_cache_t, link); 880 857 881 const char *name = cache->name; 858 882 uint8_t order = cache->order; 859 883 size_t size = cache->size; 860 unsigned int objects = cache->objects;884 size_t objects = cache->objects; 861 885 long allocated_slabs = atomic_get(&cache->allocated_slabs); 862 886 long cached_objs = atomic_get(&cache->cached_objs); 863 887 long allocated_objs = atomic_get(&cache->allocated_objs); 864 int flags = cache->flags; 865 866 spinlock_unlock(&slab_cache_lock); 867 interrupts_restore(ipl); 868 869 printf("%-16s %8" PRIs " %6d %6u %6ld %6ld %9ld %-3s\n", 888 unsigned int flags = cache->flags; 889 890 irq_spinlock_unlock(&slab_cache_lock, true); 891 892 printf("%-18s %8" PRIs " %8u %8" PRIs " %8ld %8ld %8ld %-5s\n", 870 893 name, size, (1 << order), objects, allocated_slabs, 871 894 cached_objs, allocated_objs, … … 876 899 void slab_cache_init(void) 877 900 { 878 int i, size;879 880 901 /* Initialize magazine cache */ 881 902 _slab_cache_create(&mag_cache, "slab_magazine", … … 883 904 sizeof(uintptr_t), NULL, NULL, SLAB_CACHE_NOMAGAZINE | 884 905 SLAB_CACHE_SLINSIDE); 906 885 907 /* Initialize slab_cache cache */ 886 908 _slab_cache_create(&slab_cache_cache, "slab_cache", 887 909 sizeof(slab_cache_cache), sizeof(uintptr_t), NULL, NULL, 888 910 SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE); 911 889 912 /* Initialize external slab cache */ 890 913 slab_extern_cache = slab_cache_create("slab_extern", sizeof(slab_t), 0, 891 914 NULL, NULL, SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED); 892 915 893 916 /* Initialize structures for malloc */ 917 size_t i; 918 size_t size; 919 894 920 for (i = 0, size = (1 << SLAB_MIN_MALLOC_W); 895 921 i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1); … … 898 924 NULL, NULL, SLAB_CACHE_MAGDEFERRED); 899 925 } 926 900 927 #ifdef CONFIG_DEBUG 901 928 _slab_initialized = 1; … … 906 933 * 907 934 * Kernel calls this function, when it knows the real number of 908 * processors. 909 * Allocate slab for cpucache and enable it on all existing910 * slabs that are SLAB_CACHE_MAGDEFERRED935 * processors. 
Allocate slab for cpucache and enable it on all 936 * existing slabs that are SLAB_CACHE_MAGDEFERRED 937 * 911 938 */ 912 939 void slab_enable_cpucache(void) 913 940 { 914 link_t *cur;915 slab_cache_t *s;916 917 941 #ifdef CONFIG_DEBUG 918 942 _slab_initialized = 2; 919 943 #endif 920 921 spinlock_lock(&slab_cache_lock); 922 944 945 irq_spinlock_lock(&slab_cache_lock, false); 946 947 link_t *cur; 923 948 for (cur = slab_cache_list.next; cur != &slab_cache_list; 924 cur = cur->next) {925 s = list_get_instance(cur, slab_cache_t, link);926 if ((s ->flags & SLAB_CACHE_MAGDEFERRED) !=949 cur = cur->next) { 950 slab_cache_t *slab = list_get_instance(cur, slab_cache_t, link); 951 if ((slab->flags & SLAB_CACHE_MAGDEFERRED) != 927 952 SLAB_CACHE_MAGDEFERRED) 928 953 continue; 929 (void) make_magcache(s); 930 s->flags &= ~SLAB_CACHE_MAGDEFERRED; 931 } 932 933 spinlock_unlock(&slab_cache_lock); 934 } 935 936 /**************************************/ 937 /* kalloc/kfree functions */ 938 void *malloc(unsigned int size, int flags) 954 955 (void) make_magcache(slab); 956 slab->flags &= ~SLAB_CACHE_MAGDEFERRED; 957 } 958 959 irq_spinlock_unlock(&slab_cache_lock, false); 960 } 961 962 void *malloc(size_t size, unsigned int flags) 939 963 { 940 964 ASSERT(_slab_initialized); … … 943 967 if (size < (1 << SLAB_MIN_MALLOC_W)) 944 968 size = (1 << SLAB_MIN_MALLOC_W); 945 946 int idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;947 969 970 uint8_t idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1; 971 948 972 return slab_alloc(malloc_caches[idx], flags); 949 973 } 950 974 951 void *realloc(void *ptr, unsigned int size,int flags)975 void *realloc(void *ptr, size_t size, unsigned int flags) 952 976 { 953 977 ASSERT(_slab_initialized); … … 959 983 if (size < (1 << SLAB_MIN_MALLOC_W)) 960 984 size = (1 << SLAB_MIN_MALLOC_W); 961 int idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;985 uint8_t idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1; 962 986 963 987 new_ptr = slab_alloc(malloc_caches[idx], flags); … … 980 1004 if (!ptr) 981 1005 return; 982 1006 983 1007 slab_t *slab = obj2slab(ptr); 984 1008 _slab_free(slab->cache, ptr, slab);