Changes in kernel/generic/src/mm/slab.c [98000fb:ab6f2507] in mainline
File: kernel/generic/src/mm/slab.c (1 edited)
Legend: unmodified lines appear as plain context, lines prefixed with '+' were added in rab6f2507, and lines prefixed with '-' were removed.
kernel/generic/src/mm/slab.c
--- kernel/generic/src/mm/slab.c (r98000fb)
+++ kernel/generic/src/mm/slab.c (rab6f2507)

@@ -33 +33 @@
 /**
  * @file
- * @brief
+ * @brief Slab allocator.
  *
  * The slab allocator is closely modelled after OpenSolaris slab allocator.
@@ -50 +50 @@
  *
  * The slab allocator supports per-CPU caches ('magazines') to facilitate
- * good SMP scaling. 
+ * good SMP scaling.
  *
  * When a new object is being allocated, it is first checked, if it is
@@ -65 +65 @@
  * thrashing when somebody is allocating/deallocating 1 item at the magazine
  * size boundary. LIFO order is enforced, which should avoid fragmentation
- * as much as possible. 
- * 
+ * as much as possible.
+ *
  * Every cache contains list of full slabs and list of partially full slabs.
  * Empty slabs are immediately freed (thrashing will be avoided because
- * of magazines). 
+ * of magazines).
  *
  * The slab information structure is kept inside the data area, if possible.
@@ -95 +95 @@
  *
  * @todo
- * it might be good to add granularity of locks even to slab level,
+ * It might be good to add granularity of locks even to slab level,
  * we could then try_spinlock over all partial slabs and thus improve
- * scalability even on slab level
+ * scalability even on slab level.
+ *
  */
 
@@ -114 +115 @@
 #include <macros.h>
 
-SPINLOCK_INITIALIZE(slab_cache_lock);
+IRQ_SPINLOCK_STATIC_INITIALIZE(slab_cache_lock);
 static LIST_INITIALIZE(slab_cache_list);
 
 /** Magazine cache */
 static slab_cache_t mag_cache;
+
 /** Cache for cache descriptors */
 static slab_cache_t slab_cache_cache;
+
 /** Cache for external slab descriptors
  * This time we want per-cpu cache, so do not make it static
@@ -128 +131 @@
  */
 static slab_cache_t *slab_extern_cache;
+
 /** Caches for malloc */
 static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1];
-static char *malloc_names[] = {
+
+static const char *malloc_names[] = {
     "malloc-16",
     "malloc-32",
@@ -154 +159 @@
 /** Slab descriptor */
 typedef struct {
-    slab_cache_t *cache;
-    link_t link;
-    void *start;
-    size_t available;
-    size_t nextavail;
+    slab_cache_t *cache;  /**< Pointer to parent cache. */
+    link_t link;          /**< List of full/partial slabs. */
+    void *start;          /**< Start address of first available item. */
+    size_t available;     /**< Count of available items in this slab. */
+    size_t nextavail;     /**< The index of next available item. */
 } slab_t;
 
 #ifdef CONFIG_DEBUG
-static int _slab_initialized = 0;
+static unsigned int _slab_initialized = 0;
 #endif
 
 /**************************************/
 /* Slab allocation functions */
-
-/**
- * Allocate frames for slab space and initialize
- *
- */
-static slab_t *slab_space_alloc(slab_cache_t *cache, int flags)
-{
-    void *data;
+/**************************************/
+
+/** Allocate frames for slab space and initialize
+ *
+ */
+NO_TRACE static slab_t *slab_space_alloc(slab_cache_t *cache,
+    unsigned int flags)
+{
+
+
+    size_t zone = 0;
+
+    void *data = frame_alloc_generic(cache->order, FRAME_KA | flags, &zone);
+    if (!data) {
+        return NULL;
+    }
+
     slab_t *slab;
     size_t fsize;
-    unsigned int i;
-    size_t zone = 0;
-
-    data = frame_alloc_generic(cache->order, FRAME_KA | flags, &zone);
-    if (!data) {
-        return NULL;
-    }
+
     if (!(cache->flags & SLAB_CACHE_SLINSIDE)) {
         slab = slab_alloc(slab_extern_cache, flags);
@@ -196 +204 @@
 
     /* Fill in slab structures */
-    for (i = 0; i < ((unsigned int) 1 << cache->order); i++)
+    size_t i;
+    for (i = 0; i < ((size_t) 1 << cache->order); i++)
         frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab, zone);
 
     slab->start = data;
     slab->available = cache->objects;
     slab->nextavail = 0;
     slab->cache = cache;
 
     for (i = 0; i < cache->objects; i++)
-        *((int *) (slab->start + i*cache->size)) = i + 1;
-
+        *((size_t *) (slab->start + i * cache->size)) = i + 1;
+
     atomic_inc(&cache->allocated_slabs);
     return slab;
 }
 
-/**
- * Deallocate space associated with slab
+/** Deallocate space associated with slab
  *
  * @return number of freed frames
- */
-static size_t slab_space_free(slab_cache_t *cache, slab_t *slab)
+ *
+ */
+NO_TRACE static size_t slab_space_free(slab_cache_t *cache, slab_t *slab)
 {
     frame_free(KA2PA(slab->start));
-    if (! (cache->flags & SLAB_CACHE_SLINSIDE))
+    if (!(cache->flags & SLAB_CACHE_SLINSIDE))
         slab_free(slab_extern_cache, slab);
 
     atomic_dec(&cache->allocated_slabs);
 
-    return 1 << cache->order;
+    return (1 << cache->order);
 }
 
 /** Map object to slab structure */
-static slab_t *obj2slab(void *obj)
+NO_TRACE static slab_t *obj2slab(void *obj)
 {
     return (slab_t *) frame_get_parent(ADDR2PFN(KA2PA(obj)), 0);
 }
 
-/**************************************/
+/******************/
 /* Slab functions */
-
-
-/**
- * Return object to slab and call a destructor
+/******************/
+
+/** Return object to slab and call a destructor
  *
  * @param slab If the caller knows directly slab of the object, otherwise NULL
  *
  * @return Number of freed pages
- */
-static size_t slab_obj_destroy(slab_cache_t *cache, void *obj, slab_t *slab)
-{
-    int freed = 0;
-
+ *
+ */
+NO_TRACE static size_t slab_obj_destroy(slab_cache_t *cache, void *obj,
+    slab_t *slab)
+{
     if (!slab)
         slab = obj2slab(obj);
 
     ASSERT(slab->cache == cache);
 
+    size_t freed = 0;
+
     if (cache->destructor)
         freed = cache->destructor(obj);
@@ -258 +268 @@
     spinlock_lock(&cache->slablock);
     ASSERT(slab->available < cache->objects);
-
-    *((int *)obj) = slab->nextavail;
+
+    *((size_t *) obj) = slab->nextavail;
     slab->nextavail = (obj - slab->start) / cache->size;
     slab->available++;
 
     /* Move it to correct list */
     if (slab->available == cache->objects) {
@@ -268 +278 @@
         list_remove(&slab->link);
         spinlock_unlock(&cache->slablock);
 
         return freed + slab_space_free(cache, slab);
-
     } else if (slab->available == 1) {
         /* It was in full, move to partial */
@@ -276 +285 @@
         list_prepend(&slab->link, &cache->partial_slabs);
     }
+
     spinlock_unlock(&cache->slablock);
     return freed;
 }
 
-/**
- * Take new object from slab or create new if needed
+/** Take new object from slab or create new if needed
  *
  * @return Object address or null
- */
-static void *slab_obj_create(slab_cache_t *cache, int flags)
-{
+ *
+ */
+NO_TRACE static void *slab_obj_create(slab_cache_t *cache, unsigned int flags)
+{
+    spinlock_lock(&cache->slablock);
+
     slab_t *slab;
-    void *obj;
-
-    spinlock_lock(&cache->slablock);
 
     if (list_empty(&cache->partial_slabs)) {
-        /* Allow recursion and reclaiming
+        /*
+         * Allow recursion and reclaiming
          * - this should work, as the slab control structures
          * are small and do not need to allocate with anything
          * other than frame_alloc when they are allocating,
          * that's why we should get recursion at most 1-level deep
+         *
          */
         spinlock_unlock(&cache->slablock);
@@ -303 +314 @@
         if (!slab)
             return NULL;
+
         spinlock_lock(&cache->slablock);
     } else {
@@ -309 +321 @@
         list_remove(&slab->link);
     }
-    obj = slab->start + slab->nextavail * cache->size;
-    slab->nextavail = *((int *)obj);
+
+    void *obj = slab->start + slab->nextavail * cache->size;
+    slab->nextavail = *((size_t *) obj);
     slab->available--;
 
     if (!slab->available)
         list_prepend(&slab->link, &cache->full_slabs);
     else
         list_prepend(&slab->link, &cache->partial_slabs);
 
     spinlock_unlock(&cache->slablock);
-
-    if ( cache->constructor && cache->constructor(obj, flags)) {
+
+    if ((cache->constructor) && (cache->constructor(obj, flags))) {
         /* Bad, bad, construction failed */
         slab_obj_destroy(cache, obj, slab);
         return NULL;
     }
+
     return obj;
 }
 
-/**************************************/
+/****************************/
 /* CPU-Cache slab functions */
-
-/**
- * Finds a full magazine in cache, takes it from list
- * and returns it
- *
- * @param first If true, return first, else last mag
- */
-static slab_magazine_t *get_mag_from_cache(slab_cache_t *cache, int first)
+/****************************/
+
+/** Find a full magazine in cache, take it from list and return it
+ *
+ * @param first If true, return first, else last mag.
+ *
+ */
+NO_TRACE static slab_magazine_t *get_mag_from_cache(slab_cache_t *cache,
+    bool first)
 {
     slab_magazine_t *mag = NULL;
     link_t *cur;
 
     spinlock_lock(&cache->maglock);
     if (!list_empty(&cache->magazines)) {
@@ -348 +363 @@
         else
             cur = cache->magazines.prev;
+
         mag = list_get_instance(cur, slab_magazine_t, link);
         list_remove(&mag->link);
         atomic_dec(&cache->magazine_counter);
     }
+
     spinlock_unlock(&cache->maglock);
     return mag;
 }
 
-/** Prepend magazine to magazine list in cache */
-static void put_mag_to_cache(slab_cache_t *cache, slab_magazine_t *mag)
+/** Prepend magazine to magazine list in cache
+ *
+ */
+NO_TRACE static void put_mag_to_cache(slab_cache_t *cache,
+    slab_magazine_t *mag)
 {
     spinlock_lock(&cache->maglock);
 
     list_prepend(&mag->link, &cache->magazines);
     atomic_inc(&cache->magazine_counter);
@@ -367 +387 @@
 }
 
-/**
- * Free all objects in magazine and free memory associated with magazine
+/** Free all objects in magazine and free memory associated with magazine
  *
  * @return Number of freed pages
- */
-static size_t magazine_destroy(slab_cache_t *cache, slab_magazine_t *mag)
-{
-    unsigned int i;
+ *
+ */
+NO_TRACE static size_t magazine_destroy(slab_cache_t *cache,
+    slab_magazine_t *mag)
+{
+    size_t i;
     size_t frames = 0;
 
     for (i = 0; i < mag->busy; i++) {
         frames += slab_obj_destroy(cache, mag->objs[i], NULL);
@@ -383 +404 @@
 
     slab_free(&mag_cache, mag);
 
     return frames;
 }
 
-/**
- * Find full magazine, set it as current and return it
- *
- * Assume cpu_magazine lock is held
- */
-static slab_magazine_t *get_full_current_mag(slab_cache_t *cache)
-{
-    slab_magazine_t *cmag, *lastmag, *newmag;
-
-    cmag = cache->mag_cache[CPU->id].current;
-    lastmag = cache->mag_cache[CPU->id].last;
+/** Find full magazine, set it as current and return it
+ *
+ */
+NO_TRACE static slab_magazine_t *get_full_current_mag(slab_cache_t *cache)
+{
+    slab_magazine_t *cmag = cache->mag_cache[CPU->id].current;
+    slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last;
+
+    ASSERT(spinlock_locked(&cache->mag_cache[CPU->id].lock));
+
     if (cmag) { /* First try local CPU magazines */
         if (cmag->busy)
             return cmag;
-
-        if ( lastmag && lastmag->busy) {
+
+        if ((lastmag) && (lastmag->busy)) {
             cache->mag_cache[CPU->id].current = lastmag;
             cache->mag_cache[CPU->id].last = cmag;
@@ -408 +428 @@
         }
     }
+
     /* Local magazines are empty, import one from magazine list */
-    newmag = get_mag_from_cache(cache, 1);
+    slab_magazine_t *newmag = get_mag_from_cache(cache, 1);
     if (!newmag)
         return NULL;
 
     if (lastmag)
         magazine_destroy(cache, lastmag);
 
     cache->mag_cache[CPU->id].last = cmag;
     cache->mag_cache[CPU->id].current = newmag;
+
     return newmag;
 }
 
-/**
- * Try to find object in CPU-cache magazines
+/** Try to find object in CPU-cache magazines
  *
  * @return Pointer to object or NULL if not available
- */
-static void *magazine_obj_get(slab_cache_t *cache)
-{
-    slab_magazine_t *mag;
-    void *obj;
-
+ *
+ */
+NO_TRACE static void *magazine_obj_get(slab_cache_t *cache)
+{
     if (!CPU)
         return NULL;
 
     spinlock_lock(&cache->mag_cache[CPU->id].lock);
-
-    mag = get_full_current_mag(cache);
+
+    slab_magazine_t *mag = get_full_current_mag(cache);
     if (!mag) {
         spinlock_unlock(&cache->mag_cache[CPU->id].lock);
         return NULL;
     }
-    obj = mag->objs[--mag->busy];
+
+    void *obj = mag->objs[--mag->busy];
     spinlock_unlock(&cache->mag_cache[CPU->id].lock);
+
     atomic_dec(&cache->cached_objs);
 
@@ -448 +469 @@
 }
 
-/**
- * Assure that the current magazine is empty, return pointer to it, or NULL if
- * no empty magazine is available and cannot be allocated
- *
- * Assume mag_cache[CPU->id].lock is held
- *
- * We have 2 magazines bound to processor.
- * First try the current.
- * If full, try the last.
- * If full, put to magazines list.
- * allocate new, exchange last & current
- *
- */
-static slab_magazine_t *make_empty_current_mag(slab_cache_t *cache)
-{
-    slab_magazine_t *cmag,*lastmag,*newmag;
-
-    cmag = cache->mag_cache[CPU->id].current;
-    lastmag = cache->mag_cache[CPU->id].last;
-
+/** Assure that the current magazine is empty, return pointer to it,
+ * or NULL if no empty magazine is available and cannot be allocated
+ *
+ * We have 2 magazines bound to processor.
+ * First try the current.
+ * If full, try the last.
+ * If full, put to magazines list.
+ *
+ */
+NO_TRACE static slab_magazine_t *make_empty_current_mag(slab_cache_t *cache)
+{
+    slab_magazine_t *cmag = cache->mag_cache[CPU->id].current;
+    slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last;
+
+    ASSERT(spinlock_locked(&cache->mag_cache[CPU->id].lock));
+
     if (cmag) {
         if (cmag->busy < cmag->size)
             return cmag;
-        if (lastmag && lastmag->busy < lastmag->size) {
+
+        if ((lastmag) && (lastmag->busy < lastmag->size)) {
             cache->mag_cache[CPU->id].last = cmag;
             cache->mag_cache[CPU->id].current = lastmag;
@@ -477 +495 @@
         }
     }
+
     /* current | last are full | nonexistent, allocate new */
-    /* We do not want to sleep just because of caching */
-    /* Especially we do not want reclaiming to start, as
-     * this would deadlock */
-    newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
+
+    /*
+     * We do not want to sleep just because of caching,
+     * especially we do not want reclaiming to start, as
+     * this would deadlock.
+     *
+     */
+    slab_magazine_t *newmag = slab_alloc(&mag_cache,
+        FRAME_ATOMIC | FRAME_NO_RECLAIM);
     if (!newmag)
         return NULL;
+
     newmag->size = SLAB_MAG_SIZE;
     newmag->busy = 0;
 
     /* Flush last to magazine list */
     if (lastmag)
         put_mag_to_cache(cache, lastmag);
 
     /* Move current as last, save new as current */
-    cache->mag_cache[CPU->id].last = cmag;
-    cache->mag_cache[CPU->id].current = newmag;
-
+    cache->mag_cache[CPU->id].last = cmag;
+    cache->mag_cache[CPU->id].current = newmag;
+
     return newmag;
 }
 
-/**
- * Put object into CPU-cache magazine
- *
- * @return 0 - success, -1 - could not get memory
- */
-static int magazine_obj_put(slab_cache_t *cache, void *obj)
-{
-    slab_magazine_t *mag;
-
+/** Put object into CPU-cache magazine
+ *
+ * @return 0 on success, -1 on no memory
+ *
+ */
+NO_TRACE static int magazine_obj_put(slab_cache_t *cache, void *obj)
+{
     if (!CPU)
         return -1;
 
     spinlock_lock(&cache->mag_cache[CPU->id].lock);
-
-    mag = make_empty_current_mag(cache);
+
+    slab_magazine_t *mag = make_empty_current_mag(cache);
     if (!mag) {
         spinlock_unlock(&cache->mag_cache[CPU->id].lock);
@@ -519 +542 @@
 
     mag->objs[mag->busy++] = obj;
 
     spinlock_unlock(&cache->mag_cache[CPU->id].lock);
+
     atomic_inc(&cache->cached_objs);
+
     return 0;
 }
 
-
-/**************************************/
+/************************/
 /* Slab cache functions */
-
-/** Return number of objects that fit in certain cache size */
-static unsigned int comp_objects(slab_cache_t *cache)
+/************************/
+
+/** Return number of objects that fit in certain cache size
+ *
+ */
+NO_TRACE static size_t comp_objects(slab_cache_t *cache)
 {
     if (cache->flags & SLAB_CACHE_SLINSIDE)
-        return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) /
-            cache->size;
-    else 
+        return ((PAGE_SIZE << cache->order)
+            - sizeof(slab_t)) / cache->size;
+    else
         return (PAGE_SIZE << cache->order) / cache->size;
 }
 
-/** Return wasted space in slab */
-static unsigned int badness(slab_cache_t *cache)
-{
-    unsigned int objects;
-    unsigned int ssize;
-
-    objects = comp_objects(cache);
-    ssize = PAGE_SIZE << cache->order;
+/** Return wasted space in slab
+ *
+ */
+NO_TRACE static size_t badness(slab_cache_t *cache)
+{
+    size_t objects = comp_objects(cache);
+    size_t ssize = PAGE_SIZE << cache->order;
+
     if (cache->flags & SLAB_CACHE_SLINSIDE)
         ssize -= sizeof(slab_t);
+
     return ssize - objects * cache->size;
 }
 
-/**
- * Initialize mag_cache structure in slab cache
- */
-static void make_magcache(slab_cache_t *cache)
-{
-    unsigned int i;
-
+/** Initialize mag_cache structure in slab cache
+ *
+ */
+NO_TRACE static bool make_magcache(slab_cache_t *cache)
+{
     ASSERT(_slab_initialized >= 2);
 
     cache->mag_cache = malloc(sizeof(slab_mag_cache_t) * config.cpu_count,
-        0);
+        FRAME_ATOMIC);
+    if (!cache->mag_cache)
+        return false;
+
+    size_t i;
     for (i = 0; i < config.cpu_count; i++) {
         memsetb(&cache->mag_cache[i], sizeof(cache->mag_cache[i]), 0);
         spinlock_initialize(&cache->mag_cache[i].lock,
-            "slab_maglock_cpu");
-    }
-}
-
-/** Initialize allocated memory as a slab cache */
-static void
-_slab_cache_create(slab_cache_t *cache, char *name, size_t size, size_t align,
-    int (*constructor)(void *obj, int kmflag), int (*destructor)(void *obj),
-    int flags)
-{
-    int pages;
-    ipl_t ipl;
-
+            "slab.cache.mag_cache[].lock");
+    }
+
+    return true;
+}
+
+/** Initialize allocated memory as a slab cache
+ *
+ */
+NO_TRACE static void _slab_cache_create(slab_cache_t *cache, const char *name,
+    size_t size, size_t align, int (*constructor)(void *obj,
+    unsigned int kmflag), size_t (*destructor)(void *obj), unsigned int flags)
+{
     memsetb(cache, sizeof(*cache), 0);
     cache->name = name;
-
-    if (align < sizeof(unative_t))
-        align = sizeof(unative_t);
+
+    if (align < sizeof(sysarg_t))
+        align = sizeof(sysarg_t);
+
     size = ALIGN_UP(size, align);
 
     cache->size = size;
-
     cache->constructor = constructor;
     cache->destructor = destructor;
     cache->flags = flags;
 
     list_initialize(&cache->full_slabs);
     list_initialize(&cache->partial_slabs);
     list_initialize(&cache->magazines);
-    spinlock_initialize(&cache->slablock, "slab_lock");
-    spinlock_initialize(&cache->maglock, "slab_maglock");
+
+    spinlock_initialize(&cache->slablock, "slab.cache.slablock");
+    spinlock_initialize(&cache->maglock, "slab.cache.maglock");
+
     if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
-        make_magcache(cache);
-
+        (void) make_magcache(cache);
+
     /* Compute slab sizes, object counts in slabs etc. */
     if (cache->size < SLAB_INSIDE_SIZE)
         cache->flags |= SLAB_CACHE_SLINSIDE;
 
     /* Minimum slab order */
-    pages = SIZE2FRAMES(cache->size);
+    size_t pages = SIZE2FRAMES(cache->size);
+
     /* We need the 2^order >= pages */
     if (pages == 1)
@@ -611 +644 @@
     else
         cache->order = fnzb(pages - 1) + 1;
-
-    while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
+
+    while (badness(cache) > SLAB_MAX_BADNESS(cache))
         cache->order += 1;
-    }
+
     cache->objects = comp_objects(cache);
+
     /* If info fits in, put it inside */
     if (badness(cache) > sizeof(slab_t))
         cache->flags |= SLAB_CACHE_SLINSIDE;
 
     /* Add cache to cache list */
-    ipl = interrupts_disable();
-    spinlock_lock(&slab_cache_lock);
-
+    irq_spinlock_lock(&slab_cache_lock, true);
     list_append(&cache->link, &slab_cache_list);
-
-    spinlock_unlock(&slab_cache_lock);
-    interrupts_restore(ipl);
-}
-
-/** Create slab cache */
-slab_cache_t *
-slab_cache_create(char *name, size_t size, size_t align,
-    int (*constructor)(void *obj, int kmflag), int (*destructor)(void *obj),
-    int flags)
-{
-    slab_cache_t *cache;
-
-    cache = slab_alloc(&slab_cache_cache, 0);
+    irq_spinlock_unlock(&slab_cache_lock, true);
+}
+
+/** Create slab cache
+ *
+ */
+slab_cache_t *slab_cache_create(const char *name, size_t size, size_t align,
+    int (*constructor)(void *obj, unsigned int kmflag),
+    size_t (*destructor)(void *obj), unsigned int flags)
+{
+    slab_cache_t *cache = slab_alloc(&slab_cache_cache, 0);
     _slab_cache_create(cache, name, size, align, constructor, destructor,
         flags);
+
     return cache;
 }
 
-/**
- * Reclaim space occupied by objects that are already free
+/** Reclaim space occupied by objects that are already free
  *
  * @param flags If contains SLAB_RECLAIM_ALL, do aggressive freeing
+ *
  * @return Number of freed pages
- */
-static size_t _slab_reclaim(slab_cache_t *cache, int flags)
-{
-    unsigned int i;
+ *
+ */
+NO_TRACE static size_t _slab_reclaim(slab_cache_t *cache, unsigned int flags)
+{
+    if (cache->flags & SLAB_CACHE_NOMAGAZINE)
+        return 0; /* Nothing to do */
+
+    /*
+     * We count up to original magazine count to avoid
+     * endless loop
+     */
+    atomic_count_t magcount = atomic_get(&cache->magazine_counter);
+
     slab_magazine_t *mag;
     size_t frames = 0;
-    int magcount;
-
-    if (cache->flags & SLAB_CACHE_NOMAGAZINE)
-        return 0; /* Nothing to do */
-
-    /* We count up to original magazine count to avoid
-     * endless loop
-     */
-    magcount = atomic_get(&cache->magazine_counter);
-    while (magcount-- && (mag=get_mag_from_cache(cache, 0))) {
-        frames += magazine_destroy(cache,mag);
-        if (!(flags & SLAB_RECLAIM_ALL) && frames)
+
+    while ((magcount--) && (mag = get_mag_from_cache(cache, 0))) {
+        frames += magazine_destroy(cache, mag);
+        if ((!(flags & SLAB_RECLAIM_ALL)) && (frames))
             break;
     }
@@ -673 +704 @@
         /* Free cpu-bound magazines */
         /* Destroy CPU magazines */
+        size_t i;
         for (i = 0; i < config.cpu_count; i++) {
             spinlock_lock(&cache->mag_cache[i].lock);
 
             mag = cache->mag_cache[i].current;
             if (mag)
@@ -685 +717 @@
                 frames += magazine_destroy(cache, mag);
             cache->mag_cache[i].last = NULL;
 
             spinlock_unlock(&cache->mag_cache[i].lock);
         }
     }
 
     return frames;
 }
 
-/** Check that there are no slabs and remove cache from system */
+/** Check that there are no slabs and remove cache from system
+ *
+ */
 void slab_cache_destroy(slab_cache_t *cache)
 {
-    ipl_t ipl;
-
-    /* First remove cache from link, so that we don't need
+    /*
+     * First remove cache from link, so that we don't need
      * to disable interrupts later
+     *
      */
-
-    ipl = interrupts_disable();
-    spinlock_lock(&slab_cache_lock);
-
+    irq_spinlock_lock(&slab_cache_lock, true);
     list_remove(&cache->link);
-
-    spinlock_unlock(&slab_cache_lock);
-    interrupts_restore(ipl);
-
-    /* Do not lock anything, we assume the software is correct and
-     * does not touch the cache when it decides to destroy it */
+    irq_spinlock_unlock(&slab_cache_lock, true);
+
+    /*
+     * Do not lock anything, we assume the software is correct and
+     * does not touch the cache when it decides to destroy it
+     *
+     */
 
     /* Destroy all magazines */
     _slab_reclaim(cache, SLAB_RECLAIM_ALL);
 
     /* All slabs must be empty */
-    if ( !list_empty(&cache->full_slabs) ||
-        !list_empty(&cache->partial_slabs))
+    if ((!list_empty(&cache->full_slabs)) ||
+        (!list_empty(&cache->partial_slabs)))
         panic("Destroying cache that is not empty.");
 
     if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
         free(cache->mag_cache);
+
     slab_free(&slab_cache_cache, cache);
 }
 
-/** Allocate new object from cache - if no flags given, always returns memory */
-void *slab_alloc(slab_cache_t *cache, int flags)
-{
-    ipl_t ipl;
+/** Allocate new object from cache - if no flags given, always returns memory
+ *
+ */
+void *slab_alloc(slab_cache_t *cache, unsigned int flags)
+{
+    /* Disable interrupts to avoid deadlocks with interrupt handlers */
+    ipl_t ipl = interrupts_disable();
+
     void *result = NULL;
 
-    /* Disable interrupts to avoid deadlocks with interrupt handlers */
-    ipl = interrupts_disable();
-
-    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
+    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
         result = magazine_obj_get(cache);
-    }
+
     if (!result)
         result = slab_obj_create(cache, flags);
 
     interrupts_restore(ipl);
 
     if (result)
         atomic_inc(&cache->allocated_objs);
 
     return result;
 }
 
-/** Return object to cache, use slab if known */
-static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
-{
-    ipl_t ipl;
-
-    ipl = interrupts_disable();
-
+/** Return object to cache, use slab if known
+ *
+ */
+NO_TRACE static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
+{
+    ipl_t ipl = interrupts_disable();
+
     if ((cache->flags & SLAB_CACHE_NOMAGAZINE) ||
-        magazine_obj_put(cache, obj)) {
+        (magazine_obj_put(cache, obj)))
         slab_obj_destroy(cache, obj, slab);
-
-    }
+
     interrupts_restore(ipl);
     atomic_dec(&cache->allocated_objs);
 }
 
-/** Return slab object to cache */
+/** Return slab object to cache
+ *
+ */
 void slab_free(slab_cache_t *cache, void *obj)
 {
@@ -771 +806 @@
 }
 
-/* Go through all caches and reclaim what is possible */
-size_t slab_reclaim(int flags)
-{
-    slab_cache_t *cache;
+/** Go through all caches and reclaim what is possible */
+size_t slab_reclaim(unsigned int flags)
+{
+    irq_spinlock_lock(&slab_cache_lock, true);
+
+    size_t frames = 0;
     link_t *cur;
-    size_t frames = 0;
-
-    spinlock_lock(&slab_cache_lock);
-
-    /* TODO: Add assert, that interrupts are disabled, otherwise
-     * memory allocation from interrupts can deadlock.
-     */
-
     for (cur = slab_cache_list.next; cur != &slab_cache_list;
         cur = cur->next) {
-        cache = list_get_instance(cur, slab_cache_t, link);
+        slab_cache_t *cache = list_get_instance(cur, slab_cache_t, link);
        frames += _slab_reclaim(cache, flags);
     }
-
-    spinlock_unlock(&slab_cache_lock);
-
+
+    irq_spinlock_unlock(&slab_cache_lock, true);
+
     return frames;
 }
 
-
-/* Print list of slabs */
+/* Print list of slabs
+ *
+ */
 void slab_print_list(void)
 {
-    int skip = 0;
-
-    printf("slab name size pages obj/pg slabs cached allocated"
-        " ctl\n");
-    printf("---------------- -------- ------ ------ ------ ------ ---------"
-        " ---\n");
-
+    printf("[slab name ] [size ] [pages ] [obj/pg] [slabs ]"
+        " [cached] [alloc ] [ctl]\n");
+
+    size_t skip = 0;
     while (true) {
-        slab_cache_t *cache;
-        link_t *cur;
-        ipl_t ipl;
-        int i;
-
         /*
          * We must not hold the slab_cache_lock spinlock when printing
@@ -834 +856 @@
          * statistics.
          */
-
-        ipl = interrupts_disable();
-        spinlock_lock(&slab_cache_lock);
-
+
+        irq_spinlock_lock(&slab_cache_lock, true);
+
+        link_t *cur;
+        size_t i;
         for (i = 0, cur = slab_cache_list.next;
-            i < skip && cur != &slab_cache_list;
-            i++, cur = cur->next)
-            ;
-
+            (i < skip) && (cur != &slab_cache_list);
+            i++, cur = cur->next);
+
         if (cur == &slab_cache_list) {
-            spinlock_unlock(&slab_cache_lock);
-            interrupts_restore(ipl);
+            irq_spinlock_unlock(&slab_cache_lock, true);
             break;
         }
 
         skip++;
-
-        cache = list_get_instance(cur, slab_cache_t, link);
-
-        char *name = cache->name;
+
+        slab_cache_t *cache = list_get_instance(cur, slab_cache_t, link);
+
+        const char *name = cache->name;
         uint8_t order = cache->order;
         size_t size = cache->size;
-        unsigned int objects = cache->objects;
+        size_t objects = cache->objects;
         long allocated_slabs = atomic_get(&cache->allocated_slabs);
         long cached_objs = atomic_get(&cache->cached_objs);
         long allocated_objs = atomic_get(&cache->allocated_objs);
-        int flags = cache->flags;
-
-        spinlock_unlock(&slab_cache_lock);
-        interrupts_restore(ipl);
-
-        printf("%-16s %8" PRIs " %6d %6u %6ld %6ld %9ld %-3s\n",
+        unsigned int flags = cache->flags;
+
+        irq_spinlock_unlock(&slab_cache_lock, true);
+
+        printf("%-18s %8zu %8u %8zu %8ld %8ld %8ld %-5s\n",
             name, size, (1 << order), objects, allocated_slabs,
             cached_objs, allocated_objs,
@@ -874 +894 @@
 void slab_cache_init(void)
 {
-    int i, size;
-
     /* Initialize magazine cache */
     _slab_cache_create(&mag_cache, "slab_magazine",
@@ -881 +899 @@
         sizeof(uintptr_t), NULL, NULL, SLAB_CACHE_NOMAGAZINE |
         SLAB_CACHE_SLINSIDE);
+
     /* Initialize slab_cache cache */
     _slab_cache_create(&slab_cache_cache, "slab_cache",
         sizeof(slab_cache_cache), sizeof(uintptr_t), NULL, NULL,
         SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
+
     /* Initialize external slab cache */
     slab_extern_cache = slab_cache_create("slab_extern", sizeof(slab_t), 0,
         NULL, NULL, SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);
 
     /* Initialize structures for malloc */
+    size_t i;
+    size_t size;
+
     for (i = 0, size = (1 << SLAB_MIN_MALLOC_W);
         i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
@@ -896 +919 @@
             NULL, NULL, SLAB_CACHE_MAGDEFERRED);
     }
-#ifdef CONFIG_DEBUG
+
+#ifdef CONFIG_DEBUG
     _slab_initialized = 1;
 #endif
@@ -904 +928 @@
  *
  * Kernel calls this function, when it knows the real number of
- * processors.
- * Allocate slab for cpucache and enable it on all existing
- * slabs that are SLAB_CACHE_MAGDEFERRED
+ * processors. Allocate slab for cpucache and enable it on all
+ * existing slabs that are SLAB_CACHE_MAGDEFERRED
+ *
  */
 void slab_enable_cpucache(void)
 {
-    link_t *cur;
-    slab_cache_t *s;
-
 #ifdef CONFIG_DEBUG
     _slab_initialized = 2;
 #endif
-
-    spinlock_lock(&slab_cache_lock);
-
+
+    irq_spinlock_lock(&slab_cache_lock, false);
+
+    link_t *cur;
     for (cur = slab_cache_list.next; cur != &slab_cache_list;
-        cur = cur->next) {
-        s = list_get_instance(cur, slab_cache_t, link);
-        if ((s->flags & SLAB_CACHE_MAGDEFERRED) !=
+        cur = cur->next) {
+        slab_cache_t *slab = list_get_instance(cur, slab_cache_t, link);
+        if ((slab->flags & SLAB_CACHE_MAGDEFERRED) !=
            SLAB_CACHE_MAGDEFERRED)
            continue;
-        make_magcache(s);
-        s->flags &= ~SLAB_CACHE_MAGDEFERRED;
-    }
-
-    spinlock_unlock(&slab_cache_lock);
-}
-
-/**************************************/
-/* kalloc/kfree functions */
-void *malloc(unsigned int size, int flags)
+
+        (void) make_magcache(slab);
+        slab->flags &= ~SLAB_CACHE_MAGDEFERRED;
+    }
+
+    irq_spinlock_unlock(&slab_cache_lock, false);
+}
+
+void *malloc(size_t size, unsigned int flags)
 {
     ASSERT(_slab_initialized);
@@ -941 +962 @@
     if (size < (1 << SLAB_MIN_MALLOC_W))
         size = (1 << SLAB_MIN_MALLOC_W);
-
-    int idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;
-
+
+    uint8_t idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;
+
     return slab_alloc(malloc_caches[idx], flags);
 }
 
-void *realloc(void *ptr, unsigned int size, int flags)
+void *realloc(void *ptr, size_t size, unsigned int flags)
 {
     ASSERT(_slab_initialized);
@@ -957 +978 @@
         if (size < (1 << SLAB_MIN_MALLOC_W))
             size = (1 << SLAB_MIN_MALLOC_W);
-        int idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;
+        uint8_t idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;
 
         new_ptr = slab_alloc(malloc_caches[idx], flags);
@@ -978 +999 @@
     if (!ptr)
         return;
 
     slab_t *slab = obj2slab(ptr);
     _slab_free(slab->cache, ptr, slab);
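The hunks above also change the public prototypes (const char *name, unsigned int flags, int (*constructor)(void *, unsigned int), size_t (*destructor)(void *)). As a quick orientation aid, here is a minimal sketch of how a kernel subsystem would use the cache API after this changeset; the cache name, object type, and the demo_* helpers are hypothetical, and the include is assumed to be the usual <mm/slab.h>.

/* Hypothetical client of the slab cache API as it looks after rab6f2507. */
#include <mm/slab.h>

typedef struct {
    int id;
    void *payload;
} demo_obj_t;                 /* hypothetical object type */

static slab_cache_t *demo_cache;

/* Constructor: returns 0 on success; a non-zero result makes
 * slab_obj_create() undo the allocation (see the hunk above). */
static int demo_ctor(void *obj, unsigned int kmflag)
{
    (void) kmflag;
    demo_obj_t *d = (demo_obj_t *) obj;
    d->id = 0;
    d->payload = NULL;
    return 0;
}

/* Destructor: returns the number of frames it freed (none here). */
static size_t demo_dtor(void *obj)
{
    (void) obj;
    return 0;
}

void demo_init(void)
{
    /* Per-CPU magazines stay enabled because SLAB_CACHE_NOMAGAZINE is not set. */
    demo_cache = slab_cache_create("demo_cache", sizeof(demo_obj_t), 0,
        demo_ctor, demo_dtor, 0);

    /* With no flags, slab_alloc() always returns memory (per its doc comment). */
    demo_obj_t *d = slab_alloc(demo_cache, 0);
    slab_free(demo_cache, d);

    /* The cache must be empty before it is destroyed. */
    slab_cache_destroy(demo_cache);
}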
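The malloc() and realloc() hunks at the end pick a cache with fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1, i.e. the smallest power-of-two cache that fits the request. Below is a standalone illustration of that mapping, assuming SLAB_MIN_MALLOC_W is 4 (to match the smallest cache name "malloc-16") and using a plain most-significant-bit helper in place of the kernel's fnzb():

#include <stdio.h>
#include <stddef.h>

#define SLAB_MIN_MALLOC_W  4   /* assumed: smallest cache is malloc-16 = 2^4 */

/* Stand-in for the kernel's fnzb(): index of the most significant set bit. */
static unsigned int msb(size_t v)
{
    unsigned int i = 0;
    while (v >>= 1)
        i++;
    return i;
}

int main(void)
{
    size_t sizes[] = { 1, 16, 17, 100, 128, 4000 };
    for (size_t k = 0; k < sizeof(sizes) / sizeof(sizes[0]); k++) {
        size_t size = sizes[k];
        if (size < (1 << SLAB_MIN_MALLOC_W))
            size = (1 << SLAB_MIN_MALLOC_W);

        /* Same formula as in malloc()/realloc() above. */
        unsigned int idx = msb(size - 1) - SLAB_MIN_MALLOC_W + 1;
        printf("request %4zu -> malloc-%zu cache (index %u)\n",
            sizes[k], (size_t) 1 << (idx + SLAB_MIN_MALLOC_W), idx);
    }
    return 0;
}

For example, a 17-byte request lands in malloc-32 (index 1), while a 16-byte request stays in malloc-16 (index 0).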
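comp_objects() and badness() above decide how many objects fit into one slab and how much space is wasted; _slab_cache_create() keeps raising cache->order while the waste exceeds SLAB_MAX_BADNESS(cache). A worked example of that arithmetic for the SLAB_CACHE_SLINSIDE case, with an assumed 4 KiB page and an assumed 40-byte slab_t (both values are architecture dependent):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE      4096   /* assumed for the example */
#define SIZEOF_SLAB_T  40     /* assumed; the real value is sizeof(slab_t) */

int main(void)
{
    size_t object_size = 192;   /* hypothetical cache->size, already aligned */
    unsigned int order = 0;     /* one page per slab */
    size_t ssize = PAGE_SIZE << order;

    /* SLAB_CACHE_SLINSIDE: the slab descriptor is kept inside the data area,
     * so it is subtracted before dividing the slab among objects. */
    size_t objects = (ssize - SIZEOF_SLAB_T) / object_size;
    size_t badness = (ssize - SIZEOF_SLAB_T) - objects * object_size;

    printf("%zu objects of %zu bytes per %zu-byte slab, %zu bytes wasted\n",
        objects, object_size, ssize, badness);
    return 0;
}

With these assumed numbers the result is 21 objects per slab with 24 bytes wasted, comfortably below any sensible badness limit, so order 0 would be kept.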
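A pattern repeated across the changeset is replacing the manual interrupts_disable()/spinlock_lock() pair around slab_cache_lock with the irq-aware spinlock calls. The two wrapper functions below are hypothetical and only condense that conversion from the _slab_cache_create() hunks; note that the lock itself also changes type (SPINLOCK_INITIALIZE vs. IRQ_SPINLOCK_STATIC_INITIALIZE), so the two variants belong to the two different revisions and would not compile side by side.

/* Condensed from the hunks above; kernel context, not standalone code. */

/* r98000fb: interrupt state handled by hand around a plain spinlock. */
static void add_cache_to_list_old(slab_cache_t *cache)
{
    ipl_t ipl = interrupts_disable();
    spinlock_lock(&slab_cache_lock);
    list_append(&cache->link, &slab_cache_list);
    spinlock_unlock(&slab_cache_lock);
    interrupts_restore(ipl);
}

/* rab6f2507: the irq spinlock does both steps in one call; the boolean
 * argument says whether interrupts should be disabled and later restored. */
static void add_cache_to_list_new(slab_cache_t *cache)
{
    irq_spinlock_lock(&slab_cache_lock, true);
    list_append(&cache->link, &slab_cache_list);
    irq_spinlock_unlock(&slab_cache_lock, true);
}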