Changeset 428aabf in mainline for generic/src/mm/slab.c
- Timestamp:
- 2006-02-04T15:01:56Z (19 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 55ab0f1
- Parents:
- 10e16a7
- File:
- 1 edited
Legend:
- ' ' Unmodified
- '+' Added
- '-' Removed
generic/src/mm/slab.c
--- generic/src/mm/slab.c (r10e16a7)
+++ generic/src/mm/slab.c (r428aabf)
@@ -86,3 +86,6 @@
  * magazine cache.
  *
+ * - it might be good to add granularity of locks even to slab level,
+ *   we could then try_spinlock over all partial slabs and thus improve
+ *   scalability even on slab level
  */
@@ -219,6 +222,4 @@
  * Return object to slab and call a destructor
  *
- * Assume the cache->lock is held;
- *
  * @param slab If the caller knows directly slab of the object, otherwise NULL
  *
@@ -234,4 +235,6 @@
 
 	ASSERT(slab->cache == cache);
+
+	spinlock_lock(&cache->slablock);
 
 	*((int *)obj) = slab->nextavail;
@@ -248,9 +251,11 @@
 		/* Free associated memory */
 		list_remove(&slab->link);
-		/* Avoid deadlock */
-		spinlock_unlock(&cache->lock);
+		/* This should not produce deadlock, as
+		 * magazine is always allocated with NO reclaim,
+		 * keep all locks */
 		frames = slab_space_free(cache, slab);
-		spinlock_lock(&cache->lock);
 	}
+
+	spinlock_unlock(&cache->slablock);
 
 	return frames;
@@ -260,6 +265,4 @@
  * Take new object from slab or create new if needed
  *
- * Assume cache->lock is held.
- *
  * @return Object address or null
  */
@@ -268,4 +271,6 @@
 	slab_t *slab;
 	void *obj;
+
+	spinlock_lock(&cache->slablock);
 
 	if (list_empty(&cache->partial_slabs)) {
@@ -276,10 +281,9 @@
 		 * that's why we should get recursion at most 1-level deep
 		 */
-		spinlock_unlock(&cache->lock);
+		spinlock_unlock(&cache->slablock);
 		slab = slab_space_alloc(cache, flags);
-		spinlock_lock(&cache->lock);
-		if (!slab) {
-			return NULL;
-		}
+		spinlock_lock(&cache->slablock);
+		if (!slab)
+			goto err;
 	} else {
 		slab = list_get_instance(cache->partial_slabs.next,
@@ -295,5 +299,10 @@
 	else
 		list_prepend(&slab->link, &cache->partial_slabs);
+
+	spinlock_unlock(&cache->slablock);
 	return obj;
+err:
+	spinlock_unlock(&cache->slablock);
+	return NULL;
 }
 
@@ -303,6 +312,4 @@
 /**
  * Free all objects in magazine and free memory associated with magazine
- *
- * Assume cache->lock is held
  *
  * @return Number of freed pages
@@ -346,7 +353,7 @@
 	}
 	/* Local magazines are empty, import one from magazine list */
-	spinlock_lock(&cache->lock);
+	spinlock_lock(&cache->maglock);
 	if (list_empty(&cache->magazines)) {
-		spinlock_unlock(&cache->lock);
+		spinlock_unlock(&cache->maglock);
 		return NULL;
 	}
@@ -355,5 +362,5 @@
 			link);
 	list_remove(&newmag->link);
-	spinlock_unlock(&cache->lock);
+	spinlock_unlock(&cache->maglock);
 
 	if (lastmag)
@@ -432,7 +439,7 @@
 	/* Flush last to magazine list */
 	if (lastmag) {
-		spinlock_lock(&cache->lock);
+		spinlock_lock(&cache->maglock);
 		list_prepend(&lastmag->link, &cache->magazines);
-		spinlock_unlock(&cache->lock);
+		spinlock_unlock(&cache->maglock);
 	}
 	/* Move current as last, save new as current */
@@ -525,5 +532,6 @@
 	list_initialize(&cache->partial_slabs);
 	list_initialize(&cache->magazines);
-	spinlock_initialize(&cache->lock, "cachelock");
+	spinlock_initialize(&cache->slablock, "slab_lock");
+	spinlock_initialize(&cache->maglock, "slab_maglock");
 	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
 		for (i=0; i < config.cpu_count; i++) {
@@ -531,5 +539,5 @@
 				sizeof(cache->mag_cache[i]), 0);
 			spinlock_initialize(&cache->mag_cache[i].lock,
-				"cpucachelock");
+				"slab_maglock_cpu");
 		}
 	}
@@ -595,5 +603,5 @@
 		spinlock_lock(&cache->mag_cache[i].lock);
 	}
-	spinlock_lock(&cache->lock);
+	spinlock_lock(&cache->maglock);
 
 	if (flags & SLAB_RECLAIM_ALL) {
@@ -612,4 +620,9 @@
 		}
 	}
+	/* We can release the cache locks now */
+	if (flags & SLAB_RECLAIM_ALL) {
+		for (i=0; i < config.cpu_count; i++)
+			spinlock_unlock(&cache->mag_cache[i].lock);
+	}
 	/* Destroy full magazines */
 	cur=cache->magazines.prev;
@@ -627,10 +640,5 @@
 	}
 
-	spinlock_unlock(&cache->lock);
-	/* We can release the cache locks now */
-	if (flags & SLAB_RECLAIM_ALL) {
-		for (i=0; i < config.cpu_count; i++)
-			spinlock_unlock(&cache->mag_cache[i].lock);
-	}
+	spinlock_unlock(&cache->maglock);
 
 	return frames;
@@ -671,9 +679,6 @@
 	result = magazine_obj_get(cache);
 
-	if (!result) {
-		spinlock_lock(&cache->lock);
+	if (!result)
 		result = slab_obj_create(cache, flags);
-		spinlock_unlock(&cache->lock);
-	}
 
 	interrupts_restore(ipl);
@@ -694,7 +699,7 @@
 	if ((cache->flags & SLAB_CACHE_NOMAGAZINE) \
 	    || magazine_obj_put(cache, obj)) {
-		spinlock_lock(&cache->lock);
+
 		slab_obj_destroy(cache, obj, slab);
-		spinlock_unlock(&cache->lock);
+
 	}
 	interrupts_restore(ipl);
@@ -716,4 +721,8 @@
 
 	spinlock_lock(&slab_cache_lock);
+
+	/* TODO: Add assert, that interrupts are disabled, otherwise
+	 * memory allocation from interrupts can deadlock.
+	 */
 
 	for (cur = slab_cache_list.next;cur!=&slab_cache_list; cur=cur->next) {
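The substance of this changeset is splitting the single cache-wide cache->lock into two finer-grained spinlocks: slablock, which guards the slab lists, and maglock, which guards the shared magazine list. Two idioms in the new code are worth spelling out: slab_obj_create() now drops slablock around the call to slab_space_alloc(), so the allocator's at-most-one-level recursion cannot deadlock on a lock it already holds, and its failure path funnels through a single goto err unlock site. Below is a minimal userspace sketch of those two idioms, assuming POSIX spinlocks rather than the kernel's spinlock_t; cache_t, slab_t and obj_create_sketch() here are illustrative stand-ins, not the HelenOS definitions.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the kernel structures; only the locking
 * pattern mirrors the changeset. */
typedef struct slab {
	struct slab *next;
} slab_t;

typedef struct {
	pthread_spinlock_t slablock;  /* guards the slab lists */
	pthread_spinlock_t maglock;   /* guards the shared magazine list */
	slab_t *partial_slabs;
} cache_t;

/* Stand-in for slab_space_alloc(); it may itself allocate memory and
 * therefore must not be entered with slablock held. */
static slab_t *slab_space_alloc(cache_t *cache)
{
	(void) cache;
	return malloc(sizeof(slab_t));
}

static slab_t *obj_create_sketch(cache_t *cache)
{
	slab_t *slab;

	pthread_spin_lock(&cache->slablock);
	if (!cache->partial_slabs) {
		/* Release slablock across the allocation, as the changeset
		 * does, so bounded recursion cannot self-deadlock. */
		pthread_spin_unlock(&cache->slablock);
		slab = slab_space_alloc(cache);
		pthread_spin_lock(&cache->slablock);
		if (!slab)
			goto err;    /* single unlock point on failure */
	} else {
		slab = cache->partial_slabs;
		cache->partial_slabs = slab->next;
	}
	pthread_spin_unlock(&cache->slablock);
	return slab;
err:
	pthread_spin_unlock(&cache->slablock);
	return NULL;
}

int main(void)
{
	cache_t cache;
	cache.partial_slabs = NULL;
	pthread_spin_init(&cache.slablock, PTHREAD_PROCESS_PRIVATE);
	pthread_spin_init(&cache.maglock, PTHREAD_PROCESS_PRIVATE);

	slab_t *s = obj_create_sketch(&cache);
	printf("allocated slab at %p\n", (void *) s);
	free(s);

	pthread_spin_destroy(&cache.slablock);
	pthread_spin_destroy(&cache.maglock);
	return 0;
}

The reclaim path makes a related trade that the sketch does not model: after flushing the per-CPU magazines it now releases the mag_cache[i].lock spinlocks before walking cache->magazines under maglock, so the per-CPU locks are held no longer than strictly needed.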