Changes in / [8264867:99d05e1] in mainline
Location: kernel
Files: 9 edited
Legend:
- Unmodified (leading space)
- Added (leading '+')
- Removed (leading '-')
kernel/arch/amd64/include/mm/page.h
r8264867 r99d05e1
@@ -31,4 +31,13 @@
  */
 /** @file
+ */
+
+/** Paging on AMD64
+ *
+ * The space is divided in positive numbers (uspace) and
+ * negative numbers (kernel). The 'negative' space starting
+ * with 0xffff800000000000 and ending with 0xffffffffffffffff
+ * is identically mapped physical memory.
+ *
  */
 
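The header comment added here spells out the AMD64 split: user space occupies the positive (low) half of the canonical address space, while everything from 0xffff800000000000 upwards is an identity mapping of physical memory. A minimal, stand-alone sketch of what such an offset mapping means for address conversion follows; the pa2ka/ka2pa helper names and the way the base constant is applied are assumptions for illustration, not the kernel's actual PA2KA/KA2PA definitions (PA2KA itself appears in the page_pt.c hunks below).

/* Sketch only: offset-based conversion between physical addresses and the
 * identity-mapped kernel region described in the comment above.  The base
 * constant and helper names are assumptions made for this illustration. */
#include <stdint.h>
#include <stdio.h>

#define KERNEL_MAP_BASE UINT64_C(0xffff800000000000)

static inline uint64_t pa2ka(uint64_t pa)
{
    return pa + KERNEL_MAP_BASE;    /* physical -> kernel virtual */
}

static inline uint64_t ka2pa(uint64_t ka)
{
    return ka - KERNEL_MAP_BASE;    /* kernel virtual -> physical */
}

int main(void)
{
    uint64_t frame = UINT64_C(0x200000);    /* an example physical address */
    printf("pa=%#llx -> ka=%#llx -> pa=%#llx\n",
        (unsigned long long) frame,
        (unsigned long long) pa2ka(frame),
        (unsigned long long) ka2pa(pa2ka(frame)));
    return 0;
}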
kernel/genarch/src/mm/page_pt.c
r8264867 r99d05e1
@@ -48,5 +48,4 @@
 #include <align.h>
 #include <macros.h>
-#include <bitops.h>
 
 static void pt_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int);
@@ -293,18 +292,4 @@
 }
 
-/** Return the size of the region mapped by a single PTL0 entry.
- *
- * @return Size of the region mapped by a single PTL0 entry.
- */
-static uintptr_t ptl0_step_get(void)
-{
-    size_t va_bits;
-
-    va_bits = fnzb(PTL0_ENTRIES) + fnzb(PTL1_ENTRIES) + fnzb(PTL2_ENTRIES) +
-        fnzb(PTL3_ENTRIES) + PAGE_WIDTH;
-
-    return 1UL << (va_bits - fnzb(PTL0_ENTRIES));
-}
-
 /** Make the mappings in the given range global accross all address spaces.
  *
@@ -324,5 +309,5 @@
 {
     uintptr_t ptl0 = PA2KA((uintptr_t) AS_KERNEL->genarch.page_table);
-    uintptr_t ptl0_step = ptl0_step_get();
+    uintptr_t ptl0step = (((uintptr_t) -1) / PTL0_ENTRIES) + 1;
     size_t order;
     uintptr_t addr;
@@ -336,8 +321,9 @@
 #endif
 
+    ASSERT(ispwr2(ptl0step));
     ASSERT(size > 0);
 
-    for (addr = ALIGN_DOWN(base, ptl0_step); addr - 1 < base + size - 1;
-        addr += ptl0_step) {
+    for (addr = ALIGN_DOWN(base, ptl0step); addr - 1 < base + size - 1;
+        addr += ptl0step) {
         uintptr_t l1;
 
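The removed ptl0_step_get() derived the per-PTL0-entry span from the bit widths of the individual table levels; the expression that remains, (((uintptr_t) -1) / PTL0_ENTRIES) + 1, instead splits the whole uintptr_t range evenly among the PTL0 entries, which is what the new ASSERT(ispwr2(ptl0step)) double-checks. A stand-alone sketch of what that expression evaluates to, assuming a typical 512-entry top-level table (a value this changeset does not itself state):

/* Stand-alone illustration of the retained ptl0step expression.  PTL0_ENTRIES
 * is an assumed example value (512, the usual AMD64 top-level table size). */
#include <stdint.h>
#include <stdio.h>

#define PTL0_ENTRIES 512

int main(void)
{
    uintptr_t ptl0step = (((uintptr_t) -1) / PTL0_ENTRIES) + 1;

    /* On a 64-bit target this prints 0x80000000000000, i.e. 2^64 / 512 = 2^55;
     * the result is a power of two, as the added ASSERT(ispwr2()) expects. */
    printf("ptl0step = %#zx\n", (size_t) ptl0step);
    return 0;
}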
kernel/generic/include/lib/ra.h
r8264867 r99d05e1
@@ -42,5 +42,5 @@
 
 typedef struct {
-    IRQ_SPINLOCK_DECLARE(lock);
+    SPINLOCK_DECLARE(lock);
     list_t spans;  /**< List of arena's spans. */
 } ra_arena_t;
kernel/generic/include/mm/slab.h
r8264867 r99d05e1
@@ -81,5 +81,5 @@
     slab_magazine_t *current;
     slab_magazine_t *last;
-    IRQ_SPINLOCK_DECLARE(lock);
+    SPINLOCK_DECLARE(lock);
 } slab_mag_cache_t;
 
@@ -113,8 +113,8 @@
     list_t full_slabs;     /**< List of full slabs */
     list_t partial_slabs;  /**< List of partial slabs */
-    IRQ_SPINLOCK_DECLARE(slablock);
+    SPINLOCK_DECLARE(slablock);
     /* Magazines */
     list_t magazines;  /**< List o full magazines */
-    IRQ_SPINLOCK_DECLARE(maglock);
+    SPINLOCK_DECLARE(maglock);
 
     /** CPU cache */
kernel/generic/src/lib/ra.c
r8264867 r99d05e1
@@ -185,5 +185,5 @@
         return NULL;
 
-    irq_spinlock_initialize(&arena->lock, "arena_lock");
+    spinlock_initialize(&arena->lock, "arena_lock");
     list_initialize(&arena->spans);
 
@@ -209,7 +209,7 @@
 
     /* TODO: check for overlaps */
-    irq_spinlock_lock(&arena->lock, true);
+    spinlock_lock(&arena->lock);
     list_append(&span->span_link, &arena->spans);
-    irq_spinlock_unlock(&arena->lock, true);
+    spinlock_unlock(&arena->lock);
     return true;
 }
@@ -390,5 +390,5 @@
     ASSERT(ispwr2(alignment));
 
-    irq_spinlock_lock(&arena->lock, true);
+    spinlock_lock(&arena->lock);
     list_foreach(arena->spans, cur) {
         ra_span_t *span = list_get_instance(cur, ra_span_t, span_link);
@@ -398,5 +398,5 @@
             break;
     }
-    irq_spinlock_unlock(&arena->lock, true);
+    spinlock_unlock(&arena->lock);
 
     return base;
@@ -406,5 +406,5 @@
 void ra_free(ra_arena_t *arena, uintptr_t base, size_t size)
 {
-    irq_spinlock_lock(&arena->lock, true);
+    spinlock_lock(&arena->lock);
     list_foreach(arena->spans, cur) {
         ra_span_t *span = list_get_instance(cur, ra_span_t, span_link);
@@ -412,9 +412,9 @@
         if (iswithin(span->base, span->size, base, size)) {
             ra_span_free(span, base, size);
-            irq_spinlock_unlock(&arena->lock, true);
+            spinlock_unlock(&arena->lock);
             return;
         }
     }
-    irq_spinlock_unlock(&arena->lock, true);
+    spinlock_unlock(&arena->lock);
 
     panic("Freeing to wrong arena (base=%" PRIxn ", size=%" PRIdn ").",
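Across ra.c and slab.c the changeset swaps the interrupt-disabling spinlock wrappers for plain spinlocks. The fragment below merely juxtaposes the two call patterns as they appear in the hunks above and is not stand-alone code; the reading of irq_spinlock_lock()'s second argument (true meaning "disable interrupts and restore the previous state on unlock") reflects the usual HelenOS convention and is not spelled out in this diff.

/* Before (r8264867 side): the wrapper may disable interrupts itself; passing
 * 'true' asks it to do so and to restore the previous state on unlock. */
irq_spinlock_lock(&arena->lock, true);
list_append(&span->span_link, &arena->spans);
irq_spinlock_unlock(&arena->lock, true);

/* After (r99d05e1 side): a plain spinlock; the interrupt state is whatever
 * the caller has already set up. */
spinlock_lock(&arena->lock);
list_append(&span->span_link, &arena->spans);
spinlock_unlock(&arena->lock);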
kernel/generic/src/mm/frame.c
r8264867 r99d05e1
@@ -1086,8 +1086,3 @@
 #endif
 
-    /*
-     * Since the mem_avail_mtx is an active mutex, we need to disable interrupts
-     * to prevent deadlock with TLB shootdown.
-     */
-    ipl_t ipl = interrupts_disable();
     mutex_lock(&mem_avail_mtx);
@@ -1103,5 +1098,4 @@
 
     mutex_unlock(&mem_avail_mtx);
-    interrupts_restore(ipl);
 
 #ifdef CONFIG_DEBUG
@@ -1167,11 +1161,4 @@
      * Signal that some memory has been freed.
      */
-
-
-    /*
-     * Since the mem_avail_mtx is an active mutex, we need to disable interrupts
-     * to prevent deadlock with TLB shootdown.
-     */
-    ipl_t ipl = interrupts_disable();
     mutex_lock(&mem_avail_mtx);
     if (mem_avail_req > 0)
@@ -1183,5 +1170,4 @@
     }
     mutex_unlock(&mem_avail_mtx);
-    interrupts_restore(ipl);
 
     if (!(flags & FRAME_NO_RESERVE))
kernel/generic/src/mm/slab.c
r8264867 r99d05e1
@@ -264,5 +264,5 @@
         freed = cache->destructor(obj);
 
-    irq_spinlock_lock(&cache->slablock, true);
+    spinlock_lock(&cache->slablock);
     ASSERT(slab->available < cache->objects);
 
@@ -275,5 +275,5 @@
         /* Free associated memory */
         list_remove(&slab->link);
-        irq_spinlock_unlock(&cache->slablock, true);
+        spinlock_unlock(&cache->slablock);
 
         return freed + slab_space_free(cache, slab);
@@ -284,5 +284,5 @@
     }
 
-    irq_spinlock_unlock(&cache->slablock, true);
+    spinlock_unlock(&cache->slablock);
     return freed;
 }
@@ -295,5 +295,5 @@
 NO_TRACE static void *slab_obj_create(slab_cache_t *cache, unsigned int flags)
 {
-    irq_spinlock_lock(&cache->slablock, true);
+    spinlock_lock(&cache->slablock);
 
     slab_t *slab;
@@ -308,10 +308,10 @@
          *
          */
-        irq_spinlock_unlock(&cache->slablock, true);
+        spinlock_unlock(&cache->slablock);
         slab = slab_space_alloc(cache, flags);
         if (!slab)
             return NULL;
 
-        irq_spinlock_lock(&cache->slablock, true);
+        spinlock_lock(&cache->slablock);
     } else {
         slab = list_get_instance(list_first(&cache->partial_slabs),
@@ -329,5 +329,5 @@
         list_prepend(&slab->link, &cache->partial_slabs);
 
-    irq_spinlock_unlock(&cache->slablock, true);
+    spinlock_unlock(&cache->slablock);
 
     if ((cache->constructor) && (cache->constructor(obj, flags))) {
@@ -355,5 +355,5 @@
     link_t *cur;
 
-    irq_spinlock_lock(&cache->maglock, true);
+    spinlock_lock(&cache->maglock);
     if (!list_empty(&cache->magazines)) {
         if (first)
@@ -366,6 +366,6 @@
         atomic_dec(&cache->magazine_counter);
     }
-    irq_spinlock_unlock(&cache->maglock, true);
 
+    spinlock_unlock(&cache->maglock);
     return mag;
 }
@@ -377,9 +377,9 @@
     slab_magazine_t *mag)
 {
-    irq_spinlock_lock(&cache->maglock, true);
+    spinlock_lock(&cache->maglock);
 
     list_prepend(&mag->link, &cache->magazines);
     atomic_inc(&cache->magazine_counter);
 
-    irq_spinlock_unlock(&cache->maglock, true);
+    spinlock_unlock(&cache->maglock);
 
@@ -414,5 +414,5 @@
     slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last;
 
-    ASSERT(irq_spinlock_locked(&cache->mag_cache[CPU->id].lock));
+    ASSERT(spinlock_locked(&cache->mag_cache[CPU->id].lock));
 
     if (cmag) { /* First try local CPU magazines */
@@ -451,14 +451,14 @@
         return NULL;
 
-    irq_spinlock_lock(&cache->mag_cache[CPU->id].lock, true);
+    spinlock_lock(&cache->mag_cache[CPU->id].lock);
 
     slab_magazine_t *mag = get_full_current_mag(cache);
     if (!mag) {
-        irq_spinlock_unlock(&cache->mag_cache[CPU->id].lock, true);
+        spinlock_unlock(&cache->mag_cache[CPU->id].lock);
         return NULL;
     }
 
     void *obj = mag->objs[--mag->busy];
-    irq_spinlock_unlock(&cache->mag_cache[CPU->id].lock, true);
+    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
 
     atomic_dec(&cache->cached_objs);
@@ -481,5 +481,5 @@
     slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last;
 
-    ASSERT(irq_spinlock_locked(&cache->mag_cache[CPU->id].lock));
+    ASSERT(spinlock_locked(&cache->mag_cache[CPU->id].lock));
 
     if (cmag) {
@@ -531,9 +531,9 @@
         return -1;
 
-    irq_spinlock_lock(&cache->mag_cache[CPU->id].lock, true);
+    spinlock_lock(&cache->mag_cache[CPU->id].lock);
 
     slab_magazine_t *mag = make_empty_current_mag(cache);
     if (!mag) {
-        irq_spinlock_unlock(&cache->mag_cache[CPU->id].lock, true);
+        spinlock_unlock(&cache->mag_cache[CPU->id].lock);
         return -1;
     }
@@ -541,5 +541,5 @@
     mag->objs[mag->busy++] = obj;
 
-    irq_spinlock_unlock(&cache->mag_cache[CPU->id].lock, true);
+    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
 
     atomic_inc(&cache->cached_objs);
@@ -593,5 +593,5 @@
         for (i = 0; i < config.cpu_count; i++) {
             memsetb(&cache->mag_cache[i], sizeof(cache->mag_cache[i]), 0);
-            irq_spinlock_initialize(&cache->mag_cache[i].lock,
+            spinlock_initialize(&cache->mag_cache[i].lock,
                 "slab.cache.mag_cache[].lock");
         }
@@ -624,6 +624,6 @@
     list_initialize(&cache->magazines);
 
-    irq_spinlock_initialize(&cache->slablock, "slab.cache.slablock");
-    irq_spinlock_initialize(&cache->maglock, "slab.cache.maglock");
+    spinlock_initialize(&cache->slablock, "slab.cache.slablock");
+    spinlock_initialize(&cache->maglock, "slab.cache.maglock");
 
     if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
@@ -704,5 +704,5 @@
         size_t i;
         for (i = 0; i < config.cpu_count; i++) {
-            irq_spinlock_lock(&cache->mag_cache[i].lock, true);
+            spinlock_lock(&cache->mag_cache[i].lock);
 
             mag = cache->mag_cache[i].current;
@@ -716,5 +716,5 @@
             cache->mag_cache[i].last = NULL;
 
-            irq_spinlock_unlock(&cache->mag_cache[i].lock, true);
+            spinlock_unlock(&cache->mag_cache[i].lock);
         }
     }
kernel/generic/src/synch/mutex.c
r8264867 r99d05e1
@@ -40,5 +40,4 @@
 #include <debug.h>
 #include <arch.h>
-#include <stacktrace.h>
 
 /** Initialize mutex.
@@ -88,17 +87,6 @@
         ASSERT(!(flags & SYNCH_FLAGS_INTERRUPTIBLE));
 
-        unsigned int cnt = 0;
-        bool deadlock_reported = false;
         do {
-            if (cnt++ > DEADLOCK_THRESHOLD) {
-                printf("cpu%u: looping on active mutex %p\n",
-                    CPU->id, mtx);
-                stack_trace();
-                cnt = 0;
-                deadlock_reported = true;
-            }
             rc = semaphore_trydown(&mtx->sem);
         } while (SYNCH_FAILED(rc) &&
             !(flags & SYNCH_FLAGS_NON_BLOCKING));
-        if (deadlock_reported)
-            printf("cpu%u: not deadlocked\n", CPU->id);
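The lines removed here were a simple spin diagnostic: count failed semaphore_trydown() attempts on an active mutex, report (with a stack trace) once the count passes DEADLOCK_THRESHOLD, and note afterwards whether the lock was eventually obtained. A stand-alone, user-space sketch of that counting heuristic follows; the lock type, threshold value, and names are illustrative assumptions, not the kernel's.

/* User-space sketch of the spin-and-report heuristic the removed block
 * implemented.  The atomic_flag lock, threshold, and messages are
 * illustrative; the kernel spins on a semaphore trydown instead. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define DEADLOCK_THRESHOLD 100000000U

static atomic_flag lock_flag = ATOMIC_FLAG_INIT;

static void busy_lock(void)
{
    unsigned int cnt = 0;
    bool reported = false;

    /* Spin until the flag is acquired; in the kernel another CPU would be
     * holding the lock, here the flag is simply clear on first use. */
    while (atomic_flag_test_and_set_explicit(&lock_flag, memory_order_acquire)) {
        if (cnt++ > DEADLOCK_THRESHOLD) {
            /* The removed kernel code also dumped a stack trace here. */
            fprintf(stderr, "looping on active lock %p\n", (void *) &lock_flag);
            cnt = 0;
            reported = true;
        }
    }

    if (reported)
        fprintf(stderr, "not deadlocked after all\n");
}

int main(void)
{
    busy_lock();
    atomic_flag_clear_explicit(&lock_flag, memory_order_release);
    puts("lock taken and released");
    return 0;
}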
kernel/generic/src/synch/spinlock.c
r8264867 r99d05e1
@@ -44,5 +44,4 @@
 #include <debug.h>
 #include <symtab.h>
-#include <stacktrace.h>
 
 #ifdef CONFIG_SMP
@@ -105,5 +104,4 @@
                 "caller=%p (%s)\n", CPU->id, lock, lock->name,
                 (void *) CALLER, symtab_fmt_name_lookup(CALLER));
-            stack_trace();
 
             i = 0;
@@ -262,5 +260,5 @@
     int rc = spinlock_trylock(&(lock->lock));
 
-    ASSERT_IRQ_SPINLOCK(!rc || !lock->guard, lock);
+    ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
     return rc;
 }