Changes in / [99d05e1:8264867] in mainline
Location: kernel
Files: 9 edited

Legend:
  lines prefixed with '+' were added
  lines prefixed with '-' were removed
  all other lines are unmodified context
kernel/arch/amd64/include/mm/page.h
r99d05e1 → r8264867

   */

- /** Paging on AMD64
-  *
-  * The space is divided in positive numbers (uspace) and
-  * negative numbers (kernel). The 'negative' space starting
-  * with 0xffff800000000000 and ending with 0xffffffffffffffff
-  * is identically mapped physical memory.
-  *
-  */
-
  #ifndef KERN_amd64_PAGE_H_
  #define KERN_amd64_PAGE_H_
kernel/genarch/src/mm/page_pt.c
r99d05e1 → r8264867

  #include <align.h>
  #include <macros.h>
+ #include <bitops.h>

  static void pt_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int);
…
  }

+ /** Return the size of the region mapped by a single PTL0 entry.
+  *
+  * @return Size of the region mapped by a single PTL0 entry.
+  */
+ static uintptr_t ptl0_step_get(void)
+ {
+     size_t va_bits;
+
+     va_bits = fnzb(PTL0_ENTRIES) + fnzb(PTL1_ENTRIES) + fnzb(PTL2_ENTRIES) +
+         fnzb(PTL3_ENTRIES) + PAGE_WIDTH;
+
+     return 1UL << (va_bits - fnzb(PTL0_ENTRIES));
+ }
+
  /** Make the mappings in the given range global accross all address spaces.
   *
…
  {
      uintptr_t ptl0 = PA2KA((uintptr_t) AS_KERNEL->genarch.page_table);
-     uintptr_t ptl0step = (((uintptr_t) -1) / PTL0_ENTRIES) + 1;
+     uintptr_t ptl0_step = ptl0_step_get();
      size_t order;
      uintptr_t addr;
…
  #endif

-     ASSERT(ispwr2(ptl0step));
      ASSERT(size > 0);

-     for (addr = ALIGN_DOWN(base, ptl0step); addr - 1 < base + size - 1;
-         addr += ptl0step) {
+     for (addr = ALIGN_DOWN(base, ptl0_step); addr - 1 < base + size - 1;
+         addr += ptl0_step) {
          uintptr_t l1;
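The new ptl0_step_get() derives the PTL0 step from the number of virtual-address bits actually translated by the page-table hierarchy, instead of dividing the full 64-bit range as the removed expression did. Below is a minimal worked example of that arithmetic; the 512-entry tables and 4 KiB pages (PAGE_WIDTH == 12) are assumed amd64-style values used only for illustration, and fnzb() is taken to return the index of the most significant set bit, i.e. floor(log2(x)), so fnzb(512) == 9.

/*
 * Worked example of the ptl0_step_get() arithmetic (illustration only;
 * the 512-entry tables and 4 KiB pages are assumed amd64-like values).
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    unsigned fnzb_entries = 9;   /* fnzb(512) == floor(log2(512)) == 9 */
    unsigned page_width = 12;    /* 4 KiB pages */

    /* 4 table levels x 9 bits + 12 page-offset bits = 48 VA bits */
    unsigned va_bits = 4 * fnzb_entries + page_width;

    /* One PTL0 entry maps 2^(48 - 9) bytes = 512 GiB */
    uint64_t ptl0_step = UINT64_C(1) << (va_bits - fnzb_entries);

    printf("va_bits = %u, ptl0_step = %llu GiB\n",
        va_bits, (unsigned long long) (ptl0_step >> 30));
    return 0;
}

With these assumed parameters the old formula would have yielded 2^64 / 512 = 2^55 bytes, which does not match the 2^39 bytes one PTL0 entry actually covers when only 48 bits of the virtual address are translated; the removed ASSERT(ispwr2(ptl0step)) also becomes unnecessary because the new value is a power of two by construction.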
kernel/generic/include/lib/ra.h
r99d05e1 → r8264867

  typedef struct {
-     SPINLOCK_DECLARE(lock);
+     IRQ_SPINLOCK_DECLARE(lock);
      list_t spans;  /**< List of arena's spans. */
  } ra_arena_t;
kernel/generic/include/mm/slab.h
r99d05e1 → r8264867

      slab_magazine_t *current;
      slab_magazine_t *last;
-     SPINLOCK_DECLARE(lock);
+     IRQ_SPINLOCK_DECLARE(lock);
  } slab_mag_cache_t;

…
      list_t full_slabs;     /**< List of full slabs */
      list_t partial_slabs;  /**< List of partial slabs */
-     SPINLOCK_DECLARE(slablock);
+     IRQ_SPINLOCK_DECLARE(slablock);
      /* Magazines */
      list_t magazines;  /**< List o full magazines */
-     SPINLOCK_DECLARE(maglock);
+     IRQ_SPINLOCK_DECLARE(maglock);

      /** CPU cache */
kernel/generic/src/lib/ra.c
r99d05e1 → r8264867

          return NULL;

-     spinlock_initialize(&arena->lock, "arena_lock");
+     irq_spinlock_initialize(&arena->lock, "arena_lock");
      list_initialize(&arena->spans);

…

      /* TODO: check for overlaps */
-     spinlock_lock(&arena->lock);
+     irq_spinlock_lock(&arena->lock, true);
      list_append(&span->span_link, &arena->spans);
-     spinlock_unlock(&arena->lock);
+     irq_spinlock_unlock(&arena->lock, true);
      return true;
  }
…
      ASSERT(ispwr2(alignment));

-     spinlock_lock(&arena->lock);
+     irq_spinlock_lock(&arena->lock, true);
      list_foreach(arena->spans, cur) {
          ra_span_t *span = list_get_instance(cur, ra_span_t, span_link);
…
              break;
      }
-     spinlock_unlock(&arena->lock);
+     irq_spinlock_unlock(&arena->lock, true);

      return base;
…
  void ra_free(ra_arena_t *arena, uintptr_t base, size_t size)
  {
-     spinlock_lock(&arena->lock);
+     irq_spinlock_lock(&arena->lock, true);
      list_foreach(arena->spans, cur) {
          ra_span_t *span = list_get_instance(cur, ra_span_t, span_link);
…
          if (iswithin(span->base, span->size, base, size)) {
              ra_span_free(span, base, size);
-             spinlock_unlock(&arena->lock);
+             irq_spinlock_unlock(&arena->lock, true);
              return;
          }
      }
-     spinlock_unlock(&arena->lock);
+     irq_spinlock_unlock(&arena->lock, true);

      panic("Freeing to wrong arena (base=%" PRIxn ", size=%" PRIdn ").",
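The conversion above follows a single pattern: each plain spinlock protecting the arena becomes an IRQ spinlock, and every lock/unlock call passes true so that interrupts are disabled for the duration of the critical section and restored afterwards. A minimal sketch of that pattern follows, using only calls that appear in this changeset; the arena type and function names below are hypothetical and exist only for illustration.

/*
 * Sketch of the locking pattern the changeset converts ra_arena_t to.
 * 'my_arena_t', 'my_arena_init' and 'my_arena_add' are made-up names.
 */
#include <synch/spinlock.h>
#include <adt/list.h>

typedef struct {
    IRQ_SPINLOCK_DECLARE(lock);
    list_t items;
} my_arena_t;

static void my_arena_init(my_arena_t *arena)
{
    irq_spinlock_initialize(&arena->lock, "my_arena_lock");
    list_initialize(&arena->items);
}

static void my_arena_add(my_arena_t *arena, link_t *item)
{
    /*
     * Passing true disables interrupts before spinning and restores
     * them on unlock, so the critical section cannot be interrupted
     * by a handler that needs the same lock.
     */
    irq_spinlock_lock(&arena->lock, true);
    list_append(item, &arena->items);
    irq_spinlock_unlock(&arena->lock, true);
}

Holding the lock with interrupts disabled is what makes it safe to reach this code from interrupt context or while a TLB shootdown is in progress, which is the same deadlock concern spelled out in the frame.c comment later in this changeset.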
kernel/generic/src/mm/frame.c
r99d05e1 → r8264867

  #endif

+     /*
+      * Since the mem_avail_mtx is an active mutex, we need to disable interrupts
+      * to prevent deadlock with TLB shootdown.
+      */
+     ipl_t ipl = interrupts_disable();
      mutex_lock(&mem_avail_mtx);

…

      mutex_unlock(&mem_avail_mtx);
+     interrupts_restore(ipl);

  #ifdef CONFIG_DEBUG
…
       * Signal that some memory has been freed.
       */
+
+
+     /*
+      * Since the mem_avail_mtx is an active mutex, we need to disable interrupts
+      * to prevent deadlock with TLB shootdown.
+      */
+     ipl_t ipl = interrupts_disable();
      mutex_lock(&mem_avail_mtx);
      if (mem_avail_req > 0)
…
      }
      mutex_unlock(&mem_avail_mtx);
+     interrupts_restore(ipl);

      if (!(flags & FRAME_NO_RESERVE))
kernel/generic/src/mm/slab.c
r99d05e1 → r8264867

      freed = cache->destructor(obj);

-     spinlock_lock(&cache->slablock);
+     irq_spinlock_lock(&cache->slablock, true);
      ASSERT(slab->available < cache->objects);

…
          /* Free associated memory */
          list_remove(&slab->link);
-         spinlock_unlock(&cache->slablock);
+         irq_spinlock_unlock(&cache->slablock, true);

          return freed + slab_space_free(cache, slab);
…
      }

-     spinlock_unlock(&cache->slablock);
+     irq_spinlock_unlock(&cache->slablock, true);
      return freed;
  }
…
  NO_TRACE static void *slab_obj_create(slab_cache_t *cache, unsigned int flags)
  {
-     spinlock_lock(&cache->slablock);
+     irq_spinlock_lock(&cache->slablock, true);

      slab_t *slab;
…
       *
       */
-         spinlock_unlock(&cache->slablock);
+         irq_spinlock_unlock(&cache->slablock, true);
          slab = slab_space_alloc(cache, flags);
          if (!slab)
              return NULL;

-         spinlock_lock(&cache->slablock);
+         irq_spinlock_lock(&cache->slablock, true);
      } else {
          slab = list_get_instance(list_first(&cache->partial_slabs),
…
          list_prepend(&slab->link, &cache->partial_slabs);

-     spinlock_unlock(&cache->slablock);
+     irq_spinlock_unlock(&cache->slablock, true);

      if ((cache->constructor) && (cache->constructor(obj, flags))) {
…
      link_t *cur;

-     spinlock_lock(&cache->maglock);
+     irq_spinlock_lock(&cache->maglock, true);
      if (!list_empty(&cache->magazines)) {
          if (first)
…
          atomic_dec(&cache->magazine_counter);
      }
-
-     spinlock_unlock(&cache->maglock);
+     irq_spinlock_unlock(&cache->maglock, true);
+
      return mag;
  }
…
      slab_magazine_t *mag)
  {
-     spinlock_lock(&cache->maglock);
+     irq_spinlock_lock(&cache->maglock, true);

      list_prepend(&mag->link, &cache->magazines);
      atomic_inc(&cache->magazine_counter);

-     spinlock_unlock(&cache->maglock);
+     irq_spinlock_unlock(&cache->maglock, true);
  }

…
      slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last;

-     ASSERT(spinlock_locked(&cache->mag_cache[CPU->id].lock));
+     ASSERT(irq_spinlock_locked(&cache->mag_cache[CPU->id].lock));

      if (cmag) { /* First try local CPU magazines */
…
          return NULL;

-     spinlock_lock(&cache->mag_cache[CPU->id].lock);
+     irq_spinlock_lock(&cache->mag_cache[CPU->id].lock, true);

      slab_magazine_t *mag = get_full_current_mag(cache);
      if (!mag) {
-         spinlock_unlock(&cache->mag_cache[CPU->id].lock);
+         irq_spinlock_unlock(&cache->mag_cache[CPU->id].lock, true);
          return NULL;
      }

      void *obj = mag->objs[--mag->busy];
-     spinlock_unlock(&cache->mag_cache[CPU->id].lock);
+     irq_spinlock_unlock(&cache->mag_cache[CPU->id].lock, true);

      atomic_dec(&cache->cached_objs);
…
      slab_magazine_t *lastmag = cache->mag_cache[CPU->id].last;

-     ASSERT(spinlock_locked(&cache->mag_cache[CPU->id].lock));
+     ASSERT(irq_spinlock_locked(&cache->mag_cache[CPU->id].lock));

      if (cmag) {
…
          return -1;

-     spinlock_lock(&cache->mag_cache[CPU->id].lock);
+     irq_spinlock_lock(&cache->mag_cache[CPU->id].lock, true);

      slab_magazine_t *mag = make_empty_current_mag(cache);
      if (!mag) {
-         spinlock_unlock(&cache->mag_cache[CPU->id].lock);
+         irq_spinlock_unlock(&cache->mag_cache[CPU->id].lock, true);
          return -1;
      }
…
      mag->objs[mag->busy++] = obj;

-     spinlock_unlock(&cache->mag_cache[CPU->id].lock);
+     irq_spinlock_unlock(&cache->mag_cache[CPU->id].lock, true);

      atomic_inc(&cache->cached_objs);
…
      for (i = 0; i < config.cpu_count; i++) {
          memsetb(&cache->mag_cache[i], sizeof(cache->mag_cache[i]), 0);
-         spinlock_initialize(&cache->mag_cache[i].lock,
+         irq_spinlock_initialize(&cache->mag_cache[i].lock,
              "slab.cache.mag_cache[].lock");
      }
…
      list_initialize(&cache->magazines);

-     spinlock_initialize(&cache->slablock, "slab.cache.slablock");
-     spinlock_initialize(&cache->maglock, "slab.cache.maglock");
+     irq_spinlock_initialize(&cache->slablock, "slab.cache.slablock");
+     irq_spinlock_initialize(&cache->maglock, "slab.cache.maglock");

      if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
…
      size_t i;
      for (i = 0; i < config.cpu_count; i++) {
-         spinlock_lock(&cache->mag_cache[i].lock);
+         irq_spinlock_lock(&cache->mag_cache[i].lock, true);

          mag = cache->mag_cache[i].current;
…
          cache->mag_cache[i].last = NULL;

-         spinlock_unlock(&cache->mag_cache[i].lock);
+         irq_spinlock_unlock(&cache->mag_cache[i].lock, true);
      }
  }
kernel/generic/src/synch/mutex.c
r99d05e1 → r8264867

  #include <debug.h>
  #include <arch.h>
+ #include <stacktrace.h>

  /** Initialize mutex.
…
      ASSERT(!(flags & SYNCH_FLAGS_INTERRUPTIBLE));

+     unsigned int cnt = 0;
+     bool deadlock_reported = false;
      do {
+         if (cnt++ > DEADLOCK_THRESHOLD) {
+             printf("cpu%u: looping on active mutex %p\n",
+                 CPU->id, mtx);
+             stack_trace();
+             cnt = 0;
+             deadlock_reported = true;
+         }
          rc = semaphore_trydown(&mtx->sem);
      } while (SYNCH_FAILED(rc) &&
          !(flags & SYNCH_FLAGS_NON_BLOCKING));
+     if (deadlock_reported)
+         printf("cpu%u: not deadlocked\n", CPU->id);
  }

kernel/generic/src/synch/spinlock.c
r99d05e1 → r8264867

  #include <debug.h>
  #include <symtab.h>
+ #include <stacktrace.h>

  #ifdef CONFIG_SMP
…
              "caller=%p (%s)\n", CPU->id, lock, lock->name,
              (void *) CALLER, symtab_fmt_name_lookup(CALLER));
+         stack_trace();

          i = 0;
…
      int rc = spinlock_trylock(&(lock->lock));

-     ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
+     ASSERT_IRQ_SPINLOCK(!rc || !lock->guard, lock);
      return rc;
  }
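The last hunk tightens the trylock assertion: the lock's guard flag is only meaningful when the trylock actually succeeded, so the check is short-circuited on failure (!rc || !lock->guard). A usage sketch consistent with that contract follows; everything except the irq_spinlock calls themselves is a hypothetical name, and the assumption that the caller already runs with interrupts disabled is mine, not stated in this changeset.

/*
 * Sketch of irq_spinlock_trylock() usage consistent with the fixed
 * assertion: the lock's internal state is inspected only after a
 * successful acquisition. 'my_try_update' and its arguments are made up.
 */
#include <synch/spinlock.h>

static bool my_try_update(irq_spinlock_t *lock, int *data, int value)
{
    /* Assumed precondition: interrupts are already disabled here. */
    if (!irq_spinlock_trylock(lock)) {
        /* Lock is busy; nothing may be assumed about its guard/ipl. */
        return false;
    }

    *data = value;

    /* false: trylock did not touch interrupts, so do not restore them. */
    irq_spinlock_unlock(lock, false);
    return true;
}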