Changeset 31d8e10 in mainline
- Timestamp: 2007-04-05T16:09:49Z (18 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 547fa39
- Parents: 879585a3
- Location: kernel/generic
- Files: 12 edited
kernel/generic/include/mm/as.h
--- r879585a3
+++ r31d8e10
@@ -102,8 +102,8 @@
     asid_t asid;
 
+    /** Number of references (i.e tasks that reference this as). */
+    atomic_t refcount;
+
     mutex_t lock;
-
-    /** Number of references (i.e tasks that reference this as). */
-    count_t refcount;
 
     /** B+tree of address space areas. */
@@ -148,8 +148,8 @@
     asid_t asid;
 
+    /** Number of references (i.e tasks that reference this as). */
+    atomic_t refcount;
+
     mutex_t lock;
-
-    /** Number of references (i.e tasks that reference this as). */
-    count_t refcount;
 
     /** B+tree of address space areas. */
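The effect of this hunk is that the address space reference count no longer needs as->lock: it moves out from under the mutex and becomes an atomic_t. A minimal sketch of the resulting usage, mirroring the kernel/generic/src/proc/task.c hunks below (the helper names as_hold() and as_release() are hypothetical, introduced here only for illustration):

#include <atomic.h>
#include <mm/as.h>

/* Hypothetical helpers; the real call sites are in task.c below. */
static void as_hold(as_t *as)
{
    /* Taking a reference no longer requires mutex_lock(&as->lock). */
    atomic_inc(&as->refcount);
}

static void as_release(as_t *as)
{
    /*
     * atomic_predec() decrements and returns the new value (like --x),
     * so exactly one releaser sees zero and destroys the address space.
     */
    if (atomic_predec(&as->refcount) == 0)
        as_destroy(as);
}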
kernel/generic/include/synch/mutex.h
--- r879585a3
+++ r31d8e10
@@ -45,11 +45,9 @@
 
 #define mutex_lock(mtx) \
-    _mutex_lock_timeout((mtx), SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NONE)
+    _mutex_lock_timeout((mtx), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE)
 #define mutex_trylock(mtx) \
-    _mutex_lock_timeout((mtx),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NON_BLOCKING)
-#define mutex_lock_timeout(mtx,usec) \
-    _mutex_lock_timeout((mtx),(usec),SYNCH_FLAGS_NON_BLOCKING)
-#define mutex_lock_active(mtx) \
-    while (mutex_trylock((mtx)) != ESYNCH_OK_ATOMIC)
+    _mutex_lock_timeout((mtx), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING)
+#define mutex_lock_timeout(mtx, usec) \
+    _mutex_lock_timeout((mtx), (usec), SYNCH_FLAGS_NON_BLOCKING)
 
 extern void mutex_initialize(mutex_t *mtx);
kernel/generic/include/synch/spinlock.h
--- r879585a3
+++ r31d8e10
@@ -102,3 +102,21 @@
 }
 
+#ifdef CONFIG_DEBUG_SPINLOCK
+
+extern int printf(const char *, ...);
+
+#define DEADLOCK_THRESHOLD          100000000
+#define DEADLOCK_PROBE_INIT(pname)  count_t pname = 0
+#define DEADLOCK_PROBE(pname, value) \
+    if ((pname)++ > (value)) { \
+        (pname) = 0; \
+        printf("Deadlock probe %s: exceeded threshold %d\n", \
+            "cpu%d: function=%s, line=%d\n", \
+            #pname, (value), CPU->id, __FUNCTION__, __LINE__); \
+    }
+#else
+#define DEADLOCK_PROBE_INIT(pname)
+#define DEADLOCK_PROBE(pname, value)
+#endif
+
 #else
@@ -114,3 +132,6 @@
 #define spinlock_unlock(x)  preemption_enable()
 
+#define DEADLOCK_PROBE_INIT(pname)
+#define DEADLOCK_PROBE(pname, value)
+
 #endif
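The probe macros expand to nothing unless CONFIG_DEBUG_SPINLOCK is enabled; with it, each instrumented site keeps a local counter and prints a diagnostic once a try-lock loop has iterated DEADLOCK_THRESHOLD times. A minimal sketch of the call pattern they are designed for, the same shape the hunks below add to ipc.c, waitq.c and timeout.c (the function and lock names here are illustrative only, not part of the changeset):

#include <synch/spinlock.h>

/* Illustrative two-lock acquisition instrumented with the new probes. */
static void lock_pair(spinlock_t *outer, spinlock_t *inner)
{
    DEADLOCK_PROBE_INIT(p_pairlock);    /* count_t p_pairlock = 0 */
grab_locks:
    spinlock_lock(outer);
    if (!spinlock_trylock(inner)) {
        /* Back off instead of spinning with 'outer' held. */
        spinlock_unlock(outer);
        /* Report once we have retried DEADLOCK_THRESHOLD times. */
        DEADLOCK_PROBE(p_pairlock, DEADLOCK_THRESHOLD);
        goto grab_locks;
    }
    /* ... critical section ... */
    spinlock_unlock(inner);
    spinlock_unlock(outer);
}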
kernel/generic/src/ipc/ipc.c
--- r879585a3
+++ r31d8e10
@@ -375,3 +375,4 @@
     call_t *call;
     phone_t *phone;
+    DEADLOCK_PROBE_INIT(p_phonelck);
 
@@ -388,7 +389,8 @@
     while (!list_empty(&TASK->answerbox.connected_phones)) {
         phone = list_get_instance(TASK->answerbox.connected_phones.next,
             phone_t, link);
         if (!spinlock_trylock(&phone->lock)) {
             spinlock_unlock(&TASK->answerbox.lock);
+            DEADLOCK_PROBE(p_phonelck, DEADLOCK_THRESHOLD);
             goto restart_phones;
         }
kernel/generic/src/ipc/irq.c
--- r879585a3
+++ r31d8e10
@@ -337,4 +337,5 @@
     link_t *cur = box->irq_head.next;
     irq_t *irq;
+    DEADLOCK_PROBE_INIT(p_irqlock);
 
     irq = list_get_instance(cur, irq_t, notif_cfg.link);
@@ -345,4 +346,5 @@
             spinlock_unlock(&box->irq_lock);
             interrupts_restore(ipl);
+            DEADLOCK_PROBE(p_irqlock, DEADLOCK_THRESHOLD);
             goto loop;
         }
kernel/generic/src/mm/as.c
--- r879585a3
+++ r31d8e10
@@ -58,4 +58,5 @@
 #include <mm/asid.h>
 #include <arch/mm/asid.h>
+#include <preemption.h>
 #include <synch/spinlock.h>
 #include <synch/mutex.h>
@@ -182,5 +183,5 @@
     as->asid = ASID_INVALID;
 
-    as->refcount = 0;
+    atomic_set(&as->refcount, 0);
     as->cpu_refcount = 0;
 #ifdef AS_PAGE_TABLE
@@ -197,10 +198,13 @@
  * When there are no tasks referencing this address space (i.e. its refcount is
  * zero), the address space can be destroyed.
+ *
+ * We know that we don't hold any spinlock.
  */
 void as_destroy(as_t *as)
 {
     ipl_t ipl;
     bool cond;
-
-    ASSERT(as->refcount == 0);
+    DEADLOCK_PROBE_INIT(p_asidlock);
+
+    ASSERT(atomic_get(&as->refcount) == 0);
 
@@ -210,6 +214,21 @@
      */
 
-    ipl = interrupts_disable();
-    spinlock_lock(&asidlock);
+    /*
+     * We need to avoid deadlock between TLB shootdown and asidlock.
+     * We therefore try to take asid conditionally and if we don't succeed,
+     * we enable interrupts and try again. This is done while preemption is
+     * disabled to prevent nested context switches. We also depend on the
+     * fact that so far no spinlocks are held.
+     */
+    preemption_disable();
+    ipl = interrupts_read();
+retry:
+    interrupts_disable();
+    if (!spinlock_trylock(&asidlock)) {
+        interrupts_enable();
+        DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
+        goto retry;
+    }
+    preemption_enable();    /* Interrupts disabled, enable preemption */
     if (as->asid != ASID_INVALID && as != AS_KERNEL) {
         if (as != AS && as->cpu_refcount == 0)
@@ -473,12 +492,13 @@
          * Finish TLB shootdown sequence.
          */
+
         tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE,
             area->pages - pages);
+        /*
+         * Invalidate software translation caches (e.g. TSB on sparc64).
+         */
+        as_invalidate_translation_cache(as, area->base +
+            pages * PAGE_SIZE, area->pages - pages);
         tlb_shootdown_finalize();
 
-        /*
-         * Invalidate software translation caches (e.g. TSB on sparc64).
-         */
-        as_invalidate_translation_cache(as, area->base +
-            pages * PAGE_SIZE, area->pages - pages);
     } else {
@@ -569,12 +589,12 @@
      * Finish TLB shootdown sequence.
      */
+
     tlb_invalidate_pages(as->asid, area->base, area->pages);
-    tlb_shootdown_finalize();
-
     /*
      * Invalidate potential software translation caches (e.g. TSB on
      * sparc64).
      */
     as_invalidate_translation_cache(as, area->base, area->pages);
+    tlb_shootdown_finalize();
 
     btree_destroy(&area->used_space);
@@ -868,4 +888,6 @@
  * thing which is forbidden in this context is locking the address space.
  *
+ * When this function is entered, no spinlocks may be held.
+ *
  * @param old Old address space or NULL.
  * @param new New address space.
@@ -873,5 +895,20 @@
 void as_switch(as_t *old_as, as_t *new_as)
 {
-    spinlock_lock(&asidlock);
+    DEADLOCK_PROBE_INIT(p_asidlock);
+    preemption_disable();
+retry:
+    (void) interrupts_disable();
+    if (!spinlock_trylock(&asidlock)) {
+        /*
+         * Avoid deadlock with TLB shootdown.
+         * We can enable interrupts here because
+         * preemption is disabled. We should not be
+         * holding any other lock.
+         */
+        (void) interrupts_enable();
+        DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
+        goto retry;
+    }
+    preemption_enable();
 
     /*
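Both retry loops added above follow the same shape, and the in-diff comments explain why: a CPU that spins on asidlock with interrupts disabled can never acknowledge a TLB shootdown initiated by the current holder of asidlock, so the two CPUs deadlock. The fix is to spin with spinlock_trylock() and open an interrupt window between attempts, with preemption disabled so the loop cannot be switched away mid-sequence. Condensed into one hypothetical helper (illustrative only; in the changeset the sequence is inlined in as_destroy() and as_switch() above, and the IPI-acknowledgment reading of the deadlock is an inference from those comments):

#include <preemption.h>
#include <synch/spinlock.h>

extern spinlock_t asidlock;     /* the lock guarded this way in as.c */

/* Hypothetical helper condensing the acquisition sequence shown above. */
static void asidlock_grab(void)
{
    preemption_disable();       /* keep the loop on this CPU */
retry:
    interrupts_disable();
    if (!spinlock_trylock(&asidlock)) {
        /*
         * The holder may be waiting for our TLB shootdown
         * acknowledgment; enable interrupts so the shootdown
         * IPI can be serviced, then try again.
         */
        interrupts_enable();
        goto retry;
    }
    preemption_enable();        /* safe: interrupts are now disabled */
}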
kernel/generic/src/proc/scheduler.c
--- r879585a3
+++ r31d8e10
@@ -378,4 +378,5 @@
 {
     int priority;
+    DEADLOCK_PROBE_INIT(p_joinwq);
 
     ASSERT(CPU != NULL);
@@ -407,3 +408,5 @@
                 delay(10);
                 spinlock_lock(&THREAD->lock);
+                DEADLOCK_PROBE(p_joinwq,
+                    DEADLOCK_THRESHOLD);
                 goto repeat;
kernel/generic/src/proc/task.c
--- r879585a3
+++ r31d8e10
@@ -42,4 +42,5 @@
 #include <mm/as.h>
 #include <mm/slab.h>
+#include <atomic.h>
 #include <synch/spinlock.h>
 #include <synch/waitq.h>
@@ -141,9 +142,6 @@
     /*
      * Increment address space reference count.
-     * TODO: Reconsider the locking scheme.
-     */
-    mutex_lock(&as->lock);
-    as->refcount++;
-    mutex_unlock(&as->lock);
+     */
+    atomic_inc(&as->refcount);
 
     spinlock_lock(&tasks_lock);
@@ -167,13 +165,6 @@
     btree_destroy(&t->futexes);
 
-    mutex_lock_active(&t->as->lock);
-    if (--t->as->refcount == 0) {
-        mutex_unlock(&t->as->lock);
+    if (atomic_predec(&t->as->refcount) == 0)
         as_destroy(t->as);
-        /*
-         * t->as is destroyed.
-         */
-    } else
-        mutex_unlock(&t->as->lock);
 
     free(t);
kernel/generic/src/proc/thread.c
--- r879585a3
+++ r31d8e10
@@ -497,5 +497,5 @@
 
     /*
-     * Since the thread is expected to notbe already detached,
+     * Since the thread is expected not to be already detached,
      * pointer to it must be still valid.
      */
kernel/generic/src/synch/spinlock.c
--- r879585a3
+++ r31d8e10
@@ -74,5 +74,4 @@
  */
 #ifdef CONFIG_DEBUG_SPINLOCK
-#define DEADLOCK_THRESHOLD  100000000
 void spinlock_lock_debug(spinlock_t *sl)
 {
kernel/generic/src/synch/waitq.c
--- r879585a3
+++ r31d8e10
@@ -87,4 +87,5 @@
     waitq_t *wq;
     bool do_wakeup = false;
+    DEADLOCK_PROBE_INIT(p_wqlock);
 
     spinlock_lock(&threads_lock);
@@ -97,4 +98,5 @@
         if (!spinlock_trylock(&wq->lock)) {
             spinlock_unlock(&t->lock);
+            DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD);
             goto grab_locks;    /* avoid deadlock */
         }
@@ -129,4 +131,5 @@
     bool do_wakeup = false;
     ipl_t ipl;
+    DEADLOCK_PROBE_INIT(p_wqlock);
 
     ipl = interrupts_disable();
@@ -148,4 +151,5 @@
         if (!spinlock_trylock(&wq->lock)) {
             spinlock_unlock(&t->lock);
+            DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD);
             goto grab_locks;    /* avoid deadlock */
         }
kernel/generic/src/time/timeout.c
--- r879585a3
+++ r31d8e10
@@ -45,5 +45,4 @@
 #include <arch/asm.h>
 #include <arch.h>
-
 
 /** Initialize timeouts
@@ -176,4 +175,5 @@
     link_t *l;
     ipl_t ipl;
+    DEADLOCK_PROBE_INIT(p_tolock);
 
 grab_locks:
@@ -187,5 +187,6 @@
         if (!spinlock_trylock(&t->cpu->timeoutlock)) {
             spinlock_unlock(&t->lock);
-            interrupts_restore(ipl); 
+            interrupts_restore(ipl);
+            DEADLOCK_PROBE(p_tolock, DEADLOCK_THRESHOLD);
             goto grab_locks;
         }