Changes in kernel/generic/src/proc/thread.c [cd3b380:8ad7dd1] in mainline
File: kernel/generic/src/proc/thread.c (1 edited)
Legend:
- Unmodified (context lines, no prefix)
- Added (prefixed with +)
- Removed (prefixed with -)
kernel/generic/src/proc/thread.c
--- kernel/generic/src/proc/thread.c    (rcd3b380)
+++ kernel/generic/src/proc/thread.c    (r8ad7dd1)
@@ -46,4 +46,6 @@
 #include <synch/spinlock.h>
 #include <synch/waitq.h>
+#include <synch/workqueue.h>
+#include <synch/rcu.h>
 #include <cpu.h>
 #include <str.h>
@@ -263,4 +265,11 @@
 }
 
+/** Invoked right before thread_ready() readies the thread. thread is locked. */
+static void before_thread_is_ready(thread_t *thread)
+{
+    ASSERT(irq_spinlock_locked(&thread->lock));
+    workq_before_thread_is_ready(thread);
+}
+
 /** Make thread ready
  *
@@ -275,14 +284,25 @@
 
     ASSERT(thread->state != Ready);
+
+    before_thread_is_ready(thread);
 
     int i = (thread->priority < RQ_COUNT - 1) ?
         ++thread->priority : thread->priority;
 
     cpu_t *cpu;
     if (thread->wired || thread->nomigrate || thread->fpu_context_engaged) {
+        /* Cannot ready to another CPU */
         ASSERT(thread->cpu != NULL);
         cpu = thread->cpu;
-    } else
+    } else if (thread->stolen) {
+        /* Ready to the stealing CPU */
         cpu = CPU;
+    } else if (thread->cpu) {
+        /* Prefer the CPU on which the thread ran last */
+        ASSERT(thread->cpu != NULL);
+        cpu = thread->cpu;
+    } else {
+        cpu = CPU;
+    }
 
     thread->state = Ready;
@@ -300,6 +320,4 @@
 
     atomic_inc(&nrdy);
-    // FIXME: Why is the avg value not used
-    // avg = atomic_get(&nrdy) / config.cpu_active;
     atomic_inc(&cpu->nrdy);
 }
@@ -377,4 +395,6 @@
     thread->task = task;
 
+    thread->workq = NULL;
+
     thread->fpu_context_exists = false;
     thread->fpu_context_engaged = false;
@@ -391,4 +411,6 @@
     /* Might depend on previous initialization */
     thread_create_arch(thread);
+
+    rcu_thread_init(thread);
 
     if ((flags & THREAD_FLAG_NOATTACH) != THREAD_FLAG_NOATTACH)
@@ -501,5 +523,5 @@
      */
     ipc_cleanup();
-    futex_cleanup();
+    futex_task_cleanup();
     LOG("Cleanup of task %" PRIu64" completed.", TASK->taskid);
 }
@@ -521,4 +543,52 @@
     /* Not reached */
     while (true);
+}
+
+/** Interrupts an existing thread so that it may exit as soon as possible.
+ *
+ * Threads that are blocked waiting for a synchronization primitive
+ * are woken up with a return code of ESYNCH_INTERRUPTED if the
+ * blocking call was interruptable. See waitq_sleep_timeout().
+ *
+ * The caller must guarantee the thread object is valid during the entire
+ * function, eg by holding the threads_lock lock.
+ *
+ * Interrupted threads automatically exit when returning back to user space.
+ *
+ * @param thread A valid thread object. The caller must guarantee it
+ *               will remain valid until thread_interrupt() exits.
+ */
+void thread_interrupt(thread_t *thread)
+{
+    ASSERT(thread != NULL);
+
+    irq_spinlock_lock(&thread->lock, true);
+
+    thread->interrupted = true;
+    bool sleeping = (thread->state == Sleeping);
+
+    irq_spinlock_unlock(&thread->lock, true);
+
+    if (sleeping)
+        waitq_interrupt_sleep(thread);
+}
+
+/** Returns true if the thread was interrupted.
+ *
+ * @param thread A valid thread object. User must guarantee it will
+ *               be alive during the entire call.
+ * @return true if the thread was already interrupted via thread_interrupt().
+ */
+bool thread_interrupted(thread_t *thread)
+{
+    ASSERT(thread != NULL);
+
+    bool interrupted;
+
+    irq_spinlock_lock(&thread->lock, true);
+    interrupted = thread->interrupted;
+    irq_spinlock_unlock(&thread->lock, true);
+
+    return interrupted;
 }
 
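The most substantive change above is the new CPU-selection chain in thread_ready(). The following standalone sketch restates that decision logic outside the kernel so it can be compiled and run on its own; pick_cpu(), the stub cpu_t/thread_t types, and the local_cpu parameter are hypothetical stand-ins for illustration (the real code holds thread->lock and reads the local processor from the CPU macro).

/* Hypothetical sketch of the CPU-selection policy that the changeset
 * introduces in thread_ready(). Stub types only; not HelenOS code. */
#include <stdbool.h>
#include <stdio.h>

typedef struct cpu { int id; } cpu_t;

typedef struct thread {
    bool wired;               /* permanently bound to thread->cpu */
    bool nomigrate;           /* migration currently forbidden */
    bool fpu_context_engaged; /* lazy FPU state lives on thread->cpu */
    bool stolen;              /* just taken by a load-balancing CPU */
    cpu_t *cpu;               /* CPU the thread last ran on, or NULL */
} thread_t;

/* local_cpu stands in for the kernel's CPU macro (current processor). */
static cpu_t *pick_cpu(thread_t *t, cpu_t *local_cpu)
{
    if (t->wired || t->nomigrate || t->fpu_context_engaged)
        return t->cpu;    /* cannot ready to another CPU */
    if (t->stolen)
        return local_cpu; /* ready to the stealing CPU */
    if (t->cpu)
        return t->cpu;    /* prefer the CPU the thread ran on last */
    return local_cpu;     /* never ran anywhere: use the local CPU */
}

int main(void)
{
    cpu_t cpu0 = { 0 }, cpu1 = { 1 };
    thread_t t = { .cpu = &cpu0 };

    printf("affinity: cpu%d\n", pick_cpu(&t, &cpu1)->id); /* cpu0 */
    t.stolen = true;
    printf("stolen:   cpu%d\n", pick_cpu(&t, &cpu1)->id); /* cpu1 */
    return 0;
}

The ordering matters: the hard constraints (wired, nomigrate, engaged FPU context) are checked first, and the new stolen test deliberately overrides last-CPU affinity, since a freshly stolen thread has to land on the stealing CPU for load balancing to have any effect.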
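The new thread_interrupt() also illustrates a pattern worth noting: the interrupted flag is set and the Sleeping state sampled under thread->lock, but waitq_interrupt_sleep() is only called after the lock has been released. Below is a userspace analogue of that flag-under-lock / wake-outside-lock pattern, assuming POSIX threads; every identifier (fake_thread_t, fake_interrupt(), ...) is invented for illustration and none of this is HelenOS API.

/* Userspace analogue of the thread_interrupt()/thread_interrupted()
 * pair. Hypothetical names throughout; not HelenOS code. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
    pthread_mutex_t lock;
    pthread_cond_t  wake;
    bool interrupted;
    bool sleeping;
} fake_thread_t;

/* Analogue of thread_interrupt(): flag under the lock, wake outside it. */
static void fake_interrupt(fake_thread_t *t)
{
    pthread_mutex_lock(&t->lock);
    t->interrupted = true;
    bool sleeping = t->sleeping;       /* snapshot state while locked */
    pthread_mutex_unlock(&t->lock);

    if (sleeping)
        pthread_cond_signal(&t->wake); /* wakeup issued after unlock */
}

/* Analogue of thread_interrupted(): read the flag under the lock. */
static bool fake_interrupted(fake_thread_t *t)
{
    pthread_mutex_lock(&t->lock);
    bool interrupted = t->interrupted;
    pthread_mutex_unlock(&t->lock);
    return interrupted;
}

static void *worker(void *arg)
{
    fake_thread_t *t = arg;

    pthread_mutex_lock(&t->lock);
    t->sleeping = true;
    while (!t->interrupted)            /* interruptible "sleep" */
        pthread_cond_wait(&t->wake, &t->lock);
    t->sleeping = false;
    pthread_mutex_unlock(&t->lock);

    printf("worker saw interrupted=%d\n", fake_interrupted(t));
    return NULL;
}

int main(void)
{
    fake_thread_t t = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .wake = PTHREAD_COND_INITIALIZER,
    };
    pthread_t tid;

    pthread_create(&tid, NULL, worker, &t);
    fake_interrupt(&t);                /* may race the sleep; still safe */
    pthread_join(tid, NULL);
    return 0;
}

Signaling outside the lock keeps the wakeup path from running with the lock held. The race where the interrupt arrives before the target actually sleeps is benign here because the sleeper re-checks the flag under the lock before blocking; presumably waitq_sleep_timeout() performs an equivalent check in the kernel, which is what lets thread_interrupt() test state == Sleeping safely.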