Changes in kernel/generic/src/proc/thread.c [7ed8530:ee42e43] in mainline
File: kernel/generic/src/proc/thread.c (1 edited)
Legend:
    (space)  Unmodified
    +        Added
    -        Removed
kernel/generic/src/proc/thread.c
r7ed8530 → ree42e43

 /**
  * @file
- * @brief
+ * @brief Thread management functions.
  */
 
…
 #include <synch/spinlock.h>
 #include <synch/waitq.h>
-#include <synch/rwlock.h>
 #include <cpu.h>
 #include <str.h>
…
  *
  * For locking rules, see declaration thereof.
- */
-SPINLOCK_INITIALIZE(threads_lock);
+ *
+ */
+IRQ_SPINLOCK_INITIALIZE(threads_lock);
 
 /** AVL tree of all threads.
…
  * When a thread is found in the threads_tree AVL tree, it is guaranteed to
  * exist as long as the threads_lock is held.
- */
-avltree_t threads_tree;
-
-SPINLOCK_INITIALIZE(tidlock);
-thread_id_t last_tid = 0;
+ *
+ */
+avltree_t threads_tree;
+
+IRQ_SPINLOCK_STATIC_INITIALIZE(tidlock);
+static thread_id_t last_tid = 0;
 
 static slab_cache_t *thread_slab;
+
 #ifdef CONFIG_FPU
 slab_cache_t *fpu_context_slab;
…
     void *arg = THREAD->thread_arg;
     THREAD->last_cycle = get_cycle();
     
     /* This is where each thread wakes up after its creation */
-    spinlock_unlock(&THREAD->lock);
+    irq_spinlock_unlock(&THREAD->lock, false);
     interrupts_enable();
     
     f(arg);
     
     /* Accumulate accounting to the task */
-    ipl_t ipl = interrupts_disable();
-    
-    spinlock_lock(&THREAD->lock);
+    irq_spinlock_lock(&THREAD->lock, true);
     if (!THREAD->uncounted) {
         thread_update_accounting(true);
…
         uint64_t kcycles = THREAD->kcycles;
         THREAD->kcycles = 0;
         
-        spinlock_unlock(&THREAD->lock);
-        
-        spinlock_lock(&TASK->lock);
+        irq_spinlock_pass(&THREAD->lock, &TASK->lock);
         TASK->ucycles += ucycles;
         TASK->kcycles += kcycles;
-        spinlock_unlock(&TASK->lock);
+        irq_spinlock_unlock(&TASK->lock, true);
     } else
-        spinlock_unlock(&THREAD->lock);
-    
-    interrupts_restore(ipl);
+        irq_spinlock_unlock(&THREAD->lock, true);
     
     thread_exit();
-    /* not reached */
+    
+    /* Not reached */
 }
 
-/** Initialization and allocation for thread_t structure */
-static int thr_constructor(void *obj, int kmflags)
-{
-    thread_t *t = (thread_t *) obj;
-    
-    spinlock_initialize(&t->lock, "thread_t_lock");
-    link_initialize(&t->rq_link);
-    link_initialize(&t->wq_link);
-    link_initialize(&t->th_link);
+/** Initialization and allocation for thread_t structure
+ *
+ */
+static int thr_constructor(void *obj, unsigned int kmflags)
+{
+    thread_t *thread = (thread_t *) obj;
+    
+    irq_spinlock_initialize(&thread->lock, "thread_t_lock");
+    link_initialize(&thread->rq_link);
+    link_initialize(&thread->wq_link);
+    link_initialize(&thread->th_link);
     
     /* call the architecture-specific part of the constructor */
-    thr_constructor_arch(t);
+    thr_constructor_arch(thread);
     
 #ifdef CONFIG_FPU
 #ifdef CONFIG_FPU_LAZY
-    t->saved_fpu_context = NULL;
-#else
-    t->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
-    if (!t->saved_fpu_context)
+    thread->saved_fpu_context = NULL;
+#else /* CONFIG_FPU_LAZY */
+    thread->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
+    if (!thread->saved_fpu_context)
         return -1;
-#endif
-#endif
-    
-    t->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
-    if (!t->kstack) {
+#endif /* CONFIG_FPU_LAZY */
+#endif /* CONFIG_FPU */
+    
+    thread->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
+    if (!thread->kstack) {
 #ifdef CONFIG_FPU
-        if (t->saved_fpu_context)
-            slab_free(fpu_context_slab, t->saved_fpu_context);
+        if (thread->saved_fpu_context)
+            slab_free(fpu_context_slab, thread->saved_fpu_context);
 #endif
         return -1;
     }
     
 #ifdef CONFIG_UDEBUG
-    mutex_initialize(&t->udebug.lock, MUTEX_PASSIVE);
+    mutex_initialize(&thread->udebug.lock, MUTEX_PASSIVE);
 #endif
     
     return 0;
 }
 
 /** Destruction of thread_t object */
-static int thr_destructor(void *obj)
-{
-    thread_t *t = (thread_t *) obj;
-    
+static size_t thr_destructor(void *obj)
+{
+    thread_t *thread = (thread_t *) obj;
+    
     /* call the architecture-specific part of the destructor */
-    thr_destructor_arch(t);
-    
-    frame_free(KA2PA(t->kstack));
+    thr_destructor_arch(thread);
+    
+    frame_free(KA2PA(thread->kstack));
+    
 #ifdef CONFIG_FPU
-    if (t->saved_fpu_context)
-        slab_free(fpu_context_slab, t->saved_fpu_context);
-#endif
-    return 1; /* One page freed */
+    if (thread->saved_fpu_context)
+        slab_free(fpu_context_slab, thread->saved_fpu_context);
+#endif
+    
+    return 1; /* One page freed */
 }
…
 {
     THREAD = NULL;
+    
     atomic_set(&nrdy, 0);
     thread_slab = slab_cache_create("thread_slab", sizeof(thread_t), 0,
         thr_constructor, thr_destructor, 0);
     
 #ifdef CONFIG_FPU
     fpu_context_slab = slab_cache_create("fpu_slab", sizeof(fpu_context_t),
         FPU_CONTEXT_ALIGN, NULL, NULL, 0);
 #endif
     
     avltree_create(&threads_tree);
 }
…
 /** Make thread ready
  *
- * Switch thread t to the ready state.
+ * Switch thread to the ready state.
  *
  * @param t Thread to make ready.
  *
  */
-void thread_ready(thread_t *t)
-{
-    cpu_t *cpu;
-    runq_t *r;
-    ipl_t ipl;
-    int i, avg;
-    
-    ipl = interrupts_disable();
-    
-    spinlock_lock(&t->lock);
-    
-    ASSERT(!(t->state == Ready));
-    
-    i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority;
-    
-    cpu = CPU;
-    if (t->flags & THREAD_FLAG_WIRED) {
-        ASSERT(t->cpu != NULL);
-        cpu = t->cpu;
+void thread_ready(thread_t *thread)
+{
+    irq_spinlock_lock(&thread->lock, true);
+    
+    ASSERT(!(thread->state == Ready));
+    
+    int i = (thread->priority < RQ_COUNT - 1)
+        ? ++thread->priority : thread->priority;
+    
+    cpu_t *cpu = CPU;
+    if (thread->flags & THREAD_FLAG_WIRED) {
+        ASSERT(thread->cpu != NULL);
+        cpu = thread->cpu;
     }
-    t->state = Ready;
-    spinlock_unlock(&t->lock);
+    thread->state = Ready;
+    
+    irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));
     
     /*
-     * Append t to respective ready queue on respective processor.
+     * Append thread to respective ready queue
+     * on respective processor.
      */
-    r = &cpu->rq[i];
-    spinlock_lock(&r->lock);
-    list_append(&t->rq_link, &r->rq_head);
-    r->n++;
-    spinlock_unlock(&r->lock);
-    
+    
+    list_append(&thread->rq_link, &cpu->rq[i].rq_head);
+    cpu->rq[i].n++;
+    irq_spinlock_unlock(&(cpu->rq[i].lock), true);
+    
     atomic_inc(&nrdy);
-    // FIXME: Why is the avg value never read?
-    avg = atomic_get(&nrdy) / config.cpu_active;
+    // FIXME: Why is the avg value not used
+    // avg = atomic_get(&nrdy) / config.cpu_active;
     atomic_inc(&cpu->nrdy);
-    
-    interrupts_restore(ipl);
 }
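The conversions above all follow one pattern: the explicit interrupts_disable()/interrupts_restore() bracketing around plain spinlocks is folded into the irq_spinlock calls, whose boolean argument says whether the lock should also save and later restore the interrupt state, while irq_spinlock_pass() hands a held lock over to another lock without opening a window in between (as cushion() now does when moving from THREAD->lock to TASK->lock). A minimal sketch of the pattern, assuming the irq_spinlock_t type name behind IRQ_SPINLOCK_INITIALIZE() and a made-up counter structure used purely for illustration:

#include <synch/spinlock.h>

/* Hypothetical data, not part of thread.c; only the locking pattern matters. */
typedef struct {
    irq_spinlock_t lock;  /* assumed type name used by IRQ_SPINLOCK_INITIALIZE() */
    uint64_t value;
} counter_t;

IRQ_SPINLOCK_INITIALIZE(counters_lock);
static uint64_t counters_total;

static void counter_add(counter_t *cnt, uint64_t delta)
{
    /* true: disable interrupts as part of taking the lock
       (replaces the old ipl = interrupts_disable(); spinlock_lock(); pair) */
    irq_spinlock_lock(&cnt->lock, true);
    cnt->value += delta;
    
    /* Hand the critical section over to counters_lock without
       releasing interrupt protection in between. */
    irq_spinlock_pass(&cnt->lock, &counters_lock);
    counters_total += delta;
    
    /* true: restore the interrupt state saved when the first lock was taken */
    irq_spinlock_unlock(&counters_lock, true);
}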
 
 /** Create new thread
  *
  * Create a new thread.
  *
  * @param func      Thread's implementing function.
  * @param arg       Thread's implementing function argument.
  * @param task      Task to which the thread belongs. The caller must
  *                  guarantee that the task won't cease to exist during the
  *                  call. The task's lock may not be held.
  * @param flags     Thread flags.
  * @param name      Symbolic name (a copy is made).
  * @param uncounted Thread's accounting doesn't affect accumulated task
  *                  accounting.
  *
  * @return New thread's structure on success, NULL on failure.
  *
  */
 thread_t *thread_create(void (* func)(void *), void *arg, task_t *task,
-    int flags, const char *name, bool uncounted)
+    unsigned int flags, const char *name, bool uncounted)
 {
-    thread_t *t;
-    ipl_t ipl;
-    
-    t = (thread_t *) slab_alloc(thread_slab, 0);
-    if (!t)
+    thread_t *thread = (thread_t *) slab_alloc(thread_slab, 0);
+    if (!thread)
         return NULL;
     
     /* Not needed, but good for debugging */
-    memsetb(t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);
-    
-    ipl = interrupts_disable();
-    spinlock_lock(&tidlock);
-    t->tid = ++last_tid;
-    spinlock_unlock(&tidlock);
-    interrupts_restore(ipl);
-    
-    context_save(&t->saved_context);
-    context_set(&t->saved_context, FADDR(cushion), (uintptr_t) t->kstack,
-        THREAD_STACK_SIZE);
-    
-    the_initialize((the_t *) t->kstack);
-    
-    ipl = interrupts_disable();
-    t->saved_context.ipl = interrupts_read();
+    memsetb(thread->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);
+    
+    irq_spinlock_lock(&tidlock, true);
+    thread->tid = ++last_tid;
+    irq_spinlock_unlock(&tidlock, true);
+    
+    context_save(&thread->saved_context);
+    context_set(&thread->saved_context, FADDR(cushion),
+        (uintptr_t) thread->kstack, THREAD_STACK_SIZE);
+    
+    the_initialize((the_t *) thread->kstack);
+    
+    ipl_t ipl = interrupts_disable();
+    thread->saved_context.ipl = interrupts_read();
     interrupts_restore(ipl);
     
-    memcpy(t->name, name, THREAD_NAME_BUFLEN);
-    t->name[THREAD_NAME_BUFLEN - 1] = 0;
-    
-    t->thread_code = func;
-    t->thread_arg = arg;
-    t->ticks = -1;
-    t->ucycles = 0;
-    t->kcycles = 0;
-    t->uncounted = uncounted;
-    t->priority = -1;          /* start in rq[0] */
-    t->cpu = NULL;
-    t->flags = flags;
-    t->state = Entering;
-    t->call_me = NULL;
-    t->call_me_with = NULL;
-    
-    timeout_initialize(&t->sleep_timeout);
-    t->sleep_interruptible = false;
-    t->sleep_queue = NULL;
-    t->timeout_pending = 0;
-    
-    t->in_copy_from_uspace = false;
-    t->in_copy_to_uspace = false;
-    
-    t->interrupted = false;
-    t->detached = false;
-    waitq_initialize(&t->join_wq);
-    
-    t->rwlock_holder_type = RWLOCK_NONE;
-    
-    t->task = task;
-    
-    t->fpu_context_exists = 0;
-    t->fpu_context_engaged = 0;
-    
-    avltree_node_initialize(&t->threads_tree_node);
-    t->threads_tree_node.key = (uintptr_t) t;
+    str_cpy(thread->name, THREAD_NAME_BUFLEN, name);
+    
+    thread->thread_code = func;
+    thread->thread_arg = arg;
+    thread->ticks = -1;
+    thread->ucycles = 0;
+    thread->kcycles = 0;
+    thread->uncounted = uncounted;
+    thread->priority = -1;          /* Start in rq[0] */
+    thread->cpu = NULL;
+    thread->flags = flags;
+    thread->state = Entering;
+    
+    timeout_initialize(&thread->sleep_timeout);
+    thread->sleep_interruptible = false;
+    thread->sleep_queue = NULL;
+    thread->timeout_pending = false;
+    
+    thread->in_copy_from_uspace = false;
+    thread->in_copy_to_uspace = false;
+    
+    thread->interrupted = false;
+    thread->detached = false;
+    waitq_initialize(&thread->join_wq);
+    
+    thread->task = task;
+    
+    thread->fpu_context_exists = 0;
+    thread->fpu_context_engaged = 0;
+    
+    avltree_node_initialize(&thread->threads_tree_node);
+    thread->threads_tree_node.key = (uintptr_t) thread;
     
 #ifdef CONFIG_UDEBUG
     /* Init debugging stuff */
-    udebug_thread_initialize(&t->udebug);
+    udebug_thread_initialize(&thread->udebug);
 #endif
     
-    /* might depend on previous initialization */
-    thread_create_arch(t);
+    /* Might depend on previous initialization */
+    thread_create_arch(thread);
     
     if (!(flags & THREAD_FLAG_NOATTACH))
-        thread_attach(t, task);
-    
-    return t;
+        thread_attach(thread, task);
+    
+    return thread;
 }
 
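The calling convention of thread_create() is unchanged by the rewrite: unless THREAD_FLAG_NOATTACH is passed, the new thread is already attached to the task, and it only becomes runnable once thread_ready() is called. A hedged sketch of spawning a plain kernel thread with the new signature (the worker function, its argument and the name are made up for illustration):

static void worker(void *arg)
{
    /* ... thread body ... */
}

static void example_spawn(task_t *task)
{
    /* flags = 0: no special behaviour; uncounted = false: the thread's
       cycles are accumulated into the task's accounting. */
    thread_t *thread = thread_create(worker, NULL, task, 0, "worker", false);
    if (!thread)
        return;  /* allocation failed */
    
    /* Without THREAD_FLAG_NOATTACH the thread is already attached,
       so it only has to be switched to the Ready state. */
    thread_ready(thread);
}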
…
  * Detach thread from all queues, cpus etc. and destroy it.
  *
- * Assume thread->lock is held!!
- */
-void thread_destroy(thread_t *t)
-{
-    ASSERT(t->state == Exiting || t->state == Lingering);
-    ASSERT(t->task);
-    ASSERT(t->cpu);
-    
-    spinlock_lock(&t->cpu->lock);
-    if (t->cpu->fpu_owner == t)
-        t->cpu->fpu_owner = NULL;
-    spinlock_unlock(&t->cpu->lock);
-    
-    spinlock_unlock(&t->lock);
-    
-    spinlock_lock(&threads_lock);
-    avltree_delete(&threads_tree, &t->threads_tree_node);
-    spinlock_unlock(&threads_lock);
-    
+ * @param thread  Thread to be destroyed.
+ * @param irq_res Indicate whether it should unlock thread->lock
+ *                in interrupts-restore mode.
+ *
+ */
+void thread_destroy(thread_t *thread, bool irq_res)
+{
+    ASSERT(irq_spinlock_locked(&thread->lock));
+    ASSERT((thread->state == Exiting) || (thread->state == Lingering));
+    ASSERT(thread->task);
+    ASSERT(thread->cpu);
+    
+    irq_spinlock_lock(&thread->cpu->lock, false);
+    if (thread->cpu->fpu_owner == thread)
+        thread->cpu->fpu_owner = NULL;
+    irq_spinlock_unlock(&thread->cpu->lock, false);
+    
+    irq_spinlock_pass(&thread->lock, &threads_lock);
+    
+    avltree_delete(&threads_tree, &thread->threads_tree_node);
+    
+    irq_spinlock_pass(&threads_lock, &thread->task->lock);
+    
     /*
      * Detach from the containing task.
      */
-    spinlock_lock(&t->task->lock);
-    list_remove(&t->th_link);
-    spinlock_unlock(&t->task->lock);
-    
+    list_remove(&thread->th_link);
+    irq_spinlock_unlock(&thread->task->lock, irq_res);
+    
     /*
      * Drop the reference to the containing task.
      */
-    task_release(t->task);
-    
-    slab_free(thread_slab, t);
+    task_release(thread->task);
+    slab_free(thread_slab, thread);
 }
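The old prose warning "Assume thread->lock is held!!" has become an assertion, and the new irq_res argument tells thread_destroy() whether its final unlock should also restore the interrupt state the caller saved when taking thread->lock. A minimal sketch of the implied calling convention, assuming the thread is already Exiting or Lingering (this is the same sequence thread_detach() uses below):

/* Hypothetical caller; mirrors the way thread_detach() invokes thread_destroy(). */
static void reap_lingering_thread(thread_t *thread)
{
    /* Take thread->lock and disable interrupts. */
    irq_spinlock_lock(&thread->lock, true);
    
    /* thread_destroy() consumes the lock: it is passed along to
       threads_lock and the task lock, and the final unlock restores
       interrupts because irq_res is true. */
    thread_destroy(thread, true);
}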
…
  * threads_tree.
  *
  * @param t    Thread to be attached to the task.
  * @param task Task to which the thread is to be attached.
- */
-void thread_attach(thread_t *t, task_t *task)
-{
-    ipl_t ipl;
-    
+ *
+ */
+void thread_attach(thread_t *thread, task_t *task)
+{
     /*
      * Attach to the specified task.
      */
-    ipl = interrupts_disable();
-    spinlock_lock(&task->lock);
-    
+    irq_spinlock_lock(&task->lock, true);
+    
     /* Hold a reference to the task. */
     task_hold(task);
     
     /* Must not count kbox thread into lifecount */
-    if (t->flags & THREAD_FLAG_USPACE)
+    if (thread->flags & THREAD_FLAG_USPACE)
         atomic_inc(&task->lifecount);
-    
-    list_append(&t->th_link, &task->th_head);
-    spinlock_unlock(&task->lock);
-    
+    
+    list_append(&thread->th_link, &task->th_head);
+    
+    irq_spinlock_pass(&task->lock, &threads_lock);
+    
     /*
      * Register this thread in the system-wide list.
      */
-    spinlock_lock(&threads_lock);
-    avltree_insert(&threads_tree, &t->threads_tree_node);
-    spinlock_unlock(&threads_lock);
-    
-    interrupts_restore(ipl);
+    avltree_insert(&threads_tree, &thread->threads_tree_node);
+    irq_spinlock_unlock(&threads_lock, true);
 }
…
 /** Terminate thread.
  *
- * End current thread execution and switch it to the exiting state. All pending
- * timeouts are executed.
+ * End current thread execution and switch it to the exiting state.
+ * All pending timeouts are executed.
+ *
  */
 void thread_exit(void)
 {
-    ipl_t ipl;
-    
     if (THREAD->flags & THREAD_FLAG_USPACE) {
 #ifdef CONFIG_UDEBUG
         /* Generate udebug THREAD_E event */
         udebug_thread_e_event();
+        
+        /*
+         * This thread will not execute any code or system calls from
+         * now on.
+         */
+        udebug_stoppable_begin();
 #endif
         if (atomic_predec(&TASK->lifecount) == 0) {
…
              * can only be created by threads of the same task.
              * We are safe to perform cleanup.
+             *
              */
             ipc_cleanup();
…
         }
     }
     
 restart:
-    ipl = interrupts_disable();
-    spinlock_lock(&THREAD->lock);
-    if (THREAD->timeout_pending) {
-        /* busy waiting for timeouts in progress */
-        spinlock_unlock(&THREAD->lock);
-        interrupts_restore(ipl);
+    irq_spinlock_lock(&THREAD->lock, true);
+    if (THREAD->timeout_pending) {
+        /* Busy waiting for timeouts in progress */
+        irq_spinlock_unlock(&THREAD->lock, true);
         goto restart;
     }
     
     THREAD->state = Exiting;
-    spinlock_unlock(&THREAD->lock);
+    irq_spinlock_unlock(&THREAD->lock, true);
+    
     scheduler();
     
     /* Not reached */
-    while (1)
-        ;
-}
-
+    while (true);
+}
…
 /** Thread sleep
…
     while (sec > 0) {
         uint32_t period = (sec > 1000) ? 1000 : sec;
         
         thread_usleep(period * 1000000);
         sec -= period;
…
 /** Wait for another thread to exit.
  *
- * @param t     Thread to join on exit.
- * @param usec  Timeout in microseconds.
- * @param flags Mode of operation.
+ * @param thread Thread to join on exit.
+ * @param usec   Timeout in microseconds.
+ * @param flags  Mode of operation.
  *
  * @return An error code from errno.h or an error code from synch.h.
- */
-int thread_join_timeout(thread_t *t, uint32_t usec, int flags)
-{
-    ipl_t ipl;
-    int rc;
-    
-    if (t == THREAD)
+ *
+ */
+int thread_join_timeout(thread_t *thread, uint32_t usec, unsigned int flags)
+{
+    if (thread == THREAD)
         return EINVAL;
     
     /*
      * Since thread join can only be called once on an undetached thread,
…
      */
     
-    ipl = interrupts_disable();
-    spinlock_lock(&t->lock);
-    ASSERT(!t->detached);
-    spinlock_unlock(&t->lock);
-    interrupts_restore(ipl);
-    
-    rc = waitq_sleep_timeout(&t->join_wq, usec, flags);
-    
-    return rc;
+    irq_spinlock_lock(&thread->lock, true);
+    ASSERT(!thread->detached);
+    irq_spinlock_unlock(&thread->lock, true);
+    
+    return waitq_sleep_timeout(&thread->join_wq, usec, flags);
 }
…
  * state, deallocate its resources.
  *
- * @param t Thread to be detached.
- */
-void thread_detach(thread_t *t)
-{
-    ipl_t ipl;
-    
+ * @param thread Thread to be detached.
+ *
+ */
+void thread_detach(thread_t *thread)
+{
     /*
      * Since the thread is expected not to be already detached,
      * pointer to it must be still valid.
      */
-    ipl = interrupts_disable();
-    spinlock_lock(&t->lock);
-    ASSERT(!t->detached);
-    if (t->state == Lingering) {
-        thread_destroy(t);    /* unlocks &t->lock */
-        interrupts_restore(ipl);
+    irq_spinlock_lock(&thread->lock, true);
+    ASSERT(!thread->detached);
+    
+    if (thread->state == Lingering) {
+        /*
+         * Unlock &thread->lock and restore
+         * interrupts in thread_destroy().
+         */
+        thread_destroy(thread, true);
         return;
     } else {
-        t->detached = true;
+        thread->detached = true;
     }
-    spinlock_unlock(&t->lock);
-    interrupts_restore(ipl);
+    
+    irq_spinlock_unlock(&thread->lock, true);
 }
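thread_join_timeout() now simply forwards the waitq_sleep_timeout() result, and a joined thread still has to be detached before its thread_t can be reclaimed once it reaches the Lingering state. A hedged usage sketch; SYNCH_NO_TIMEOUT and SYNCH_FLAGS_NONE are the usual waitq arguments and are assumed here rather than introduced by this change:

/* Wait for a previously created, undetached thread to exit, then release it. */
static int example_join(thread_t *thread)
{
    /* Returns an errno.h or synch.h code; blocks with no timeout. */
    int rc = thread_join_timeout(thread, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
    
    /* Detaching is what eventually lets the Lingering thread
       structure be destroyed. */
    thread_detach(thread);
    
    return rc;
}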
…
 }
 
-/** Register thread out-of-context invocation
- *
- * Register a function and its argument to be executed
- * on next context switch to the current thread.
- *
- * @param call_me      Out-of-context function.
- * @param call_me_with Out-of-context function argument.
- *
- */
-void thread_register_call_me(void (* call_me)(void *), void *call_me_with)
-{
-    ipl_t ipl;
-    
-    ipl = interrupts_disable();
-    spinlock_lock(&THREAD->lock);
-    THREAD->call_me = call_me;
-    THREAD->call_me_with = call_me_with;
-    spinlock_unlock(&THREAD->lock);
-    interrupts_restore(ipl);
-}
-
 static bool thread_walker(avltree_node_t *node, void *arg)
 {
-    thread_t *t = avltree_get_instance(node, thread_t, threads_tree_node);
+    bool *additional = (bool *) arg;
+    thread_t *thread = avltree_get_instance(node, thread_t, threads_tree_node);
     
     uint64_t ucycles, kcycles;
     char usuffix, ksuffix;
-    order_suffix(t->ucycles, &ucycles, &usuffix);
-    order_suffix(t->kcycles, &kcycles, &ksuffix);
+    order_suffix(thread->ucycles, &ucycles, &usuffix);
+    order_suffix(thread->kcycles, &kcycles, &ksuffix);
     
 #ifdef __32_BITS__
-    printf("%-6" PRIu64" %-10s %10p %-8s %10p %-3" PRIu32 " %10p %10p %9"
-        PRIu64 "%c %9" PRIu64 "%c ", t->tid, t->name, t,
-        thread_states[t->state], t->task, t->task->context, t->thread_code,
-        t->kstack, ucycles, usuffix, kcycles, ksuffix);
+    if (*additional)
+        printf("%-8" PRIu64" %10p %9" PRIu64 "%c %9" PRIu64 "%c ",
+            thread->tid, thread->kstack, ucycles, usuffix,
+            kcycles, ksuffix);
+    else
+        printf("%-8" PRIu64" %-14s %10p %-8s %10p %-5" PRIu32 " %10p\n",
+            thread->tid, thread->name, thread, thread_states[thread->state],
+            thread->task, thread->task->context, thread->thread_code);
 #endif
     
 #ifdef __64_BITS__
-    printf("%-6" PRIu64" %-10s %18p %-8s %18p %-3" PRIu32 " %18p %18p %9"
-        PRIu64 "%c %9" PRIu64 "%c ", t->tid, t->name, t,
-        thread_states[t->state], t->task, t->task->context, t->thread_code,
-        t->kstack, ucycles, usuffix, kcycles, ksuffix);
+    if (*additional)
+        printf("%-8" PRIu64" %18p %18p\n"
+            " %9" PRIu64 "%c %9" PRIu64 "%c ",
+            thread->tid, thread->thread_code, thread->kstack,
+            ucycles, usuffix, kcycles, ksuffix);
+    else
+        printf("%-8" PRIu64" %-14s %18p %-8s %18p %-5" PRIu32 "\n",
+            thread->tid, thread->name, thread, thread_states[thread->state],
+            thread->task, thread->task->context);
 #endif
     
-    if (t->cpu)
-        printf("%-4u", t->cpu->id);
-    else
-        printf("none");
-    
-    if (t->state == Sleeping) {
-#ifdef __32_BITS__
-        printf(" %10p", t->sleep_queue);
-#endif
-        
-#ifdef __64_BITS__
-        printf(" %18p", t->sleep_queue);
-#endif
-    }
-    
-    printf("\n");
+    if (*additional) {
+        if (thread->cpu)
+            printf("%-5u", thread->cpu->id);
+        else
+            printf("none ");
+        
+        if (thread->state == Sleeping) {
+#ifdef __32_BITS__
+            printf(" %10p", thread->sleep_queue);
+#endif
+            
+#ifdef __64_BITS__
+            printf(" %18p", thread->sleep_queue);
+#endif
+        }
+        
+        printf("\n");
+    }
     
     return true;
 }
 
-/** Print list of threads debug info */
-void thread_print_list(void)
-{
-    ipl_t ipl;
-    
-    /* Messing with thread structures, avoid deadlock */
-    ipl = interrupts_disable();
-    spinlock_lock(&threads_lock);
-    
-#ifdef __32_BITS__
-    printf("tid name address state task "
-        "ctx code stack ucycles kcycles cpu "
-        "waitqueue\n");
-    printf("------ ---------- ---------- -------- ---------- "
-        "--- ---------- ---------- ---------- ---------- ---- "
-        "----------\n");
-#endif
-    
-#ifdef __64_BITS__
-    printf("tid name address state task "
-        "ctx code stack ucycles kcycles cpu "
-        "waitqueue\n");
-    printf("------ ---------- ------------------ -------- ------------------ "
-        "--- ------------------ ------------------ ---------- ---------- ---- "
-        "------------------\n");
-#endif
-    
-    avltree_walk(&threads_tree, thread_walker, NULL);
-    
-    spinlock_unlock(&threads_lock);
-    interrupts_restore(ipl);
+/** Print list of threads debug info
+ *
+ * @param additional Print additional information.
+ *
+ */
+void thread_print_list(bool additional)
+{
+    /* Messing with thread structures, avoid deadlock */
+    irq_spinlock_lock(&threads_lock, true);
+    
+#ifdef __32_BITS__
+    if (additional)
+        printf("[id ] [stack ] [ucycles ] [kcycles ] [cpu]"
+            " [waitqueue]\n");
+    else
+        printf("[id ] [name ] [address ] [state ] [task ]"
+            " [ctx] [code ]\n");
+#endif
+    
+#ifdef __64_BITS__
+    if (additional) {
+        printf("[id ] [code ] [stack ]\n"
+            " [ucycles ] [kcycles ] [cpu] [waitqueue ]\n");
+    } else
+        printf("[id ] [name ] [address ] [state ]"
+            " [task ] [ctx]\n");
+#endif
+    
+    avltree_walk(&threads_tree, thread_walker, &additional);
+    
+    irq_spinlock_unlock(&threads_lock, true);
 }
…
  * interrupts must be already disabled.
  *
- * @param t Pointer to thread.
+ * @param thread Pointer to thread.
  *
  * @return True if thread t is known to the system, false otherwise.
- */
-bool thread_exists(thread_t *t)
-{
-    avltree_node_t *node;
-    
-    node = avltree_search(&threads_tree, (avltree_key_t) ((uintptr_t) t));
+ *
+ */
+bool thread_exists(thread_t *thread)
+{
+    ASSERT(interrupts_disabled());
+    ASSERT(irq_spinlock_locked(&threads_lock));
+    
+    avltree_node_t *node =
+        avltree_search(&threads_tree, (avltree_key_t) ((uintptr_t) thread));
     
     return node != NULL;
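Both thread_exists() above and thread_find_by_id() below now assert the precondition their comments always stated: the caller must hold threads_lock with interrupts disabled. A conforming caller therefore looks roughly like this (the wrapper function is illustrative only):

/* Check whether a thread pointer still refers to a live thread. */
static bool example_thread_still_exists(thread_t *thread)
{
    /* true: taking threads_lock also disables interrupts, which
       satisfies both new ASSERTs inside thread_exists(). */
    irq_spinlock_lock(&threads_lock, true);
    bool exists = thread_exists(thread);
    irq_spinlock_unlock(&threads_lock, true);
    
    /* Note: the guarantee only holds while threads_lock is held;
       after the unlock the thread may exit at any time. */
    return exists;
}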
…
  * interrupts must be already disabled.
  *
  * @param user True to update user accounting, false for kernel.
+ *
  */
 void thread_update_accounting(bool user)
 {
     uint64_t time = get_cycle();
-    if (user) {
+    
+    ASSERT(interrupts_disabled());
+    ASSERT(irq_spinlock_locked(&THREAD->lock));
+    
+    if (user)
         THREAD->ucycles += time - THREAD->last_cycle;
-    } else {
+    else
         THREAD->kcycles += time - THREAD->last_cycle;
-    }
+    
     THREAD->last_cycle = time;
 }
…
 thread_t *thread_find_by_id(thread_id_t thread_id)
 {
+    ASSERT(interrupts_disabled());
+    ASSERT(irq_spinlock_locked(&threads_lock));
+    
     thread_iterator_t iterator;
     
…
     size_t name_len, thread_id_t *uspace_thread_id)
 {
-    thread_t *t;
-    char namebuf[THREAD_NAME_BUFLEN];
-    uspace_arg_t *kernel_uarg;
-    int rc;
-    
     if (name_len > THREAD_NAME_BUFLEN - 1)
         name_len = THREAD_NAME_BUFLEN - 1;
-    
-    rc = copy_from_uspace(namebuf, uspace_name, name_len);
+    
+    char namebuf[THREAD_NAME_BUFLEN];
+    int rc = copy_from_uspace(namebuf, uspace_name, name_len);
     if (rc != 0)
         return (unative_t) rc;
     
     namebuf[name_len] = 0;
     
     /*
      * In case of failure, kernel_uarg will be deallocated in this function.
      * In case of success, kernel_uarg will be freed in uinit().
+     *
      */
-    kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
+    uspace_arg_t *kernel_uarg =
+        (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
     
     rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
…
         return (unative_t) rc;
     }
     
-    t = thread_create(uinit, kernel_uarg, TASK,
+    thread_t *thread = thread_create(uinit, kernel_uarg, TASK,
         THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf, false);
-    if (t) {
+    if (thread) {
         if (uspace_thread_id != NULL) {
-            int rc;
-            
-            rc = copy_to_uspace(uspace_thread_id, &t->tid,
-                sizeof(t->tid));
+            rc = copy_to_uspace(uspace_thread_id, &thread->tid,
+                sizeof(thread->tid));
             if (rc != 0) {
                 /*
…
                  * has already been created. We need to undo its
                  * creation now.
+                 *
                  */
                 
                 /*
                  * The new thread structure is initialized, but
…
                  * We can safely deallocate it.
                  */
-                slab_free(thread_slab, t);
-                
+                slab_free(thread_slab, thread);
+                free(kernel_uarg);
+                
                 return (unative_t) rc;
             }
         }
+        
 #ifdef CONFIG_UDEBUG
         /*
…
          * THREAD_B events for threads that already existed
          * and could be detected with THREAD_READ before.
+         *
          */
-        udebug_thread_b_event_attach(t, TASK);
+        udebug_thread_b_event_attach(thread, TASK);
 #else
-        thread_attach(t, TASK);
+        thread_attach(thread, TASK);
 #endif
-        thread_ready(t);
+        thread_ready(thread);
         
         return 0;
     } else
         free(kernel_uarg);
     
     return (unative_t) ENOMEM;
 }
…
 {
     thread_exit();
+    
     /* Unreachable */
     return 0;
…
  *
  * @return 0 on success or an error code from @ref errno.h.
+ *
  */
 unative_t sys_thread_get_id(thread_id_t *uspace_thread_id)
…
      * No need to acquire lock on THREAD because tid
      * remains constant for the lifespan of the thread.
+     *
      */
     return (unative_t) copy_to_uspace(uspace_thread_id, &THREAD->tid,