Changes in kernel/generic/src/proc/thread.c [2d3ddad:7ed8530] in mainline
File: kernel/generic/src/proc/thread.c (1 edited)
Legend: unmodified lines are shown without a prefix, added lines are prefixed with '+', removed lines with '-'.
kernel/generic/src/proc/thread.c
--- kernel/generic/src/proc/thread.c    (r2d3ddad)
+++ kernel/generic/src/proc/thread.c    (r7ed8530)

@@ -33 +33 @@
 /**
  * @file
- * @brief
+ * @brief Thread management functions.
  */
 
@@ -94 +94 @@
  *
  * For locking rules, see declaration thereof.
- *
- */
-IRQ_SPINLOCK_INITIALIZE(threads_lock);
+ */
+SPINLOCK_INITIALIZE(threads_lock);
 
 /** AVL tree of all threads.
@@ -102 +101 @@
  * When a thread is found in the threads_tree AVL tree, it is guaranteed to
  * exist as long as the threads_lock is held.
- *
- */
-avltree_t threads_tree;
-
-IRQ_SPINLOCK_STATIC_INITIALIZE(tidlock);
-static thread_id_t last_tid = 0;
+ */
+avltree_t threads_tree;
+
+SPINLOCK_INITIALIZE(tidlock);
+thread_id_t last_tid = 0;
 
 static slab_cache_t *thread_slab;
-
 #ifdef CONFIG_FPU
 slab_cache_t *fpu_context_slab;
@@ -128 +125 @@
     void *arg = THREAD->thread_arg;
     THREAD->last_cycle = get_cycle();
 
     /* This is where each thread wakes up after its creation */
-    irq_spinlock_unlock(&THREAD->lock, false);
+    spinlock_unlock(&THREAD->lock);
     interrupts_enable();
 
     f(arg);
 
     /* Accumulate accounting to the task */
-    irq_spinlock_lock(&THREAD->lock, true);
+    ipl_t ipl = interrupts_disable();
+
+    spinlock_lock(&THREAD->lock);
     if (!THREAD->uncounted) {
         thread_update_accounting(true);
@@ -143 +142 @@
         uint64_t kcycles = THREAD->kcycles;
         THREAD->kcycles = 0;
+
+        spinlock_unlock(&THREAD->lock);
 
-        irq_spinlock_pass(&THREAD->lock, &TASK->lock);
+        spinlock_lock(&TASK->lock);
         TASK->ucycles += ucycles;
         TASK->kcycles += kcycles;
-        irq_spinlock_unlock(&TASK->lock, true);
+        spinlock_unlock(&TASK->lock);
     } else
-        irq_spinlock_unlock(&THREAD->lock, true);
+        spinlock_unlock(&THREAD->lock);
+
+    interrupts_restore(ipl);
 
     thread_exit();
-
-    /* Not reached */
-}
-
-/** Initialization and allocation for thread_t structure
- *
- */
-static int thr_constructor(void *obj, unsigned int kmflags)
-{
-    thread_t *thread = (thread_t *) obj;
-
-    irq_spinlock_initialize(&thread->lock, "thread_t_lock");
-    link_initialize(&thread->rq_link);
-    link_initialize(&thread->wq_link);
-    link_initialize(&thread->th_link);
-
+    /* not reached */
+}
+
+/** Initialization and allocation for thread_t structure */
+static int thr_constructor(void *obj, int kmflags)
+{
+    thread_t *t = (thread_t *) obj;
+
+    spinlock_initialize(&t->lock, "thread_t_lock");
+    link_initialize(&t->rq_link);
+    link_initialize(&t->wq_link);
+    link_initialize(&t->th_link);
+
     /* call the architecture-specific part of the constructor */
-    thr_constructor_arch(thread);
+    thr_constructor_arch(t);
 
 #ifdef CONFIG_FPU
 #ifdef CONFIG_FPU_LAZY
-    thread->saved_fpu_context = NULL;
-#else /* CONFIG_FPU_LAZY */
-    thread->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
-    if (!thread->saved_fpu_context)
+    t->saved_fpu_context = NULL;
+#else
+    t->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
+    if (!t->saved_fpu_context)
         return -1;
-#endif /* CONFIG_FPU_LAZY */
-#endif /* CONFIG_FPU */
-
-    thread->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
-    if (!thread->kstack) {
+#endif
+#endif
+
+    t->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
+    if (!t->kstack) {
 #ifdef CONFIG_FPU
-        if (thread->saved_fpu_context)
-            slab_free(fpu_context_slab, thread->saved_fpu_context);
+        if (t->saved_fpu_context)
+            slab_free(fpu_context_slab, t->saved_fpu_context);
 #endif
         return -1;
     }
 
 #ifdef CONFIG_UDEBUG
-    mutex_initialize(&thread->udebug.lock, MUTEX_PASSIVE);
-#endif
-
+    mutex_initialize(&t->udebug.lock, MUTEX_PASSIVE);
+#endif
+
     return 0;
 }
 
 /** Destruction of thread_t object */
-static size_t thr_destructor(void *obj)
-{
-    thread_t *thread = (thread_t *) obj;
-
+static int thr_destructor(void *obj)
+{
+    thread_t *t = (thread_t *) obj;
+
     /* call the architecture-specific part of the destructor */
-    thr_destructor_arch(thread);
-
-    frame_free(KA2PA(thread->kstack));
-
+    thr_destructor_arch(t);
+
+    frame_free(KA2PA(t->kstack));
 #ifdef CONFIG_FPU
-    if (thread->saved_fpu_context)
-        slab_free(fpu_context_slab, thread->saved_fpu_context);
-#endif
-
-    return 1; /* One page freed */
+    if (t->saved_fpu_context)
+        slab_free(fpu_context_slab, t->saved_fpu_context);
+#endif
+    return 1; /* One page freed */
 }
 
@@ -223 +221 @@
 {
     THREAD = NULL;
-
     atomic_set(&nrdy, 0);
     thread_slab = slab_cache_create("thread_slab", sizeof(thread_t), 0,
         thr_constructor, thr_destructor, 0);
 
 #ifdef CONFIG_FPU
     fpu_context_slab = slab_cache_create("fpu_slab", sizeof(fpu_context_t),
         FPU_CONTEXT_ALIGN, NULL, NULL, 0);
 #endif
 
     avltree_create(&threads_tree);
 }
@@ -238 +235 @@
 /** Make thread ready
  *
- * Switch thread to the ready state.
+ * Switch thread t to the ready state.
  *
  * @param t Thread to make ready.
  *
  */
-void thread_ready(thread_t *thread)
-{
-    irq_spinlock_lock(&thread->lock, true);
-
-    ASSERT(!(thread->state == Ready));
-
-    int i = (thread->priority < RQ_COUNT - 1)
-        ? ++thread->priority : thread->priority;
-
-    cpu_t *cpu = CPU;
-    if (thread->flags & THREAD_FLAG_WIRED) {
-        ASSERT(thread->cpu != NULL);
-        cpu = thread->cpu;
-    }
-    thread->state = Ready;
-
-    irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));
+void thread_ready(thread_t *t)
+{
+    cpu_t *cpu;
+    runq_t *r;
+    ipl_t ipl;
+    int i, avg;
+
+    ipl = interrupts_disable();
+
+    spinlock_lock(&t->lock);
+
+    ASSERT(!(t->state == Ready));
+
+    i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority;
+
+    cpu = CPU;
+    if (t->flags & THREAD_FLAG_WIRED) {
+        ASSERT(t->cpu != NULL);
+        cpu = t->cpu;
+    }
+    t->state = Ready;
+    spinlock_unlock(&t->lock);
 
     /*
-     * Append thread to respective ready queue
-     * on respective processor.
+     * Append t to respective ready queue on respective processor.
      */
-
-    list_append(&thread->rq_link, &cpu->rq[i].rq_head);
-    cpu->rq[i].n++;
-    irq_spinlock_unlock(&(cpu->rq[i].lock), true);
-
+    r = &cpu->rq[i];
+    spinlock_lock(&r->lock);
+    list_append(&t->rq_link, &r->rq_head);
+    r->n++;
+    spinlock_unlock(&r->lock);
+
     atomic_inc(&nrdy);
-    // FIXME: Why is the avg value not used
-    //avg = atomic_get(&nrdy) / config.cpu_active;
+    // FIXME: Why is the avg value never read?
+    avg = atomic_get(&nrdy) / config.cpu_active;
     atomic_inc(&cpu->nrdy);
+
+    interrupts_restore(ipl);
 }
 
@@ -280 +284 @@
  * Create a new thread.
  *
- * @param func
- * @param arg
- * @param task
- *
- *
- * @param flags
- * @param name
- * @param uncounted
- *
- *
- * @return New thread's structure on success, NULL on failure.
+ * @param func Thread's implementing function.
+ * @param arg Thread's implementing function argument.
+ * @param task Task to which the thread belongs. The caller must
+ *   guarantee that the task won't cease to exist during the
+ *   call. The task's lock may not be held.
+ * @param flags Thread flags.
+ * @param name Symbolic name (a copy is made).
+ * @param uncounted Thread's accounting doesn't affect accumulated task
+ *   accounting.
+ *
+ * @return New thread's structure on success, NULL on failure.
  *
  */
 thread_t *thread_create(void (* func)(void *), void *arg, task_t *task,
-    unsigned int flags, const char *name, bool uncounted)
-{
-    thread_t *thread = (thread_t *) slab_alloc(thread_slab, 0);
-    if (!thread)
+    int flags, const char *name, bool uncounted)
+{
+    thread_t *t;
+    ipl_t ipl;
+
+    t = (thread_t *) slab_alloc(thread_slab, 0);
+    if (!t)
         return NULL;
 
     /* Not needed, but good for debugging */
-    memsetb(thread->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);
-
-    irq_spinlock_lock(&tidlock, true);
-    thread->tid = ++last_tid;
-    irq_spinlock_unlock(&tidlock, true);
-
-    context_save(&thread->saved_context);
-    context_set(&thread->saved_context, FADDR(cushion),
-        (uintptr_t) thread->kstack, THREAD_STACK_SIZE);
-
-    the_initialize((the_t *) thread->kstack);
-
-    ipl_t ipl = interrupts_disable();
-    thread->saved_context.ipl = interrupts_read();
+    memsetb(t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);
+
+    ipl = interrupts_disable();
+    spinlock_lock(&tidlock);
+    t->tid = ++last_tid;
+    spinlock_unlock(&tidlock);
     interrupts_restore(ipl);
 
-    str_cpy(thread->name, THREAD_NAME_BUFLEN, name);
-
-    thread->thread_code = func;
-    thread->thread_arg = arg;
-    thread->ticks = -1;
-    thread->ucycles = 0;
-    thread->kcycles = 0;
-    thread->uncounted = uncounted;
-    thread->priority = -1; /* Start in rq[0] */
-    thread->cpu = NULL;
-    thread->flags = flags;
-    thread->state = Entering;
-    thread->call_me = NULL;
-    thread->call_me_with = NULL;
-
-    timeout_initialize(&thread->sleep_timeout);
-    thread->sleep_interruptible = false;
-    thread->sleep_queue = NULL;
-    thread->timeout_pending = false;
-
-    thread->in_copy_from_uspace = false;
-    thread->in_copy_to_uspace = false;
-
-    thread->interrupted = false;
-    thread->detached = false;
-    waitq_initialize(&thread->join_wq);
-
-    thread->rwlock_holder_type = RWLOCK_NONE;
-
-    thread->task = task;
-
-    thread->fpu_context_exists = 0;
-    thread->fpu_context_engaged = 0;
-
-    avltree_node_initialize(&thread->threads_tree_node);
-    thread->threads_tree_node.key = (uintptr_t) thread;
-
+    context_save(&t->saved_context);
+    context_set(&t->saved_context, FADDR(cushion), (uintptr_t) t->kstack,
+        THREAD_STACK_SIZE);
+
+    the_initialize((the_t *) t->kstack);
+
+    ipl = interrupts_disable();
+    t->saved_context.ipl = interrupts_read();
+    interrupts_restore(ipl);
+
+    memcpy(t->name, name, THREAD_NAME_BUFLEN);
+    t->name[THREAD_NAME_BUFLEN - 1] = 0;
+
+    t->thread_code = func;
+    t->thread_arg = arg;
+    t->ticks = -1;
+    t->ucycles = 0;
+    t->kcycles = 0;
+    t->uncounted = uncounted;
+    t->priority = -1; /* start in rq[0] */
+    t->cpu = NULL;
+    t->flags = flags;
+    t->state = Entering;
+    t->call_me = NULL;
+    t->call_me_with = NULL;
+
+    timeout_initialize(&t->sleep_timeout);
+    t->sleep_interruptible = false;
+    t->sleep_queue = NULL;
+    t->timeout_pending = 0;
+
+    t->in_copy_from_uspace = false;
+    t->in_copy_to_uspace = false;
+
+    t->interrupted = false;
+    t->detached = false;
+    waitq_initialize(&t->join_wq);
+
+    t->rwlock_holder_type = RWLOCK_NONE;
+
+    t->task = task;
+
+    t->fpu_context_exists = 0;
+    t->fpu_context_engaged = 0;
+
+    avltree_node_initialize(&t->threads_tree_node);
+    t->threads_tree_node.key = (uintptr_t) t;
+
 #ifdef CONFIG_UDEBUG
     /* Init debugging stuff */
-    udebug_thread_initialize(&thread->udebug);
-#endif
-
-    /* Might depend on previous initialization */
-    thread_create_arch(thread);
-
+    udebug_thread_initialize(&t->udebug);
+#endif
+
+    /* might depend on previous initialization */
+    thread_create_arch(t);
+
     if (!(flags & THREAD_FLAG_NOATTACH))
-        thread_attach(thread, task);
-
-    return thread;
+        thread_attach(t, task);
+
+    return t;
 }
 
@@ -372 +382 @@
  * Detach thread from all queues, cpus etc. and destroy it.
  *
- * @param thread Thread to be destroyed.
- * @param irq_res Indicate whether it should unlock thread->lock
- *   in interrupts-restore mode.
- *
- */
-void thread_destroy(thread_t *thread, bool irq_res)
-{
-    ASSERT(irq_spinlock_locked(&thread->lock));
-    ASSERT((thread->state == Exiting) || (thread->state == Lingering));
-    ASSERT(thread->task);
-    ASSERT(thread->cpu);
-
-    irq_spinlock_lock(&thread->cpu->lock, false);
-    if (thread->cpu->fpu_owner == thread)
-        thread->cpu->fpu_owner = NULL;
-    irq_spinlock_unlock(&thread->cpu->lock, false);
-
-    irq_spinlock_pass(&thread->lock, &threads_lock);
-
-    avltree_delete(&threads_tree, &thread->threads_tree_node);
-
-    irq_spinlock_pass(&threads_lock, &thread->task->lock);
-
+ * Assume thread->lock is held!!
+ */
+void thread_destroy(thread_t *t)
+{
+    ASSERT(t->state == Exiting || t->state == Lingering);
+    ASSERT(t->task);
+    ASSERT(t->cpu);
+
+    spinlock_lock(&t->cpu->lock);
+    if (t->cpu->fpu_owner == t)
+        t->cpu->fpu_owner = NULL;
+    spinlock_unlock(&t->cpu->lock);
+
+    spinlock_unlock(&t->lock);
+
+    spinlock_lock(&threads_lock);
+    avltree_delete(&threads_tree, &t->threads_tree_node);
+    spinlock_unlock(&threads_lock);
+
     /*
      * Detach from the containing task.
      */
-    list_remove(&thread->th_link);
-    irq_spinlock_unlock(&thread->task->lock, irq_res);
-
+    spinlock_lock(&t->task->lock);
+    list_remove(&t->th_link);
+    spinlock_unlock(&t->task->lock);
+
     /*
      * Drop the reference to the containing task.
      */
-    task_release(thread->task);
-    slab_free(thread_slab, thread);
+    task_release(t->task);
+
+    slab_free(thread_slab, t);
 }
 
@@ -413 +421 @@
  * threads_tree.
  *
- * @param t Thread to be attached to the task.
- * @param task Task to which the thread is to be attached.
- *
- */
-void thread_attach(thread_t *thread, task_t *task)
-{
+ * @param t Thread to be attached to the task.
+ * @param task Task to which the thread is to be attached.
+ */
+void thread_attach(thread_t *t, task_t *task)
+{
+    ipl_t ipl;
+
     /*
      * Attach to the specified task.
      */
-    irq_spinlock_lock(&task->lock, true);
-
+    ipl = interrupts_disable();
+    spinlock_lock(&task->lock);
+
     /* Hold a reference to the task. */
     task_hold(task);
 
     /* Must not count kbox thread into lifecount */
-    if (thread->flags & THREAD_FLAG_USPACE)
+    if (t->flags & THREAD_FLAG_USPACE)
         atomic_inc(&task->lifecount);
-
-    list_append(&thread->th_link, &task->th_head);
-
-    irq_spinlock_pass(&task->lock, &threads_lock);
-
+
+    list_append(&t->th_link, &task->th_head);
+    spinlock_unlock(&task->lock);
+
     /*
      * Register this thread in the system-wide list.
      */
-    avltree_insert(&threads_tree, &thread->threads_tree_node);
-    irq_spinlock_unlock(&threads_lock, true);
+    spinlock_lock(&threads_lock);
+    avltree_insert(&threads_tree, &t->threads_tree_node);
+    spinlock_unlock(&threads_lock);
+
+    interrupts_restore(ipl);
 }
 
 /** Terminate thread.
  *
- * End current thread execution and switch it to the exiting state.
- * All pending timeouts are executed.
- *
+ * End current thread execution and switch it to the exiting state. All pending
+ * timeouts are executed.
  */
 void thread_exit(void)
 {
+    ipl_t ipl;
+
     if (THREAD->flags & THREAD_FLAG_USPACE) {
 #ifdef CONFIG_UDEBUG
         /* Generate udebug THREAD_E event */
         udebug_thread_e_event();
-
-        /*
-         * This thread will not execute any code or system calls from
-         * now on.
-         */
-        udebug_stoppable_begin();
-
 #endif
         if (atomic_predec(&TASK->lifecount) == 0) {
@@ -468 +475 @@
              * can only be created by threads of the same task.
              * We are safe to perform cleanup.
-             *
              */
             ipc_cleanup();
@@ -475 +481 @@
         }
     }
 
 restart:
-    irq_spinlock_lock(&THREAD->lock, true);
-    if (THREAD->timeout_pending) {
-        /* Busy waiting for timeouts in progress */
-        irq_spinlock_unlock(&THREAD->lock, true);
+    ipl = interrupts_disable();
+    spinlock_lock(&THREAD->lock);
+    if (THREAD->timeout_pending) {
+        /* busy waiting for timeouts in progress */
+        spinlock_unlock(&THREAD->lock);
+        interrupts_restore(ipl);
         goto restart;
     }
 
     THREAD->state = Exiting;
-    irq_spinlock_unlock(&THREAD->lock, true);
-
+    spinlock_unlock(&THREAD->lock);
     scheduler();
 
     /* Not reached */
-    while (true);
-}
+    while (1)
+        ;
+}
+
 
 /** Thread sleep
@@ -506 +515 @@
     while (sec > 0) {
         uint32_t period = (sec > 1000) ? 1000 : sec;
 
         thread_usleep(period * 1000000);
         sec -= period;
@@ -514 +523 @@
 /** Wait for another thread to exit.
  *
- * @param thread Thread to join on exit.
- * @param usec
- * @param flags
+ * @param t Thread to join on exit.
+ * @param usec Timeout in microseconds.
+ * @param flags Mode of operation.
  *
  * @return An error code from errno.h or an error code from synch.h.
- *
- */
-int thread_join_timeout(thread_t *thread, uint32_t usec, unsigned int flags)
-{
-    if (thread == THREAD)
+ */
+int thread_join_timeout(thread_t *t, uint32_t usec, int flags)
+{
+    ipl_t ipl;
+    int rc;
+
+    if (t == THREAD)
         return EINVAL;
 
     /*
      * Since thread join can only be called once on an undetached thread,
@@ -531 +542 @@
      */
 
-    irq_spinlock_lock(&thread->lock, true);
-    ASSERT(!thread->detached);
-    irq_spinlock_unlock(&thread->lock, true);
-
-    return waitq_sleep_timeout(&thread->join_wq, usec, flags);
+    ipl = interrupts_disable();
+    spinlock_lock(&t->lock);
+    ASSERT(!t->detached);
+    spinlock_unlock(&t->lock);
+    interrupts_restore(ipl);
+
+    rc = waitq_sleep_timeout(&t->join_wq, usec, flags);
+
+    return rc;
 }
 
@@ -543 +558 @@
  * state, deallocate its resources.
  *
- * @param thread Thread to be detached.
- *
- */
-void thread_detach(thread_t *thread)
-{
+ * @param t Thread to be detached.
+ */
+void thread_detach(thread_t *t)
+{
+    ipl_t ipl;
+
     /*
      * Since the thread is expected not to be already detached,
      * pointer to it must be still valid.
      */
-    irq_spinlock_lock(&thread->lock, true);
-    ASSERT(!thread->detached);
-
-    if (thread->state == Lingering) {
-        /*
-         * Unlock &thread->lock and restore
-         * interrupts in thread_destroy().
-         */
-        thread_destroy(thread, true);
+    ipl = interrupts_disable();
+    spinlock_lock(&t->lock);
+    ASSERT(!t->detached);
+    if (t->state == Lingering) {
+        thread_destroy(t); /* unlocks &t->lock */
+        interrupts_restore(ipl);
         return;
     } else {
-        thread->detached = true;
-    }
-
-    irq_spinlock_unlock(&thread->lock, true);
+        t->detached = true;
+    }
+    spinlock_unlock(&t->lock);
+    interrupts_restore(ipl);
 }
 
@@ -588 +601 @@
  *
  * Register a function and its argument to be executed
- * on next context switch to the current thread. Must
- * be called with interrupts disabled.
+ * on next context switch to the current thread.
  *
  * @param call_me Out-of-context function.
@@ -597 +609 @@
 void thread_register_call_me(void (* call_me)(void *), void *call_me_with)
 {
-    irq_spinlock_lock(&THREAD->lock, false);
+    ipl_t ipl;
+
+    ipl = interrupts_disable();
+    spinlock_lock(&THREAD->lock);
     THREAD->call_me = call_me;
     THREAD->call_me_with = call_me_with;
-    irq_spinlock_unlock(&THREAD->lock, false);
+    spinlock_unlock(&THREAD->lock);
+    interrupts_restore(ipl);
 }
 
 static bool thread_walker(avltree_node_t *node, void *arg)
 {
-    thread_t *thread = avltree_get_instance(node, thread_t, threads_tree_node);
+    thread_t *t = avltree_get_instance(node, thread_t, threads_tree_node);
 
     uint64_t ucycles, kcycles;
     char usuffix, ksuffix;
-    order_suffix(thread->ucycles, &ucycles, &usuffix);
-    order_suffix(thread->kcycles, &kcycles, &ksuffix);
-
+    order_suffix(t->ucycles, &ucycles, &usuffix);
+    order_suffix(t->kcycles, &kcycles, &ksuffix);
+
 #ifdef __32_BITS__
     printf("%-6" PRIu64" %-10s %10p %-8s %10p %-3" PRIu32 " %10p %10p %9"
-        PRIu64 "%c %9" PRIu64 "%c ", thread->tid, thread->name, thread,
-        thread_states[thread->state], thread->task, thread->task->context,
-        thread->thread_code, thread->kstack, ucycles, usuffix, kcycles, ksuffix);
-#endif
-
+        PRIu64 "%c %9" PRIu64 "%c ", t->tid, t->name, t,
+        thread_states[t->state], t->task, t->task->context, t->thread_code,
+        t->kstack, ucycles, usuffix, kcycles, ksuffix);
+#endif
+
 #ifdef __64_BITS__
     printf("%-6" PRIu64" %-10s %18p %-8s %18p %-3" PRIu32 " %18p %18p %9"
-        PRIu64 "%c %9" PRIu64 "%c ", thread->tid, thread->name, thread,
-        thread_states[thread->state], thread->task, thread->task->context,
-        thread->thread_code, thread->kstack, ucycles, usuffix, kcycles, ksuffix);
-#endif
-
-    if (thread->cpu)
-        printf("%-4u", thread->cpu->id);
+        PRIu64 "%c %9" PRIu64 "%c ", t->tid, t->name, t,
+        thread_states[t->state], t->task, t->task->context, t->thread_code,
+        t->kstack, ucycles, usuffix, kcycles, ksuffix);
+#endif
+
+    if (t->cpu)
+        printf("%-4u", t->cpu->id);
     else
         printf("none");
-
-    if (thread->state == Sleeping) {
+
+    if (t->state == Sleeping) {
 #ifdef __32_BITS__
-        printf(" %10p", thread->sleep_queue);
-#endif
-
+        printf(" %10p", t->sleep_queue);
+#endif
+
 #ifdef __64_BITS__
-        printf(" %18p", thread->sleep_queue);
-#endif
-    }
-
+        printf(" %18p", t->sleep_queue);
+#endif
+    }
+
     printf("\n");
 
     return true;
 }
 
-/** Print list of threads debug info
- *
- */
+/** Print list of threads debug info */
 void thread_print_list(void)
 {
+    ipl_t ipl;
+
     /* Messing with thread structures, avoid deadlock */
-    irq_spinlock_lock(&threads_lock, true);
-
-#ifdef __32_BITS__
+    ipl = interrupts_disable();
+    spinlock_lock(&threads_lock);
+
+#ifdef __32_BITS__
     printf("tid name address state task "
         "ctx code stack ucycles kcycles cpu "
@@ -662 +679 @@
         "----------\n");
 #endif
 
 #ifdef __64_BITS__
     printf("tid name address state task "
@@ -671 +688 @@
         "------------------\n");
 #endif
 
     avltree_walk(&threads_tree, thread_walker, NULL);
-
-    irq_spinlock_unlock(&threads_lock, true);
+
+    spinlock_unlock(&threads_lock);
+    interrupts_restore(ipl);
 }
 
@@ -682 +700 @@
  * interrupts must be already disabled.
  *
- * @param thread Pointer to thread.
+ * @param t Pointer to thread.
  *
  * @return True if thread t is known to the system, false otherwise.
- *
- */
-bool thread_exists(thread_t *thread)
-{
-    ASSERT(interrupts_disabled());
-    ASSERT(irq_spinlock_locked(&threads_lock));
-
-    avltree_node_t *node =
-        avltree_search(&threads_tree, (avltree_key_t) ((uintptr_t) thread));
+ */
+bool thread_exists(thread_t *t)
+{
+    avltree_node_t *node;
+
+    node = avltree_search(&threads_tree, (avltree_key_t) ((uintptr_t) t));
 
     return node != NULL;
@@ -703 +718 @@
  * interrupts must be already disabled.
  *
- * @param user True to update user accounting, false for kernel.
- *
+ * @param user True to update user accounting, false for kernel.
  */
 void thread_update_accounting(bool user)
 {
     uint64_t time = get_cycle();
-
-    ASSERT(interrupts_disabled());
-    ASSERT(irq_spinlock_locked(&THREAD->lock));
-
-    if (user)
+    if (user) {
         THREAD->ucycles += time - THREAD->last_cycle;
-    else
+    } else {
         THREAD->kcycles += time - THREAD->last_cycle;
-
+    }
     THREAD->last_cycle = time;
@@ -747 +757 @@
 thread_t *thread_find_by_id(thread_id_t thread_id)
 {
-    ASSERT(interrupts_disabled());
-    ASSERT(irq_spinlock_locked(&threads_lock));
-
     thread_iterator_t iterator;
 
@@ -767 +774 @@
     size_t name_len, thread_id_t *uspace_thread_id)
 {
+    thread_t *t;
+    char namebuf[THREAD_NAME_BUFLEN];
+    uspace_arg_t *kernel_uarg;
+    int rc;
+
     if (name_len > THREAD_NAME_BUFLEN - 1)
         name_len = THREAD_NAME_BUFLEN - 1;
-
-    char namebuf[THREAD_NAME_BUFLEN];
-    int rc = copy_from_uspace(namebuf, uspace_name, name_len);
+
+    rc = copy_from_uspace(namebuf, uspace_name, name_len);
     if (rc != 0)
         return (unative_t) rc;
 
     namebuf[name_len] = 0;
 
     /*
      * In case of failure, kernel_uarg will be deallocated in this function.
      * In case of success, kernel_uarg will be freed in uinit().
-     *
      */
-    uspace_arg_t *kernel_uarg =
-        (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
+    kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
 
     rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
@@ -790 +799 @@
         return (unative_t) rc;
     }
-
-    thread_t *thread = thread_create(uinit, kernel_uarg, TASK,
+
+    t = thread_create(uinit, kernel_uarg, TASK,
         THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf, false);
-    if (thread) {
+    if (t) {
         if (uspace_thread_id != NULL) {
-            rc = copy_to_uspace(uspace_thread_id, &thread->tid,
-                sizeof(thread->tid));
+            int rc;
+
+            rc = copy_to_uspace(uspace_thread_id, &t->tid,
+                sizeof(t->tid));
             if (rc != 0) {
                 /*
@@ -802 +813 @@
                  * has already been created. We need to undo its
                  * creation now.
-                 *
                  */
 
                 /*
                  * The new thread structure is initialized, but
                  * We can safely deallocate it.
                  */
-                slab_free(thread_slab, thread);
-                free(kernel_uarg);
-
+                slab_free(thread_slab, t);
+                free(kernel_uarg);
+
                 return (unative_t) rc;
             }
         }
-
 #ifdef CONFIG_UDEBUG
         /*
@@ -824 +833 @@
          * THREAD_B events for threads that already existed
          * and could be detected with THREAD_READ before.
-         *
         */
-        udebug_thread_b_event_attach(thread, TASK);
+        udebug_thread_b_event_attach(t, TASK);
 #else
-        thread_attach(thread, TASK);
-#endif
-        thread_ready(thread);
-
+        thread_attach(t, TASK);
+#endif
+        thread_ready(t);
+
         return 0;
     } else
         free(kernel_uarg);
 
     return (unative_t) ENOMEM;
 }
@@ -845 +853 @@
 {
     thread_exit();
-
     /* Unreachable */
     return 0;
@@ -856 +863 @@
  *
  * @return 0 on success or an error code from @ref errno.h.
- *
  */
 unative_t sys_thread_get_id(thread_id_t *uspace_thread_id)
@@ -863 +869 @@
      * No need to acquire lock on THREAD because tid
      * remains constant for the lifespan of the thread.
-     *
      */
     return (unative_t) copy_to_uspace(uspace_thread_id, &THREAD->tid,
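The same transformation repeats in almost every hunk above: one side of the diff folds interrupt masking into the lock operation itself (irq_spinlock_lock(..., true), irq_spinlock_unlock(..., true), and irq_spinlock_pass() where one lock is exchanged for another), while the other side spells the steps out as interrupts_disable(), spinlock_lock(), spinlock_unlock(), interrupts_restore(ipl). The sketch below is not HelenOS code; it is a minimal stand-alone C model written only to illustrate how the combined primitive corresponds to the explicit sequence. The irq_spinlock_t layout, the saved-ipl field, and the atomic_flag-based lock are assumptions made for the illustration.

/* Minimal user-space model of the two locking idioms seen in the diff.
 * All types and functions here are illustrative stand-ins, not HelenOS APIs. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef bool ipl_t;                    /* stand-in for the saved interrupt level */
static bool interrupts_on = true;      /* models the CPU interrupt-enable flag */

static ipl_t interrupts_disable(void) { ipl_t old = interrupts_on; interrupts_on = false; return old; }
static void  interrupts_restore(ipl_t ipl) { interrupts_on = ipl; }

typedef struct { atomic_flag flag; } spinlock_t;
static void spinlock_lock(spinlock_t *l)   { while (atomic_flag_test_and_set(&l->flag)) /* spin */; }
static void spinlock_unlock(spinlock_t *l) { atomic_flag_clear(&l->flag); }

/* The combined primitive: interrupt handling folded into the lock object. */
typedef struct { spinlock_t lock; ipl_t ipl; } irq_spinlock_t;

static void irq_spinlock_lock(irq_spinlock_t *l, bool irq_dis)
{
    if (irq_dis)
        l->ipl = interrupts_disable();  /* remember the previous level in the lock */
    spinlock_lock(&l->lock);
}

static void irq_spinlock_unlock(irq_spinlock_t *l, bool irq_res)
{
    spinlock_unlock(&l->lock);
    if (irq_res)
        interrupts_restore(l->ipl);
}

/* Hand-over-hand transfer: drop the first lock but keep interrupts masked,
 * then take the second lock, which inherits the saved level. */
static void irq_spinlock_pass(irq_spinlock_t *from, irq_spinlock_t *to)
{
    to->ipl = from->ipl;
    spinlock_unlock(&from->lock);
    spinlock_lock(&to->lock);
}

int main(void)
{
    /* Idiom A: explicit sequence, as on the spinlock_t side of the diff. */
    spinlock_t plain = { ATOMIC_FLAG_INIT };
    ipl_t ipl = interrupts_disable();
    spinlock_lock(&plain);
    /* ... critical section ... */
    spinlock_unlock(&plain);
    interrupts_restore(ipl);
    printf("explicit idiom done, interrupts on: %d\n", interrupts_on);

    /* Idiom B: the combined primitive, as on the irq_spinlock_t side. */
    irq_spinlock_t a = { { ATOMIC_FLAG_INIT }, true };
    irq_spinlock_t b = { { ATOMIC_FLAG_INIT }, true };
    irq_spinlock_lock(&a, true);
    irq_spinlock_pass(&a, &b);          /* e.g. THREAD->lock -> TASK->lock */
    irq_spinlock_unlock(&b, true);
    printf("combined idiom done, interrupts on: %d\n", interrupts_on);
    return 0;
}

Under this model, the line pairs the diff swaps (for example irq_spinlock_pass(&THREAD->lock, &TASK->lock) versus an explicit unlock of THREAD->lock followed by locking TASK->lock) perform the same lock hand-off; they differ mainly in where the saved interrupt level is kept.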