Changeset a35b458 in mainline for kernel/generic/src/proc/thread.c
- Timestamp: 2018-03-02T20:10:49Z (7 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: f1380b7
- Parents: 3061bc1
- git-author: Jiří Zárevúcky <zarevucky.jiri@…> (2018-02-28 17:38:31)
- git-committer: Jiří Zárevúcky <zarevucky.jiri@…> (2018-03-02 20:10:49)
- File: 1 edited
kernel/generic/src/proc/thread.c
r3061bc1 ra35b458
122 122  void *arg = THREAD->thread_arg;
123 123  THREAD->last_cycle = get_cycle();
124 124
125 125  /* This is where each thread wakes up after its creation */
126 126  irq_spinlock_unlock(&THREAD->lock, false);
127 127  interrupts_enable();
128 128
129 129  f(arg);
130 130
131 131  /* Accumulate accounting to the task */
132 132  irq_spinlock_lock(&THREAD->lock, true);
… …
137 137  uint64_t kcycles = THREAD->kcycles;
138 138  THREAD->kcycles = 0;
139 139
140 140  irq_spinlock_pass(&THREAD->lock, &TASK->lock);
141 141  TASK->ucycles += ucycles;
… …
144 144  } else
145 145  irq_spinlock_unlock(&THREAD->lock, true);
146 146
147 147  thread_exit();
148 148
149 149  /* Not reached */
150 150  }
… …
156 156  {
157 157  thread_t *thread = (thread_t *) obj;
158 158
159 159  irq_spinlock_initialize(&thread->lock, "thread_t_lock");
160 160  link_initialize(&thread->rq_link);
161 161  link_initialize(&thread->wq_link);
162 162  link_initialize(&thread->th_link);
163 163
164 164  /* call the architecture-specific part of the constructor */
165 165  thr_constructor_arch(thread);
166 166
167 167  #ifdef CONFIG_FPU
168 168  #ifdef CONFIG_FPU_LAZY
… …
174 174  #endif /* CONFIG_FPU_LAZY */
175 175  #endif /* CONFIG_FPU */
176 176
177 177  /*
178 178   * Allocate the kernel stack from the low-memory to prevent an infinite
… …
193 193  kmflags |= FRAME_LOWMEM;
194 194  kmflags &= ~FRAME_HIGHMEM;
195 195
196 196  uintptr_t stack_phys =
197 197      frame_alloc(STACK_FRAMES, kmflags, STACK_SIZE - 1);
… …
203 203  return ENOMEM;
204 204  }
205 205
206 206  thread->kstack = (uint8_t *) PA2KA(stack_phys);
207 207
208 208  #ifdef CONFIG_UDEBUG
209 209  mutex_initialize(&thread->udebug.lock, MUTEX_PASSIVE);
210 210  #endif
211 211
212 212  return EOK;
213 213  }
… …
217 217  {
218 218  thread_t *thread = (thread_t *) obj;
219 219
220 220  /* call the architecture-specific part of the destructor */
221 221  thr_destructor_arch(thread);
222 222
223 223  frame_free(KA2PA(thread->kstack), STACK_FRAMES);
224 224
225 225  #ifdef CONFIG_FPU
226 226  if (thread->saved_fpu_context)
227 227  slab_free(fpu_context_cache, thread->saved_fpu_context);
228 228  #endif
229 229
230 230  return STACK_FRAMES; /* number of frames freed */
231 231  }
… …
239 239  {
240 240  THREAD = NULL;
241 241
242 242  atomic_set(&nrdy, 0);
243 243  thread_cache = slab_cache_create("thread_t", sizeof(thread_t), 0,
244 244      thr_constructor, thr_destructor, 0);
245 245
246 246  #ifdef CONFIG_FPU
247 247  fpu_context_cache = slab_cache_create("fpu_context_t",
248 248      sizeof(fpu_context_t), FPU_CONTEXT_ALIGN, NULL, NULL, 0);
249 249  #endif
250 250
251 251  avltree_create(&threads_tree);
252 252  }
… …
282 282  {
283 283  irq_spinlock_lock(&thread->lock, true);
284 284
285 285  assert(thread->state != Ready);
286 286
287 287  before_thread_is_ready(thread);
288 288
289 289  int i = (thread->priority < RQ_COUNT - 1) ?
290 290      ++thread->priority : thread->priority;
… …
305 305  cpu = CPU;
306 306  }
307 307
308 308  thread->state = Ready;
309 309
310 310  irq_spinlock_pass(&thread->lock, &(cpu->rq[i].lock));
311 311
312 312  /*
313 313   * Append thread to respective ready queue
314 314   * on respective processor.
315 315   */
316 316
317 317  list_append(&thread->rq_link, &cpu->rq[i].rq);
318 318  cpu->rq[i].n++;
319 319  irq_spinlock_unlock(&(cpu->rq[i].lock), true);
320 320
321 321  atomic_inc(&nrdy);
322 322  atomic_inc(&cpu->nrdy);
… …
344 344  if (!thread)
345 345  return NULL;
346 346
347 347  /* Not needed, but good for debugging */
348 348  memsetb(thread->kstack, STACK_SIZE, 0);
349 349
350 350  irq_spinlock_lock(&tidlock, true);
351 351  thread->tid = ++last_tid;
352 352  irq_spinlock_unlock(&tidlock, true);
353 353
354 354  context_save(&thread->saved_context);
355 355  context_set(&thread->saved_context, FADDR(cushion),
356 356      (uintptr_t) thread->kstack, STACK_SIZE);
357 357
358 358  the_initialize((the_t *) thread->kstack);
359 359
360 360  ipl_t ipl = interrupts_disable();
361 361  thread->saved_context.ipl = interrupts_read();
362 362  interrupts_restore(ipl);
363 363
364 364  str_cpy(thread->name, THREAD_NAME_BUFLEN, name);
365 365
366 366  thread->thread_code = func;
367 367  thread->thread_arg = arg;
… …
377 377  thread->uspace =
378 378      ((flags & THREAD_FLAG_USPACE) == THREAD_FLAG_USPACE);
379 379
380 380  thread->nomigrate = 0;
381 381  thread->state = Entering;
382 382
383 383  timeout_initialize(&thread->sleep_timeout);
384 384  thread->sleep_interruptible = false;
385 385  thread->sleep_queue = NULL;
386 386  thread->timeout_pending = false;
387 387
388 388  thread->in_copy_from_uspace = false;
389 389  thread->in_copy_to_uspace = false;
390 390
391 391  thread->interrupted = false;
392 392  thread->detached = false;
393 393  waitq_initialize(&thread->join_wq);
394 394
395 395  thread->task = task;
396 396
397 397  thread->workq = NULL;
398 398
399 399  thread->fpu_context_exists = false;
400 400  thread->fpu_context_engaged = false;
401 401
402 402  avltree_node_initialize(&thread->threads_tree_node);
403 403  thread->threads_tree_node.key = (uintptr_t) thread;
404 404
405 405  #ifdef CONFIG_UDEBUG
406 406  /* Initialize debugging stuff */
… …
408 408  udebug_thread_initialize(&thread->udebug);
409 409  #endif
410 410
411 411  /* Might depend on previous initialization */
412 412  thread_create_arch(thread);
413 413
414 414  rcu_thread_init(thread);
415 415
416 416  if ((flags & THREAD_FLAG_NOATTACH) != THREAD_FLAG_NOATTACH)
417 417  thread_attach(thread, task);
418 418
419 419  return thread;
420 420  }
… …
435 435  assert(thread->task);
436 436  assert(thread->cpu);
437 437
438 438  irq_spinlock_lock(&thread->cpu->lock, false);
439 439  if (thread->cpu->fpu_owner == thread)
440 440  thread->cpu->fpu_owner = NULL;
441 441  irq_spinlock_unlock(&thread->cpu->lock, false);
442 442
443 443  irq_spinlock_pass(&thread->lock, &threads_lock);
444 444
445 445  avltree_delete(&threads_tree, &thread->threads_tree_node);
446 446
447 447  irq_spinlock_pass(&threads_lock, &thread->task->lock);
448 448
449 449  /*
450 450   * Detach from the containing task.
… …
452 452  list_remove(&thread->th_link);
453 453  irq_spinlock_unlock(&thread->task->lock, irq_res);
454 454
455 455  /*
456 456   * Drop the reference to the containing task.
… …
475 475   */
476 476  irq_spinlock_lock(&task->lock, true);
477 477
478 478  /* Hold a reference to the task. */
479 479  task_hold(task);
480 480
481 481  /* Must not count kbox thread into lifecount */
482 482  if (thread->uspace)
483 483  atomic_inc(&task->lifecount);
484 484
485 485  list_append(&thread->th_link, &task->threads);
486 486
487 487  irq_spinlock_pass(&task->lock, &threads_lock);
488 488
489 489  /*
490 490   * Register this thread in the system-wide list.
… …
506 506  /* Generate udebug THREAD_E event */
507 507  udebug_thread_e_event();
508 508
509 509  /*
510 510   * This thread will not execute any code or system calls from
… …
527 527  }
528 528  }
529 529
530 530  restart:
531 531  irq_spinlock_lock(&THREAD->lock, true);
… …
535 535  goto restart;
536 536  }
537 537
538 538  THREAD->state = Exiting;
539 539  irq_spinlock_unlock(&THREAD->lock, true);
540 540
541 541  scheduler();
542 542
543 543  /* Not reached */
544 544  while (true);
… …
562 562  {
563 563  assert(thread != NULL);
564 564
565 565  irq_spinlock_lock(&thread->lock, true);
566 566
567 567  thread->interrupted = true;
568 568  bool sleeping = (thread->state == Sleeping);
569 569
570 570  irq_spinlock_unlock(&thread->lock, true);
571 571
572 572  if (sleeping)
573 573  waitq_interrupt_sleep(thread);
… …
583 583  {
584 584  assert(thread != NULL);
585 585
586 586  bool interrupted;
587 587
588 588  irq_spinlock_lock(&thread->lock, true);
589 589  interrupted = thread->interrupted;
590 590  irq_spinlock_unlock(&thread->lock, true);
591 591
592 592  return interrupted;
593 593  }
… …
597 597  {
598 598  assert(THREAD);
599 599
600 600  THREAD->nomigrate++;
601 601  }
… …
606 606  assert(THREAD);
607 607  assert(THREAD->nomigrate > 0);
608 608
609 609  if (THREAD->nomigrate > 0)
610 610  THREAD->nomigrate--;
… …
624 624  while (sec > 0) {
625 625  uint32_t period = (sec > 1000) ? 1000 : sec;
626 626
627 627  thread_usleep(period * 1000000);
628 628  sec -= period;
… …
643 643  if (thread == THREAD)
644 644  return EINVAL;
645 645
646 646  /*
647 647   * Since thread join can only be called once on an undetached thread,
648 648   * the thread pointer is guaranteed to be still valid.
649 649   */
650 650
651 651  irq_spinlock_lock(&thread->lock, true);
652 652  assert(!thread->detached);
653 653  irq_spinlock_unlock(&thread->lock, true);
654 654
655 655  return waitq_sleep_timeout(&thread->join_wq, usec, flags, NULL);
656 656  }
… …
672 672  irq_spinlock_lock(&thread->lock, true);
673 673  assert(!thread->detached);
674 674
675 675  if (thread->state == Lingering) {
676 676  /*
… …
683 683  thread->detached = true;
684 684  }
685 685
686 686  irq_spinlock_unlock(&thread->lock, true);
687 687  }
… …
697 697  {
698 698  waitq_t wq;
699 699
700 700  waitq_initialize(&wq);
701 701
702 702  (void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING, NULL);
703 703  }
… …
707 707  bool *additional = (bool *) arg;
708 708  thread_t *thread = avltree_get_instance(node, thread_t, threads_tree_node);
709 709
710 710  uint64_t ucycles, kcycles;
711 711  char usuffix, ksuffix;
712 712  order_suffix(thread->ucycles, &ucycles, &usuffix);
713 713  order_suffix(thread->kcycles, &kcycles, &ksuffix);
714 714
715 715  char *name;
716 716  if (str_cmp(thread->name, "uinit") == 0)
… …
718 718  else
719 719  name = thread->name;
720 720
721 721  #ifdef __32_BITS__
722 722  if (*additional)
… …
729 729      thread->task, thread->task->container);
730 730  #endif
731 731
732 732  #ifdef __64_BITS__
733 733  if (*additional)
… …
741 741      thread->task, thread->task->container);
742 742  #endif
743 743
744 744  if (*additional) {
745 745  if (thread->cpu)
… …
747 747  else
748 748  printf("none ");
749 749
750 750  if (thread->state == Sleeping) {
751 751  #ifdef __32_BITS__
752 752  printf(" %10p", thread->sleep_queue);
753 753  #endif
754 754
755 755  #ifdef __64_BITS__
756 756  printf(" %18p", thread->sleep_queue);
757 757  #endif
758 758  }
759 759
760 760  printf("\n");
761 761  }
762 762
763 763  return true;
764 764  }
… …
773 773  /* Messing with thread structures, avoid deadlock */
774 774  irq_spinlock_lock(&threads_lock, true);
775 775
776 776  #ifdef __32_BITS__
777 777  if (additional)
… …
782 782      " [ctn]\n");
783 783  #endif
784 784
785 785  #ifdef __64_BITS__
786 786  if (additional) {
… …
791 791      " [task ] [ctn]\n");
792 792  #endif
793 793
794 794  avltree_walk(&threads_tree, thread_walker, &additional);
795 795
796 796  irq_spinlock_unlock(&threads_lock, true);
797 797  }
… …
814 814  avltree_node_t *node =
815 815      avltree_search(&threads_tree, (avltree_key_t) ((uintptr_t) thread));
816 816
817 817  return node != NULL;
818 818  }
… …
832 832  assert(interrupts_disabled());
833 833  assert(irq_spinlock_locked(&THREAD->lock));
834 834
835 835  if (user)
836 836  THREAD->ucycles += time - THREAD->last_cycle;
837 837  else
838 838  THREAD->kcycles += time - THREAD->last_cycle;
839 839
840 840  THREAD->last_cycle = time;
841 841  }
… …
846 846      (thread_t *) avltree_get_instance(node, thread_t, threads_tree_node);
847 847  thread_iterator_t *iterator = (thread_iterator_t *) arg;
848 848
849 849  if (thread->tid == iterator->thread_id) {
850 850  iterator->thread = thread;
851 851  return false;
852 852  }
853 853
854 854  return true;
855 855  }
… …
869 869  assert(interrupts_disabled());
870 870  assert(irq_spinlock_locked(&threads_lock));
871 871
872 872  thread_iterator_t iterator;
873 873
874 874  iterator.thread_id = thread_id;
875 875  iterator.thread = NULL;
876 876
877 877  avltree_walk(&threads_tree, thread_search_walker, (void *) &iterator);
878 878
879 879  return iterator.thread;
880 880  }
… …
885 885  {
886 886  irq_spinlock_lock(&threads_lock, true);
887 887
888 888  thread_t *thread = thread_find_by_id(thread_id);
889 889  if (thread == NULL) {
… …
892 892  return;
893 893  }
894 894
895 895  irq_spinlock_lock(&thread->lock, false);
896 896
897 897  /*
898 898   * Schedule a stack trace to be printed
… …
906 906   * is probably justifiable.
907 907   */
908 908
909 909  bool sleeping = false;
910 910  istate_t *istate = thread->udebug.uspace_state;
… …
916 916  } else
917 917  printf("Thread interrupt state not available.\n");
918 918
919 919  irq_spinlock_unlock(&thread->lock, false);
920 920
921 921  if (sleeping)
922 922  waitq_interrupt_sleep(thread);
923 923
924 924  irq_spinlock_unlock(&threads_lock, true);
925 925  }
… …
935 935  if (name_len > THREAD_NAME_BUFLEN - 1)
936 936  name_len = THREAD_NAME_BUFLEN - 1;
937 937
938 938  char namebuf[THREAD_NAME_BUFLEN];
939 939  errno_t rc = copy_from_uspace(namebuf, uspace_name, name_len);
940 940  if (rc != EOK)
941 941  return (sys_errno_t) rc;
942 942
943 943  namebuf[name_len] = 0;
944 944
945 945  /*
946 946   * In case of failure, kernel_uarg will be deallocated in this function.
… …
949 949  uspace_arg_t *kernel_uarg =
950 950      (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
951 951
952 952  rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
953 953  if (rc != EOK) {
… …
955 955  return (sys_errno_t) rc;
956 956  }
957 957
958 958  thread_t *thread = thread_create(uinit, kernel_uarg, TASK,
959 959      THREAD_FLAG_USPACE | THREAD_FLAG_NOATTACH, namebuf);
… …
968 968   * creation now.
969 969   */
970 970
971 971  /*
972 972   * The new thread structure is initialized, but
… …
976 976  slab_free(thread_cache, thread);
977 977  free(kernel_uarg);
978 978
979 979  return (sys_errno_t) rc;
980 980  }
981 981  }
982 982
983 983  #ifdef CONFIG_UDEBUG
984 984  /*
… …
994 994  #endif
995 995  thread_ready(thread);
996 996
997 997  return 0;
998 998  } else
999 999  free(kernel_uarg);
1000 1000
1001 1001  return (sys_errno_t) ENOMEM;
1002 1002  }