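/**
 * @file	thread.c
 * @brief	Thread management functions.
 */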
#include <proc/scheduler.h>
#include <proc/thread.h>
#include <proc/task.h>
#include <proc/uarg.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <arch/asm.h>
#include <arch.h>
#include <synch/synch.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <synch/rwlock.h>
#include <cpu.h>
#include <func.h>
#include <context.h>
#include <adt/btree.h>
#include <adt/list.h>
#include <typedefs.h>
#include <time/clock.h>
#include <config.h>
#include <arch/interrupt.h>
#include <smp/ipi.h>
#include <arch/faddr.h>
#include <atomic.h>
#include <memstr.h>
#include <print.h>
#include <mm/slab.h>
#include <debug.h>
#include <main/uinit.h>
#include <syscall/copy.h>
#include <errno.h>

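/** Textual representation of thread states. */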
char *thread_states[] = {
	"Invalid",
	"Running",
	"Sleeping",
	"Ready",
	"Entering",
	"Exiting",
	"Undead"
};

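/** Lock protecting the threads_btree B+tree. */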
SPINLOCK_INITIALIZE(threads_lock);

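/** B+tree of all threads.
 *
 * Every thread in the system is registered here, keyed by the address of
 * its thread_t structure. A thread found in this tree is guaranteed to
 * exist as long as threads_lock is held.
 */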
btree_t threads_btree;

SPINLOCK_INITIALIZE(tidlock);
__u32 last_tid = 0;

static slab_cache_t *thread_slab;
#ifdef ARCH_HAS_FPU
slab_cache_t *fpu_context_slab;
#endif

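/** Thread wrapper.
 *
 * This wrapper is provided to ensure that every thread makes a call to
 * thread_exit() when its implementing function returns.
 *
 * THREAD->lock is assumed to be held and interrupts are assumed to be
 * disabled on entry.
 */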
static void cushion(void)
{
	void (*f)(void *) = THREAD->thread_code;
	void *arg = THREAD->thread_arg;

	/* This is where each thread wakes up after its creation. */
	spinlock_unlock(&THREAD->lock);
	interrupts_enable();

	f(arg);
	thread_exit();
	/* not reached */
}

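/** Initialization and allocation for thread_t structure. */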
static int thr_constructor(void *obj, int kmflags)
{
	thread_t *t = (thread_t *) obj;
	pfn_t pfn;
	int status;

	spinlock_initialize(&t->lock, "thread_t_lock");
	link_initialize(&t->rq_link);
	link_initialize(&t->wq_link);
	link_initialize(&t->th_link);

#ifdef ARCH_HAS_FPU
# ifdef CONFIG_FPU_LAZY
	t->saved_fpu_context = NULL;
# else
	t->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
	if (!t->saved_fpu_context)
		return -1;
# endif
#endif

	pfn = frame_alloc_rc(STACK_FRAMES, FRAME_KA | kmflags, &status);
	if (status) {
#ifdef ARCH_HAS_FPU
		if (t->saved_fpu_context)
			slab_free(fpu_context_slab, t->saved_fpu_context);
#endif
		return -1;
	}
	t->kstack = (__u8 *) PA2KA(PFN2ADDR(pfn));

	return 0;
}

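/** Destruction of thread_t object. */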
static int thr_destructor(void *obj)
{
	thread_t *t = (thread_t *) obj;

	frame_free(ADDR2PFN(KA2PA(t->kstack)));
#ifdef ARCH_HAS_FPU
	if (t->saved_fpu_context)
		slab_free(fpu_context_slab, t->saved_fpu_context);
#endif
	return 1;
}

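/** Initialize threads.
 *
 * Initialize kernel threads support: the thread slab cache, the optional
 * FPU context slab cache and the B+tree of all threads.
 */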
void thread_init(void)
{
	THREAD = NULL;
	atomic_set(&nrdy, 0);
	thread_slab = slab_cache_create("thread_slab", sizeof(thread_t), 0,
		thr_constructor, thr_destructor, 0);
#ifdef ARCH_HAS_FPU
	fpu_context_slab = slab_cache_create("fpu_slab", sizeof(fpu_context_t),
		FPU_CONTEXT_ALIGN, NULL, NULL, 0);
#endif

	btree_create(&threads_btree);
}

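/** Make thread ready.
 *
 * Switch thread t to the Ready state and put it on the run queue of its
 * assigned (or the current) CPU.
 *
 * @param t Thread to make ready.
 */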
void thread_ready(thread_t *t)
{
	cpu_t *cpu;
	runq_t *r;
	ipl_t ipl;
	int i, avg;

	ipl = interrupts_disable();

	spinlock_lock(&t->lock);

	ASSERT(!(t->state == Ready));

	i = (t->priority < RQ_COUNT - 1) ? ++t->priority : t->priority;

	cpu = CPU;
	if (t->flags & X_WIRED) {
		cpu = t->cpu;
	}
	t->state = Ready;
	spinlock_unlock(&t->lock);

	/*
	 * Append t to the respective ready queue on the respective processor.
	 */
	r = &cpu->rq[i];
	spinlock_lock(&r->lock);
	list_append(&t->rq_link, &r->rq_head);
	r->n++;
	spinlock_unlock(&r->lock);

	atomic_inc(&nrdy);
	avg = atomic_get(&nrdy) / config.cpu_active;
	atomic_inc(&cpu->nrdy);

	interrupts_restore(ipl);
}

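/** Destroy thread memory structure.
 *
 * Detach thread from all queues, CPUs etc. and destroy it.
 *
 * Assume thread->lock is held!
 */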
void thread_destroy(thread_t *t)
{
	bool destroy_task = false;

	ASSERT(t->state == Exiting || t->state == Undead);
	ASSERT(t->task);
	ASSERT(t->cpu);

	spinlock_lock(&t->cpu->lock);
	if (t->cpu->fpu_owner == t)
		t->cpu->fpu_owner = NULL;
	spinlock_unlock(&t->cpu->lock);

	spinlock_unlock(&t->lock);

	spinlock_lock(&threads_lock);
	btree_remove(&threads_btree, (btree_key_t) ((__address) t), NULL);
	spinlock_unlock(&threads_lock);

	/*
	 * Detach from the containing task.
	 */
	spinlock_lock(&t->task->lock);
	list_remove(&t->th_link);
	if (--t->task->refcount == 0) {
		t->task->accept_new_threads = false;
		destroy_task = true;
	}
	spinlock_unlock(&t->task->lock);

	if (destroy_task)
		task_destroy(t->task);

	slab_free(thread_slab, t);
}

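/** Create new thread.
 *
 * Create a new thread running func(arg) in task task. The new thread is
 * created in the Entering state; it starts executing only after a
 * subsequent call to thread_ready().
 *
 * @param func  Thread's implementing function.
 * @param arg   Thread's implementing function argument.
 * @param task  Task to which the thread belongs.
 * @param flags Thread flags.
 * @param name  Symbolic name.
 *
 * @return New thread's structure on success, NULL on failure.
 *
 * Typical usage, following the pattern of sys_thread_create() below:
 *
 * @code
 *	thread_t *t = thread_create(uinit, kernel_uarg, TASK, 0, "name");
 *	if (t)
 *		thread_ready(t);
 * @endcode
 */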
thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, int flags, char *name)
{
	thread_t *t;
	ipl_t ipl;

	t = (thread_t *) slab_alloc(thread_slab, 0);
	if (!t)
		return NULL;

	thread_create_arch(t);

	/* Not needed, but good for debugging. */
	memsetb((__address) t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);

	ipl = interrupts_disable();
	spinlock_lock(&tidlock);
	t->tid = ++last_tid;
	spinlock_unlock(&tidlock);
	interrupts_restore(ipl);

	context_save(&t->saved_context);
	context_set(&t->saved_context, FADDR(cushion), (__address) t->kstack, THREAD_STACK_SIZE);

	the_initialize((the_t *) t->kstack);

	ipl = interrupts_disable();
	t->saved_context.ipl = interrupts_read();
	interrupts_restore(ipl);

	memcpy(t->name, name, THREAD_NAME_BUFLEN);

	t->thread_code = func;
	t->thread_arg = arg;
	t->ticks = -1;
	t->priority = -1;
	t->cpu = NULL;
	t->flags = 0;
	t->state = Entering;
	t->call_me = NULL;
	t->call_me_with = NULL;

	timeout_initialize(&t->sleep_timeout);
	t->sleep_interruptible = false;
	t->sleep_queue = NULL;
	t->timeout_pending = 0;

	t->in_copy_from_uspace = false;
	t->in_copy_to_uspace = false;

	t->interrupted = false;
	t->join_type = None;
	t->detached = false;
	waitq_initialize(&t->join_wq);

	t->rwlock_holder_type = RWLOCK_NONE;

	t->task = task;

	t->fpu_context_exists = 0;
	t->fpu_context_engaged = 0;

	/*
	 * Attach to the containing task.
	 */
	ipl = interrupts_disable();
	spinlock_lock(&task->lock);
	if (!task->accept_new_threads) {
		spinlock_unlock(&task->lock);
		slab_free(thread_slab, t);
		interrupts_restore(ipl);
		return NULL;
	}
	list_append(&t->th_link, &task->th_head);
	if (task->refcount++ == 0)
		task->main_thread = t;
	spinlock_unlock(&task->lock);

	/*
	 * Register this thread in the system-wide list.
	 */
	spinlock_lock(&threads_lock);
	btree_insert(&threads_btree, (btree_key_t) ((__address) t), (void *) t, NULL);
	spinlock_unlock(&threads_lock);

	interrupts_restore(ipl);

	return t;
}

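/** Terminate thread.
 *
 * End the current thread's execution, switch it to the Exiting state and
 * let the scheduler reclaim it. Busy-waits until all pending timeouts have
 * fired. Never returns.
 */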
void thread_exit(void)
{
	ipl_t ipl;

restart:
	ipl = interrupts_disable();
	spinlock_lock(&THREAD->lock);
	if (THREAD->timeout_pending) {	/* busy waiting for timeouts in progress */
		spinlock_unlock(&THREAD->lock);
		interrupts_restore(ipl);
		goto restart;
	}
	THREAD->state = Exiting;
	spinlock_unlock(&THREAD->lock);
	scheduler();

	/* Not reached */
	while (1)
		;
}

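/** Thread sleep.
 *
 * Suspend execution of the current thread for the given number of seconds.
 *
 * @param sec Number of seconds to sleep.
 */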
void thread_sleep(__u32 sec)
{
	thread_usleep(sec * 1000000);
}

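/** Wait for another thread to exit.
 *
 * @param t     Thread to join on exit.
 * @param usec  Timeout in microseconds.
 * @param flags Mode of operation.
 *
 * @return An error code from errno.h or an error code from synch.h.
 */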
int thread_join_timeout(thread_t *t, __u32 usec, int flags)
{
	ipl_t ipl;
	int rc;

	if (t == THREAD)
		return EINVAL;

	/*
	 * Since thread join can only be called once on an undetached thread,
	 * the thread pointer is guaranteed to be still valid.
	 */
	ipl = interrupts_disable();
	spinlock_lock(&t->lock);
	ASSERT(!t->detached);
	spinlock_unlock(&t->lock);
	interrupts_restore(ipl);

	rc = waitq_sleep_timeout(&t->join_wq, usec, flags);

	return rc;
}

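/** Detach thread.
 *
 * Mark the thread as detached; if the thread is already in the Undead
 * state, deallocate its resources.
 *
 * @param t Thread to be detached.
 */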
void thread_detach(thread_t *t)
{
	ipl_t ipl;

	/*
	 * Since the thread is expected not to be already detached,
	 * the pointer to it must be still valid.
	 */
	ipl = interrupts_disable();
	spinlock_lock(&t->lock);
	ASSERT(!t->detached);
	if (t->state == Undead) {
		thread_destroy(t);	/* releases t->lock and frees t */
		interrupts_restore(ipl);
		return;
	} else {
		t->detached = true;
	}
	spinlock_unlock(&t->lock);
	interrupts_restore(ipl);
}

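/** Thread usleep.
 *
 * Suspend execution of the current thread for the given number of
 * microseconds.
 *
 * @param usec Number of microseconds to sleep.
 */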
void thread_usleep(__u32 usec)
{
	waitq_t wq;

	waitq_initialize(&wq);

	(void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING);
}

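/** Register thread out-of-context invocation.
 *
 * Register a function and its argument to be executed
 * on next context switch to the current thread.
 *
 * @param call_me      Out-of-context function.
 * @param call_me_with Out-of-context function argument.
 */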
void thread_register_call_me(void (* call_me)(void *), void *call_me_with)
{
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&THREAD->lock);
	THREAD->call_me = call_me;
	THREAD->call_me_with = call_me_with;
	spinlock_unlock(&THREAD->lock);
	interrupts_restore(ipl);
}

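/** Print list of all threads and their debugging information. */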
void thread_print_list(void)
{
	link_t *cur;
	ipl_t ipl;

	/* Messing with thread structures, avoid deadlock */
	ipl = interrupts_disable();
	spinlock_lock(&threads_lock);

	for (cur = threads_btree.leaf_head.next; cur != &threads_btree.leaf_head; cur = cur->next) {
		btree_node_t *node;
		int i;

		node = list_get_instance(cur, btree_node_t, leaf_link);
		for (i = 0; i < node->keys; i++) {
			thread_t *t;

			t = (thread_t *) node->value[i];
			printf("%s: address=%#zx, tid=%zd, state=%s, task=%#zx, code=%#zx, stack=%#zx, cpu=",
				t->name, t, t->tid, thread_states[t->state], t->task, t->thread_code, t->kstack);
			if (t->cpu)
				printf("cpu%zd", t->cpu->id);
			else
				printf("none");
			if (t->state == Sleeping) {
				printf(", kst=%#zx", t->kstack);
				printf(", wq=%#zx", t->sleep_queue);
			}
			printf("\n");
		}
	}

	spinlock_unlock(&threads_lock);
	interrupts_restore(ipl);
}

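/** Check whether thread exists.
 *
 * Note that threads_lock must already be held and
 * interrupts must be already disabled.
 *
 * @param t Pointer to thread.
 *
 * @return True if thread t is known to the system, false otherwise.
 */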
bool thread_exists(thread_t *t)
{
	btree_node_t *leaf;

	return btree_search(&threads_btree, (btree_key_t) ((__address) t), &leaf) != NULL;
}

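/** Process syscall to create new thread. */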
__native sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name)
{
	thread_t *t;
	char namebuf[THREAD_NAME_BUFLEN];
	uspace_arg_t *kernel_uarg;
	__u32 tid;
	int rc;

	rc = copy_from_uspace(namebuf, uspace_name, THREAD_NAME_BUFLEN);
	if (rc != 0)
		return (__native) rc;

	kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
	rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
	if (rc != 0) {
		free(kernel_uarg);
		return (__native) rc;
	}

	if ((t = thread_create(uinit, kernel_uarg, TASK, 0, namebuf))) {
		tid = t->tid;
		thread_ready(t);
		return (__native) tid;
	} else {
		free(kernel_uarg);
	}

	return (__native) ENOMEM;
}

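/** Process syscall to terminate thread. */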
__native sys_thread_exit(int uspace_status)
{
	thread_exit();
	/* not reached */
	return 0;
}