Changeset 6f4495f5 in mainline

- Timestamp: 2007-01-27T17:32:13Z (18 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 1ba41c5
- Parents: 51baa8a
- Location: kernel
- Files: 14 edited
Legend:
- Lines prefixed with "-" are from the parent revision (r51baa8a); lines prefixed with "+" are from this changeset (r6f4495f5); other lines are unchanged context. "..." separates hunks within a file.
- Hunks ending with "(whitespace-only change)" differ from the parent only in indentation or line wrapping; only the resulting text is shown.
- Every hunk in this changeset is a coding-style cleanup: long lines wrapped to roughly 80 columns, spaces added around binary operators, comments reflowed, and a few assignments-in-conditions split into separate statements.
kernel/arch/amd64/src/mm/memory_init.c
 size_t get_memory_size(void)
 {
-    return e801memorysize *1024;
+    return e801memorysize * 1024;
 }
...
     uint8_t i;

-    for (i =0;i<e820counter;i++) {
+    for (i = 0; i < e820counter; i++) {
         printf("E820 base: %#llx size: %#llx type: ", e820table[i].base_address, e820table[i].size);
         switch (e820table[i].type) {
         case MEMMAP_MEMORY_AVAILABLE:
             printf("available memory\n");
             break;
         case MEMMAP_MEMORY_RESERVED:
             printf("reserved memory\n");
             break;
         case MEMMAP_MEMORY_ACPI:
             printf("ACPI table\n");
             break;
         case MEMMAP_MEMORY_NVS:
             printf("NVS\n");
             break;
         case MEMMAP_MEMORY_UNUSABLE:
             printf("unusable memory\n");
             break;
         default:
             printf("undefined memory type\n");
         }
     }
     (switch body re-indented; whitespace-only change)
kernel/arch/ia32/src/mm/frame.c
         size = SIZE2FRAMES(ALIGN_DOWN(e820table[i].size,
             FRAME_SIZE));
-        if (minconf < start || minconf >= start +size)
+        if (minconf < start || minconf >= start + size)
             conf = start;
         else
kernel/arch/ia32/src/mm/memory_init.c
 size_t get_memory_size(void)
 {
-    return e801memorysize *1024;
+    return e801memorysize * 1024;
 }
...
     uint8_t i;

-    for (i =0;i<e820counter;i++) {
+    for (i = 0; i < e820counter; i++) {
         printf("E820 base: %#.16llx size: %#.16llx type: ", e820table[i].base_address, e820table[i].size);
         switch (e820table[i].type) {
         case MEMMAP_MEMORY_AVAILABLE:
             printf("available memory\n");
             break;
         case MEMMAP_MEMORY_RESERVED:
             printf("reserved memory\n");
             break;
         case MEMMAP_MEMORY_ACPI:
             printf("ACPI table\n");
             break;
         case MEMMAP_MEMORY_NVS:
             printf("NVS\n");
             break;
         case MEMMAP_MEMORY_UNUSABLE:
             printf("unusable memory\n");
             break;
         default:
             printf("undefined memory type\n");
         }
     }
     (switch body re-indented; whitespace-only change)
kernel/generic/src/main/kinit.c
      * Just a beautification.
      */
-    if ((t = thread_create(kmp, NULL, TASK, THREAD_FLAG_WIRED, "kmp", true))) {
+    if ((t = thread_create(kmp, NULL, TASK, THREAD_FLAG_WIRED,
+        "kmp", true))) {
         spinlock_lock(&t->lock);
         t->cpu = &cpus[0];
...
     for (i = 0; i < config.cpu_count; i++) {

-        if ((t = thread_create(kcpulb, NULL, TASK, THREAD_FLAG_WIRED, "kcpulb", true))) {
+        if ((t = thread_create(kcpulb, NULL, TASK,
+            THREAD_FLAG_WIRED, "kcpulb", true))) {
             spinlock_lock(&t->lock);
             t->cpu = &cpus[i];
...
      * Create kernel console.
      */
-    if ((t = thread_create(kconsole, "kconsole", TASK, 0, "kconsole", false)))
+    t = thread_create(kconsole, "kconsole", TASK, 0, "kconsole", false);
+    if (t)
         thread_ready(t);
     else
...
     }

-    task_t *utask = task_run_program((void *) init.tasks[i].addr, "uspace");
+    task_t *utask = task_run_program((void *) init.tasks[i].addr,
+        "uspace");
     if (utask) {
         /*
          * Set capabilities to init userspace tasks.
          */
-        cap_set(utask, CAP_CAP | CAP_MEM_MANAGER | CAP_IO_MANAGER | CAP_PREEMPT_CONTROL | CAP_IRQ_REG);
+        cap_set(utask, CAP_CAP | CAP_MEM_MANAGER |
+            CAP_IO_MANAGER | CAP_PREEMPT_CONTROL | CAP_IRQ_REG);

         if (!ipc_phone_0)
             ipc_phone_0 = &utask->answerbox;
     } else {
-        int rd = init_rd((rd_header *) init.tasks[i].addr, init.tasks[i].size);
+        int rd = init_rd((rd_header *) init.tasks[i].addr,
+            init.tasks[i].size);

         if (rd != RE_OK)
kernel/generic/src/main/main.c
     config.kernel_size = ALIGN_UP(hardcoded_ktext_size +
         hardcoded_kdata_size, PAGE_SIZE);
     config.stack_size = CONFIG_STACK_SIZE;
     (whitespace-only change)
...
     for (i = 0; i < init.cnt; i++) {
         if (PA_overlaps(config.stack_base, config.stack_size,
             init.tasks[i].addr, init.tasks[i].size))
             config.stack_base = ALIGN_UP(init.tasks[i].addr +
                 init.tasks[i].size, config.stack_size);
     }
     (whitespace-only change)
...
     if (ballocs.size) {
         if (PA_overlaps(config.stack_base, config.stack_size,
             ballocs.base, ballocs.size))
             config.stack_base = ALIGN_UP(ballocs.base +
                 ballocs.size, PAGE_SIZE);
     }
     (whitespace-only change)
...
     context_save(&ctx);
     context_set(&ctx, FADDR(main_bsp_separated_stack), config.stack_base,
         THREAD_STACK_SIZE);
     context_restore(&ctx);
     /* not reached */
     (whitespace-only change)
...
     version_print();
     printf("kernel: %.*p hardcoded_ktext_size=%zdK, "
-        "hardcoded_kdata_size=%zdK\n", sizeof(uintptr_t) * 2,
-        config.base, hardcoded_ktext_size >> 10, hardcoded_kdata_size >>
-        10);
+        "hardcoded_kdata_size=%zdK\n", sizeof(uintptr_t) * 2,
+        config.base, hardcoded_ktext_size >> 10,
+        hardcoded_kdata_size >> 10);
     printf("stack: %.*p size=%zdK\n", sizeof(uintptr_t) * 2,
         config.stack_base, config.stack_size >> 10);

     arch_pre_smp_init();
...
     for (i = 0; i < init.cnt; i++)
         printf("init[%zd].addr=%.*p, init[%zd].size=%zd\n", i,
             sizeof(uintptr_t) * 2, init.tasks[i].addr, i,
             init.tasks[i].size);
     } else
         printf("No init binaries found\n");
     (whitespace-only change)
...
      */
     context_set(&CPU->saved_context, FADDR(main_ap_separated_stack),
         (uintptr_t) CPU->stack, CPU_STACK_SIZE);
     context_restore(&CPU->saved_context);
     /* not reached */
     (whitespace-only change)
kernel/generic/src/main/uinit.c
 /** Thread used to bring up userspace thread.
  *
- * @param arg Pointer to structure containing userspace entry and stack addresses.
+ * @param arg Pointer to structure containing userspace entry and stack
+ *     addresses.
  */
 void uinit(void *arg)
kernel/generic/src/mm/as.c
 static slab_cache_t *as_slab;

-/** This lock protects inactive_as_with_asid_head list. It must be acquired before as_t mutex. */
+/**
+ * This lock protects inactive_as_with_asid_head list. It must be acquired
+ * before as_t mutex.
+ */
 SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);
...
 static int area_flags_to_page_flags(int aflags);
 static as_area_t *find_area_and_lock(as_t *as, uintptr_t va);
-static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area);
+static bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
+    as_area_t *avoid_area);
 static void sh_info_remove_reference(share_info_t *sh_info);
...
     as_slab = slab_cache_create("as_slab", sizeof(as_t), 0,
         as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);

     AS_KERNEL = as_create(FLAG_AS_KERNEL);
     (whitespace-only change)
...
 /** Destroy adress space.
  *
- * When there are no tasks referencing this address space (i.e. its refcount is zero),
- * the address space can be destroyed.
+ * When there are no tasks referencing this address space (i.e. its refcount is
+ * zero), the address space can be destroyed.
  */
 void as_destroy(as_t *as)
...
     ASSERT(!list_empty(&as->as_area_btree.leaf_head));
-    node = list_get_instance(as->as_area_btree.leaf_head.next, btree_node_t, leaf_link);
+    node = list_get_instance(as->as_area_btree.leaf_head.next,
+        btree_node_t, leaf_link);

     if ((cond = node->keys)) {
...
         a->backend_data = *backend_data;
     else
-        memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data), 0);
+        memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data),
+            0);

     btree_create(&a->used_space);
...
  * @param as Address space.
- * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
+ * @param address Virtual address belonging to the area to be changed. Must be
+ *     page-aligned.
  * @param size New size of the virtual memory block starting at address.
  * @param flags Flags influencing the remap operation. Currently unused.
...
          * Start TLB shootdown sequence.
          */
-        tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
+        tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base +
+            pages * PAGE_SIZE, area->pages - pages);
...
         ASSERT(!list_empty(&area->used_space.leaf_head));
-        node = list_get_instance(area->used_space.leaf_head.prev, btree_node_t, leaf_link);
+        node =
+            list_get_instance(area->used_space.leaf_head.prev,
+            btree_node_t, leaf_link);
         if ((cond = (bool) node->keys)) {
             uintptr_t b = node->key[node->keys - 1];
-            count_t c = (count_t) node->value[node->keys - 1];
+            count_t c =
+                (count_t) node->value[node->keys - 1];
             int i = 0;

-            if (overlaps(b, c*PAGE_SIZE, area->base, pages*PAGE_SIZE)) {
+            if (overlaps(b, c * PAGE_SIZE, area->base,
+                pages*PAGE_SIZE)) {

-                if (b + c *PAGE_SIZE <= start_free) {
+                if (b + c * PAGE_SIZE <= start_free) {
                     /*
-                     * The whole interval fits completely
-                     * in the resized address space area.
+                     * The whole interval fits
+                     * completely in the resized
+                     * address space area.
                      */
                     break;
                 }

                 /*
-                 * Part of the interval corresponding to b and c
-                 * overlaps with the resized address space area.
+                 * Part of the interval corresponding
+                 * to b and c overlaps with the resized
+                 * address space area.
                  */

                 cond = false;  /* we are almost done */
                 i = (start_free - b) >> PAGE_WIDTH;
-                if (!used_space_remove(area, start_free, c - i))
-                    panic("Could not remove used space.\n");
+                if (!used_space_remove(area, start_free,
+                    c - i))
+                    panic("Could not remove used "
+                        "space.\n");
             } else {
                 /*
-                 * The interval of used space can be completely removed.
+                 * The interval of used space can be
+                 * completely removed.
                  */
                 if (!used_space_remove(area, b, c))
-                    panic("Could not remove used space.\n");
+                    panic("Could not remove used "
+                        "space.\n");
             }
...
                 page_table_lock(as, false);
-                pte = page_mapping_find(as, b + i*PAGE_SIZE);
-                ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
-                if (area->backend && area->backend->frame_free) {
+                pte = page_mapping_find(as, b +
+                    i * PAGE_SIZE);
+                ASSERT(pte && PTE_VALID(pte) &&
+                    PTE_PRESENT(pte));
+                if (area->backend &&
+                    area->backend->frame_free) {
                     area->backend->frame_free(area,
-                        b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
+                        b + i * PAGE_SIZE,
+                        PTE_GET_FRAME(pte));
                 }
-                page_mapping_remove(as, b + i*PAGE_SIZE);
+                page_mapping_remove(as, b +
+                    i * PAGE_SIZE);
                 page_table_unlock(as, false);
             }
...
          * Finish TLB shootdown sequence.
          */
-        tlb_invalidate_pages(as->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
+        tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE,
+            area->pages - pages);
         tlb_shootdown_finalize();
...
          * Invalidate software translation caches (e.g. TSB on sparc64).
          */
-        as_invalidate_translation_cache(as, area->base + pages*PAGE_SIZE, area->pages - pages);
+        as_invalidate_translation_cache(as, area->base +
+            pages * PAGE_SIZE, area->pages - pages);
     } else {
...
          * Check for overlaps with other address space areas.
          */
-        if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
+        if (!check_area_conflicts(as, address, pages * PAGE_SIZE,
+            area)) {
             mutex_unlock(&area->lock);
             mutex_unlock(&as->lock);
...
      * Visit only the pages mapped by used_space B+tree.
      */
-    for (cur = area->used_space.leaf_head.next; cur != &area->used_space.leaf_head; cur = cur->next) {
+    for (cur = area->used_space.leaf_head.next;
+        cur != &area->used_space.leaf_head; cur = cur->next) {
         btree_node_t *node;
         int i;
...
             for (j = 0; j < (count_t) node->value[i]; j++) {
                 page_table_lock(as, false);
-                pte = page_mapping_find(as, b + j*PAGE_SIZE);
-                ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
-                if (area->backend && area->backend->frame_free) {
-                    area->backend->frame_free(area,
-                        b + j*PAGE_SIZE, PTE_GET_FRAME(pte));
+                pte = page_mapping_find(as, b + j * PAGE_SIZE);
+                ASSERT(pte && PTE_VALID(pte) &&
+                    PTE_PRESENT(pte));
+                if (area->backend &&
+                    area->backend->frame_free) {
+                    area->backend->frame_free(area, b +
+                        j * PAGE_SIZE, PTE_GET_FRAME(pte));
                 }
-                page_mapping_remove(as, b + j *PAGE_SIZE);
+                page_mapping_remove(as, b + j * PAGE_SIZE);
                 page_table_unlock(as, false);
             }
...
     /*
-     * Invalidate potential software translation caches (e.g. TSB on sparc64).
+     * Invalidate potential software translation caches (e.g. TSB on
+     * sparc64).
      */
     as_invalidate_translation_cache(as, area->base, area->pages);
...
         dst_flags_mask |= AS_AREA_CACHEABLE;

-    if (src_size != acc_size || (src_flags & dst_flags_mask) != dst_flags_mask) {
+    if (src_size != acc_size ||
+        (src_flags & dst_flags_mask) != dst_flags_mask) {
         mutex_unlock(&src_area->lock);
         mutex_unlock(&src_as->lock);
...
      */
     dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base,
         AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
     if (!dst_area) {
     (whitespace-only change)
...
     if (PTE_PRESENT(pte)) {
         if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
             (access == PF_ACCESS_WRITE && PTE_WRITABLE(pte)) ||
             (access == PF_ACCESS_EXEC && PTE_EXECUTABLE(pte))) {
             page_table_unlock(AS, false);
             mutex_unlock(&area->lock);
     (whitespace-only change)
...
     if (THREAD->in_copy_from_uspace) {
         THREAD->in_copy_from_uspace = false;
-        istate_set_retaddr(istate, (uintptr_t) &memcpy_from_uspace_failover_address);
+        istate_set_retaddr(istate,
+            (uintptr_t) &memcpy_from_uspace_failover_address);
     } else if (THREAD->in_copy_to_uspace) {
         THREAD->in_copy_to_uspace = false;
-        istate_set_retaddr(istate, (uintptr_t) &memcpy_to_uspace_failover_address);
+        istate_set_retaddr(istate,
+            (uintptr_t) &memcpy_to_uspace_failover_address);
     } else {
         return AS_PF_FAULT;
...
         ASSERT(old->asid != ASID_INVALID);
-        list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
+        list_append(&old->inactive_as_with_asid_link,
+            &inactive_as_with_asid_head);
     }
     mutex_unlock(&old->lock);
...
     mutex_lock_active(&new->lock);
     if ((new->cpu_refcount++ == 0) && (new != AS_KERNEL)) {
-        if (new->asid != ASID_INVALID)
+        if (new->asid != ASID_INVALID) {
             list_remove(&new->inactive_as_with_asid_link);
-        else
-            needs_asid = true;  /* defer call to asid_get() until new->lock is released */
+        } else {
+            /*
+             * Defer call to asid_get() until new->lock is released.
+             */
+            needs_asid = true;
+        }
     }
     SET_PTL0_ADDRESS(new->page_table);
...
  * @param va Virtual address.
  *
- * @return Locked address space area containing va on success or NULL on failure.
+ * @return Locked address space area containing va on success or NULL on
+ *     failure.
  */
 as_area_t *find_area_and_lock(as_t *as, uintptr_t va)
...
      * Because of its position in the B+tree, it must have base < va.
      */
-    if ((lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) {
+    lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
+    if (lnode) {
         a = (as_area_t *) lnode->value[lnode->keys - 1];
         mutex_lock(&a->lock);
...
  * @return True if there is no conflict, false otherwise.
  */
-bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area)
+bool check_area_conflicts(as_t *as, uintptr_t va, size_t size,
+    as_area_t *avoid_area)
 {
     as_area_t *a;
...
         mutex_unlock(&a->lock);
     }
-    if ((node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf))) {
+    node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);
+    if (node) {
         a = (as_area_t *) node->value[0];
         mutex_lock(&a->lock);
...
     if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
         return !overlaps(va, size,
-            KERNEL_ADDRESS_SPACE_START, KERNEL_ADDRESS_SPACE_END-KERNEL_ADDRESS_SPACE_START);
+            KERNEL_ADDRESS_SPACE_START,
+            KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
     }
...
     node = btree_leaf_node_left_neighbour(&a->used_space, leaf);
     if (node) {
-        uintptr_t left_pg = node->key[node->keys - 1], right_pg = leaf->key[0];
-        count_t left_cnt = (count_t) node->value[node->keys - 1], right_cnt = (count_t) leaf->value[0];
+        uintptr_t left_pg = node->key[node->keys - 1];
+        uintptr_t right_pg = leaf->key[0];
+        count_t left_cnt = (count_t) node->value[node->keys - 1];
+        count_t right_cnt = (count_t) leaf->value[0];
...
         if (page >= right_pg) {
             /* Do nothing. */
-        } else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
+        } else if (overlaps(page, count * PAGE_SIZE, left_pg,
+            left_cnt * PAGE_SIZE)) {
             /* The interval intersects with the left interval. */
             return 0;
-        } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
+        } else if (overlaps(page, count * PAGE_SIZE, right_pg,
+            right_cnt * PAGE_SIZE)) {
             /* The interval intersects with the right interval. */
             return 0;
-        } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
-            /* The interval can be added by merging the two already present intervals. */
+        } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
+            (page + count * PAGE_SIZE == right_pg)) {
+            /*
+             * The interval can be added by merging the two already
+             * present intervals.
+             */
             node->value[node->keys - 1] += count + right_cnt;
             btree_remove(&a->used_space, right_pg, leaf);
             return 1;
-        } else if (page == left_pg + left_cnt*PAGE_SIZE) {
-            /* The interval can be added by simply growing the left interval. */
+        } else if (page == left_pg + left_cnt * PAGE_SIZE) {
+            /*
+             * The interval can be added by simply growing the left
+             * interval.
+             */
             node->value[node->keys - 1] += count;
             return 1;
-        } else if (page + count *PAGE_SIZE == right_pg) {
+        } else if (page + count * PAGE_SIZE == right_pg) {
             /*
-             * The interval can be addded by simply moving base of the right
-             * interval down and increasing its size accordingly.
+             * The interval can be addded by simply moving base of
+             * the right interval down and increasing its size
+             * accordingly.
              */
             leaf->value[0] += count;
...
              * but cannot be merged with any of them.
              */
-            btree_insert(&a->used_space, page, (void *) count, leaf);
+            btree_insert(&a->used_space, page, (void *) count,
+                leaf);
             return 1;
         }
...
     /*
-     * Investigate the border case in which the left neighbour does not
-     * exist but the interval fits from the left.
+     * Investigate the border case in which the left neighbour does
+     * not exist but the interval fits from the left.
      */

-    if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
+    if (overlaps(page, count * PAGE_SIZE, right_pg,
+        right_cnt * PAGE_SIZE)) {
         /* The interval intersects with the right interval. */
         return 0;
-    } else if (page + count *PAGE_SIZE == right_pg) {
+    } else if (page + count * PAGE_SIZE == right_pg) {
         /*
-         * The interval can be added by moving the base of the right interval down
-         * and increasing its size accordingly.
+         * The interval can be added by moving the base of the
+         * right interval down and increasing its size
+         * accordingly.
          */
         leaf->key[0] = page;
...
          * It must be added individually.
          */
-        btree_insert(&a->used_space, page, (void *) count, leaf);
+        btree_insert(&a->used_space, page, (void *) count,
+            leaf);
         return 1;
     }
...
     node = btree_leaf_node_right_neighbour(&a->used_space, leaf);
     if (node) {
-        uintptr_t left_pg = leaf->key[leaf->keys - 1], right_pg = node->key[0];
-        count_t left_cnt = (count_t) leaf->value[leaf->keys - 1], right_cnt = (count_t) node->value[0];
+        uintptr_t left_pg = leaf->key[leaf->keys - 1];
+        uintptr_t right_pg = node->key[0];
+        count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];
+        count_t right_cnt = (count_t) node->value[0];
...
         if (page < left_pg) {
             /* Do nothing. */
-        } else if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
+        } else if (overlaps(page, count * PAGE_SIZE, left_pg,
+            left_cnt * PAGE_SIZE)) {
             /* The interval intersects with the left interval. */
             return 0;
-        } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
+        } else if (overlaps(page, count * PAGE_SIZE, right_pg,
+            right_cnt * PAGE_SIZE)) {
             /* The interval intersects with the right interval. */
             return 0;
-        } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
-            /* The interval can be added by merging the two already present intervals. */
+        } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
+            (page + count * PAGE_SIZE == right_pg)) {
+            /*
+             * The interval can be added by merging the two already
+             * present intervals.
+             */
             leaf->value[leaf->keys - 1] += count + right_cnt;
             btree_remove(&a->used_space, right_pg, node);
             return 1;
-        } else if (page == left_pg + left_cnt*PAGE_SIZE) {
-            /* The interval can be added by simply growing the left interval. */
+        } else if (page == left_pg + left_cnt * PAGE_SIZE) {
+            /*
+             * The interval can be added by simply growing the left
+             * interval.
+             */
             leaf->value[leaf->keys - 1] += count;
             return 1;
-        } else if (page + count *PAGE_SIZE == right_pg) {
+        } else if (page + count * PAGE_SIZE == right_pg) {
             /*
-             * The interval can be addded by simply moving base of the right
-             * interval down and increasing its size accordingly.
+             * The interval can be addded by simply moving base of
+             * the right interval down and increasing its size
+             * accordingly.
              */
             node->value[0] += count;
...
          * but cannot be merged with any of them.
          */
-        btree_insert(&a->used_space, page, (void *) count, leaf);
+        btree_insert(&a->used_space, page, (void *) count,
+            leaf);
         return 1;
     }
...
     /*
-     * Investigate the border case in which the right neighbour does not
-     * exist but the interval fits from the right.
+     * Investigate the border case in which the right neighbour
+     * does not exist but the interval fits from the right.
      */

-    if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
+    if (overlaps(page, count * PAGE_SIZE, left_pg,
+        left_cnt * PAGE_SIZE)) {
         /* The interval intersects with the left interval. */
         return 0;
-    } else if (left_pg + left_cnt*PAGE_SIZE == page) {
-        /* The interval can be added by growing the left interval. */
+    } else if (left_pg + left_cnt * PAGE_SIZE == page) {
+        /*
+         * The interval can be added by growing the left
+         * interval.
+         */
         leaf->value[leaf->keys - 1] += count;
         return 1;
...
          * It must be added individually.
          */
-        btree_insert(&a->used_space, page, (void *) count, leaf);
+        btree_insert(&a->used_space, page, (void *) count,
+            leaf);
         return 1;
     }
...
     /*
-     * Note that if the algorithm made it thus far, the interval can fit only
-     * between two other intervals of the leaf. The two border cases were already
-     * resolved.
+     * Note that if the algorithm made it thus far, the interval can fit
+     * only between two other intervals of the leaf. The two border cases
+     * were already resolved.
      */
     for (i = 1; i < leaf->keys; i++) {
         if (page < leaf->key[i]) {
-            uintptr_t left_pg = leaf->key[i - 1], right_pg = leaf->key[i];
-            count_t left_cnt = (count_t) leaf->value[i - 1], right_cnt = (count_t) leaf->value[i];
+            uintptr_t left_pg = leaf->key[i - 1];
+            uintptr_t right_pg = leaf->key[i];
+            count_t left_cnt = (count_t) leaf->value[i - 1];
+            count_t right_cnt = (count_t) leaf->value[i];
...
-            if (overlaps(page, count*PAGE_SIZE, left_pg, left_cnt*PAGE_SIZE)) {
-                /* The interval intersects with the left interval. */
+            if (overlaps(page, count * PAGE_SIZE, left_pg,
+                left_cnt * PAGE_SIZE)) {
+                /*
+                 * The interval intersects with the left
+                 * interval.
+                 */
                 return 0;
-            } else if (overlaps(page, count*PAGE_SIZE, right_pg, right_cnt*PAGE_SIZE)) {
-                /* The interval intersects with the right interval. */
+            } else if (overlaps(page, count * PAGE_SIZE, right_pg,
+                right_cnt * PAGE_SIZE)) {
+                /*
+                 * The interval intersects with the right
+                 * interval.
+                 */
                 return 0;
-            } else if ((page == left_pg + left_cnt*PAGE_SIZE) && (page + count*PAGE_SIZE == right_pg)) {
-                /* The interval can be added by merging the two already present intervals. */
+            } else if ((page == left_pg + left_cnt * PAGE_SIZE) &&
+                (page + count * PAGE_SIZE == right_pg)) {
+                /*
+                 * The interval can be added by merging the two
+                 * already present intervals.
+                 */
                 leaf->value[i - 1] += count + right_cnt;
                 btree_remove(&a->used_space, right_pg, leaf);
                 return 1;
-            } else if (page == left_pg + left_cnt*PAGE_SIZE) {
-                /* The interval can be added by simply growing the left interval. */
+            } else if (page == left_pg + left_cnt * PAGE_SIZE) {
+                /*
+                 * The interval can be added by simply growing
+                 * the left interval.
+                 */
                 leaf->value[i - 1] += count;
                 return 1;
-            } else if (page + count *PAGE_SIZE == right_pg) {
+            } else if (page + count * PAGE_SIZE == right_pg) {
                 /*
-                 * The interval can be addded by simply moving base of the right
-                 * interval down and increasing its size accordingly.
+                 * The interval can be addded by simply moving
+                 * base of the right interval down and
+                 * increasing its size accordingly.
                  */
                 leaf->value[i] += count;
                 return 1;
             } else {
                 /*
-                 * The interval is between both neigbouring intervals,
-                 * but cannot be merged with any of them.
+                 * The interval is between both neigbouring
+                 * intervals, but cannot be merged with any of
+                 * them.
                  */
-                btree_insert(&a->used_space, page, (void *) count, leaf);
+                btree_insert(&a->used_space, page,
+                    (void *) count, leaf);
                 return 1;
             }
         }
     }

-    panic("Inconsistency detected while adding %d pages of used space at %p.\n", count, page);
+    panic("Inconsistency detected while adding %d pages of used space at "
+        "%p.\n", count, page);
 }
...
     for (i = 0; i < leaf->keys; i++) {
         if (leaf->key[i] == page) {
-            leaf->key[i] += count *PAGE_SIZE;
+            leaf->key[i] += count * PAGE_SIZE;
             leaf->value[i] -= count;
             return 1;
...
         count_t left_cnt = (count_t) node->value[node->keys - 1];

-        if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
-            if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
+        if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
+            count * PAGE_SIZE)) {
+            if (page + count * PAGE_SIZE ==
+                left_pg + left_cnt * PAGE_SIZE) {
                 /*
-                 * The interval is contained in the rightmost interval
-                 * of the left neighbour and can be removed by
-                 * updating the size of the bigger interval.
+                 * The interval is contained in the rightmost
+                 * interval of the left neighbour and can be
+                 * removed by updating the size of the bigger
+                 * interval.
                  */
                 node->value[node->keys - 1] -= count;
                 return 1;
-            } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
+            } else if (page + count * PAGE_SIZE <
+                left_pg + left_cnt*PAGE_SIZE) {
                 count_t new_cnt;

                 /*
-                 * The interval is contained in the rightmost interval
-                 * of the left neighbour but its removal requires
-                 * both updating the size of the original interval and
-                 * also inserting a new interval.
+                 * The interval is contained in the rightmost
+                 * interval of the left neighbour but its
+                 * removal requires both updating the size of
+                 * the original interval and also inserting a
+                 * new interval.
                  */
-                new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
+                new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
+                    (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
                 node->value[node->keys - 1] -= count + new_cnt;
-                btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
+                btree_insert(&a->used_space, page +
+                    count * PAGE_SIZE, (void *) new_cnt, leaf);
                 return 1;
             }
...
         count_t left_cnt = (count_t) leaf->value[leaf->keys - 1];

-        if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
-            if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
+        if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
+            count * PAGE_SIZE)) {
+            if (page + count * PAGE_SIZE ==
+                left_pg + left_cnt * PAGE_SIZE) {
                 /*
-                 * The interval is contained in the rightmost interval
-                 * of the leaf and can be removed by updating the size
-                 * of the bigger interval.
+                 * The interval is contained in the rightmost
+                 * interval of the leaf and can be removed by
+                 * updating the size of the bigger interval.
                  */
                 leaf->value[leaf->keys - 1] -= count;
                 return 1;
-            } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
+            } else if (page + count * PAGE_SIZE < left_pg +
+                left_cnt * PAGE_SIZE) {
                 count_t new_cnt;

                 /*
-                 * The interval is contained in the rightmost interval
-                 * of the leaf but its removal requires both updating
-                 * the size of the original interval and
-                 * also inserting a new interval.
+                 * The interval is contained in the rightmost
+                 * interval of the leaf but its removal
+                 * requires both updating the size of the
+                 * original interval and also inserting a new
+                 * interval.
                  */
-                new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
+                new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -
+                    (page + count * PAGE_SIZE)) >> PAGE_WIDTH;
                 leaf->value[leaf->keys - 1] -= count + new_cnt;
-                btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
+                btree_insert(&a->used_space, page +
+                    count * PAGE_SIZE, (void *) new_cnt, leaf);
                 return 1;
             }
...
             /*
-             * Now the interval is between intervals corresponding to (i - 1) and i.
+             * Now the interval is between intervals corresponding
+             * to (i - 1) and i.
              */
-            if (overlaps(left_pg, left_cnt*PAGE_SIZE, page, count*PAGE_SIZE)) {
-                if (page + count*PAGE_SIZE == left_pg + left_cnt*PAGE_SIZE) {
+            if (overlaps(left_pg, left_cnt * PAGE_SIZE, page,
+                count * PAGE_SIZE)) {
+                if (page + count * PAGE_SIZE ==
+                    left_pg + left_cnt*PAGE_SIZE) {
                     /*
-                     * The interval is contained in the interval (i - 1)
-                     * of the leaf and can be removed by updating the size
-                     * of the bigger interval.
+                     * The interval is contained in the
+                     * interval (i - 1) of the leaf and can
+                     * be removed by updating the size of
+                     * the bigger interval.
                      */
                     leaf->value[i - 1] -= count;
                     return 1;
-                } else if (page + count*PAGE_SIZE < left_pg + left_cnt*PAGE_SIZE) {
+                } else if (page + count * PAGE_SIZE <
+                    left_pg + left_cnt * PAGE_SIZE) {
                     count_t new_cnt;

                     /*
-                     * The interval is contained in the interval (i - 1)
-                     * of the leaf but its removal requires both updating
-                     * the size of the original interval and
-                     * also inserting a new interval.
+                     * The interval is contained in the
+                     * interval (i - 1) of the leaf but its
+                     * removal requires both updating the
+                     * size of the original interval and
+                     * also inserting a new interval.
                      */
-                    new_cnt = ((left_pg + left_cnt*PAGE_SIZE) - (page + count*PAGE_SIZE)) >> PAGE_WIDTH;
+                    new_cnt = ((left_pg +
+                        left_cnt * PAGE_SIZE) -
+                        (page + count * PAGE_SIZE)) >>
+                        PAGE_WIDTH;
                     leaf->value[i - 1] -= count + new_cnt;
-                    btree_insert(&a->used_space, page + count*PAGE_SIZE, (void *) new_cnt, leaf);
+                    btree_insert(&a->used_space, page +
+                        count * PAGE_SIZE, (void *) new_cnt,
+                        leaf);
                     return 1;
                 }
...
 error:
-    panic("Inconsistency detected while removing %d pages of used space from %p.\n", count, page);
+    panic("Inconsistency detected while removing %d pages of used space "
+        "from %p.\n", count, page);
 }
...
      * reference from all frames found there.
      */
-    for (cur = sh_info->pagemap.leaf_head.next; cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
+    for (cur = sh_info->pagemap.leaf_head.next;
+        cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
         btree_node_t *node;
         int i;
...
 unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)
 {
-    if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL))
+    if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address,
+        AS_AREA_ATTR_NONE, &anon_backend, NULL))
         return (unative_t) address;
     else
...
     /* print out info about address space areas */
     link_t *cur;
-    for (cur = as->as_area_btree.leaf_head.next; cur != &as->as_area_btree.leaf_head; cur = cur->next) {
-        btree_node_t *node = list_get_instance(cur, btree_node_t, leaf_link);
+    for (cur = as->as_area_btree.leaf_head.next;
+        cur != &as->as_area_btree.leaf_head; cur = cur->next) {
+        btree_node_t *node;
+
+        node = list_get_instance(cur, btree_node_t, leaf_link);

         int i;
...
             mutex_lock(&area->lock);
             printf("as_area: %p, base=%p, pages=%d (%p - %p)\n",
-                area, area->base, area->pages, area->base, area->base + area->pages*PAGE_SIZE);
+                area, area->base, area->pages, area->base,
+                area->base + area->pages*PAGE_SIZE);
             mutex_unlock(&area->lock);
         }
kernel/generic/src/proc/scheduler.c
     context_save(&CPU->saved_context);
     context_set(&CPU->saved_context, FADDR(scheduler_separated_stack),
         (uintptr_t) CPU->stack, CPU_STACK_SIZE);
     context_restore(&CPU->saved_context);
     /* not reached */
     (whitespace-only change)
...
 #ifdef SCHEDULER_VERBOSE
     printf("cpu%d: tid %d (priority=%d, ticks=%lld, nrdy=%ld)\n",
         CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks,
         atomic_get(&CPU->nrdy));
 #endif
     (whitespace-only change)
...
 #ifdef KCPULB_VERBOSE
     printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, "
         "avg=%nd\n", CPU->id, t->tid, CPU->id,
         atomic_get(&CPU->nrdy),
         atomic_get(&nrdy) / config.cpu_active);
 #endif
     t->flags |= THREAD_FLAG_STOLEN;
     (whitespace-only change)
...
     spinlock_lock(&cpus[cpu].lock);
     printf("cpu%d: address=%p, nrdy=%ld, needs_relink=%ld\n",
         cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
         cpus[cpu].needs_relink);

     for (i = 0; i < RQ_COUNT; i++) {
     (whitespace-only change)
...
             t = list_get_instance(cur, thread_t, rq_link);
             printf("%d(%s) ", t->tid,
                 thread_states[t->state]);
         }
         printf("\n");
     (whitespace-only change)
kernel/generic/src/proc/task.c
 /** B+tree of active tasks.
  *
- * The task is guaranteed to exist after it was found in the tasks_btree as long as:
+ * The task is guaranteed to exist after it was found in the tasks_btree as
+ * long as:
  * @li the tasks_lock is held,
- * @li the task's lock is held when task's lock is acquired before releasing tasks_lock or
+ * @li the task's lock is held when task's lock is acquired before releasing
+ *     tasks_lock or
  * @li the task's refcount is greater than 0
  *
...
     for (i = 0; i < IPC_MAX_PHONES; i++)
         ipc_phone_init(&ta->phones[i]);
-    if ((ipc_phone_0) && (context_check(ipc_phone_0->task->context, ta->context)))
+    if ((ipc_phone_0) && (context_check(ipc_phone_0->task->context,
+        ta->context)))
         ipc_phone_connect(&ta->phones[0], ipc_phone_0);
     atomic_set(&ta->active_calls, 0);
...
     kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
-    kernel_uarg->uspace_entry = (void *) ((elf_header_t *) program_addr)->e_entry;
+    kernel_uarg->uspace_entry =
+        (void *) ((elf_header_t *) program_addr)->e_entry;
     kernel_uarg->uspace_stack = (void *) USTACK_ADDRESS;
     kernel_uarg->uspace_thread_function = NULL;
...
      * Create the data as_area.
      */
-    a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
-        LOADED_PROG_STACK_PAGES_NO*PAGE_SIZE,
-        USTACK_ADDRESS,AS_AREA_ATTR_NONE, &anon_backend, NULL);
+    a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
+        LOADED_PROG_STACK_PAGES_NO * PAGE_SIZE, USTACK_ADDRESS,
+        AS_AREA_ATTR_NONE, &anon_backend, NULL);

     /*
      * Create the main thread.
      */
-    t1 = thread_create(uinit, kernel_uarg, task, THREAD_FLAG_USPACE, "uinit", false);
+    t1 = thread_create(uinit, kernel_uarg, task, THREAD_FLAG_USPACE,
+        "uinit", false);
     ASSERT(t1);
...
 /** Syscall for reading task ID from userspace.
  *
- * @param uspace_task_id Userspace address of 8-byte buffer where to store current task ID.
+ * @param uspace_task_id Userspace address of 8-byte buffer where to store
+ *     current task ID.
  *
  * @return 0 on success or an error code from @ref errno.h.
...
      * remains constant for the lifespan of the task.
      */
-    return (unative_t) copy_to_uspace(uspace_task_id, &TASK->taskid, sizeof(TASK->taskid));
+    return (unative_t) copy_to_uspace(uspace_task_id, &TASK->taskid,
+        sizeof(TASK->taskid));
 }
...
         /* Process only counted threads */
         if (!thr->uncounted) {
-            if (thr == THREAD) /* Update accounting of current thread */
-                thread_update_accounting();
+            if (thr == THREAD) {
+                /* Update accounting of current thread */
+                thread_update_accounting();
+            }
             ret += thr->cycles;
         }
...
     spinlock_lock(&tasks_lock);

-    printf("taskid name ctx address as cycles threads calls callee\n");
-    printf("------ ---------- --- ---------- ---------- ---------- ------- ------ ------>\n");
-
-    for (cur = tasks_btree.leaf_head.next; cur != &tasks_btree.leaf_head; cur = cur->next) {
+    printf("taskid name ctx address as cycles threads "
+        "calls callee\n");
+    printf("------ ---------- --- ---------- ---------- ---------- ------- "
+        "------ ------>\n");
+
+    for (cur = tasks_btree.leaf_head.next; cur != &tasks_btree.leaf_head;
+        cur = cur->next) {
         btree_node_t *node;
         int i;
...
             order(task_get_accounting(t), &cycles, &suffix);

-            printf("%-6lld %-10s %-3ld %#10zx %#10zx %9llu%c %7zd %6zd", t->taskid, t->name, t->context, t, t->as, cycles, suffix, t->refcount, atomic_get(&t->active_calls));
+            printf("%-6lld %-10s %-3ld %#10zx %#10zx %9llu%c %7zd "
+                "%6zd", t->taskid, t->name, t->context, t, t->as,
+                cycles, suffix, t->refcount,
+                atomic_get(&t->active_calls));
             for (j = 0; j < IPC_MAX_PHONES; j++) {
                 if (t->phones[j].callee)
-                    printf(" %zd:%#zx", j, t->phones[j].callee);
+                    printf(" %zd:%#zx", j,
+                        t->phones[j].callee);
             }
             printf("\n");
...
     if (t != THREAD) {
-        ASSERT(t != main_thread);  /* uninit is joined and detached in ktaskgc */
+        ASSERT(t != main_thread);  /* uninit is joined and detached
+                                    * in ktaskgc */
         thread_join(t);
         thread_detach(t);
         goto loop;  /* go for another thread */
     }
...
      * therefore the thread pointer is guaranteed to be valid.
      */
-    if (thread_join_timeout(t, 1000000, SYNCH_FLAGS_NONE) == ESYNCH_TIMEOUT) {  /* sleep uninterruptibly here! */
+    if (thread_join_timeout(t, 1000000, SYNCH_FLAGS_NONE) ==
+        ESYNCH_TIMEOUT) {  /* sleep uninterruptibly here! */
         ipl_t ipl;
         link_t *cur;
...
         /*
-         * The join timed out. Try to do some garbage collection of Undead threads.
+         * The join timed out. Try to do some garbage collection of
+         * Undead threads.
          */
     more_gc:
...
         spinlock_lock(&TASK->lock);

-        for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
+        for (cur = TASK->th_head.next; cur != &TASK->th_head;
+            cur = cur->next) {
             thr = list_get_instance(cur, thread_t, th_link);
             spinlock_lock(&thr->lock);
-            if (thr != t && thr->state == Undead && thr->join_type == None) {
+            if (thr != t && thr->state == Undead &&
+                thr->join_type == None) {
                 thr->join_type = TaskGC;
                 spinlock_unlock(&thr->lock);
kernel/generic/src/proc/thread.c
     atomic_set(&nrdy,0);
     thread_slab = slab_cache_create("thread_slab", sizeof(thread_t), 0,
         thr_constructor, thr_destructor, 0);

 #ifdef ARCH_HAS_FPU
     fpu_context_slab = slab_cache_create("fpu_slab", sizeof(fpu_context_t),
         FPU_CONTEXT_ALIGN, NULL, NULL, 0);
 #endif
     (whitespace-only change)
...
     /* Not needed, but good for debugging */
     memsetb((uintptr_t) t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES,
         0);

     ipl = interrupts_disable();
     (whitespace-only change)
...
     context_save(&t->saved_context);
     context_set(&t->saved_context, FADDR(cushion), (uintptr_t) t->kstack,
         THREAD_STACK_SIZE);

     the_initialize((the_t *) t->kstack);
     (whitespace-only change)
...
     spinlock_lock(&threads_lock);
     btree_insert(&threads_btree, (btree_key_t) ((uintptr_t) t), (void *) t,
         NULL);
     spinlock_unlock(&threads_lock);
     (whitespace-only change)
...
     spinlock_lock(&threads_lock);

-    printf("tid name address state task ctx code stack cycles cpu kstack waitqueue\n");
-    printf("------ ---------- ---------- -------- ---------- --- ---------- ---------- ---------- ---- ---------- ----------\n");
-
-    for (cur = threads_btree.leaf_head.next; cur != &threads_btree.leaf_head; cur = cur->next) {
+    printf("tid name address state task ctx code "
+        " stack cycles cpu kstack waitqueue\n");
+    printf("------ ---------- ---------- -------- ---------- --- --------"
+        "-- ---------- ---------- ---- ---------- ----------\n");
+
+    for (cur = threads_btree.leaf_head.next;
+        cur != &threads_btree.leaf_head; cur = cur->next) {
         btree_node_t *node;
         int i;
...
         order(t->cycles, &cycles, &suffix);

-        printf("%-6zd %-10s %#10zx %-8s %#10zx %-3ld %#10zx %#10zx %9llu%c ", t->tid, t->name, t, thread_states[t->state], t->task, t->task->context, t->thread_code, t->kstack, cycles, suffix);
+        printf("%-6zd %-10s %#10zx %-8s %#10zx %-3ld %#10zx "
+            "%#10zx %9llu%c ", t->tid, t->name, t,
+            thread_states[t->state], t->task, t->task->context,
+            t->thread_code, t->kstack, cycles, suffix);

         if (t->cpu)
...
         if (t->state == Sleeping)
-            printf(" %#10zx %#10zx", t->kstack, t->sleep_queue);
+            printf(" %#10zx %#10zx", t->kstack,
+                t->sleep_queue);

         printf("\n");
...
     btree_node_t *leaf;

-    return btree_search(&threads_btree, (btree_key_t) ((uintptr_t) t), &leaf) != NULL;
+    return btree_search(&threads_btree, (btree_key_t) ((uintptr_t) t),
+        &leaf) != NULL;
 }
...
     }

-    if ((t = thread_create(uinit, kernel_uarg, TASK, THREAD_FLAG_USPACE, namebuf, false))) {
+    t = thread_create(uinit, kernel_uarg, TASK, THREAD_FLAG_USPACE, namebuf,
+        false);
+    if (t) {
         tid = t->tid;
         thread_ready(t);
...
 /** @}
  */
+
kernel/generic/src/synch/futex.c
  *
  * @param uaddr Userspace address of the futex counter.
- * @param usec If non-zero, number of microseconds this thread is willing to sleep.
+ * @param usec If non-zero, number of microseconds this thread is willing to
+ *     sleep.
  * @param flags Select mode of operation.
  *
- * @return One of ESYNCH_TIMEOUT, ESYNCH_OK_ATOMIC and ESYNCH_OK_BLOCKED. See synch.h.
- *     If there is no physical mapping for uaddr ENOENT is returned.
+ * @return One of ESYNCH_TIMEOUT, ESYNCH_OK_ATOMIC and ESYNCH_OK_BLOCKED. See
+ *     synch.h. If there is no physical mapping for uaddr ENOENT is returned.
  */
 unative_t sys_futex_sleep_timeout(uintptr_t uaddr, uint32_t usec, int flags)
...
     futex = futex_find(paddr);

-    return (unative_t) waitq_sleep_timeout(&futex->wq, usec, flags | SYNCH_FLAGS_INTERRUPTIBLE);
+    return (unative_t) waitq_sleep_timeout(&futex->wq, usec, flags |
+        SYNCH_FLAGS_INTERRUPTIBLE);
 }
...
              */
             futex->refcount++;
-            btree_insert(&TASK->futexes, paddr, futex, leaf);
+            btree_insert(&TASK->futexes, paddr, futex,
+                leaf);
         }
         mutex_unlock(&TASK->futexes_lock);
...
 /** Compute hash index into futex hash table.
  *
- * @param key Address where the key (i.e. physical address of futex counter) is stored.
+ * @param key Address where the key (i.e. physical address of futex counter) is
+ *     stored.
  *
  * @return Index into futex hash table.
...
 /** Compare futex hash table item with a key.
  *
- * @param key Address where the key (i.e. physical address of futex counter) is stored.
+ * @param key Address where the key (i.e. physical address of futex counter) is
+ *     stored.
  *
  * @return True if the item matches the key. False otherwise.
...
     mutex_lock(&TASK->futexes_lock);

-    for (cur = TASK->futexes.leaf_head.next; cur != &TASK->futexes.leaf_head; cur = cur->next) {
+    for (cur = TASK->futexes.leaf_head.next;
+        cur != &TASK->futexes.leaf_head; cur = cur->next) {
         btree_node_t *node;
         int i;
kernel/generic/src/synch/rwlock.c
     case ESYNCH_OK_BLOCKED:
         /*
-         * We were woken with rwl->readers_in already incremented.
-         * Note that this arrangement avoids race condition between
-         * two concurrent readers. (Race is avoided if 'exclusive' is
-         * locked at the same time as 'readers_in' is incremented.
-         * Same time means both events happen atomically when
-         * rwl->lock is held.)
+         * We were woken with rwl->readers_in already
+         * incremented.
+         *
+         * Note that this arrangement avoids race condition
+         * between two concurrent readers. (Race is avoided if
+         * 'exclusive' is locked at the same time as
+         * 'readers_in' is incremented. Same time means both
+         * events happen atomically when rwl->lock is held.)
          */
         interrupts_restore(ipl);
...
     if (!list_empty(&rwl->exclusive.sem.wq.head))
-        t = list_get_instance(rwl->exclusive.sem.wq.head.next, thread_t, wq_link);
+        t = list_get_instance(rwl->exclusive.sem.wq.head.next, thread_t,
+            wq_link);
     do {
         if (t) {
...
             /*
              * Waking up a reader.
-             * We are responsible for incrementing rwl->readers_in for it.
+             * We are responsible for incrementing rwl->readers_in
+             * for it.
              */
             rwl->readers_in++;
...
         t = NULL;
         if (!list_empty(&rwl->exclusive.sem.wq.head)) {
-            t = list_get_instance(rwl->exclusive.sem.wq.head.next, thread_t, wq_link);
+            t = list_get_instance(rwl->exclusive.sem.wq.head.next,
+                thread_t, wq_link);
             if (t) {
                 spinlock_lock(&t->lock);
kernel/generic/src/synch/spinlock.c
 #endif
         if (i++ > DEADLOCK_THRESHOLD) {
-            printf("cpu%d: looping on spinlock %.*p:%s, caller=%.*p",
-                CPU->id, sizeof(uintptr_t) * 2, sl, sl->name, sizeof(uintptr_t) * 2, CALLER);
+            printf("cpu%d: looping on spinlock %.*p:%s, "
+                "caller=%.*p", CPU->id, sizeof(uintptr_t) * 2, sl,
+                sl->name, sizeof(uintptr_t) * 2, CALLER);
             symbol = get_symtab_entry(CALLER);
             if (symbol)
kernel/generic/src/synch/waitq.c
      * The sleep can be interrupted only if the
      * SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags.
      *
      * If usec is greater than zero, regardless of the value of the
      * SYNCH_FLAGS_NON_BLOCKING bit in flags, the call will not return until either
     (whitespace-only change)
...
         THREAD->timeout_pending = true;
         timeout_register(&THREAD->sleep_timeout, (uint64_t) usec,
             waitq_timeouted_sleep, THREAD);
     }
     (whitespace-only change)