Changeset 771cd22 in mainline
- Timestamp: 2006-12-16T19:07:02Z (18 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 7e7c8747
- Parents: 1ecdbb0
- Files: 14 edited
Legend:
- Unmodified lines carry no prefix
- Added lines are prefixed with +
- Removed lines are prefixed with -
boot/genarch/ofw.c
--- r1ecdbb0
+++ r771cd22
 }
 
+/** Save OpenFirmware physical memory map.
+ *
+ * @param map Memory map structure where the map will be saved.
+ *
+ * @return Zero on failure, non-zero on success.
+ */
 int ofw_memmap(memmap_t *map)
 {
…
     unsigned int sc = ofw_get_size_cells(ofw_memory);
 
-    uint32_t buf[((ac+sc)*MEMMAP_MAX_RECORDS)];
+    uint32_t buf[((ac + sc) * MEMMAP_MAX_RECORDS)];
     int ret = ofw_get_property(ofw_memory, "reg", buf, sizeof(buf));
     if (ret <= 0)    /* ret is the number of written bytes */
…
     map->total = 0;
     map->count = 0;
-    for (pos = 0; (pos < ret / sizeof(uint32_t)) && (map->count < MEMMAP_MAX_RECORDS); pos += ac + sc) {
+    for (pos = 0; (pos < ret / sizeof(uint32_t)) && (map->count <
+        MEMMAP_MAX_RECORDS); pos += ac + sc) {
         void * start = (void *) ((uintptr_t) buf[pos + ac - 1]);
         unsigned int size = buf[pos + ac + sc - 1];
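The rewrapped loop consumes ac + sc cells of the "reg" property per record and keeps only the least significant address and size cells. A minimal, self-contained sketch of that walk; the cell counts (ac == 2, sc == 1) and the record values are made up for illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const unsigned int ac = 2, sc = 1;    /* hypothetical cell counts */
        /* Two fake records of {addr_hi, addr_lo, size}: */
        uint32_t buf[] = { 0x0, 0x40000000, 0x1000000,
                           0x0, 0x48000000, 0x0800000 };
        unsigned int pos;

        for (pos = 0; pos < sizeof(buf) / sizeof(buf[0]); pos += ac + sc) {
            /* Only the least significant cells are used, as in ofw_memmap(). */
            uint32_t start = buf[pos + ac - 1];
            uint32_t size = buf[pos + ac + sc - 1];
            printf("record: start=%#x, size=%u\n", (unsigned) start,
                (unsigned) size);
        }
        return 0;
    }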
kernel/arch/sparc64/include/mm/page.h
--- r1ecdbb0
+++ r771cd22
 #define PAGE_SIZE    FRAME_SIZE
 
-#define PAGE_COLOR_BITS    1
+#define PAGE_COLOR_BITS    1    /**< 14 - 13; 2^14 == 16K == alias boundary. */
 
 #ifdef KERNEL
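The new comment spells out the arithmetic: pages are 8K (2^13) and the illegal-alias boundary is 16K (2^14), so a single bit distinguishes the two page colors. A hypothetical helper, not part of this changeset, that extracts the color of a virtual address:

    #include <stdio.h>

    #define PAGE_WIDTH        13    /* 8K pages */
    #define PAGE_COLOR_BITS   1     /* 14 - 13 */
    #define PAGE_COLOR(va)    (((va) >> PAGE_WIDTH) & ((1 << PAGE_COLOR_BITS) - 1))

    int main(void)
    {
        /* Pages 16K apart share a color; adjacent 8K pages do not. */
        printf("%d %d %d\n", PAGE_COLOR(0x0000), PAGE_COLOR(0x2000),
            PAGE_COLOR(0x4000));    /* prints: 0 1 0 */
        return 0;
    }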
kernel/arch/sparc64/include/mm/tsb.h
--- r1ecdbb0
+++ r771cd22
  * in TLBs - only one TLB entry will do.
  */
-#define TSB_SIZE    2    /* when changing this, change as.c as well */
-#define ITSB_ENTRY_COUNT    (512*(1<<TSB_SIZE))
-#define DTSB_ENTRY_COUNT    (512*(1<<TSB_SIZE))
+#define TSB_SIZE    2    /* when changing this, change
+                          * as.c as well */
+#define ITSB_ENTRY_COUNT    (512 * (1 << TSB_SIZE))
+#define DTSB_ENTRY_COUNT    (512 * (1 << TSB_SIZE))
 
 #define TSB_TAG_TARGET_CONTEXT_SHIFT    48
…
     struct {
         uint64_t base : 51;    /**< TSB base address, bits 63:13. */
-        unsigned split : 1;    /**< Split vs. common TSB for 8K and 64K pages.
-                                * HelenOS uses only 8K pages for user mappings,
-                                * so we always set this to 0.
-                                */
+        unsigned split : 1;    /**< Split vs. common TSB for 8K and 64K
+                                * pages. HelenOS uses only 8K pages
+                                * for user mappings, so we always set
+                                * this to 0.
+                                */
         unsigned : 9;
-        unsigned size : 3;    /**< TSB size. Number of entries is 512*2^size. */
+        unsigned size : 3;    /**< TSB size. Number of entries is
+                               * 512 * 2^size. */
     } __attribute__ ((packed));
 };
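With TSB_SIZE fixed at 2, each of the two TSBs holds 512 * 2^2 == 2048 entries. A compilable check of that arithmetic; the 16-byte entry size is an assumption about tsb_entry_t, not something stated in this diff:

    #include <stdio.h>

    #define TSB_SIZE            2
    #define ITSB_ENTRY_COUNT    (512 * (1 << TSB_SIZE))

    int main(void)
    {
        printf("%d entries, %d bytes per TSB\n", ITSB_ENTRY_COUNT,
            ITSB_ENTRY_COUNT * 16);    /* 2048 entries, 32768 bytes */
        return 0;
    }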
kernel/arch/sparc64/src/mm/as.c
--- r1ecdbb0
+++ r771cd22
 {
 #ifdef CONFIG_TSB
-    int order = fnzb32(((ITSB_ENTRY_COUNT+DTSB_ENTRY_COUNT)*sizeof(tsb_entry_t))>>FRAME_WIDTH);
+    int order = fnzb32(((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
+        sizeof(tsb_entry_t)) >> FRAME_WIDTH);
     uintptr_t tsb = (uintptr_t) frame_alloc(order, flags | FRAME_KA);
 
…
 
     as->arch.itsb = (tsb_entry_t *) tsb;
-    as->arch.dtsb = (tsb_entry_t *) (tsb + ITSB_ENTRY_COUNT * sizeof(tsb_entry_t));
-    memsetb((uintptr_t) as->arch.itsb, (ITSB_ENTRY_COUNT+DTSB_ENTRY_COUNT)*sizeof(tsb_entry_t), 0);
+    as->arch.dtsb = (tsb_entry_t *) (tsb + ITSB_ENTRY_COUNT *
+        sizeof(tsb_entry_t));
+    memsetb((uintptr_t) as->arch.itsb, (ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT)
+        * sizeof(tsb_entry_t), 0);
 #endif
     return 0;
…
 {
 #ifdef CONFIG_TSB
-    count_t cnt = ((ITSB_ENTRY_COUNT+DTSB_ENTRY_COUNT)*sizeof(tsb_entry_t))>>FRAME_WIDTH;
+    count_t cnt = ((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
+        sizeof(tsb_entry_t)) >> FRAME_WIDTH;
     frame_free(KA2PA((uintptr_t) as->arch.itsb));
     return cnt;
…
 }
 
-/** Perform sparc64-specific tasks when an address space becomes active on the processor.
+/** Perform sparc64-specific tasks when an address space becomes active on the
+ * processor.
  *
  * Install ASID and map TSBs.
…
         uintptr_t tsb = (uintptr_t) as->arch.itsb;
 
-        if (!overlaps(tsb, 8*PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
+        if (!overlaps(tsb, 8 * PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
             /*
              * TSBs were allocated from memory not covered
…
 }
 
-/** Perform sparc64-specific tasks when an address space is removed from the processor.
+/** Perform sparc64-specific tasks when an address space is removed from the
+ * processor.
  *
  * Demap TSBs.
…
         uintptr_t tsb = (uintptr_t) as->arch.itsb;
 
-        if (!overlaps(tsb, 8*PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
+        if (!overlaps(tsb, 8 * PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
             /*
              * TSBs were allocated from memory not covered
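The order handed to frame_alloc() comes from the combined size of both TSBs. A standalone rerun of that computation; the 16-byte entry size, the 8K frame width, and the local fnzb32() stand-in are all assumptions rather than HelenOS code:

    #include <stdio.h>

    /* Assumed contract of fnzb32(): index of the most significant set bit. */
    static int fnzb32(unsigned int v)
    {
        int i = -1;
        while (v) {
            v >>= 1;
            i++;
        }
        return i;
    }

    int main(void)
    {
        const int entries = 2048 + 2048;    /* ITSB + DTSB at TSB_SIZE == 2 */
        const int entry_size = 16;          /* assumed sizeof(tsb_entry_t) */
        const int frame_width = 13;         /* assumed 8K frames */

        int order = fnzb32((unsigned) (entries * entry_size) >> frame_width);
        printf("order %d => %d frames\n", order, 1 << order);
        /* prints: order 3 => 8 frames */
        return 0;
    }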
kernel/arch/sparc64/src/mm/frame.c
--- r1ecdbb0
+++ r771cd22
         if (confdata == ADDR2PFN(KA2PA(PFN2ADDR(0))))
             confdata = ADDR2PFN(KA2PA(PFN2ADDR(2)));
-        zone_create(ADDR2PFN(start), SIZE2FRAMES(ALIGN_DOWN(size, FRAME_SIZE)), confdata, 0);
-        last_frame = max(last_frame, start + ALIGN_UP(size, FRAME_SIZE));
+        zone_create(ADDR2PFN(start),
+            SIZE2FRAMES(ALIGN_DOWN(size, FRAME_SIZE)),
+            confdata, 0);
+        last_frame = max(last_frame, start + ALIGN_UP(size,
+            FRAME_SIZE));
     }
 
…
      * On sparc64, physical memory can start on a non-zero address.
      * The generic frame_init() only marks PFN 0 as not free, so we
-     * must mark the physically first frame not free explicitly here,
-     * no matter what is its address.
+     * must mark the physically first frame not free explicitly
+     * here, no matter what is its address.
      */
     frame_mark_unavailable(ADDR2PFN(KA2PA(PFN2ADDR(0))), 1);
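zone_create() gets the region size rounded down to whole frames, while last_frame advances by the size rounded up. The usual power-of-two alignment helpers behave as below; these definitions are illustrative and the HelenOS macros may differ in detail:

    #include <stdio.h>

    #define ALIGN_DOWN(s, a)    ((s) & ~((a) - 1))
    #define ALIGN_UP(s, a)      (((s) + ((a) - 1)) & ~((a) - 1))

    int main(void)
    {
        unsigned long frame_size = 8192;    /* assumed 8K frames */
        unsigned long size = 10000;

        printf("down=%lu up=%lu\n", ALIGN_DOWN(size, frame_size),
            ALIGN_UP(size, frame_size));    /* prints: down=8192 up=16384 */
        return 0;
    }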
kernel/arch/sparc64/src/mm/page.c
--- r1ecdbb0
+++ r771cd22
 /** Entries locked in DTLB of BSP.
  *
- * Application processors need to have the same locked entries
- * in their DTLBs as the bootstrap processor.
+ * Application processors need to have the same locked entries in their DTLBs as
+ * the bootstrap processor.
  */
 static struct {
…
 /** Map memory-mapped device into virtual memory.
  *
- * So far, only DTLB is used to map devices into memory.
- * Chances are that there will be only a limited amount of
- * devices that the kernel itself needs to lock in DTLB.
+ * So far, only DTLB is used to map devices into memory. Chances are that there
+ * will be only a limited amount of devices that the kernel itself needs to
+ * lock in DTLB.
  *
- * @param physaddr Physical address of the page where the
- *     device is located. Must be at least
- *     page-aligned.
- * @param size Size of the device's registers. Must not
- *     exceed 4M and must include extra space
- *     caused by the alignment.
+ * @param physaddr Physical address of the page where the device is located.
+ *     Must be at least page-aligned.
+ * @param size Size of the device's registers. Must not exceed 4M and must
+ *     include extra space caused by the alignment.
  *
- * @return Virtual address of the page where the device is
- *     mapped.
+ * @return Virtual address of the page where the device is mapped.
  */
 uintptr_t hw_map(uintptr_t physaddr, size_t size)
…
         { PAGESIZE_8K, PAGE_SIZE, 4 },    /* 32K */
         { PAGESIZE_64K, 0, 1},    /* 64K */
-        { PAGESIZE_64K, 8*PAGE_SIZE, 2 },    /* 128K */
-        { PAGESIZE_64K, 8*PAGE_SIZE, 4 },    /* 256K */
+        { PAGESIZE_64K, 8 * PAGE_SIZE, 2 },    /* 128K */
+        { PAGESIZE_64K, 8 * PAGE_SIZE, 4 },    /* 256K */
         { PAGESIZE_512K, 0, 1 },    /* 512K */
-        { PAGESIZE_512K, 64*PAGE_SIZE, 2 },    /* 1M */
-        { PAGESIZE_512K, 64*PAGE_SIZE, 4 },    /* 2M */
+        { PAGESIZE_512K, 64 * PAGE_SIZE, 2 },    /* 1M */
+        { PAGESIZE_512K, 64 * PAGE_SIZE, 4 },    /* 2M */
         { PAGESIZE_4M, 0, 1 },    /* 4M */
-        { PAGESIZE_4M, 512*PAGE_SIZE, 2 }    /* 8M */
+        { PAGESIZE_4M, 512 * PAGE_SIZE, 2 }    /* 8M */
     };
 
     ASSERT(ALIGN_UP(physaddr, PAGE_SIZE) == physaddr);
-    ASSERT(size <= 8*1024*1024);
+    ASSERT(size <= 8 * 1024 * 1024);
 
     if (size <= FRAME_SIZE)
…
          * First, insert the mapping into DTLB.
          */
-        dtlb_insert_mapping(virtaddr + i*sizemap[order].increment,
-            physaddr + i*sizemap[order].increment,
-            sizemap[order].pagesize_code, true, false);
+        dtlb_insert_mapping(virtaddr + i * sizemap[order].increment,
+            physaddr + i * sizemap[order].increment,
+            sizemap[order].pagesize_code, true, false);
 
 #ifdef CONFIG_SMP
…
          */
         bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].virt_page =
-            virtaddr + i*sizemap[order].increment;
+            virtaddr + i * sizemap[order].increment;
         bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].phys_page =
-            physaddr + i*sizemap[order].increment;
+            physaddr + i * sizemap[order].increment;
         bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].pagesize_code =
             sizemap[order].pagesize_code;
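Each sizemap row states how many locked DTLB entries of which page size cover a region and how far apart they sit. For the 8M row { PAGESIZE_4M, 512 * PAGE_SIZE, 2 }, that means two 4M entries spaced 512 * 8K == 4M apart (the 8K PAGE_SIZE is assumed here):

    #include <stdio.h>

    int main(void)
    {
        const unsigned long page_size = 8192;         /* assumed 8K */
        const unsigned long increment = 512 * page_size;
        const unsigned int count = 2;                 /* from the 8M row */
        unsigned int i;

        for (i = 0; i < count; i++)
            printf("4M entry %u at offset %#lx\n", i, i * increment);
        /* prints: 4M entry 0 at offset 0
                   4M entry 1 at offset 0x400000 */
        return 0;
    }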
kernel/arch/sparc64/src/mm/tlb.c
--- r1ecdbb0
+++ r771cd22
 static void dtlb_pte_copy(pte_t *t, bool ro);
 static void itlb_pte_copy(pte_t *t);
-static void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const char *str);
-static void do_fast_data_access_mmu_miss_fault(istate_t *istate, tlb_tag_access_reg_t tag, const char *str);
-static void do_fast_data_access_protection_fault(istate_t *istate, tlb_tag_access_reg_t tag, const char *str);
+static void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const
+    char *str);
+static void do_fast_data_access_mmu_miss_fault(istate_t *istate,
+    tlb_tag_access_reg_t tag, const char *str);
+static void do_fast_data_access_protection_fault(istate_t *istate,
+    tlb_tag_access_reg_t tag, const char *str);
 
 char *context_encoding[] = {
…
  * @param cacheable True if the mapping is cacheable, false otherwise.
  */
-void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize, bool locked, bool cacheable)
+void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize, bool
+    locked, bool cacheable)
 {
     tlb_tag_access_reg_t tag;
…
  *
  * @param t Page Table Entry to be copied.
- * @param ro If true, the entry will be created read-only, regardless of its w field.
+ * @param ro If true, the entry will be created read-only, regardless of its w
+ *     field.
  */
 void dtlb_pte_copy(pte_t *t, bool ro)
…
     } else {
         /*
-         * Forward the page fault to the address space page fault handler.
+         * Forward the page fault to the address space page fault
+         * handler.
          */
         page_table_unlock(AS, true);
         if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
-            do_fast_instruction_access_mmu_miss_fault(istate, __FUNCTION__);
+            do_fast_instruction_access_mmu_miss_fault(istate,
+                __FUNCTION__);
         }
     }
…
 /** DTLB miss handler.
  *
- * Note that some faults (e.g. kernel faults) were already resolved
- * by the low-level, assembly language part of the fast_data_access_mmu_miss
- * handler.
+ * Note that some faults (e.g. kernel faults) were already resolved by the
+ * low-level, assembly language part of the fast_data_access_mmu_miss handler.
  */
 void fast_data_access_mmu_miss(int n, istate_t *istate)
…
         if (!tag.vpn) {
             /* NULL access in kernel */
-            do_fast_data_access_mmu_miss_fault(istate, tag, __FUNCTION__);
-        }
-        do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected kernel page fault.");
+            do_fast_data_access_mmu_miss_fault(istate, tag,
+                __FUNCTION__);
+        }
+        do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected "
+            "kernel page fault.");
     }
…
         page_table_unlock(AS, true);
         if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
-            do_fast_data_access_mmu_miss_fault(istate, tag, __FUNCTION__);
+            do_fast_data_access_mmu_miss_fault(istate, tag,
+                __FUNCTION__);
         }
     }
…
     if (t && PTE_WRITABLE(t)) {
         /*
-         * The mapping was found in the software page hash table and is writable.
-         * Demap the old mapping and insert an updated mapping into DTLB.
+         * The mapping was found in the software page hash table and is
+         * writable. Demap the old mapping and insert an updated mapping
+         * into DTLB.
          */
         t->a = true;
…
     } else {
         /*
-         * Forward the page fault to the address space page fault handler.
+         * Forward the page fault to the address space page fault
+         * handler.
          */
         page_table_unlock(AS, true);
         if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
-            do_fast_data_access_protection_fault(istate, tag, __FUNCTION__);
+            do_fast_data_access_protection_fault(istate, tag,
+                __FUNCTION__);
         }
     }
…
         t.value = itlb_tag_read_read(i);
 
-        printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n",
-            i, t.vpn, t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag, d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
+        printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
+            "ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
+            "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
+            t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
+            d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
     }
 
…
         t.value = dtlb_tag_read_read(i);
 
-        printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n",
-            i, t.vpn, t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag, d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
+        printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
+            "ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
+            "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
+            t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
+            d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
     }
 
 }
 
-void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const char *str)
+void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const char
+    *str)
 {
     fault_if_from_uspace(istate, "%s\n", str);
…
 }
 
-void do_fast_data_access_mmu_miss_fault(istate_t *istate, tlb_tag_access_reg_t tag, const char *str)
+void do_fast_data_access_mmu_miss_fault(istate_t *istate, tlb_tag_access_reg_t
+    tag, const char *str)
 {
     uintptr_t va;
 
     va = tag.vpn << PAGE_WIDTH;
 
-    fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va, tag.context);
+    fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
+        tag.context);
     dump_istate(istate);
     printf("Faulting page: %p, ASID=%d\n", va, tag.context);
…
 }
 
-void do_fast_data_access_protection_fault(istate_t *istate, tlb_tag_access_reg_t tag, const char *str)
+void do_fast_data_access_protection_fault(istate_t *istate, tlb_tag_access_reg_t
+    tag, const char *str)
 {
     uintptr_t va;
 
     va = tag.vpn << PAGE_WIDTH;
 
-    fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va, tag.context);
+    fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
+        tag.context);
     printf("Faulting page: %p, ASID=%d\n", va, tag.context);
     dump_istate(istate);
…
     sfar = dtlb_sfar_read();
 
-    printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, fv=%d\n",
-        sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w, sfsr.ow, sfsr.fv);
+    printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, "
+        "fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
+        sfsr.ow, sfsr.fv);
     printf("DTLB SFAR: address=%p\n", sfar);
 
…
 }
 
-/** Invalidate all ITLB and DTLB entries that belong to specified ASID (Context).
+/** Invalidate all ITLB and DTLB entries that belong to specified ASID
+ * (Context).
  *
  * @param asid Address Space ID.
…
 }
 
-/** Invalidate all ITLB and DTLB entries for specified page range in specified address space.
+/** Invalidate all ITLB and DTLB entries for specified page range in specified
+ * address space.
  *
  * @param asid Address Space ID.
…
 
     for (i = 0; i < cnt; i++) {
-        itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY, page + i * PAGE_SIZE);
-        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY, page + i * PAGE_SIZE);
+        itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY, page + i *
+            PAGE_SIZE);
+        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY, page + i *
+            PAGE_SIZE);
     }
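Most of the hunks above merely split long printf() format strings. Adjacent string literals are concatenated at translation time, so the wrapped strings compile to exactly the same bytes as the originals:

    #include <stdio.h>

    int main(void)
    {
        /* Identical output from both calls: */
        printf("Unexpected kernel page fault.\n");
        printf("Unexpected "
            "kernel page fault.\n");
        return 0;
    }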
kernel/arch/sparc64/src/mm/tsb.c
--- r1ecdbb0
+++ r771cd22
 #include <debug.h>
 
-#define TSB_INDEX_MASK    ((1<<(21+1+TSB_SIZE-PAGE_WIDTH))-1)
+#define TSB_INDEX_MASK    ((1 << (21 + 1 + TSB_SIZE - PAGE_WIDTH)) - 1)
 
 /** Invalidate portion of TSB.
  *
- * We assume that the address space is already locked.
- * Note that respective portions of both TSBs
- * are invalidated at a time.
+ * We assume that the address space is already locked. Note that respective
+ * portions of both TSBs are invalidated at a time.
  *
  * @param as Address space.
  * @param page First page to invalidate in TSB.
- * @param pages Number of pages to invalidate. Value of (count_t) -1 means the whole TSB.
+ * @param pages Number of pages to invalidate. Value of (count_t) -1 means the
+ *     whole TSB.
  */
 void tsb_invalidate(as_t *as, uintptr_t page, count_t pages)
…
 
     for (i = 0; i < cnt; i++) {
-        as->arch.itsb[(i0 + i) & (ITSB_ENTRY_COUNT-1)].tag.invalid = true;
-        as->arch.dtsb[(i0 + i) & (DTSB_ENTRY_COUNT-1)].tag.invalid = true;
+        as->arch.itsb[(i0 + i) & (ITSB_ENTRY_COUNT - 1)].tag.invalid =
+            true;
+        as->arch.dtsb[(i0 + i) & (DTSB_ENTRY_COUNT - 1)].tag.invalid =
+            true;
     }
 }
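With TSB_SIZE == 2 and assuming 8K pages (PAGE_WIDTH == 13), the shift amount is 21 + 1 + 2 - 13 == 11, so the mask covers 2048 index values, matching the per-TSB entry count:

    #include <stdio.h>

    #define TSB_SIZE          2
    #define PAGE_WIDTH        13    /* assumed: 8K pages */
    #define TSB_INDEX_MASK    ((1 << (21 + 1 + TSB_SIZE - PAGE_WIDTH)) - 1)

    int main(void)
    {
        printf("%#x\n", (unsigned) TSB_INDEX_MASK);    /* prints: 0x7ff */
        return 0;
    }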
kernel/arch/sparc64/src/smp/ipi.c
--- r1ecdbb0
+++ r771cd22
 
     /*
-     * This functin might enable interrupts for a while.
+     * This function might enable interrupts for a while.
      * In order to prevent migration to another processor,
      * we explicitly disable preemption.
kernel/arch/sparc64/src/sparc64.c
--- r1ecdbb0
+++ r771cd22
 bootinfo_t bootinfo;
 
+/** Perform sparc64 specific initialization before main_bsp() is called. */
 void arch_pre_main(void)
 {
…
 }
 
+/** Perform sparc64 specific initialization before mm is initialized. */
 void arch_pre_mm_init(void)
 {
…
 }
 
+/** Perform sparc64 specific initialization after mm is initialized. */
 void arch_post_mm_init(void)
 {
     if (config.cpu_active == 1) {
-        irq_init(1<<11, 128);
+        /*
+         * We have 2^11 different interrupt vectors.
+         * But we only create 128 buckets.
+         */
+        irq_init(1 << 11, 128);
+
         standalone_sparc64_console_init();
     }
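The new comment documents the irq_init() arguments: 2^11 possible interrupt vectors are hashed into just 128 buckets, i.e. 16 vectors per bucket on average:

    #include <stdio.h>

    int main(void)
    {
        int vectors = 1 << 11;    /* 2048 interrupt vectors */
        int buckets = 128;

        printf("%d vectors / %d buckets = %d per bucket\n",
            vectors, buckets, vectors / buckets);    /* 16 per bucket */
        return 0;
    }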
kernel/generic/include/macros.h
r1ecdbb0 r771cd22 39 39 #include <typedefs.h> 40 40 41 #define is_digit(d) (((d) >= '0') && ((d) <= '9')) 42 #define is_lower(c) (((c) >= 'a') && ((c) <= 'z')) 43 #define is_upper(c) (((c) >= 'A') && ((c) <= 'Z')) 44 #define is_alpha(c) (is_lower(c) || is_upper(c)) 45 #define is_alphanum(c) (is_alpha(c) || is_digit(c)) 46 #define is_white(c) (((c) == ' ') || ((c) == '\t') || ((c) == '\n') || ((c) == '\r')) 41 #define is_digit(d) (((d) >= '0') && ((d) <= '9')) 42 #define is_lower(c) (((c) >= 'a') && ((c) <= 'z')) 43 #define is_upper(c) (((c) >= 'A') && ((c) <= 'Z')) 44 #define is_alpha(c) (is_lower(c) || is_upper(c)) 45 #define is_alphanum(c) (is_alpha(c) || is_digit(c)) 46 #define is_white(c) (((c) == ' ') || ((c) == '\t') || ((c) == '\n') || \ 47 ((c) == '\r')) 47 48 48 49 #define min(a,b) ((a) < (b) ? (a) : (b)) -
kernel/generic/src/main/main.c
--- r1ecdbb0
+++ r771cd22
 #endif
 
-#define CONFIG_STACK_SIZE    ((1<<STACK_FRAMES)*STACK_SIZE)
+#define CONFIG_STACK_SIZE    ((1 << STACK_FRAMES) * STACK_SIZE)
 
 /** Main kernel routine for bootstrap CPU.
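CONFIG_STACK_SIZE is 2^STACK_FRAMES frames' worth of kernel stack. A worked example; STACK_FRAMES and STACK_SIZE are architecture-defined, so the values below are assumptions chosen only to show the arithmetic:

    #include <stdio.h>

    #define STACK_FRAMES         1       /* assumed */
    #define STACK_SIZE           8192    /* assumed */
    #define CONFIG_STACK_SIZE    ((1 << STACK_FRAMES) * STACK_SIZE)

    int main(void)
    {
        printf("%d\n", CONFIG_STACK_SIZE);    /* prints: 16384 */
        return 0;
    }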
kernel/generic/src/mm/frame.c
--- r1ecdbb0
+++ r771cd22
     printf("Zone size: %zd frames (%zdK)\n", zone->count, ((zone->count) * FRAME_SIZE) >> 10);
     printf("Allocated space: %zd frames (%zdK)\n", zone->busy_count, (zone->busy_count * FRAME_SIZE) >> 10);
-    printf("Available space: %zd (%zdK)\n", zone->free_count, (zone->free_count * FRAME_SIZE) >> 10);
+    printf("Available space: %zd frames (%zdK)\n", zone->free_count, (zone->free_count * FRAME_SIZE) >> 10);
     buddy_system_structure_print(zone->buddy_system, FRAME_SIZE);
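The >> 10 in these printf() calls converts a byte count to KiB. For example, with an assumed 8K frame size:

    #include <stdio.h>

    int main(void)
    {
        unsigned long frame_size = 8192;    /* assumed */
        unsigned long count = 1000;

        printf("%lu frames (%luK)\n", count,
            (count * frame_size) >> 10);    /* prints: 1000 frames (8000K) */
        return 0;
    }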
kernel/generic/src/proc/task.c
--- r1ecdbb0
+++ r771cd22
 /** Get accounting data of given task.
  *
- * Note that task_lock on @t must be already held and
+ * Note that task lock of 't' must be already held and
  * interrupts must be already disabled.
  *