Changes in / [7aaed09:1761268] in mainline
Files:
- 31 deleted
- 66 edited
Legend:
- unprefixed lines: unmodified
- lines prefixed with "-": present only in r7aaed09 (removed)
- lines prefixed with "+": present only in r1761268 (added)
kernel/Makefile
r7aaed09 r1761268
@@ -229 +229 @@
     generic/src/syscall/syscall.c \
     generic/src/syscall/copy.c \
-    generic/src/mm/km.c \
     generic/src/mm/reserve.c \
     generic/src/mm/buddy.c \
@@ -246 +245 @@
     generic/src/lib/str.c \
     generic/src/lib/elf.c \
-    generic/src/lib/ra.c \
     generic/src/lib/rd.c \
     generic/src/printf/printf_core.c \
kernel/arch/abs32le/Makefile.inc
r7aaed09 r1761268
@@ -57 +57 @@
     arch/$(KARCH)/src/smp/smp.c \
     arch/$(KARCH)/src/smp/ipi.c \
-    arch/$(KARCH)/src/mm/km.c \
     arch/$(KARCH)/src/mm/as.c \
     arch/$(KARCH)/src/mm/frame.c \
kernel/arch/abs32le/include/mm/frame.h
r7aaed09 r1761268
@@ -41 +41 @@
 #include <typedefs.h>
 
-extern void frame_low_arch_init(void);
-extern void frame_high_arch_init(void);
+extern void frame_arch_init(void);
 extern void physmem_print(void);
 
kernel/arch/abs32le/src/mm/frame.c
r7aaed09 r1761268
@@ -50 +50 @@
 
 
-void frame_low_arch_init(void)
-{
-}
-
-void frame_high_arch_init(void)
+void frame_arch_init(void)
 {
 }
kernel/arch/abs32le/src/mm/page.c
r7aaed09 r1761268
@@ -56 +56 @@
 }
 
+
+uintptr_t hw_map(uintptr_t physaddr, size_t size)
+{
+    return physaddr;
+}
+
 void page_fault(unsigned int n __attribute__((unused)), istate_t *istate)
 {
kernel/arch/amd64/Makefile.inc
r7aaed09 r1761268
@@ -86 +86 @@
     arch/$(KARCH)/src/bios/bios.c \
     arch/$(KARCH)/src/interrupt.c \
-    arch/$(KARCH)/src/mm/km.c \
     arch/$(KARCH)/src/mm/as.c \
     arch/$(KARCH)/src/mm/frame.c \
kernel/arch/amd64/include/mm/frame.h
r7aaed09 r1761268
@@ -43 +43 @@
 #include <typedefs.h>
 
-extern void frame_low_arch_init(void);
-extern void frame_high_arch_init(void);
+extern uintptr_t last_frame;
+extern void frame_arch_init(void);
 extern void physmem_print(void);
 
kernel/arch/amd64/src/mm/page.c
r7aaed09 r1761268
@@ -46 +46 @@
 #include <panic.h>
 #include <align.h>
-#include <macros.h>
 
 void page_arch_init(void)
 {
-    if (config.cpu_active > 1) {
+    if (config.cpu_active == 1) {
+        uintptr_t cur;
+        unsigned int identity_flags =
+            PAGE_CACHEABLE | PAGE_EXEC | PAGE_GLOBAL | PAGE_WRITE;
+
+        page_mapping_operations = &pt_mapping_operations;
+
+        page_table_lock(AS_KERNEL, true);
+
+        /*
+         * PA2KA(identity) mapping for all frames.
+         */
+        for (cur = 0; cur < last_frame; cur += FRAME_SIZE)
+            page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, identity_flags);
+
+        page_table_unlock(AS_KERNEL, true);
+
+        exc_register(14, "page_fault", true, (iroutine_t) page_fault);
         write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
-        return;
-    }
-
-    uintptr_t cur;
-    unsigned int identity_flags =
-        PAGE_CACHEABLE | PAGE_EXEC | PAGE_GLOBAL | PAGE_WRITE;
-
-    page_mapping_operations = &pt_mapping_operations;
-
-    page_table_lock(AS_KERNEL, true);
-
-    /*
-     * PA2KA(identity) mapping for all low-memory frames.
-     */
-    for (cur = 0; cur < min(config.identity_size, config.physmem_end);
-        cur += FRAME_SIZE)
-        page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, identity_flags);
-
-    page_table_unlock(AS_KERNEL, true);
-
-    exc_register(14, "page_fault", true, (iroutine_t) page_fault);
-    write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
+    } else
+        write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
 }
 
@@ -98 +94 @@
 }
 
+uintptr_t hw_map(uintptr_t physaddr, size_t size)
+{
+    if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH))
+        panic("Unable to map physical memory %p (%zu bytes).",
+            (void *) physaddr, size);
+
+    uintptr_t virtaddr = PA2KA(last_frame);
+    pfn_t i;
+
+    page_table_lock(AS_KERNEL, true);
+
+    for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++)
+        page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE | PAGE_WRITE);
+
+    page_table_unlock(AS_KERNEL, true);
+
+    last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE);
+
+    return virtaddr;
+}
+
 /** @}
  */
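The hw_map() restored above is the pre-non-identity way of making device memory accessible: it hands out kernel virtual addresses immediately behind physical memory (last_frame) and never unmaps. A minimal, hypothetical caller is sketched below; the device base address 0xfee00000 and the register offset 0x20 are illustrative values, not taken from this changeset:

    /* Map one page of device registers, then read a 32-bit register.
     * hw_map() is the routine re-added in the hunk above; everything
     * else in this sketch is an assumed example. */
    static uint32_t device_reg_read(void)
    {
        uintptr_t regs = hw_map(0xfee00000, PAGE_SIZE);
        return *((volatile uint32_t *) (regs + 0x20));
    }

Note the design consequence visible in the code: every call permanently advances last_frame, so mappings created this way are never reclaimed.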
kernel/arch/arm32/Makefile.inc
r7aaed09 r1761268
@@ -53 +53 @@
     arch/$(KARCH)/src/debug/stacktrace.c \
     arch/$(KARCH)/src/debug/stacktrace_asm.S \
-    arch/$(KARCH)/src/mm/km.c \
     arch/$(KARCH)/src/mm/as.c \
     arch/$(KARCH)/src/mm/frame.c \
kernel/arch/arm32/include/mach/integratorcp/integratorcp.h
r7aaed09 r1761268
@@ -103 +103 @@
 extern void icp_cpu_halt(void);
 extern void icp_irq_exception(unsigned int, istate_t *);
-extern void icp_get_memory_extents(uintptr_t *, size_t *);
+extern void icp_get_memory_extents(uintptr_t *, uintptr_t *);
 extern void icp_frame_init(void);
 extern size_t icp_get_irq_count(void);
kernel/arch/arm32/include/mach/testarm/testarm.h
r7aaed09 r1761268
@@ -71 +71 @@
 extern void gxemul_cpu_halt(void);
 extern void gxemul_irq_exception(unsigned int, istate_t *);
-extern void gxemul_get_memory_extents(uintptr_t *, size_t *);
+extern void gxemul_get_memory_extents(uintptr_t *, uintptr_t *);
 extern void gxemul_frame_init(void);
 extern size_t gxemul_get_irq_count(void);
kernel/arch/arm32/include/machine_func.h
r7aaed09 r1761268
@@ -50 +50 @@
     void (*machine_timer_irq_start)(void);
     void (*machine_cpu_halt)(void);
-    void (*machine_get_memory_extents)(uintptr_t *, size_t *);
+    void (*machine_get_memory_extents)(uintptr_t *, uintptr_t *);
     void (*machine_irq_exception)(unsigned int, istate_t *);
     void (*machine_frame_init)(void);
@@ -81 +81 @@
  * @param size Place to store memory size.
  */
-extern void machine_get_memory_extents(uintptr_t *start, size_t *size);
+extern void machine_get_memory_extents(uintptr_t *start, uintptr_t *size);
 
 /** Interrupt exception handler.
kernel/arch/arm32/include/mm/frame.h
r7aaed09 r1761268
@@ -61 +61 @@
 #endif
 
-extern void frame_low_arch_init(void);
-extern void frame_high_arch_init(void);
+extern uintptr_t last_frame;
+
+extern void frame_arch_init(void);
 extern void boot_page_table_free(void);
 #define physmem_print()
kernel/arch/arm32/include/mm/page.h
r7aaed09 r1761268
@@ -54 +54 @@
 
 /* Number of entries in each level. */
-#define PTL0_ENTRIES_ARCH  (1 << 12)  /* 4096 */
+#define PTL0_ENTRIES_ARCH  (2 << 12)  /* 4096 */
 #define PTL1_ENTRIES_ARCH  0
 #define PTL2_ENTRIES_ARCH  0
 /* coarse page tables used (256 * 4 = 1KB per page) */
-#define PTL3_ENTRIES_ARCH  (1 << 8)  /* 256 */
+#define PTL3_ENTRIES_ARCH  (2 << 8)  /* 256 */
 
 /* Page table sizes for each level. */
kernel/arch/arm32/src/mach/gta02/gta02.c
r7aaed09 r1761268
@@ -65 +65 @@
 static void gta02_timer_irq_start(void);
 static void gta02_cpu_halt(void);
-static void gta02_get_memory_extents(uintptr_t *start, size_t *size);
+static void gta02_get_memory_extents(uintptr_t *start, uintptr_t *size);
 static void gta02_irq_exception(unsigned int exc_no, istate_t *istate);
 static void gta02_frame_init(void);
@@ -123 +123 @@
  * @param size Place to store memory size.
  */
-static void gta02_get_memory_extents(uintptr_t *start, size_t *size)
+static void gta02_get_memory_extents(uintptr_t *start, uintptr_t *size)
 {
     *start = GTA02_MEMORY_START + GTA02_MEMORY_SKIP;
kernel/arch/arm32/src/mach/integratorcp/integratorcp.c
r7aaed09 r1761268
@@ -220 +220 @@
  * @param size Place to store memory size.
  */
-void icp_get_memory_extents(uintptr_t *start, size_t *size)
+void icp_get_memory_extents(uintptr_t *start, uintptr_t *size)
 {
     *start = 0;
kernel/arch/arm32/src/mach/testarm/testarm.c
r7aaed09 r1761268
@@ -202 +202 @@
  * @param size Place to store memory size.
  */
-void gxemul_get_memory_extents(uintptr_t *start, size_t *size)
+void gxemul_get_memory_extents(uintptr_t *start, uintptr_t *size)
 {
     *start = 0;
     *size = *((uintptr_t *) (GXEMUL_MP_ADDRESS + GXEMUL_MP_MEMSIZE_OFFSET));
 }
 
kernel/arch/arm32/src/machine_func.c
r7aaed09 r1761268
@@ -85 +85 @@
  * @param size Place to store memory size.
  */
-void machine_get_memory_extents(uintptr_t *start, size_t *size)
+void machine_get_memory_extents(uintptr_t *start, uintptr_t *size)
 {
     (machine_ops->machine_get_memory_extents)(start, size);
kernel/arch/arm32/src/mm/frame.c
r7aaed09 r1761268
@@ -39 +39 @@
 #include <config.h>
 #include <align.h>
-#include <macros.h>
 
-static void frame_common_arch_init(bool low)
+/** Address of the last frame in the memory. */
+uintptr_t last_frame = 0;
+
+/** Creates memory zones. */
+void frame_arch_init(void)
 {
-    uintptr_t base;
-    size_t size;
+    uintptr_t mem_start, mem_size;
+    uintptr_t first_frame;
+    uintptr_t num_frames;
 
-    machine_get_memory_extents(&base, &size);
-    base = ALIGN_UP(base, FRAME_SIZE);
-    size = ALIGN_DOWN(size, FRAME_SIZE);
+    machine_get_memory_extents(&mem_start, &mem_size);
+    first_frame = ALIGN_UP(mem_start, FRAME_SIZE);
+    last_frame = ALIGN_DOWN(mem_start + mem_size, FRAME_SIZE);
+    num_frames = (last_frame - first_frame) >> FRAME_WIDTH;
 
-    if (!frame_adjust_zone_bounds(low, &base, &size))
-        return;
-
-    if (low) {
-        zone_create(ADDR2PFN(base), SIZE2FRAMES(size),
-            BOOT_PAGE_TABLE_START_FRAME +
-            BOOT_PAGE_TABLE_SIZE_IN_FRAMES,
-            ZONE_AVAILABLE | ZONE_LOWMEM);
-    } else {
-        pfn_t conf = zone_external_conf_alloc(SIZE2FRAMES(size));
-
-        zone_create(ADDR2PFN(base), SIZE2FRAMES(size), conf,
-            ZONE_AVAILABLE | ZONE_HIGHMEM);
-    }
+    /* All memory as one zone */
+    zone_create(first_frame >> FRAME_WIDTH, num_frames,
+        BOOT_PAGE_TABLE_START_FRAME + BOOT_PAGE_TABLE_SIZE_IN_FRAMES, 0);
 
-}
-
-/** Create low memory zones. */
-void frame_low_arch_init(void)
-{
-    frame_common_arch_init(true);
-
     /* blacklist boot page table */
     frame_mark_unavailable(BOOT_PAGE_TABLE_START_FRAME,
@@ -77 +64 @@
 
     machine_frame_init();
-}
-
-/** Create high memory zones. */
-void frame_high_arch_init(void)
-{
-    frame_common_arch_init(false);
 }
kernel/arch/arm32/src/mm/page.c
r7aaed09 r1761268
@@ -37 +37 @@
 #include <genarch/mm/page_pt.h>
 #include <mm/page.h>
-#include <arch/mm/frame.h>
 #include <align.h>
 #include <config.h>
@@ -43 +42 @@
 #include <typedefs.h>
 #include <interrupt.h>
-#include <macros.h>
+#include <arch/mm/frame.h>
 
 /** Initializes page tables.
@@ -58 +57 @@
 
     uintptr_t cur;
-
     /* Kernel identity mapping */
-    for (cur = PHYSMEM_START_ADDR;
-        cur < min(config.identity_size, config.physmem_end);
-        cur += FRAME_SIZE)
+    for (cur = PHYSMEM_START_ADDR; cur < last_frame; cur += FRAME_SIZE)
         page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, flags);
 
     /* Create mapping for exception table at high offset */
 #ifdef HIGH_EXCEPTION_VECTORS
-    // XXX: fixme to use proper non-identity page
     void *virtaddr = frame_alloc(ONE_FRAME, FRAME_KA);
-    page_mapping_insert(AS_KERNEL, EXC_BASE_ADDRESS, KA2PA(virtaddr),
-        flags);
+    page_mapping_insert(AS_KERNEL, EXC_BASE_ADDRESS, KA2PA(virtaddr), flags);
 #else
 #error "Only high exception vector supported now"
@@ -84 +78 @@
 }
 
+/** Maps device into the kernel space.
+ *
+ * Maps physical address of device into kernel virtual address space (so it can
+ * be accessed only by kernel through virtual address).
+ *
+ * @param physaddr Physical address where device is connected.
+ * @param size Length of area where device is present.
+ *
+ * @return Virtual address where device will be accessible.
+ */
+uintptr_t hw_map(uintptr_t physaddr, size_t size)
+{
+    if (last_frame + ALIGN_UP(size, PAGE_SIZE) >
+        KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH)) {
+        panic("Unable to map physical memory %p (%d bytes).",
+            (void *) physaddr, size);
+    }
+
+    uintptr_t virtaddr = PA2KA(last_frame);
+    pfn_t i;
+
+    page_table_lock(AS_KERNEL, true);
+    for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++) {
+        page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i),
+            physaddr + PFN2ADDR(i),
+            PAGE_NOT_CACHEABLE | PAGE_READ | PAGE_WRITE | PAGE_KERNEL);
+    }
+    page_table_unlock(AS_KERNEL, true);
+
+    last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE);
+    return virtaddr;
+}
+
 /** @}
  */
kernel/arch/ia32/Makefile.inc
r7aaed09 r1761268
@@ -99 +99 @@
     arch/$(KARCH)/src/userspace.c \
     arch/$(KARCH)/src/cpu/cpu.c \
-    arch/$(KARCH)/src/mm/km.c \
     arch/$(KARCH)/src/mm/as.c \
     arch/$(KARCH)/src/mm/frame.c \
kernel/arch/ia32/include/mm/frame.h
r7aaed09 r1761268
@@ -43 +43 @@
 #include <typedefs.h>
 
-extern void frame_low_arch_init(void);
-extern void frame_high_arch_init(void);
+extern uintptr_t last_frame;
+
+extern void frame_arch_init(void);
 extern void physmem_print(void);
 
kernel/arch/ia32/src/mm/frame.c
r7aaed09 r1761268
@@ -46 +46 @@
 #include <print.h>
 
+#define PHYSMEM_LIMIT32  UINT64_C(0x07c000000)
+#define PHYSMEM_LIMIT64  UINT64_C(0x200000000)
+
 size_t hardcoded_unmapped_ktext_size = 0;
 size_t hardcoded_unmapped_kdata_size = 0;
 
-static void init_e820_memory(pfn_t minconf, bool low)
+uintptr_t last_frame = 0;
+
+static void init_e820_memory(pfn_t minconf)
 {
     unsigned int i;
 
     for (i = 0; i < e820counter; i++) {
-        uintptr_t base = (uintptr_t) e820table[i].base_address;
-        size_t size = (size_t) e820table[i].size;
-
-        if (!frame_adjust_zone_bounds(low, &base, &size))
+        uint64_t base = e820table[i].base_address;
+        uint64_t size = e820table[i].size;
+
+#ifdef __32_BITS__
+        /*
+         * XXX FIXME:
+         *
+         * Ignore zones which start above PHYSMEM_LIMIT32
+         * or clip zones which go beyond PHYSMEM_LIMIT32.
+         *
+         * The PHYSMEM_LIMIT32 (2 GB - 64 MB) is a rather
+         * arbitrary constant which allows to have at
+         * least 64 MB in the kernel address space to
+         * map hardware resources.
+         *
+         * The kernel uses fixed 1:1 identity mapping
+         * of the physical memory with 2:2 GB split.
+         * This is a severe limitation of the current
+         * kernel memory management.
+         *
+         */
+
+        if (base > PHYSMEM_LIMIT32)
             continue;
+
+        if (base + size > PHYSMEM_LIMIT32)
+            size = PHYSMEM_LIMIT32 - base;
+#endif
+
+#ifdef __64_BITS__
+        /*
+         * XXX FIXME:
+         *
+         * Ignore zones which start above PHYSMEM_LIMIT64
+         * or clip zones which go beyond PHYSMEM_LIMIT64.
+         *
+         * The PHYSMEM_LIMIT64 (8 GB) is the size of the
+         * fixed 1:1 identically mapped physical memory
+         * accessible during the bootstrap process.
+         * This is a severe limitation of the current
+         * kernel memory management.
+         *
+         */
+
+        if (base > PHYSMEM_LIMIT64)
+            continue;
+
+        if (base + size > PHYSMEM_LIMIT64)
+            size = PHYSMEM_LIMIT64 - base;
+#endif
 
         if (e820table[i].type == MEMMAP_MEMORY_AVAILABLE) {
@@ -66 +116 @@
                 FRAME_SIZE);
 
+            pfn_t pfn = ADDR2PFN(new_base);
             size_t count = SIZE2FRAMES(new_size);
-            pfn_t pfn = ADDR2PFN(new_base);
 
             pfn_t conf;
-
-            if (low) {
-                if ((minconf < pfn) || (minconf >= pfn + count))
-                    conf = pfn;
-                else
-                    conf = minconf;
-                zone_create(pfn, count, conf,
-                    ZONE_AVAILABLE | ZONE_LOWMEM);
-            } else {
-                conf = zone_external_conf_alloc(count);
-                zone_create(pfn, count, conf,
-                    ZONE_AVAILABLE | ZONE_HIGHMEM);
-            }
+            if ((minconf < pfn) || (minconf >= pfn + count))
+                conf = pfn;
+            else
+                conf = minconf;
+
+            zone_create(pfn, count, conf, ZONE_AVAILABLE);
+
+            // XXX this has to be removed
+            if (last_frame < ALIGN_UP(new_base + new_size, FRAME_SIZE))
+                last_frame = ALIGN_UP(new_base + new_size, FRAME_SIZE);
         } else if ((e820table[i].type == MEMMAP_MEMORY_ACPI) ||
             (e820table[i].type == MEMMAP_MEMORY_NVS)) {
@@ -131 +179 @@
 
 
-void frame_low_arch_init(void)
+void frame_arch_init(void)
 {
     pfn_t minconf;
@@ -144 +192 @@
 #endif
 
-    init_e820_memory(minconf, true);
+    init_e820_memory(minconf);
 
     /* Reserve frame 0 (BIOS data) */
@@ -158 +206 @@
 }
 
-void frame_high_arch_init(void)
-{
-    if (config.cpu_active == 1)
-        init_e820_memory(0, false);
-}
-
-
 /** @}
  */
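For a concrete sense of the clipping logic added above, take PHYSMEM_LIMIT32 at its commented value of 2 GB - 64 MB (0x7c000000) and a hypothetical e820 zone starting at 0x7b000000 with size 0x02000000: the zone starts below the limit, but 0x7b000000 + 0x02000000 = 0x7d000000 exceeds it, so the size is clipped to 0x7c000000 - 0x7b000000 = 0x01000000 (16 MB). A zone starting at or above the limit is skipped entirely by the continue.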
kernel/arch/ia32/src/mm/page.c
r7aaed09 r1761268
@@ -49 +49 @@
 #include <print.h>
 #include <interrupt.h>
-#include <macros.h>
 
 void page_arch_init(void)
@@ -56 +55 @@
     int flags;
 
-    if (config.cpu_active > 1) {
-        /* Fast path for non-boot CPUs */
+    if (config.cpu_active == 1) {
+        page_mapping_operations = &pt_mapping_operations;
+
+        /*
+         * PA2KA(identity) mapping for all frames until last_frame.
+         */
+        page_table_lock(AS_KERNEL, true);
+        for (cur = 0; cur < last_frame; cur += FRAME_SIZE) {
+            flags = PAGE_CACHEABLE | PAGE_WRITE;
+            if ((PA2KA(cur) >= config.base) && (PA2KA(cur) < config.base + config.kernel_size))
+                flags |= PAGE_GLOBAL;
+            page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, flags);
+        }
+        page_table_unlock(AS_KERNEL, true);
+
+        exc_register(14, "page_fault", true, (iroutine_t) page_fault);
         write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
-        paging_on();
-        return;
-    }
+    } else
+        write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
+
+    paging_on();
+}
 
-    page_mapping_operations = &pt_mapping_operations;
+
+uintptr_t hw_map(uintptr_t physaddr, size_t size)
+{
+    if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH))
+        panic("Unable to map physical memory %p (%zu bytes).",
+            (void *) physaddr, size);
 
-    /*
-     * PA2KA(identity) mapping for all low-memory frames.
-     */
+    uintptr_t virtaddr = PA2KA(last_frame);
+    pfn_t i;
     page_table_lock(AS_KERNEL, true);
-    for (cur = 0; cur < min(config.identity_size, config.physmem_end);
-        cur += FRAME_SIZE) {
-        flags = PAGE_CACHEABLE | PAGE_WRITE;
-        if ((PA2KA(cur) >= config.base) &&
-            (PA2KA(cur) < config.base + config.kernel_size))
-            flags |= PAGE_GLOBAL;
-        page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, flags);
+    for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++) {
+        uintptr_t addr = PFN2ADDR(i);
+        page_mapping_insert(AS_KERNEL, virtaddr + addr, physaddr + addr, PAGE_NOT_CACHEABLE | PAGE_WRITE);
     }
     page_table_unlock(AS_KERNEL, true);
-
-    exc_register(14, "page_fault", true, (iroutine_t) page_fault);
-    write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
 
-    paging_on();
+    last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE);
+
+    return virtaddr;
 }
 
kernel/arch/ia64/Makefile.inc
r7aaed09 r1761268
@@ -52 +52 @@
     arch/$(KARCH)/src/ivt.S \
     arch/$(KARCH)/src/interrupt.c \
-    arch/$(KARCH)/src/mm/km.c \
     arch/$(KARCH)/src/mm/as.c \
     arch/$(KARCH)/src/mm/frame.c \
kernel/arch/ia64/include/mm/frame.h
r7aaed09 r1761268
@@ -43 +43 @@
 #include <typedefs.h>
 
-extern void frame_low_arch_init(void);
-extern void frame_high_arch_init(void);
+extern uintptr_t last_frame;
+
+extern void frame_arch_init(void);
 #define physmem_print()
 
kernel/arch/ia64/include/mm/page.h
r7aaed09 r1761268
@@ -57 +57 @@
 
 /* Firmware area (bellow 4GB in phys mem) */
-#define FW_OFFSET           0x00000000F0000000  // FIXME: [non-ident]
+#define FW_OFFSET           0x00000000F0000000
 /* Legacy IO space */
-#define IO_OFFSET           0x0001000000000000  // FIXME: [non-ident]
+#define IO_OFFSET           0x0001000000000000
 /* Videoram - now mapped to 0 as VGA text mode vram on 0xb8000 */
-#define VIO_OFFSET          0x0002000000000000  // FIXME: [non-ident]
+#define VIO_OFFSET          0x0002000000000000
 
 
kernel/arch/ia64/src/mm/frame.c
r7aaed09 r1761268
@@ -51 +51 @@
 #define MINCONF 1
 
-static void frame_common_arch_init(bool low)
+uintptr_t last_frame = 0;
+
+void frame_arch_init(void)
 {
-    unsigned int i;
+    if (config.cpu_active == 1) {
+        unsigned int i;
+        for (i = 0; i < bootinfo->memmap_items; i++) {
+            if (bootinfo->memmap[i].type == MEMMAP_FREE_MEM) {
+                uint64_t base = bootinfo->memmap[i].base;
+                uint64_t size = bootinfo->memmap[i].size;
+                uint64_t abase = ALIGN_UP(base, FRAME_SIZE);
 
-    for (i = 0; i < bootinfo->memmap_items; i++) {
-        if (bootinfo->memmap[i].type != MEMMAP_FREE_MEM)
-            continue;
+                if (size > FRAME_SIZE)
+                    size -= abase - base;
 
-        uintptr_t base = bootinfo->memmap[i].base;
-        size_t size = bootinfo->memmap[i].size;
-        uintptr_t abase = ALIGN_UP(base, FRAME_SIZE);
-
-        if (size > FRAME_SIZE)
-            size -= abase - base;
-
-        if (!frame_adjust_zone_bounds(low, &abase, &size))
-            continue;
-
-        if (size > MIN_ZONE_SIZE) {
-            pfn_t pfn = ADDR2PFN(abase);
-            size_t count = SIZE2FRAMES(size);
-
-            if (low) {
-                zone_create(pfn, count, max(MINCONF, pfn),
-                    ZONE_AVAILABLE | ZONE_LOWMEM);
-            } else {
-                pfn_t conf;
-
-                conf = zone_external_conf_alloc(count);
-                zone_create(pfn, count, conf,
-                    ZONE_AVAILABLE | ZONE_HIGHMEM);
+                if (size > MIN_ZONE_SIZE) {
+                    zone_create(abase >> FRAME_WIDTH,
+                        size >> FRAME_WIDTH,
+                        max(MINCONF, abase >> FRAME_WIDTH),
+                        0);
+                }
+                if (abase + size > last_frame)
+                    last_frame = abase + size;
             }
         }
-    }
-}
+
+        /*
+         * Blacklist ROM regions.
+         */
+        frame_mark_unavailable(ADDR2PFN(ROM_BASE),
+            SIZE2FRAMES(ROM_SIZE));
 
-void frame_low_arch_init(void)
-{
-    if (config.cpu_active > 1)
-        return;
-
-    frame_common_arch_init(true);
-
-    /*
-     * Blacklist ROM regions.
-     */
-    frame_mark_unavailable(ADDR2PFN(ROM_BASE),
-        SIZE2FRAMES(ROM_SIZE));
-
-    frame_mark_unavailable(ADDR2PFN(KERNEL_RESERVED_AREA_BASE),
-        SIZE2FRAMES(KERNEL_RESERVED_AREA_SIZE));
-}
-
-void frame_high_arch_init(void)
-{
-    if (config.cpu_active > 1)
-        return;
-
-    frame_common_arch_init(false);
+        frame_mark_unavailable(ADDR2PFN(KERNEL_RESERVED_AREA_BASE),
+            SIZE2FRAMES(KERNEL_RESERVED_AREA_SIZE));
+    }
 }
 
kernel/arch/ia64/src/mm/page.c
r7aaed09 r1761268
@@ -255 +255 @@
 }
 
+uintptr_t hw_map(uintptr_t physaddr, size_t size __attribute__ ((unused)))
+{
+    /* THIS is a dirty hack. */
+    return (uintptr_t)((uint64_t)(PA2KA(physaddr)) + VIO_OFFSET);
+}
+
 /** @}
  */
kernel/arch/ia64/src/mm/tlb.c
r7aaed09 r1761268
@@ -480 +480 @@
     va = istate->cr_ifa; /* faulting address */
 
+    page_table_lock(AS, true);
     t = page_mapping_find(AS, va, true);
     if (t) {
@@ -487 +488 @@
          */
         itc_pte_copy(t);
+        page_table_unlock(AS, true);
     } else {
         /*
          * Forward the page fault to address space page fault handler.
          */
+        page_table_unlock(AS, true);
         if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
             fault_if_from_uspace(istate, "Page fault at %p.",
@@ -595 +598 @@
 
 
+    page_table_lock(AS, true);
     pte_t *entry = page_mapping_find(AS, va, true);
     if (entry) {
@@ -602 +606 @@
          */
         dtc_pte_copy(entry);
+        page_table_unlock(AS, true);
     } else {
+        page_table_unlock(AS, true);
         if (try_memmap_io_insertion(va, istate))
             return;
@@ -644 +650 @@
     va = istate->cr_ifa; /* faulting address */
 
+    page_table_lock(AS, true);
     t = page_mapping_find(AS, va, true);
     ASSERT((t) && (t->p));
@@ -660 +667 @@
         }
     }
+    page_table_unlock(AS, true);
 }
 
@@ -675 +683 @@
     va = istate->cr_ifa; /* faulting address */
 
+    page_table_lock(AS, true);
     t = page_mapping_find(AS, va, true);
     ASSERT((t) && (t->p));
@@ -691 +700 @@
         }
     }
+    page_table_unlock(AS, true);
 }
 
@@ -706 +716 @@
     va = istate->cr_ifa; /* faulting address */
 
+    page_table_lock(AS, true);
     t = page_mapping_find(AS, va, true);
     ASSERT((t) && (t->p));
@@ -722 +733 @@
         }
     }
+    page_table_unlock(AS, true);
 }
 
@@ -740 +752 @@
      * Assume a write to a read-only page.
      */
+    page_table_lock(AS, true);
     t = page_mapping_find(AS, va, true);
     ASSERT((t) && (t->p));
@@ -748 +761 @@
         panic_memtrap(istate, PF_ACCESS_WRITE, va, NULL);
     }
+    page_table_unlock(AS, true);
 }
 
@@ -763 +777 @@
     va = istate->cr_ifa; /* faulting address */
 
+    page_table_lock(AS, true);
     t = page_mapping_find(AS, va, true);
     ASSERT(t);
@@ -775 +790 @@
         else
             dtc_pte_copy(t);
+        page_table_unlock(AS, true);
     } else {
+        page_table_unlock(AS, true);
         if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
             fault_if_from_uspace(istate, "Page fault at %p.",
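The hunks above re-add explicit page table locking around page_mapping_find() in every ia64 fault handler; in r1761268 the lookup does not take the lock itself. The reinstated discipline, sketched in isolation (the handler body is condensed and the access type is just one example from the diff):

    page_table_lock(AS, true);
    pte_t *t = page_mapping_find(AS, va, true);
    if (t) {
        /* Use the PTE while the table is locked, e.g. copy it into the TLB. */
        itc_pte_copy(t);
        page_table_unlock(AS, true);
    } else {
        /* Drop the lock before entering the high-level fault handler. */
        page_table_unlock(AS, true);
        as_page_fault(va, PF_ACCESS_EXEC, istate);
    }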
kernel/arch/ia64/src/start.S
r7aaed09 r1761268
@@ -38 +38 @@
 #define KERNEL_TRANSLATION_I    0x0010000000000661
 #define KERNEL_TRANSLATION_D    0x0010000000000661
-#define KERNEL_TRANSLATION_VIO  0x0010000000000671  // FIXME: [non-ident]
-#define KERNEL_TRANSLATION_IO   0x00100FFFFC000671  // FIXME: [non-ident]
-#define KERNEL_TRANSLATION_FW   0x00100000F0000671  // FIXME: [non-ident]
+#define KERNEL_TRANSLATION_VIO  0x0010000000000671
+#define KERNEL_TRANSLATION_IO   0x00100FFFFC000671
+#define KERNEL_TRANSLATION_FW   0x00100000F0000671
 
 .section K_TEXT_START, "ax"
kernel/arch/mips32/Makefile.inc
r7aaed09 r1761268
@@ -63 +63 @@
     arch/$(KARCH)/src/debug/stacktrace.c \
     arch/$(KARCH)/src/debug/stacktrace_asm.S \
-    arch/$(KARCH)/src/mm/km.c \
     arch/$(KARCH)/src/mm/frame.c \
     arch/$(KARCH)/src/mm/page.c \
kernel/arch/mips32/include/mm/frame.h
r7aaed09 r1761268
@@ -41 +41 @@
 #ifndef __ASM__
 
-extern void frame_low_arch_init(void);
-extern void frame_high_arch_init(void);
+extern void frame_arch_init(void);
 extern void physmem_print(void);
 
kernel/arch/mips32/src/mm/frame.c
r7aaed09 r1761268
@@ -131 +131 @@
 }
 
-static void frame_add_region(pfn_t start_frame, pfn_t end_frame, bool low)
-{
-    if (end_frame <= start_frame)
-        return;
-
-    uintptr_t base = start_frame << ZERO_PAGE_WIDTH;
-    size_t size = (end_frame - start_frame) << ZERO_PAGE_WIDTH;
-
-    if (!frame_adjust_zone_bounds(low, &base, &size))
-        return;
-
-    pfn_t first = ADDR2PFN(base);
-    size_t count = SIZE2FRAMES(size);
-    pfn_t conf_frame;
-
-    if (low) {
+static void frame_add_region(pfn_t start_frame, pfn_t end_frame)
+{
+    if (end_frame > start_frame) {
+        /* Convert 1M frames to 16K frames */
+        pfn_t first = ADDR2PFN(start_frame << ZERO_PAGE_WIDTH);
+        pfn_t count = ADDR2PFN((end_frame - start_frame) << ZERO_PAGE_WIDTH);
+
         /* Interrupt vector frame is blacklisted */
+        pfn_t conf_frame;
         if (first == 0)
             conf_frame = 1;
         else
             conf_frame = first;
-        zone_create(first, count, conf_frame,
-            ZONE_AVAILABLE | ZONE_LOWMEM);
-    } else {
-        conf_frame = zone_external_conf_alloc(count);
-        zone_create(first, count, conf_frame,
-            ZONE_AVAILABLE | ZONE_HIGHMEM);
-    }
-
-
-    if (phys_regions_count < MAX_REGIONS) {
-        phys_regions[phys_regions_count].start = first;
-        phys_regions[phys_regions_count].count = count;
-        phys_regions_count++;
+
+        zone_create(first, count, conf_frame, 0);
+
+        if (phys_regions_count < MAX_REGIONS) {
+            phys_regions[phys_regions_count].start = first;
+            phys_regions[phys_regions_count].count = count;
+            phys_regions_count++;
+        }
     }
 }
@@ -178 +165 @@
  *
  */
-void frame_low_arch_init(void)
+void frame_arch_init(void)
 {
     ipl_t ipl = interrupts_disable();
@@ -237 +224 @@
 
             if (!avail) {
-                frame_add_region(start_frame, frame, true);
+                frame_add_region(start_frame, frame);
                 start_frame = frame + 1;
                 avail = true;
@@ -243 +230 @@
     }
 
-    frame_add_region(start_frame, frame, true);
+    frame_add_region(start_frame, frame);
 
     /* Blacklist interrupt vector frame */
@@ -259 +246 @@
 }
 
-void frame_high_arch_init(void)
-{
-}
-
 
 void physmem_print(void)
kernel/arch/mips32/src/mm/page.c
r7aaed09 r1761268
@@ -41 +41 @@
 {
     page_mapping_operations = &pt_mapping_operations;
-    as_switch(NULL, AS_KERNEL);
+}
+
+/** Map device into kernel space
+ * - on mips, all devices are already mapped into kernel space,
+ *   translate the physical address to uncached area
+ */
+uintptr_t hw_map(uintptr_t physaddr, size_t size)
+{
+    return physaddr + 0xa0000000;
 }
 
kernel/arch/mips64/Makefile.inc
r7aaed09 r1761268
@@ -55 +55 @@
     arch/$(KARCH)/src/debug/stacktrace.c \
     arch/$(KARCH)/src/debug/stacktrace_asm.S \
-    arch/$(KARCH)/src/mm/km.c \
     arch/$(KARCH)/src/mm/frame.c \
     arch/$(KARCH)/src/mm/page.c \
kernel/arch/mips64/include/mm/frame.h
r7aaed09 r1761268
@@ -41 +41 @@
 #ifndef __ASM__
 
-extern void frame_low_arch_init(void);
-extern void frame_high_arch_init(void);
+extern void frame_arch_init(void);
 extern void physmem_print(void);
 
kernel/arch/mips64/src/mm/frame.c
r7aaed09 r1761268
@@ -123 +123 @@
 }
 
-static void frame_add_region(pfn_t start_frame, pfn_t end_frame, bool low)
-{
-    if (end_frame <= start_frame)
-        return;
-
-    uintptr_t base = start_frame << ZERO_PAGE_WIDTH;
-    size_t size = (end_frame - start_frame) << ZERO_PAGE_WIDTH;
-
-    if (!frame_adjust_zone_bounds(low, &base, &size))
-        return;
-
-    pfn_t first = ADDR2PFN(base);
-    size_t count = SIZE2FRAMES(size);
-    pfn_t conf_frame;
-
-    if (low) {
+static void frame_add_region(pfn_t start_frame, pfn_t end_frame)
+{
+    if (end_frame > start_frame) {
+        /* Convert 1M frames to 16K frames */
+        pfn_t first = ADDR2PFN(start_frame << ZERO_PAGE_WIDTH);
+        pfn_t count = ADDR2PFN((end_frame - start_frame) << ZERO_PAGE_WIDTH);
+
         /* Interrupt vector frame is blacklisted */
+        pfn_t conf_frame;
         if (first == 0)
             conf_frame = 1;
         else
             conf_frame = first;
-        zone_create(first, count, conf_frame,
-            ZONE_AVAILABLE | ZONE_LOWMEM);
-    } else {
-        conf_frame = zone_external_conf_alloc(count);
-        zone_create(first, count, conf_frame,
-            ZONE_AVAILABLE | ZONE_HIGHMEM);
-    }
-
-
-    if (phys_regions_count < MAX_REGIONS) {
-        phys_regions[phys_regions_count].start = first;
-        phys_regions[phys_regions_count].count = count;
-        phys_regions_count++;
+
+        zone_create(first, count, conf_frame, 0);
+
+        if (phys_regions_count < MAX_REGIONS) {
+            phys_regions[phys_regions_count].start = first;
+            phys_regions[phys_regions_count].count = count;
+            phys_regions_count++;
+        }
     }
 }
@@ -169 +156 @@
  *
  */
-void frame_low_arch_init(void)
+void frame_arch_init(void)
 {
     ipl_t ipl = interrupts_disable();
@@ -220 +207 @@
 
             if (!avail) {
-                frame_add_region(start_frame, frame, true);
+                frame_add_region(start_frame, frame);
                 start_frame = frame + 1;
                 avail = true;
@@ -226 +213 @@
     }
 
-    frame_add_region(start_frame, frame, true);
+    frame_add_region(start_frame, frame);
 
     /* Blacklist interrupt vector frame */
@@ -242 +229 @@
 }
 
-void frame_high_arch_init(void)
-{
-}
-
 void physmem_print(void)
 {
kernel/arch/mips64/src/mm/page.c
r7aaed09 r1761268
@@ -43 +43 @@
 }
 
+/** Map device into kernel space
+ * - on mips, all devices are already mapped into kernel space,
+ *   translate the physical address to uncached area
+ */
+uintptr_t hw_map(uintptr_t physaddr, size_t size)
+{
+    return physaddr + 0xffffffffa0000000;
+}
+
 /** @}
  */
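On both MIPS ports hw_map() needs no page tables at all: physical addresses are simply rebased into the permanently mapped uncached segment (KSEG1 on mips32, the corresponding 64-bit segment here). As a worked example under the mips32 variant, assuming a hypothetical device at physical address 0x1f000000:

    uintptr_t regs = hw_map(0x1f000000, PAGE_SIZE);
    /* regs == 0x1f000000 + 0xa0000000 == 0xbf000000: the same bytes,
     * reached through the uncached window; size is effectively ignored. */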
kernel/arch/ppc32/Makefile.inc
r7aaed09 r1761268
@@ -52 +52 @@
     arch/$(KARCH)/src/proc/scheduler.c \
     arch/$(KARCH)/src/ddi/ddi.c \
-    arch/$(KARCH)/src/mm/km.c \
     arch/$(KARCH)/src/mm/as.c \
     arch/$(KARCH)/src/mm/frame.c \
kernel/arch/ppc32/include/mm/frame.h
r7aaed09 r1761268
@@ -44 +44 @@
 #include <trace.h>
 
+extern uintptr_t last_frame;
+
 NO_TRACE static inline uint32_t physmem_top(void)
 {
@@ -56 +58 @@
 }
 
-extern void frame_low_arch_init(void);
-extern void frame_high_arch_init(void);
+extern void frame_arch_init(void);
 extern void physmem_print(void);
 
kernel/arch/ppc32/src/mm/frame.c
r7aaed09 r1761268
@@ -40 +40 @@
 #include <print.h>
 
+uintptr_t last_frame = 0;
 memmap_t memmap;
 
@@ -53 +54 @@
 }
 
-static void frame_common_arch_init(bool low)
+void frame_arch_init(void)
 {
     pfn_t minconf = 2;
@@ -60 +61 @@
     for (i = 0; i < memmap.cnt; i++) {
         /* To be safe, make the available zone possibly smaller */
-        uintptr_t base = ALIGN_UP((uintptr_t) memmap.zones[i].start,
+        uintptr_t new_start = ALIGN_UP((uintptr_t) memmap.zones[i].start,
             FRAME_SIZE);
-        size_t size = ALIGN_DOWN(memmap.zones[i].size -
-            (base - ((uintptr_t) memmap.zones[i].start)), FRAME_SIZE);
+        size_t new_size = ALIGN_DOWN(memmap.zones[i].size -
+            (new_start - ((uintptr_t) memmap.zones[i].start)), FRAME_SIZE);
 
-        if (!frame_adjust_zone_bounds(low, &base, &size))
-            return;
-
-        pfn_t pfn = ADDR2PFN(base);
-        size_t count = SIZE2FRAMES(size);
+        pfn_t pfn = ADDR2PFN(new_start);
+        size_t count = SIZE2FRAMES(new_size);
+
         pfn_t conf;
-
-        if (low) {
-            if ((minconf < pfn) || (minconf >= pfn + count))
-                conf = pfn;
-            else
-                conf = minconf;
-            zone_create(pfn, count, conf,
-                ZONE_AVAILABLE | ZONE_LOWMEM);
-        } else {
-            conf = zone_external_conf_alloc(count);
-            zone_create(pfn, count, conf,
-                ZONE_AVAILABLE | ZONE_HIGHMEM);
-        }
+        if ((minconf < pfn) || (minconf >= pfn + count))
+            conf = pfn;
+        else
+            conf = minconf;
+
+        zone_create(pfn, count, conf, 0);
+
+        if (last_frame < ALIGN_UP(new_start + new_size, FRAME_SIZE))
+            last_frame = ALIGN_UP(new_start + new_size, FRAME_SIZE);
     }
-
-}
-
-void frame_low_arch_init(void)
-{
-    frame_common_arch_init(true);
-
 
     /* First is exception vector, second is 'implementation specific',
@@ -103 +92 @@
 }
 
-void frame_high_arch_init(void)
-{
-    frame_common_arch_init(false);
-}
-
 /** @}
  */
kernel/arch/ppc32/src/mm/page.c
r7aaed09 r1761268
@@ -46 +46 @@
 }
 
+uintptr_t hw_map(uintptr_t physaddr, size_t size)
+{
+    if (last_frame + ALIGN_UP(size, PAGE_SIZE) >
+        KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH))
+        panic("Unable to map physical memory %p (%zu bytes).",
+            (void *) physaddr, size);
+
+    uintptr_t virtaddr = PA2KA(last_frame);
+    pfn_t i;
+    page_table_lock(AS_KERNEL, true);
+    for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++)
+        page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i),
+            physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE | PAGE_WRITE);
+    page_table_unlock(AS_KERNEL, true);
+
+    last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE);
+
+    return virtaddr;
+}
+
 /** @}
  */
kernel/arch/sparc64/Makefile.inc
r7aaed09 r1761268
@@ -68 +68 @@
     arch/$(KARCH)/src/fpu_context.c \
     arch/$(KARCH)/src/dummy.s \
-    arch/$(KARCH)/src/mm/$(USARCH)/km.c \
     arch/$(KARCH)/src/mm/$(USARCH)/as.c \
     arch/$(KARCH)/src/mm/$(USARCH)/frame.c \
kernel/arch/sparc64/include/mm/sun4u/frame.h
r7aaed09 r1761268
@@ -72 +72 @@
 typedef union frame_address frame_address_t;
 
+extern uintptr_t last_frame;
 extern uintptr_t end_of_identity;
 
-extern void frame_low_arch_init(void);
-extern void frame_high_arch_init(void);
+extern void frame_arch_init(void);
 #define physmem_print()
 
kernel/arch/sparc64/include/mm/sun4v/frame.h
r7aaed09 r1761268
@@ -46 +46 @@
 #include <typedefs.h>
 
-extern void frame_low_arch_init(void);
-extern void frame_high_arch_init(void);
+extern uintptr_t last_frame;
+extern void frame_arch_init(void);
 #define physmem_print()
 
kernel/arch/sparc64/src/mm/page.c
r7aaed09 r1761268
@@ -51 +51 @@
 }
 
+/** Map memory-mapped device into virtual memory.
+ *
+ * We are currently using identity mapping for mapping device registers.
+ *
+ * @param physaddr Physical address of the page where the device is
+ *                 located.
+ * @param size     Size of the device's registers.
+ *
+ * @return Virtual address of the page where the device is mapped.
+ *
+ */
+uintptr_t hw_map(uintptr_t physaddr, size_t size)
+{
+    return PA2KA(physaddr);
+}
+
 /** @}
  */
kernel/arch/sparc64/src/mm/sun4u/frame.c
r7aaed09 r1761268
@@ -41 +41 @@
 #include <macros.h>
 
+uintptr_t last_frame = (uintptr_t) NULL;
+
 /** Create memory zones according to information stored in memmap.
  *
  * Walk the memory map and create frame zones according to it.
  */
-static void frame_common_arch_init(bool low)
+void frame_arch_init(void)
 {
-    unsigned int i;
-
-    for (i = 0; i < memmap.cnt; i++) {
-        uintptr_t base;
-        size_t size;
-
-        /*
-         * The memmap is created by HelenOS boot loader.
-         * It already contains no holes.
-         */
-
-        /* To be safe, make the available zone possibly smaller */
-        base = ALIGN_UP((uintptr_t) memmap.zones[i].start, FRAME_SIZE);
-        size = ALIGN_DOWN(memmap.zones[i].size -
-            (base - ((uintptr_t) memmap.zones[i].start)), FRAME_SIZE);
-
-        if (!frame_adjust_zone_bounds(low, &base, &size))
-            continue;
-
-        pfn_t confdata;
-        pfn_t pfn = ADDR2PFN(base);
-        size_t count = SIZE2FRAMES(size);
-
-        if (low) {
-            confdata = pfn;
+    if (config.cpu_active == 1) {
+        unsigned int i;
+
+        for (i = 0; i < memmap.cnt; i++) {
+            /* To be safe, make the available zone possibly smaller */
+            uintptr_t new_start = ALIGN_UP((uintptr_t) memmap.zones[i].start,
+                FRAME_SIZE);
+            size_t new_size = ALIGN_DOWN(memmap.zones[i].size -
+                (new_start - ((uintptr_t) memmap.zones[i].start)), FRAME_SIZE);
+
+            /*
+             * The memmap is created by HelenOS boot loader.
+             * It already contains no holes.
+             */
+
+            pfn_t confdata = ADDR2PFN(new_start);
+
             if (confdata == ADDR2PFN(KA2PA(PFN2ADDR(0))))
                 confdata = ADDR2PFN(KA2PA(PFN2ADDR(2)));
 
-            zone_create(pfn, count, confdata,
-                ZONE_AVAILABLE | ZONE_LOWMEM);
-        } else {
-            confdata = zone_external_conf_alloc(count);
-            zone_create(pfn, count, confdata,
-                ZONE_AVAILABLE | ZONE_HIGHMEM);
+            zone_create(ADDR2PFN(new_start), SIZE2FRAMES(new_size),
+                confdata, 0);
+
+            last_frame = max(last_frame, new_start + new_size);
         }
+
+        /*
+         * On sparc64, physical memory can start on a non-zero address.
+         * The generic frame_init() only marks PFN 0 as not free, so we
+         * must mark the physically first frame not free explicitly
+         * here, no matter what is its address.
+         */
+        frame_mark_unavailable(ADDR2PFN(KA2PA(PFN2ADDR(0))), 1);
     }
-
-}
-
-void frame_low_arch_init(void)
-{
-    if (config.cpu_active > 1)
-        return;
-
-    frame_common_arch_init(true);
-
-    /*
-     * On sparc64, physical memory can start on a non-zero address.
-     * The generic frame_init() only marks PFN 0 as not free, so we
-     * must mark the physically first frame not free explicitly
-     * here, no matter what is its address.
-     */
-    frame_mark_unavailable(ADDR2PFN(KA2PA(PFN2ADDR(0))), 1);
-
-    /* PA2KA will work only on low-memory. */
-    end_of_identity = PA2KA(config.physmem_end - FRAME_SIZE) + PAGE_SIZE;
-}
-
-void frame_high_arch_init(void)
-{
-    if (config.cpu_active > 1)
-        return;
-
-    frame_common_arch_init(false);
+
+    end_of_identity = PA2KA(last_frame);
 }
 
kernel/arch/sparc64/src/mm/sun4u/tlb.c
r7aaed09 r1761268
@@ -206 +206 @@
     pte_t *t;
 
+    page_table_lock(AS, true);
     t = page_mapping_find(AS, page_16k, true);
     if (t && PTE_EXECUTABLE(t)) {
@@ -217 +218 @@
         itsb_pte_copy(t, index);
 #endif
+        page_table_unlock(AS, true);
     } else {
         /*
@@ -222 +224 @@
          * handler.
          */
+        page_table_unlock(AS, true);
         if (as_page_fault(page_16k, PF_ACCESS_EXEC, istate) ==
             AS_PF_FAULT) {
@@ -247 +250 @@
     size_t index;
     pte_t *t;
-    as_t *as = AS;
 
     page_8k = (uint64_t) tag.vpn << MMU_PAGE_WIDTH;
@@ -259 +261 @@
                 "Dereferencing NULL pointer.");
         } else if (page_8k >= end_of_identity) {
-            /* Kernel non-identity. */
-            as = AS_KERNEL;
-        } else {
-            do_fast_data_access_mmu_miss_fault(istate, tag,
-                "Unexpected kernel page fault.");
+            /*
+             * The kernel is accessing the I/O space.
+             * We still do identity mapping for I/O,
+             * but without caching.
+             */
+            dtlb_insert_mapping(page_8k, KA2PA(page_8k),
+                PAGESIZE_8K, false, false);
+            return;
         }
-    }
-
-    t = page_mapping_find(as, page_16k, true);
+        do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected "
+            "kernel page fault.");
+    }
+
+    page_table_lock(AS, true);
+    t = page_mapping_find(AS, page_16k, true);
     if (t) {
         /*
@@ -278 +286 @@
         dtsb_pte_copy(t, index, true);
 #endif
+        page_table_unlock(AS, true);
     } else {
         /*
          * Forward the page fault to the address space page fault
          * handler.
          */
+        page_table_unlock(AS, true);
         if (as_page_fault(page_16k, PF_ACCESS_READ, istate) ==
             AS_PF_FAULT) {
@@ -304 +314 @@
     size_t index;
     pte_t *t;
-    as_t *as = AS;
 
     page_16k = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
     index = tag.vpn % MMU_PAGES_PER_PAGE; /* 16K-page emulation */
 
-    if (tag.context == ASID_KERNEL)
-        as = AS_KERNEL;
-
-    t = page_mapping_find(as, page_16k, true);
+    page_table_lock(AS, true);
+    t = page_mapping_find(AS, page_16k, true);
     if (t && PTE_WRITABLE(t)) {
         /*
@@ -327 +334 @@
         dtsb_pte_copy(t, index, false);
 #endif
+        page_table_unlock(AS, true);
     } else {
         /*
@@ -332 +340 @@
          * handler.
          */
+        page_table_unlock(AS, true);
         if (as_page_fault(page_16k, PF_ACCESS_WRITE, istate) ==
             AS_PF_FAULT) {
kernel/arch/sparc64/src/mm/sun4v/frame.c
r7aaed09 r1761268
@@ -45 +45 @@
  * Walk the memory map and create frame zones according to it.
  */
-static void frame_common_arch_init(bool low)
+void frame_arch_init(void)
 {
-    unsigned int i;
-
-    for (i = 0; i < memmap.cnt; i++) {
-        uintptr_t base;
-        size_t size;
-
-        /*
-         * The memmap is created by HelenOS boot loader.
-         * It already contains no holes.
-         */
-
-        /* To be safe, make the available zone possibly smaller */
-        base = ALIGN_UP((uintptr_t) memmap.zones[i].start, FRAME_SIZE);
-        size = ALIGN_DOWN(memmap.zones[i].size -
-            (base - ((uintptr_t) memmap.zones[i].start)), FRAME_SIZE);
-
-        if (!frame_adjust_zone_bounds(low, &base, &size))
-            continue;
-
-        pfn_t confdata;
-        pfn_t pfn = ADDR2PFN(base);
-        size_t count = SIZE2FRAMES(size);
-
-        if (low) {
-            confdata = pfn;
+    if (config.cpu_active == 1) {
+        unsigned int i;
+
+        for (i = 0; i < memmap.cnt; i++) {
+            /* To be safe, make the available zone possibly smaller */
+            uintptr_t new_start = ALIGN_UP((uintptr_t) memmap.zones[i].start,
+                FRAME_SIZE);
+            size_t new_size = ALIGN_DOWN(memmap.zones[i].size -
+                (new_start - ((uintptr_t) memmap.zones[i].start)), FRAME_SIZE);
+
+            /*
+             * The memmap is created by HelenOS boot loader.
+             * It already contains no holes.
+             */
+
+            pfn_t confdata = ADDR2PFN(new_start);
+
             if (confdata == ADDR2PFN(KA2PA(PFN2ADDR(0))))
                 confdata = ADDR2PFN(KA2PA(PFN2ADDR(2)));
 
-            zone_create(pfn, count, confdata,
-                ZONE_AVAILABLE | ZONE_LOWMEM);
-        } else {
-            confdata = zone_external_conf_alloc(count);
-            zone_create(pfn, count, confdata,
-                ZONE_AVAILABLE | ZONE_HIGHMEM);
+            zone_create(ADDR2PFN(new_start), SIZE2FRAMES(new_size),
+                confdata, 0);
         }
+
+        /*
+         * On sparc64, physical memory can start on a non-zero address.
+         * The generic frame_init() only marks PFN 0 as not free, so we
+         * must mark the physically first frame not free explicitly
+         * here, no matter what is its address.
+         */
+        frame_mark_unavailable(ADDR2PFN(KA2PA(PFN2ADDR(0))), 1);
     }
-
-}
-
-
-void frame_low_arch_init(void)
-{
-    if (config.cpu_active > 1)
-        return;
-
-    frame_common_arch_init(true);
-
-    /*
-     * On sparc64, physical memory can start on a non-zero address.
-     * The generic frame_init() only marks PFN 0 as not free, so we
-     * must mark the physically first frame not free explicitly
-     * here, no matter what is its address.
-     */
-    frame_mark_unavailable(ADDR2PFN(KA2PA(PFN2ADDR(0))), 1);
-}
-
-void frame_high_arch_init(void)
-{
-    if (config.cpu_active > 1)
-        return;
-
-    frame_common_arch_init(false);
 }
 
kernel/arch/sparc64/src/mm/sun4v/tlb.c
r7aaed09 r1761268
@@ -218 +218 @@
     pte_t *t;
 
+    page_table_lock(AS, true);
     t = page_mapping_find(AS, va, true);
 
@@ -230 +231 @@
         itsb_pte_copy(t);
 #endif
+        page_table_unlock(AS, true);
     } else {
         /*
@@ -235 +237 @@
          * handler.
          */
+        page_table_unlock(AS, true);
         if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
             do_fast_instruction_access_mmu_miss_fault(istate,
@@ -271 +274 @@
     }
 
+    page_table_lock(AS, true);
     t = page_mapping_find(AS, va, true);
     if (t) {
@@ -282 +286 @@
         dtsb_pte_copy(t, true);
 #endif
+        page_table_unlock(AS, true);
     } else {
         /*
@@ -287 +292 @@
          * handler.
          */
+        page_table_unlock(AS, true);
         if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
             do_fast_data_access_mmu_miss_fault(istate, page_and_ctx,
@@ -310 +316 @@
     uint16_t ctx = DMISS_CONTEXT(page_and_ctx);
 
+    page_table_lock(AS, true);
     t = page_mapping_find(AS, va, true);
     if (t && PTE_WRITABLE(t)) {
@@ -324 +331 @@
         dtsb_pte_copy(t, false);
 #endif
+        page_table_unlock(AS, true);
     } else {
         /*
@@ -329 +337 @@
          * handler.
          */
+        page_table_unlock(AS, true);
         if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
             do_fast_data_access_protection_fault(istate, page_and_ctx,
kernel/genarch/include/mm/page_ht.h
r7aaed09 r1761268
@@ -43 +43 @@
 #include <mm/as.h>
 #include <mm/page.h>
-#include <mm/slab.h>
 #include <synch/mutex.h>
 #include <adt/hash_table.h>
@@ -65 +64 @@
 extern page_mapping_operations_t ht_mapping_operations;
 
-extern slab_cache_t *pte_cache;
 extern mutex_t page_ht_lock;
 extern hash_table_t page_ht;
kernel/genarch/src/mm/as_ht.c
r7aaed09 r1761268
@@ -41 +41 @@
 #include <mm/as.h>
 #include <mm/frame.h>
-#include <mm/slab.h>
 #include <typedefs.h>
 #include <memstr.h>
@@ -78 +77 @@
     hash_table_create(&page_ht, PAGE_HT_ENTRIES, 2, &ht_operations);
     mutex_initialize(&page_ht_lock, MUTEX_PASSIVE);
-    pte_cache = slab_cache_create("pte_cache", sizeof(pte_t), 0, NULL, NULL,
-        SLAB_CACHE_MAGDEFERRED);
 }
 
kernel/genarch/src/mm/as_pt.c
r7aaed09 r1761268
@@ -73 +73 @@
 pte_t *ptl0_create(unsigned int flags)
 {
-    pte_t *dst_ptl0 = (pte_t *) frame_alloc(PTL0_SIZE,
-        FRAME_LOWMEM | FRAME_KA);
+    pte_t *dst_ptl0 = (pte_t *) frame_alloc(PTL0_SIZE, FRAME_KA);
     size_t table_size = FRAME_SIZE << PTL0_SIZE;
 
@@ -90 +89 @@
         (pte_t *) PA2KA((uintptr_t) AS_KERNEL->genarch.page_table);
 
-    uintptr_t src = (uintptr_t)
-        &src_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)];
-    uintptr_t dst = (uintptr_t)
-        &dst_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)];
+    uintptr_t src =
+        (uintptr_t) &src_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)];
+    uintptr_t dst =
+        (uintptr_t) &dst_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)];
 
     memsetb(dst_ptl0, table_size, 0);
kernel/genarch/src/mm/page_ht.c
r7aaed09 r1761268
@@ -60 +60 @@
 static pte_t *ht_mapping_find(as_t *, uintptr_t, bool);
 
-slab_cache_t *pte_cache = NULL;
-
 /**
  * This lock protects the page hash table. It must be acquired
@@ -165 +163 @@
     pte_t *pte = hash_table_get_instance(item, pte_t, link);
 
-    slab_free(pte_cache, pte);
+    free(pte);
 }
 
@@ -190 +188 @@
 
     if (!hash_table_find(&page_ht, key)) {
-        pte_t *pte = slab_alloc(pte_cache, FRAME_LOWMEM | FRAME_ATOMIC);
+        pte_t *pte = (pte_t *) malloc(sizeof(pte_t), FRAME_ATOMIC);
         ASSERT(pte != NULL);
 
kernel/genarch/src/mm/page_pt.c
r7aaed09 r1761268
@@ -39 +39 @@
 #include <mm/page.h>
 #include <mm/frame.h>
-#include <mm/km.h>
 #include <mm/as.h>
 #include <arch/mm/page.h>
@@ -76 +75 @@
 
     if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT) {
-        pte_t *newpt = (pte_t *) frame_alloc(PTL1_SIZE,
-            FRAME_LOWMEM | FRAME_KA);
+        pte_t *newpt = (pte_t *) frame_alloc(PTL1_SIZE, FRAME_KA);
         memsetb(newpt, FRAME_SIZE << PTL1_SIZE, 0);
         SET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page), KA2PA(newpt));
-        SET_PTL1_FLAGS(ptl0, PTL0_INDEX(page),
-            PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
-            PAGE_WRITE);
+        SET_PTL1_FLAGS(ptl0, PTL0_INDEX(page), PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | PAGE_WRITE);
     }
 
     pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));
 
     if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT) {
-        pte_t *newpt = (pte_t *) frame_alloc(PTL2_SIZE,
-            FRAME_LOWMEM | FRAME_KA);
+        pte_t *newpt = (pte_t *) frame_alloc(PTL2_SIZE, FRAME_KA);
         memsetb(newpt, FRAME_SIZE << PTL2_SIZE, 0);
         SET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page), KA2PA(newpt));
-        SET_PTL2_FLAGS(ptl1, PTL1_INDEX(page),
-            PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
-            PAGE_WRITE);
+        SET_PTL2_FLAGS(ptl1, PTL1_INDEX(page), PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | PAGE_WRITE);
     }
 
     pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));
 
     if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT) {
-        pte_t *newpt = (pte_t *) frame_alloc(PTL3_SIZE,
-            FRAME_LOWMEM | FRAME_KA);
+        pte_t *newpt = (pte_t *) frame_alloc(PTL3_SIZE, FRAME_KA);
         memsetb(newpt, FRAME_SIZE << PTL3_SIZE, 0);
         SET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page), KA2PA(newpt));
-        SET_PTL3_FLAGS(ptl2, PTL2_INDEX(page),
-            PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
-            PAGE_WRITE);
+        SET_PTL3_FLAGS(ptl2, PTL2_INDEX(page), PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | PAGE_WRITE);
     }
 
@@ -154 +144 @@
 
     /*
-     * Second, free all empty tables along the way from PTL3 down to PTL0
-     * except those needed for sharing the kernel non-identity mappings.
+     * Second, free all empty tables along the way from PTL3 down to PTL0.
+     *
      */
 
@@ -172 +162 @@
         /*
          * PTL3 is empty.
-         * Release the frame and remove PTL3 pointer from the parent
-         * table.
-         */
+         * Release the frame and remove PTL3 pointer from preceding table.
+         *
+         */
+        frame_free(KA2PA((uintptr_t) ptl3));
 #if (PTL2_ENTRIES != 0)
         memsetb(&ptl2[PTL2_INDEX(page)], sizeof(pte_t), 0);
@@ -180 +171 @@
         memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
 #else
-        if (km_is_non_identity(page))
-            return;
-
         memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
 #endif
-        frame_free(KA2PA((uintptr_t) ptl3));
     } else {
@@ -208 +195 @@
         /*
          * PTL2 is empty.
-         * Release the frame and remove PTL2 pointer from the parent
-         * table.
-         */
+         * Release the frame and remove PTL2 pointer from preceding table.
+         *
+         */
+        frame_free(KA2PA((uintptr_t) ptl2));
 #if (PTL1_ENTRIES != 0)
         memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
 #else
-        if (km_is_non_identity(page))
-            return;
-
         memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
 #endif
-        frame_free(KA2PA((uintptr_t) ptl2));
     } else {
@@ -243 +227 @@
         /*
          * PTL1 is empty.
-         * Release the frame and remove PTL1 pointer from the parent
-         * table.
-         */
-        if (km_is_non_identity(page))
-            return;
-
+         * Release the frame and remove PTL1 pointer from preceding table.
+         *
+         */
+        frame_free(KA2PA((uintptr_t) ptl1));
         memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
-        frame_free(KA2PA((uintptr_t) ptl1));
     }
 #endif /* PTL1_ENTRIES != 0 */
kernel/generic/include/align.h
r7aaed09 r1761268
@@ -42 +42 @@
  *
  * @param s Address or size to be aligned.
- * @param a Size of alignment, must be a power of 2.
+ * @param a Size of alignment, must be power of 2.
  */
 #define ALIGN_DOWN(s, a)  ((s) & ~((a) - 1))
@@ -50 +50 @@
  *
  * @param s Address or size to be aligned.
- * @param a Size of alignment, must be a power of 2.
+ * @param a Size of alignment, must be power of 2.
  */
 #define ALIGN_UP(s, a)  (((s) + ((a) - 1)) & ~((a) - 1))
 
-/** Check alignment.
- *
- * @param s Address or size to be checked for alignment.
- * @param a Size of alignment, must be a power of 2.
- */
-#define IS_ALIGNED(s, a)  (ALIGN_UP((s), (a)) == (s))
-
 
 #endif
kernel/generic/include/config.h
r7aaed09 r1761268
@@ -74 +74 @@
 
 typedef struct {
-    /** Number of processors detected. */
-    unsigned int cpu_count;
-    /** Number of processors that are up and running. */
-    volatile size_t cpu_active;
+    unsigned int cpu_count;      /**< Number of processors detected. */
+    volatile size_t cpu_active;  /**< Number of processors that are up and running. */
 
     uintptr_t base;
-    /** Size of memory in bytes taken by kernel and stack. */
-    size_t kernel_size;
+    size_t kernel_size;          /**< Size of memory in bytes taken by kernel and stack */
 
-    /** Base adddress of initial stack. */
-    uintptr_t stack_base;
-    /** Size of initial stack. */
-    size_t stack_size;
-
-    bool identity_configured;
-    /** Base address of the kernel identity mapped memory. */
-    uintptr_t identity_base;
-    /** Size of the kernel identity mapped memory. */
-    size_t identity_size;
-
-    bool non_identity_configured;
-
-    /** End of physical memory. */
-    uint64_t physmem_end;
+    uintptr_t stack_base;        /**< Base adddress of initial stack */
+    size_t stack_size;           /**< Size of initial stack */
 } config_t;
 
kernel/generic/include/macros.h
r7aaed09 r1761268
@@ -77 +77 @@
 #endif /* __ASM__ */
 
-#define ispwr2(x)  (((x) & ((x) - 1)) == 0)
-
 #define isdigit(d)  (((d) >= '0') && ((d) <= '9'))
 #define islower(c)  (((c) >= 'a') && ((c) <= 'z'))
kernel/generic/include/mm/frame.h
r7aaed09 r1761268
@@ -50 +50 @@
 typedef uint8_t frame_flags_t;
 
-#define FRAME_NONE        0x0
 /** Convert the frame address to kernel VA. */
 #define FRAME_KA          0x1
@@ -59 +58 @@
 /** Do not reserve / unreserve memory. */
 #define FRAME_NO_RESERVE  0x8
-/** Allocate a frame which can be identity-mapped. */
-#define FRAME_LOWMEM      0x10
-/** Allocate a frame which cannot be identity-mapped. */
-#define FRAME_HIGHMEM     0x20
 
 typedef uint8_t zone_flags_t;
 
-#define ZONE_NONE       0x0
 /** Available zone (free for allocation) */
-#define ZONE_AVAILABLE  0x1
+#define ZONE_AVAILABLE  0x0
 /** Zone is reserved (not available for allocation) */
-#define ZONE_RESERVED   0x2
+#define ZONE_RESERVED   0x8
 /** Zone is used by firmware (not available for allocation) */
-#define ZONE_FIRMWARE   0x4
-/** Zone contains memory that can be identity-mapped */
-#define ZONE_LOWMEM     0x8
-/** Zone contains memory that cannot be identity-mapped */
-#define ZONE_HIGHMEM    0x10
+#define ZONE_FIRMWARE   0x10
 
-/** Mask of zone bits that must be matched exactly. */
-#define ZONE_EF_MASK    0x7
-
-#define FRAME_TO_ZONE_FLAGS(ff) \
-    ((((ff) & FRAME_LOWMEM) ? ZONE_LOWMEM : \
-        (((ff) & FRAME_HIGHMEM) ? ZONE_HIGHMEM : ZONE_NONE)) | \
-        (ZONE_AVAILABLE | ZONE_LOWMEM /* | ZONE_HIGHMEM */))
-
-#define ZONE_FLAGS_MATCH(zf, f) \
-    (((((zf) & ZONE_EF_MASK)) == ((f) & ZONE_EF_MASK)) && \
-        (((zf) & ~ZONE_EF_MASK) & (f)))
+/** Currently there is no equivalent zone flags
+    for frame flags */
+#define FRAME_TO_ZONE_FLAGS(frame_flags)  0
 
 typedef struct {
     size_t refcount;     /**< Tracking of shared frames */
+    uint8_t buddy_order; /**< Buddy system block order */
     link_t buddy_link;   /**< Link to the next free block inside
                               one order */
     void *parent;        /**< If allocated by slab, this points there */
-    uint8_t buddy_order; /**< Buddy system block order */
 } frame_t;
 
@@ -147 +129 @@
 }
 
+NO_TRACE static inline bool zone_flags_available(zone_flags_t flags)
+{
+    return ((flags & (ZONE_RESERVED | ZONE_FIRMWARE)) == 0);
+}
+
 #define IS_BUDDY_ORDER_OK(index, order) \
     ((~(((sysarg_t) -1) << (order)) & (index)) == 0)
@@ -159 +146 @@
 
 extern void frame_init(void);
-extern bool frame_adjust_zone_bounds(bool, uintptr_t *, size_t *);
 extern void *frame_alloc_generic(uint8_t, frame_flags_t, size_t *);
 extern void *frame_alloc(uint8_t, frame_flags_t);
@@ -175 +161 @@
 extern void frame_mark_unavailable(pfn_t, size_t);
 extern size_t zone_conf_size(size_t);
-extern pfn_t zone_external_conf_alloc(size_t);
 extern bool zone_merge(size_t, size_t);
 extern void zone_merge_all(void);
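With the restored definitions, the coupling between frame flags and zone flags disappears: ZONE_AVAILABLE is 0x0, FRAME_TO_ZONE_FLAGS() always evaluates to 0, and availability is purely the absence of the ZONE_RESERVED (0x8) and ZONE_FIRMWARE (0x10) bits. A few illustrative evaluations (plain C, not part of the changeset itself):

    zone_flags_available(0);              /* true: ordinary RAM zone */
    zone_flags_available(ZONE_RESERVED);  /* false: 0x8 & (0x8 | 0x10) != 0 */
    FRAME_TO_ZONE_FLAGS(FRAME_KA | FRAME_ATOMIC);  /* 0: frame flags no longer
                                                      constrain the zone search */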
kernel/generic/src/cpu/cpu.c
r7aaed09 r1761268
@@ -74 +74 @@
     for (i = 0; i < config.cpu_count; i++) {
         cpus[i].stack = (uint8_t *) frame_alloc(STACK_FRAMES,
-            FRAME_LOWMEM | FRAME_KA | FRAME_ATOMIC);
+            FRAME_KA | FRAME_ATOMIC);
         cpus[i].id = i;
 
kernel/generic/src/main/main.c
r7aaed09 r1761268
@@ -68 +68 @@
 #include <mm/page.h>
 #include <genarch/mm/page_pt.h>
-#include <mm/km.h>
 #include <mm/tlb.h>
 #include <mm/as.h>
@@ -89 +88 @@
 
 /** Global configuration structure. */
-config_t config = {
-    .identity_configured = false,
-    .non_identity_configured = false,
-    .physmem_end = 0
-};
+config_t config;
 
 /** Initial user-space tasks */
@@ -210 +205 @@
      */
     arch_pre_mm_init();
-    km_identity_init();
     frame_init();
+
     /* Initialize at least 1 memory segment big enough for slab to work. */
     slab_cache_init();
@@ -219 +214 @@
     page_init();
     tlb_init();
-    km_non_identity_init();
     ddi_init();
     arch_post_mm_init();
kernel/generic/src/mm/frame.c
r7aaed09 r1761268

 NO_TRACE static bool zone_can_alloc(zone_t *zone, uint8_t order)
 {
-	return ((zone->flags & ZONE_AVAILABLE) &&
-	    buddy_system_can_alloc(zone->buddy_system, order));
+	return (zone_flags_available(zone->flags)
+	    && buddy_system_can_alloc(zone->buddy_system, order));
 }
…
 	 * Check whether the zone meets the search criteria.
 	 */
-	if (ZONE_FLAGS_MATCH(zones.info[i].flags, flags)) {
+	if ((zones.info[i].flags & flags) == flags) {
 		/*
 		 * Check if the zone has 2^order frames area available.
…
 NO_TRACE static pfn_t zone_frame_alloc(zone_t *zone, uint8_t order)
 {
-	ASSERT(zone->flags & ZONE_AVAILABLE);
+	ASSERT(zone_flags_available(zone->flags));

 	/* Allocate frames from zone buddy system */
…
 NO_TRACE static size_t zone_frame_free(zone_t *zone, size_t frame_idx)
 {
-	ASSERT(zone->flags & ZONE_AVAILABLE);
+	ASSERT(zone_flags_available(zone->flags));

 	frame_t *frame = &zone->frames[frame_idx];
…
 NO_TRACE static void zone_mark_unavailable(zone_t *zone, size_t frame_idx)
 {
-	ASSERT(zone->flags & ZONE_AVAILABLE);
+	ASSERT(zone_flags_available(zone->flags));

 	frame_t *frame = zone_get_frame(zone, frame_idx);
…
     buddy_system_t *buddy)
 {
-	ASSERT(zones.info[z1].flags & ZONE_AVAILABLE);
-	ASSERT(zones.info[z2].flags & ZONE_AVAILABLE);
+	ASSERT(zone_flags_available(zones.info[z1].flags));
+	ASSERT(zone_flags_available(zones.info[z2].flags));
 	ASSERT(zones.info[z1].flags == zones.info[z2].flags);
 	ASSERT(zones.info[z1].base < zones.info[z2].base);
…
 NO_TRACE static void return_config_frames(size_t znum, pfn_t pfn, size_t count)
 {
-	ASSERT(zones.info[znum].flags & ZONE_AVAILABLE);
+	ASSERT(zone_flags_available(zones.info[znum].flags));

 	size_t cframes = SIZE2FRAMES(zone_conf_size(count));
…
     size_t count)
 {
-	ASSERT(zones.info[znum].flags & ZONE_AVAILABLE);
+	ASSERT(zone_flags_available(zones.info[znum].flags));
 	ASSERT(frame_idx + count < zones.info[znum].count);
…
 	 * set of flags
 	 */
-	if ((z1 >= zones.count) || (z2 >= zones.count) || (z2 - z1 != 1) ||
-	    (zones.info[z1].flags != zones.info[z2].flags)) {
+	if ((z1 >= zones.count) || (z2 >= zones.count)
+	    || (z2 - z1 != 1)
+	    || (!zone_flags_available(zones.info[z1].flags))
+	    || (!zone_flags_available(zones.info[z2].flags))
+	    || (zones.info[z1].flags != zones.info[z2].flags)) {
 		ret = false;
 		goto errout;
…
 	zone->buddy_system = buddy;

-	if (flags & ZONE_AVAILABLE) {
+	if (zone_flags_available(flags)) {
 		/*
 		 * Compute order for buddy system and initialize
…
 {
 	return (count * sizeof(frame_t) + buddy_conf_size(fnzb(count)));
 }
-
-/** Allocate external configuration frames from low memory. */
-pfn_t zone_external_conf_alloc(size_t count)
-{
-	size_t size = zone_conf_size(count);
-	size_t order = ispwr2(size) ? fnzb(size) : (fnzb(size) + 1);
-
-	return ADDR2PFN((uintptr_t) frame_alloc(order - FRAME_WIDTH, FRAME_LOWMEM));
-}
…
 	irq_spinlock_lock(&zones.lock, true);

-	if (flags & ZONE_AVAILABLE) {  /* Create available zone */
+	if (zone_flags_available(flags)) {  /* Create available zone */
 		/* Theoretically we could have NULL here, practically make sure
 		 * nobody tries to do that. If some platform requires, remove
…
 		 */
 		ASSERT(confframe != ADDR2PFN((uintptr_t) NULL));
-
-		/* Update the known end of physical memory. */
-		config.physmem_end = max(config.physmem_end, PFN2ADDR(start + count));

 		/* If confframe is supposed to be inside our zone, then make sure
…
 	/* Tell the architecture to create some memory */
-	frame_low_arch_init();
+	frame_arch_init();
 	if (config.cpu_active == 1) {
 		frame_mark_unavailable(ADDR2PFN(KA2PA(config.base)),
…
 		frame_mark_unavailable(0, 1);
 	}
-	frame_high_arch_init();
-}
-
-/** Adjust the bounds of a physical memory region according to the low/high memory split.
- *
- * @param low[in]      If true, the adjustment is performed to make the region
- *                     fit in the low memory. Otherwise the adjustment is
- *                     performed to make the region fit in the high memory.
- * @param basep[inout] Pointer to a variable which contains the region's base
- *                     address and which may receive the adjusted base address.
- * @param sizep[inout] Pointer to a variable which contains the region's size
- *                     and which may receive the adjusted size.
- * @return             True if the region still exists even after the
- *                     adjustment, false otherwise.
- */
-bool frame_adjust_zone_bounds(bool low, uintptr_t *basep, size_t *sizep)
-{
-	uintptr_t limit = config.identity_size;
-
-	if (low) {
-		if (*basep > limit)
-			return false;
-		if (*basep + *sizep > limit)
-			*sizep = limit - *basep;
-	} else {
-		if (*basep + *sizep <= limit)
-			return false;
-		if (*basep <= limit) {
-			*sizep -= limit - *basep;
-			*basep = limit;
-		}
-	}
-	return true;
 }
…
 	*total += (uint64_t) FRAMES2SIZE(zones.info[i].count);

-	if (zones.info[i].flags & ZONE_AVAILABLE) {
+	if (zone_flags_available(zones.info[i].flags)) {
 		*busy += (uint64_t) FRAMES2SIZE(zones.info[i].busy_count);
 		*free += (uint64_t) FRAMES2SIZE(zones.info[i].free_count);
…
 	irq_spinlock_unlock(&zones.lock, true);

-	bool available = ((flags & ZONE_AVAILABLE) != 0);
+	bool available = zone_flags_available(flags);

 	printf("%-4zu", i);
…
 #endif

-	printf(" %12zu %c%c%c%c%c ", count,
-	    available ? 'A' : '-',
-	    (flags & ZONE_RESERVED) ? 'R' : '-',
-	    (flags & ZONE_FIRMWARE) ? 'F' : '-',
-	    (flags & ZONE_LOWMEM) ? 'L' : '-',
-	    (flags & ZONE_HIGHMEM) ? 'H' : '-');
+	printf(" %12zu %c%c%c ", count,
+	    available ? 'A' : ' ',
+	    (flags & ZONE_RESERVED) ? 'R' : ' ',
+	    (flags & ZONE_FIRMWARE) ? 'F' : ' ');

 	if (available)
…
 	irq_spinlock_unlock(&zones.lock, true);

-	bool available = ((flags & ZONE_AVAILABLE) != 0);
+	bool available = zone_flags_available(flags);

 	uint64_t size;
…
 	printf("Zone size: %zu frames (%" PRIu64 " %s)\n", count,
 	    size, size_suffix);
-	printf("Zone flags: %c%c%c%c%c\n",
-	    available ? 'A' : '-',
-	    (flags & ZONE_RESERVED) ? 'R' : '-',
-	    (flags & ZONE_FIRMWARE) ? 'F' : '-',
-	    (flags & ZONE_LOWMEM) ? 'L' : '-',
-	    (flags & ZONE_HIGHMEM) ? 'H' : '-');
+	printf("Zone flags: %c%c%c\n",
+	    available ? 'A' : ' ',
+	    (flags & ZONE_RESERVED) ? 'R' : ' ',
+	    (flags & ZONE_FIRMWARE) ? 'F' : ' ');

 	if (available) {
-
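For reference, the removed frame_adjust_zone_bounds() is easy to exercise in isolation. A standalone sketch, assuming a fixed 256 MiB identity limit in place of config.identity_size:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define IDENTITY_LIMIT  0x10000000UL  /* assumed 256 MiB split point */

/* Same clipping logic as the deleted kernel function. */
static bool adjust_zone_bounds(bool low, uintptr_t *basep, size_t *sizep)
{
	uintptr_t limit = IDENTITY_LIMIT;

	if (low) {
		if (*basep > limit)
			return false;
		if (*basep + *sizep > limit)
			*sizep = limit - *basep;
	} else {
		if (*basep + *sizep <= limit)
			return false;
		if (*basep <= limit) {
			*sizep -= limit - *basep;
			*basep = limit;
		}
	}
	return true;
}

int main(void)
{
	/* A 128 MiB region straddling the limit. */
	uintptr_t base = 0x0C000000UL;
	size_t size = 0x08000000UL;

	uintptr_t lo_base = base; size_t lo_size = size;
	if (adjust_zone_bounds(true, &lo_base, &lo_size))
		printf("low:  base=%#lx size=%#zx\n", (unsigned long) lo_base, lo_size);

	uintptr_t hi_base = base; size_t hi_size = size;
	if (adjust_zone_bounds(false, &hi_base, &hi_size))
		printf("high: base=%#lx size=%#zx\n", (unsigned long) hi_base, hi_size);

	return 0;
}

Running it clips the straddling region into a 64 MiB low part and a 64 MiB high part, which is how a port's frame_low_arch_init()/frame_high_arch_init() pair would have registered the two zones before this revert.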
kernel/generic/src/mm/page.c
r7aaed09 r1761268

 #include <arch/mm/asid.h>
 #include <mm/as.h>
-#include <mm/km.h>
 #include <mm/frame.h>
 #include <arch/barrier.h>
…
 }

-uintptr_t hw_map(uintptr_t physaddr, size_t size)
-{
-	uintptr_t virtaddr;
-	size_t asize;
-	pfn_t i;
-
-	asize = ALIGN_UP(size, PAGE_SIZE);
-	virtaddr = km_page_alloc(asize, PAGE_SIZE);
-
-	page_table_lock(AS_KERNEL, true);
-	for (i = 0; i < ADDR2PFN(asize); i++) {
-		uintptr_t addr = PFN2ADDR(i);
-		page_mapping_insert(AS_KERNEL, virtaddr + addr, physaddr + addr,
-		    PAGE_NOT_CACHEABLE | PAGE_WRITE);
-	}
-	page_table_unlock(AS_KERNEL, true);
-
-	return virtaddr;
-}
-
 int page_find_mapping(uintptr_t virt, void **phys)
 {
-
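With the generic hw_map() deleted here, each architecture again provides its own. The algorithm being dropped is still worth a note: round the size up to whole pages, reserve a window of kernel virtual addresses, and install an uncacheable writable mapping page by page. A host-side simulation (km_page_alloc_sim() and page_mapping_insert_sim() are invented stand-ins; the VA pool base is an assumption):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE  4096ULL
#define ALIGN_UP(s, a)  (((s) + ((a) - 1)) & ~((a) - 1))

static uint64_t next_va = 0xFFFF800000000000ULL;  /* assumed kernel VA pool */

/* Simulated bump allocation of a kernel virtual window. */
static uint64_t km_page_alloc_sim(uint64_t size)
{
	uint64_t va = next_va;
	next_va += size;
	return va;
}

/* Simulated page-table insertion; the real thing edits AS_KERNEL. */
static void page_mapping_insert_sim(uint64_t va, uint64_t pa)
{
	printf("map %#" PRIx64 " -> %#" PRIx64 " (uncacheable, writable)\n", va, pa);
}

static uint64_t hw_map_sim(uint64_t physaddr, uint64_t size)
{
	uint64_t asize = ALIGN_UP(size, PAGE_SIZE);
	uint64_t virtaddr = km_page_alloc_sim(asize);

	for (uint64_t off = 0; off < asize; off += PAGE_SIZE)
		page_mapping_insert_sim(virtaddr + off, physaddr + off);

	return virtaddr;
}

int main(void)
{
	/* E.g. map 8 KiB of a device register window at 0xFEE00000. */
	uint64_t regs = hw_map_sim(0xFEE00000ULL, 2 * PAGE_SIZE);
	printf("regs mapped at %#" PRIx64 "\n", regs);
	return 0;
}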
kernel/generic/src/mm/reserve.c
r7aaed09 r1761268

 #include <typedefs.h>
 #include <arch/types.h>
-#include <debug.h>
-
-static bool reserve_initialized = false;

 IRQ_SPINLOCK_STATIC_INITIALIZE_NAME(reserve_lock, "reserve_lock");
…
 {
 	reserve = frame_total_free_get();
-	reserve_initialized = true;
 }
…
 {
 	bool reserved = false;
-
-	ASSERT(reserve_initialized);

 	irq_spinlock_lock(&reserve_lock, true);
…
 void reserve_force_alloc(size_t size)
 {
-	if (!reserve_initialized)
-		return;
-
 	irq_spinlock_lock(&reserve_lock, true);
 	reserve -= size;
…
 void reserve_free(size_t size)
 {
-	if (!reserve_initialized)
-		return;
-
 	irq_spinlock_lock(&reserve_lock, true);
 	reserve += size;
-
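The reserve.c hunk removes the reserve_initialized guard, so after this change reserve_init() must run before any other reserve_* call. The accounting itself is just a signed counter; a minimal single-threaded sketch (locks omitted, pool size invented, and the counter's signedness is an assumption made for illustration):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static long reserve;  /* signed on purpose: forced allocations may drive it negative */

static void reserve_init(size_t total_free_frames)
{
	reserve = (long) total_free_frames;
}

/* Succeeds only while the pool covers the request. */
static bool reserve_try_alloc(size_t size)
{
	if (reserve >= (long) size) {
		reserve -= (long) size;
		return true;
	}
	return false;
}

/* Always succeeds, even if it overdraws the pool. */
static void reserve_force_alloc(size_t size) { reserve -= (long) size; }
static void reserve_free(size_t size)        { reserve += (long) size; }

int main(void)
{
	reserve_init(4);  /* pretend the boot-time pool is 4 free frames */

	printf("try 3: %s\n", reserve_try_alloc(3) ? "ok" : "denied");  /* ok */
	printf("try 2: %s\n", reserve_try_alloc(2) ? "ok" : "denied");  /* denied */
	reserve_force_alloc(2);  /* e.g. an atomic allocation path */
	printf("reserve now %ld\n", reserve);  /* -1 */
	reserve_free(2 + 3);
	printf("reserve now %ld\n", reserve);  /* 4 */
	return 0;
}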
kernel/generic/src/proc/thread.c
r7aaed09 r1761268

 #endif /* CONFIG_FPU */

-	/*
-	 * Allocate the kernel stack from the low-memory to prevent an infinite
-	 * nesting of TLB-misses when accessing the stack from the part of the
-	 * TLB-miss handler written in C.
-	 *
-	 * Note that low-memory is safe to be used for the stack as it will be
-	 * covered by the kernel identity mapping, which guarantees not to
-	 * nest TLB-misses infinitely (either via some hardware mechanism or
-	 * by the construction of the assembly-language part of the TLB-miss
-	 * handler).
-	 *
-	 * This restriction can be lifted once each architecture provides
-	 * a similar guarantee, for example by locking the kernel stack
-	 * in the TLB whenever it is allocated from the high-memory and the
-	 * thread is being scheduled to run.
-	 */
-	kmflags |= FRAME_LOWMEM;
-	kmflags &= ~FRAME_HIGHMEM;
-
 	thread->kstack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | kmflags);
 	if (!thread->kstack) {
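The deleted thread.c comment carries the real argument: a kernel stack in high memory could itself fault inside the TLB-miss handler that needs the stack, so the allocation flags were forced into low memory. A tiny sketch of that flag juggling; FRAME_ATOMIC's value is an assumption here, since it is elided from the frame.h hunk above:

#include <stdint.h>
#include <stdio.h>

typedef uint8_t frame_flags_t;

#define FRAME_ATOMIC   0x2   /* assumed value, for illustration only */
#define FRAME_LOWMEM   0x10  /* values from the r7aaed09 column of frame.h */
#define FRAME_HIGHMEM  0x20

int main(void)
{
	frame_flags_t kmflags = FRAME_ATOMIC | FRAME_HIGHMEM;

	kmflags |= FRAME_LOWMEM;    /* stack must be identity-mappable */
	kmflags &= ~FRAME_HIGHMEM;  /* never place it in high memory */

	printf("kmflags = 0x%02x\n", (unsigned) kmflags);  /* prints 0x12 */
	return 0;
}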