Changeset c049309 in mainline
- Timestamp: 2006-07-30T15:57:07Z (18 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: b3e8c90
- Parents: 764c302
- Location: kernel/arch/xen32
- Files: 10 edited
Legend:
- Unmodified: context lines (no prefix)
- Added: lines prefixed with "+"
- Removed: lines prefixed with "-"
kernel/arch/xen32/include/asm.h
r764c302 → rc049309

      extern uint32_t interrupt_handler_size;

    - extern void paging_on(void);
    -
      extern void interrupt_handlers(void);
    …
      GEN_READ_REG(cr0);
      GEN_READ_REG(cr2);
    - GEN_READ_REG(cr3);
    - GEN_WRITE_REG(cr3);

      GEN_READ_REG(dr0);
kernel/arch/xen32/include/boot/boot.h
r764c302 → rc049309

      typedef struct {
    -     char magic[32];              /**< "xen-<version>-<platform>" */
    -     unsigned long nr_pages;      /**< Total pages allocated to this domain */
    -     void *shared_info;           /**< Machine address of shared info struct */
    +     int8_t magic[32];            /**< "xen-<version>-<platform>" */
    +     uint32_t frames;             /**< Available frames */
    +     void *shared_info;           /**< Shared info structure (machine address) */
          uint32_t flags;              /**< SIF_xxx flags */
    -     void *store_mfn;             /**< Machine page number of shared page */
    +     pfn_t store_mfn;             /**< Shared page (machine page) */
          uint32_t store_evtchn;       /**< Event channel for store communication */
    -     void *console_mfn;           /**< Machine address of console page */
    +     void *console_mfn;           /**< Console page (machine address) */
          uint32_t console_evtchn;     /**< Event channel for console messages */
    -     unsigned long *pt_base;      /**< Virtual address of page directory */
    -     unsigned long nr_pt_frames;  /**< Number of bootstrap p.t. frames */
    -     unsigned long *mfn_list;     /**< Virtual address of page-frame list */
    -     void *mod_start;             /**< Virtual address of pre-loaded module */
    -     unsigned long mod_len;       /**< Size (bytes) of pre-loaded module */
    +     pte_t *ptl0;                 /**< Boot PTL0 (kernel address) */
    +     uint32_t pt_frames;          /**< Number of bootstrap page table frames */
    +     pfn_t *pm_map;               /**< Physical->machine frame map (kernel address) */
    +     void *mod_start;             /**< Modules start (kernel address) */
    +     uint32_t mod_len;            /**< Modules size (bytes) */
          int8_t cmd_line[GUEST_CMDLINE];
      } start_info_t;
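The start_info_t above is the boot descriptor handed over by the Xen domain builder; the changeset renames its members to the conventions used elsewhere in the port (frames, ptl0, pt_frames, pm_map). A minimal sketch, not part of the changeset, of how these fields are typically consumed; the helper name boot_info_report() is illustrative only, while PAGE_SIZE, ALIGN_UP and KA2PA are assumed to come from the existing kernel headers:

    #include <arch/boot/boot.h>    /* start_info_t, start_info */
    #include <arch/mm/page.h>      /* KA2PA, PAGE_SIZE */

    /* Illustrative helper: summarize the boot state described by start_info. */
    static void boot_info_report(void)
    {
        /* Total pseudo-physical memory given to this domain. */
        size_t mem_bytes = start_info.frames * PAGE_SIZE;

        /* The bootstrap page tables occupy pt_frames frames starting at ptl0;
         * usable memory begins right after them (cf. frame_arch_init() below). */
        uintptr_t first_free = ALIGN_UP(KA2PA(start_info.ptl0), PAGE_SIZE) +
            start_info.pt_frames * PAGE_SIZE;

        printf("xen32: %d frames (%d bytes), first free pseudo-physical address %p\n",
            start_info.frames, mem_bytes, (void *) first_free);
    }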
kernel/arch/xen32/include/mm/frame.h
r764c302 → rc049309

      #define FRAME_WIDTH  12  /* 4K */
    - #define FRAME_SIZE   (1<<FRAME_WIDTH)
    + #define FRAME_SIZE   (1 << FRAME_WIDTH)
    …
      #include <arch/types.h>
    + #include <arch/boot/boot.h>
    +
    + #define PA2MA(x)  ((start_info.pm_map[((uintptr_t) (x)) >> 12] << 12) + (((uintptr_t) (x)) & 0xfff))

      extern uintptr_t last_frame;
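PA2MA is the new pseudo-physical to machine address translation: the frame number indexes start_info.pm_map, and the byte offset inside the 4 KiB frame is carried over unchanged. A hand-unrolled equivalent for illustration only (pa2ma_by_hand() is not part of the changeset):

    #include <arch/boot/boot.h>    /* start_info.pm_map */
    #include <arch/mm/frame.h>     /* FRAME_WIDTH, FRAME_SIZE */

    /* Equivalent of PA2MA(pa), spelled out step by step. */
    static uintptr_t pa2ma_by_hand(uintptr_t pa)
    {
        uintptr_t frame  = pa >> FRAME_WIDTH;        /* pseudo-physical frame number */
        uintptr_t offset = pa & (FRAME_SIZE - 1);    /* byte offset within the frame */

        /* pm_map[] maps pseudo-physical frames to machine frames. */
        return (start_info.pm_map[frame] << FRAME_WIDTH) + offset;
    }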
kernel/arch/xen32/include/mm/page.h
r764c302 → rc049309

      #ifndef __ASM__
    + #  include <arch/hypercall.h>
      #  define KA2PA(x)  (((uintptr_t) (x)) - 0x80000000)
      #  define PA2KA(x)  (((uintptr_t) (x)) + 0x80000000)
    …
      #define PTL3_ENTRIES_ARCH  1024

    - #define PTL0_INDEX_ARCH(vaddr)  (((vaddr)>>22)&0x3ff)
    + #define PTL0_INDEX_ARCH(vaddr)  (((vaddr) >> 22) & 0x3ff)
      #define PTL1_INDEX_ARCH(vaddr)  0
      #define PTL2_INDEX_ARCH(vaddr)  0
    - #define PTL3_INDEX_ARCH(vaddr)  (((vaddr)>>12)&0x3ff)
    + #define PTL3_INDEX_ARCH(vaddr)  (((vaddr) >> 12) & 0x3ff)

    - #define GET_PTL1_ADDRESS_ARCH(ptl0, i)   ((pte_t *)((((pte_t *)(ptl0))[(i)].frame_address)<<12))
    + #define GET_PTL1_ADDRESS_ARCH(ptl0, i)   ((pte_t *)((((pte_t *)(ptl0))[(i)].frame_address) << 12))
      #define GET_PTL2_ADDRESS_ARCH(ptl1, i)   (ptl1)
      #define GET_PTL3_ADDRESS_ARCH(ptl2, i)   (ptl2)
    - #define GET_FRAME_ADDRESS_ARCH(ptl3, i)  ((uintptr_t)((((pte_t *)(ptl3))[(i)].frame_address)<<12))
    + #define GET_FRAME_ADDRESS_ARCH(ptl3, i)  ((uintptr_t)((((pte_t *)(ptl3))[(i)].frame_address) << 12))

    - #define SET_PTL0_ADDRESS_ARCH(ptl0)        (write_cr3((uintptr_t) (ptl0)))
    - #define SET_PTL1_ADDRESS_ARCH(ptl0, i, a)  (((pte_t *)(ptl0))[(i)].frame_address = (a)>>12)
    + #define SET_PTL0_ADDRESS_ARCH(ptl0) { \
    +     mmuext_op_t mmu_ext; \
    +     mmu_ext.cmd = MMUEXT_NEW_BASEPTR; \
    +     mmu_ext.arg1.mfn = ADDR2PFN(PA2MA(ptl0)); \
    +     xen_mmuext_op(&mmu_ext, 1, NULL, DOMID_SELF); \
    + }
    + #define SET_PTL1_ADDRESS_ARCH(ptl0, i, a) { \
    +     mmu_update_t update; \
    +     update.ptr = PA2MA(KA2PA(&((pte_t *) (ptl0))[(i)])); \
    +     update.val = PA2MA(a); \
    +     xen_mmu_update(&update, 1, NULL, DOMID_SELF); \
    + }
      #define SET_PTL2_ADDRESS_ARCH(ptl1, i, a)
      #define SET_PTL3_ADDRESS_ARCH(ptl2, i, a)
    - #define SET_FRAME_ADDRESS_ARCH(ptl3, i, a)  (((pte_t *)(ptl3))[(i)].frame_address = (a)>>12)
    + #define SET_FRAME_ADDRESS_ARCH(ptl3, i, a) { \
    +     mmu_update_t update; \
    +     update.ptr = PA2MA(KA2PA(&((pte_t *) (ptl3))[(i)])); \
    +     update.val = PA2MA(a); \
    +     xen_mmu_update(&update, 1, NULL, DOMID_SELF); \
    + }

      #define GET_PTL1_FLAGS_ARCH(ptl0, i)  get_pt_flags((pte_t *)(ptl0), (index_t)(i))
    …
      #define PTE_VALID_ARCH(p)       (*((uint32_t *) (p)) != 0)
      #define PTE_PRESENT_ARCH(p)     ((p)->present != 0)
    - #define PTE_GET_FRAME_ARCH(p)   ((p)->frame_address<<FRAME_WIDTH)
    + #define PTE_GET_FRAME_ARCH(p)   ((p)->frame_address << FRAME_WIDTH)
      #define PTE_WRITABLE_ARCH(p)    ((p)->writeable != 0)
      #define PTE_EXECUTABLE_ARCH(p)  1
    …

      /** When bit on this position is 0, the page fault was caused by a not-present page. */
    - #define PFERR_CODE_P     (1<<0)
    + #define PFERR_CODE_P     (1 << 0)

      /** When bit on this position is 1, the page fault was caused by a write. */
    - #define PFERR_CODE_RW    (1<<1)
    + #define PFERR_CODE_RW    (1 << 1)

      /** When bit on this position is 1, the page fault was caused in user mode. */
    - #define PFERR_CODE_US    (1<<2)
    + #define PFERR_CODE_US    (1 << 2)

      /** When bit on this position is 1, a reserved bit was set in page directory. */
    - #define PFERR_CODE_RSVD  (1<<3)
    + #define PFERR_CODE_RSVD  (1 << 3)

      /** Page Table Entry. */
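Under Xen the guest's page tables are mapped read-only, so the SET_* macros above no longer store into the PTE directly; they hand the machine address of the PTE and the new value to the hypervisor through mmu_update. A hedged sketch of batching several such updates into one hypercall, in the style frame.c uses below; map_linear() and its PROT_PRESENT constant are illustrative and not part of the changeset:

    #include <arch/hypercall.h>    /* mmu_update_t, xen_mmu_update, DOMID_SELF */
    #include <arch/mm/frame.h>     /* PA2MA */
    #include <arch/mm/page.h>      /* KA2PA, pte_t, PAGE_SIZE */

    #define PROT_PRESENT  0x001UL  /* x86 PTE present bit (same value frame.c uses) */

    /* Illustrative: write 'cnt' consecutive PTEs of one PTL3 table in a single hypercall. */
    static void map_linear(pte_t *ptl3, size_t index, uintptr_t frame, count_t cnt)
    {
        mmu_update_t updates[1024];
        count_t i;

        for (i = 0; (i < cnt) && (index + i < 1024); i++) {
            /* ptr: machine address of the PTE; val: machine frame plus flags. */
            updates[i].ptr = PA2MA(KA2PA(&ptl3[index + i]));
            updates[i].val = PA2MA(frame + i * PAGE_SIZE) | PROT_PRESENT;
        }

        if (xen_mmu_update(updates, i, NULL, DOMID_SELF) < 0)
            panic("Unable to update PTE");
    }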
kernel/arch/xen32/src/asm.S
Property mode changed from 120000 (symlink) to 100644 (regular file)
r764c302 → rc049309

    - ../../ia32/src/asm.S
    + #
    + # Copyright (C) 2001-2004 Jakub Jermar
    + # All rights reserved.
    + #
    + # Redistribution and use in source and binary forms, with or without
    + # modification, are permitted provided that the following conditions
    + # are met:
    + #
    + # - Redistributions of source code must retain the above copyright
    + #   notice, this list of conditions and the following disclaimer.
    + # - Redistributions in binary form must reproduce the above copyright
    + #   notice, this list of conditions and the following disclaimer in the
    + #   documentation and/or other materials provided with the distribution.
    + # - The name of the author may not be used to endorse or promote products
    + #   derived from this software without specific prior written permission.
    + #
    + # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
    + # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
    + # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
    + # IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
    + # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
    + # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    + # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    + # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    + # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
    + # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    + #
    +
    + ## very low and hardware-level functions
    +
    + # Mask for interrupts 0 - 31 (bits 0 - 31) where 0 means that int has no error word
    + # and 1 means interrupt with error word
    + #define ERROR_WORD_INTERRUPT_LIST 0x00027D00
    +
    + .text
    +
    + .global enable_l_apic_in_msr
    + .global interrupt_handlers
    + .global memcpy
    + .global memcpy_from_uspace
    + .global memcpy_from_uspace_failover_address
    + .global memcpy_to_uspace
    + .global memcpy_to_uspace_failover_address
    +
    +
    + #define MEMCPY_DST	4
    + #define MEMCPY_SRC	8
    + #define MEMCPY_SIZE	12
    +
    + /** Copy memory to/from userspace.
    +  *
    +  * This is almost conventional memcpy().
    +  * The difference is that there is a failover part
    +  * to where control is returned from a page fault
    +  * if the page fault occurs during copy_from_uspace()
    +  * or copy_to_uspace().
    +  *
    +  * @param MEMCPY_DST(%esp)	Destination address.
    +  * @param MEMCPY_SRC(%esp)	Source address.
    +  * @param MEMCPY_SIZE(%esp)	Size.
    +  *
    +  * @return MEMCPY_SRC(%esp) on success and 0 on failure.
    +  */
    + memcpy:
    + memcpy_from_uspace:
    + memcpy_to_uspace:
    + 	movl %edi, %edx			/* save %edi */
    + 	movl %esi, %eax			/* save %esi */
    +
    + 	movl MEMCPY_SIZE(%esp), %ecx
    + 	shrl $2, %ecx			/* size / 4 */
    +
    + 	movl MEMCPY_DST(%esp), %edi
    + 	movl MEMCPY_SRC(%esp), %esi
    +
    + 	rep movsl			/* copy as much as possible word by word */
    +
    + 	movl MEMCPY_SIZE(%esp), %ecx
    + 	andl $3, %ecx			/* size % 4 */
    + 	jz 0f
    +
    + 	rep movsb			/* copy the rest byte by byte */
    +
    + 0:
    + 	movl %edx, %edi
    + 	movl %eax, %esi
    + 	movl MEMCPY_SRC(%esp), %eax	/* MEMCPY_SRC(%esp), success */
    + 	ret
    +
    + /*
    +  * We got here from as_page_fault() after the memory operations
    +  * above had caused a page fault.
    +  */
    + memcpy_from_uspace_failover_address:
    + memcpy_to_uspace_failover_address:
    + 	movl %edx, %edi
    + 	movl %eax, %esi
    + 	xorl %eax, %eax			/* return 0, failure */
    + 	ret
    +
    +
    + ## Enable local APIC
    + #
    + # Enable local APIC in MSR.
    + #
    + enable_l_apic_in_msr:
    + 	push %eax
    +
    + 	movl $0x1b, %ecx
    + 	rdmsr
    + 	orl $(1<<11),%eax
    + 	orl $(0xfee00000),%eax
    + 	wrmsr
    +
    + 	pop %eax
    + 	ret
    +
    + # Clear nested flag
    + # overwrites %ecx
    + .macro CLEAR_NT_FLAG
    + 	pushfl
    + 	pop %ecx
    + 	and $0xffffbfff,%ecx
    + 	push %ecx
    + 	popfl
    + .endm
    +
    + ## Declare interrupt handlers
    + #
    + # Declare interrupt handlers for n interrupt
    + # vectors starting at vector i.
    + #
    + # The handlers setup data segment registers
    + # and call exc_dispatch().
    + #
    + #define INTERRUPT_ALIGN 64
    + .macro handler i n
    +
    + .ifeq \i-0x30	# Syscall handler
    + 	push %ds
    + 	push %es
    + 	push %fs
    + 	push %gs
    +
    + 	# Push arguments on stack
    + 	push %edi
    + 	push %esi
    + 	push %edx
    + 	push %ecx
    + 	push %eax
    +
    + 	# we must fill the data segment registers
    + 	movw $16,%ax
    + 	movw %ax,%ds
    + 	movw %ax,%es
    +
    + 	sti
    +
    + 	call syscall_handler	# syscall_handler(ax,cx,dx,si,di)
    + 	cli
    + 	addl $20, %esp		# clean-up of parameters
    +
    + 	pop %gs
    + 	pop %fs
    + 	pop %es
    + 	pop %ds
    +
    + 	CLEAR_NT_FLAG
    + 	iret
    + .else
    + 	/*
    + 	 * This macro distinguishes between two versions of ia32 exceptions.
    + 	 * One version has error word and the other does not have it.
    + 	 * The latter version fakes the error word on the stack so that the
    + 	 * handlers and istate_t can be the same for both types.
    + 	 */
    + 	.iflt \i-32
    + 		.if (1 << \i) & ERROR_WORD_INTERRUPT_LIST
    + 			/*
    + 			 * With error word, do nothing
    + 			 */
    + 		.else
    + 			/*
    + 			 * Version without error word,
    + 			 */
    + 			subl $4, %esp
    + 		.endif
    + 	.else
    + 		/*
    + 		 * Version without error word,
    + 		 */
    + 		subl $4, %esp
    + 	.endif
    +
    + 	push %ds
    + 	push %es
    + 	push %fs
    + 	push %gs
    +
    + #ifdef CONFIG_DEBUG_ALLREGS
    + 	push %ebx
    + 	push %ebp
    + 	push %edi
    + 	push %esi
    + #else
    + 	sub $16, %esp
    + #endif
    + 	push %edx
    + 	push %ecx
    + 	push %eax
    +
    + 	# we must fill the data segment registers
    + 	movw $16,%ax
    + 	movw %ax,%ds
    + 	movw %ax,%es
    +
    + 	pushl %esp		# *istate
    + 	pushl $(\i)		# intnum
    + 	call exc_dispatch	# excdispatch(intnum, *istate)
    + 	addl $8,%esp		# Clear arguments from stack
    +
    + 	CLEAR_NT_FLAG		# Modifies %ecx
    +
    + 	pop %eax
    + 	pop %ecx
    + 	pop %edx
    + #ifdef CONFIG_DEBUG_ALLREGS
    + 	pop %esi
    + 	pop %edi
    + 	pop %ebp
    + 	pop %ebx
    + #else
    + 	add $16, %esp
    + #endif
    +
    + 	pop %gs
    + 	pop %fs
    + 	pop %es
    + 	pop %ds
    +
    + 	addl $4,%esp		# Skip error word, no matter whether real or fake.
    + 	iret
    + .endif
    +
    + .align INTERRUPT_ALIGN
    + .if (\n-\i)-1
    + 	handler "(\i+1)",\n
    + .endif
    + .endm
    +
    + # keep in sync with pm.h !!!
    + IDT_ITEMS=64
    + .align INTERRUPT_ALIGN
    + interrupt_handlers:
    + h_start:
    + 	handler 0 IDT_ITEMS
    + h_end:
    +
    + .data
    + .global interrupt_handler_size
    +
    + interrupt_handler_size: .long (h_end-h_start)/IDT_ITEMS
kernel/arch/xen32/src/boot/boot.S
r764c302 → rc049309

      .ascii "XEN_VER=xen-3.0,"
      .ascii "HYPERCALL_PAGE=0x0000,"
    - .ascii "LOADER=generic,"
    - .ascii "PT_MODE_WRITABLE"
    + .ascii "LOADER=generic"
      .byte 0
    …
      kernel_image_start:
    - 	cld
    -
      	# copy start_info (esi initialized by Xen)
kernel/arch/xen32/src/mm/frame.c
r764c302 → rc049309

      uintptr_t last_frame = 0;

    - #define L1_PT_SHIFT	10
    - #define L2_PT_SHIFT	0
    + #define L0_PT_SHIFT	10
    + #define L3_PT_SHIFT	0

    - #define L1_PT_ENTRIES	1024
    - #define L2_PT_ENTRIES	1024
    + #define L0_PT_ENTRIES	1024
    + #define L3_PT_ENTRIES	1024

    - #define L1_OFFSET_MASK	(L1_PT_ENTRIES - 1)
    - #define L2_OFFSET_MASK	(L2_PT_ENTRIES - 1)
    + #define L0_INDEX_MASK	(L0_PT_ENTRIES - 1)
    + #define L3_INDEX_MASK	(L3_PT_ENTRIES - 1)

    - #define PFN2PTL1_OFFSET(pfn)	((pfn >> L1_PT_SHIFT) & L1_OFFSET_MASK)
    - #define PFN2PTL2_OFFSET(pfn)	((pfn >> L2_PT_SHIFT) & L2_OFFSET_MASK)
    + #define PFN2PTL0_INDEX(pfn)	((pfn >> L0_PT_SHIFT) & L0_INDEX_MASK)
    + #define PFN2PTL3_INDEX(pfn)	((pfn >> L3_PT_SHIFT) & L3_INDEX_MASK)

      #define PAGE_MASK	(~(PAGE_SIZE - 1))
    -
    - #define PTE2ADDR(pte)	(pte & PAGE_MASK)

      #define _PAGE_PRESENT	0x001UL
    …
      #define _PAGE_GLOBAL	0x100UL

    - #define L1_PROT	(_PAGE_PRESENT | _PAGE_ACCESSED)
    - #define L2_PROT	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED)
    + #define L0_PROT	(_PAGE_PRESENT | _PAGE_ACCESSED)
    + #define L3_PROT	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED)

      void frame_arch_init(void)
    …
          if (config.cpu_active == 1) {
              /* The only memory zone starts just after page table */
    -         pfn_t start = ADDR2PFN(ALIGN_UP(KA2PA(start_info.pt_base), PAGE_SIZE)) + start_info.nr_pt_frames;
    -         size_t size = start_info.nr_pages - start;
    +         pfn_t start = ADDR2PFN(ALIGN_UP(KA2PA(start_info.ptl0), PAGE_SIZE)) + start_info.pt_frames;
    +         size_t size = start_info.frames - start;

              /* Create identity mapping */
              pfn_t phys;
              count_t count = 0;
              for (phys = start; phys < start + size; phys++) {
    -             mmu_update_t updates[L2_PT_ENTRIES];
    +             mmu_update_t updates[L3_PT_ENTRIES];
                  pfn_t virt = ADDR2PFN(PA2KA(PFN2ADDR(phys)));

    -             size_t ptl1_offset = PFN2PTL1_OFFSET(virt);
    -             size_t ptl2_offset = PFN2PTL2_OFFSET(virt);
    +             size_t ptl0_index = PFN2PTL0_INDEX(virt);
    +             size_t ptl3_index = PFN2PTL3_INDEX(virt);

    -             unsigned long *ptl2_base = (unsigned long *) PTE2ADDR(start_info.pt_base[ptl1_offset]);
    +             pte_t *ptl3 = (pte_t *) PFN2ADDR(start_info.ptl0[ptl0_index].frame_address);

    -             if (ptl2_base == 0) {
    +             if (ptl3 == 0) {
                      mmuext_op_t mmu_ext;
    …
                      memsetb(PFN2ADDR(virt2), PAGE_SIZE, 0);

    -                 size_t ptl1_offset2 = PFN2PTL1_OFFSET(virt2);
    -                 size_t ptl2_offset2 = PFN2PTL2_OFFSET(virt2);
    -                 unsigned long *ptl2_base2 = (unsigned long *) PTE2ADDR(start_info.pt_base[ptl1_offset2]);
    +                 size_t ptl0_index2 = PFN2PTL0_INDEX(virt2);
    +                 size_t ptl3_index2 = PFN2PTL3_INDEX(virt2);
    +                 pte_t *ptl3_2 = (pte_t *) PFN2ADDR(start_info.ptl0[ptl0_index2].frame_address);

    -                 if (ptl2_base2 == 0)
    +                 if (ptl3_2 == 0)
                          panic("Unable to find page table reference");

    -                 updates[count].ptr = (uintptr_t) &ptl2_base2[ptl2_offset2];
    -                 updates[count].val = PFN2ADDR(start_info.mfn_list[start]) | L1_PROT;
    +                 updates[count].ptr = (uintptr_t) &ptl3_2[ptl3_index2];
    +                 updates[count].val = PA2MA(PFN2ADDR(start)) | L0_PROT;
                      if (xen_mmu_update(updates, count + 1, NULL, DOMID_SELF) < 0)
                          panic("Unable to map new page table");
    …

                      mmu_ext.cmd = MMUEXT_PIN_L1_TABLE;
    -                 mmu_ext.arg1.mfn = start_info.mfn_list[start];
    +                 mmu_ext.arg1.mfn = ADDR2PFN(PA2MA(PFN2ADDR(start)));
                      if (xen_mmuext_op(&mmu_ext, 1, NULL, DOMID_SELF) < 0)
                          panic("Error pinning new page table");

    -                 unsigned long *ptl0 = (unsigned long *) PFN2ADDR(start_info.mfn_list[ADDR2PFN(KA2PA(start_info.pt_base))]);
    +                 pte_t *ptl0 = (pte_t *) PA2MA(KA2PA(start_info.ptl0));

    -                 updates[count].ptr = (uintptr_t) &ptl0[ptl1_offset];
    -                 updates[count].val = PFN2ADDR(start_info.mfn_list[start]) | L2_PROT;
    +                 updates[count].ptr = (uintptr_t) &ptl0[ptl0_index];
    +                 updates[count].val = PA2MA(PFN2ADDR(start)) | L3_PROT;
                      if (xen_mmu_update(updates, count + 1, NULL, DOMID_SELF) < 0)
                          panic("Unable to update PTE for page table");
                      count = 0;

    -                 ptl2_base = (unsigned long *) PTE2ADDR(start_info.pt_base[ptl1_offset]);
    +                 ptl3 = (pte_t *) PFN2ADDR(start_info.ptl0[ptl0_index].frame_address);
                      start++;
                      size--;
                  }

    -             updates[count].ptr = (uintptr_t) &ptl2_base[ptl2_offset];
    -             updates[count].val = PFN2ADDR(start_info.mfn_list[phys]) | L2_PROT;
    +             updates[count].ptr = (uintptr_t) &ptl3[ptl3_index];
    +             updates[count].val = PA2MA(PFN2ADDR(phys)) | L3_PROT;
                  count++;

    -             if ((count == L2_PT_ENTRIES) || (phys + 1 == start + size)) {
    +             if ((count == L3_PT_ENTRIES) || (phys + 1 == start + size)) {
                      if (xen_mmu_update(updates, count, NULL, DOMID_SELF) < 0)
                          panic("Unable to update PTE");
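The identity-mapping loop above batches up to L3_PT_ENTRIES PTE writes per xen_mmu_update() call and, whenever the PTL0 slot is empty, turns the next free frame into a fresh PTL3 table and pins it. A small worked example of the index arithmetic, with the index macros mirrored from frame.c; where_is() is illustrative only:

    #include <arch/mm/frame.h>    /* ADDR2PFN, pfn_t */
    #include <arch/mm/page.h>     /* PA2KA */

    /* Index macros mirrored from frame.c for the example. */
    #define PFN2PTL0_INDEX(pfn)  (((pfn) >> 10) & 0x3ff)
    #define PFN2PTL3_INDEX(pfn)  ((pfn) & 0x3ff)

    /* E.g. phys 0x00400000 -> virt 0x80400000 -> pfn 0x80400:
     * PTL0 index 0x201, PTL3 index 0x000. */
    static void where_is(uintptr_t phys)
    {
        pfn_t virt = ADDR2PFN(PA2KA(phys));

        printf("phys %p -> ptl0[%u], ptl3[%u]\n", (void *) phys,
            (unsigned int) PFN2PTL0_INDEX(virt), (unsigned int) PFN2PTL3_INDEX(virt));
    }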
kernel/arch/xen32/src/mm/memory_init.c
r764c302 → rc049309

      size_t get_memory_size(void)
      {
    -     return start_info.nr_pages * PAGE_SIZE;
    +     return start_info.frames * PAGE_SIZE;
      }
kernel/arch/xen32/src/mm/page.c
r764c302 → rc049309

       */

      /** @addtogroup xen32mm
       * @{
       */
    …
      void page_arch_init(void)
      {
    -     uintptr_t cur;
    -     int flags;
    -
          if (config.cpu_active == 1) {
              page_mapping_operations = &pt_mapping_operations;
    -
    -         /*
    -          * PA2KA(identity) mapping for all frames until last_frame.
    -          */
    -         for (cur = 0; cur < last_frame; cur += FRAME_SIZE) {
    -             flags = PAGE_CACHEABLE;
    -             if ((PA2KA(cur) >= config.base) && (PA2KA(cur) < config.base + config.kernel_size))
    -                 flags |= PAGE_GLOBAL;
    -             page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, flags);
    -         }
    -
    -         exc_register(14, "page_fault", (iroutine) page_fault);
    -         // write_cr3((uintptr_t) AS_KERNEL->page_table);
    -     }
    -     else {
    -         // write_cr3((uintptr_t) AS_KERNEL->page_table);
    -     }
    -
    -     // paging_on();
    +         AS_KERNEL->page_table = (pte_t *) KA2PA(start_info.ptl0);
    +     } else
    +         SET_PTL0_ADDRESS_ARCH(AS_KERNEL->page_table);
      }
kernel/arch/xen32/src/mm/tlb.c
r764c302 → rc049309

      #include <arch/asm.h>
      #include <arch/types.h>
    + #include <arch/hypercall.h>

      /** Invalidate all entries in TLB. */
      void tlb_invalidate_all(void)
      {
    -     write_cr3(read_cr3());
    +     mmuext_op_t mmu_ext;
    +
    +     mmu_ext.cmd = MMUEXT_TLB_FLUSH_LOCAL;
    +     xen_mmuext_op(&mmu_ext, 1, NULL, DOMID_SELF);
      }
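tlb_invalidate_all() now asks the hypervisor for a local TLB flush instead of reloading CR3, which a paravirtualized guest is not allowed to do. A hedged sketch of the matching single-page variant; the changeset does not add it, and MMUEXT_INVLPG_LOCAL together with the arg1.linear_addr member are assumed from the Xen 3.0 public interface rather than confirmed in <arch/hypercall.h>:

    #include <arch/hypercall.h>

    /* Illustrative only: invalidate one page on the local CPU via mmuext_op. */
    static void tlb_invalidate_page_sketch(uintptr_t page)
    {
        mmuext_op_t mmu_ext;

        mmu_ext.cmd = MMUEXT_INVLPG_LOCAL;    /* assumed constant from Xen 3.0 */
        mmu_ext.arg1.linear_addr = page;      /* assumed union member */
        xen_mmuext_op(&mmu_ext, 1, NULL, DOMID_SELF);
    }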