Changeset 811770c in mainline
- Timestamp:
- 2016-05-05T12:06:04Z
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 57c2a87
- Parents:
- 0f17bff
- Location:
- kernel/arch/amd64
- Files:
-
- 10 edited
kernel/arch/amd64/include/arch/asm.h
 }

-/** Enable interrupts.
- *
- * Enable interrupts and return previous
- * value of EFLAGS.
- *
- * @return Old interrupt priority level.
- *
- */
-NO_TRACE static inline ipl_t interrupts_enable(void) {
-    ipl_t v;
-
+NO_TRACE static inline uint64_t read_rflags(void)
+{
+    uint64_t rflags;
+
     asm volatile (
         "pushfq\n"
         "popq %[v]\n"
-        "sti\n"
-        : [v] "=r" (v)
-    );
-
-    return v;
+        : [v] "=r" (rflags)
+    );
+
+    return rflags;
+}
+
+NO_TRACE static inline void write_rflags(uint64_t rflags)
+{
+    asm volatile (
+        "pushq %[v]\n"
+        "popfq\n"
+        :: [v] "r" (rflags)
+    );
+}
+
+/** Return interrupt priority level.
+ *
+ * Return the current interrupt priority level.
+ *
+ * @return Current interrupt priority level.
+ */
+NO_TRACE static inline ipl_t interrupts_read(void) {
+    return (ipl_t) read_rflags();
+}
+
+/** Enable interrupts.
+ *
+ * Enable interrupts and return the previous interrupt priority level.
+ *
+ * @return Old interrupt priority level.
+ */
+NO_TRACE static inline ipl_t interrupts_enable(void) {
+    ipl_t ipl = interrupts_read();
+
+    asm volatile ("sti\n");
+
+    return ipl;
 }

 /** Disable interrupts.
  *
- * Disable interrupts and return previous
- * value of EFLAGS.
+ * Disable interrupts and return the previous interrupt priority level.
  *
  * @return Old interrupt priority level.
- *
  */
 NO_TRACE static inline ipl_t interrupts_disable(void) {
-    ipl_t v;
-
-    asm volatile (
-        "pushfq\n"
-        "popq %[v]\n"
-        "cli\n"
-        : [v] "=r" (v)
-    );
-
-    return v;
+    ipl_t ipl = interrupts_read();
+
+    asm volatile ("cli\n");
+
+    return ipl;
 }

 /** Restore interrupt priority level.
  *
- * Restore EFLAGS.
+ * Restore the previously save interrupt priority level.
  *
  * @param ipl Saved interrupt priority level.
…
  */
 NO_TRACE static inline void interrupts_restore(ipl_t ipl) {
-    asm volatile (
-        "pushq %[ipl]\n"
-        "popfq\n"
-        :: [ipl] "r" (ipl)
-    );
-}
-
-/** Return interrupt priority level.
- *
- * Return EFLAFS.
- *
- * @return Current interrupt priority level.
- *
- */
-NO_TRACE static inline ipl_t interrupts_read(void) {
-    ipl_t v;
-
-    asm volatile (
-        "pushfq\n"
-        "popq %[v]\n"
-        : [v] "=r" (v)
-    );
-
-    return v;
+    write_rflags((uint64_t) ipl);
 }
…
 NO_TRACE static inline bool interrupts_disabled(void)
 {
-    ipl_t v;
-
-    asm volatile (
-        "pushfq\n"
-        "popq %[v]\n"
-        : [v] "=r" (v)
-    );
-
-    return ((v & RFLAGS_IF) == 0);
+    return ((read_rflags() & RFLAGS_IF) == 0);
 }
…
     return ((uint64_t) dx << 32) | ax;
 }
-
-/** Enable local APIC
- *
- * Enable local APIC in MSR.
- *
- */
-NO_TRACE static inline void enable_l_apic_in_msr(void)
-{
-    asm volatile (
-        "movl $0x1b, %%ecx\n"
-        "rdmsr\n"
-        "orl $(1 << 11),%%eax\n"
-        "orl $(0xfee00000),%%eax\n"
-        "wrmsr\n"
-        ::: "%eax", "%ecx", "%edx"
-    );
-}
…
 GEN_READ_REG(cr0)
+GEN_WRITE_REG(cr0)
 GEN_READ_REG(cr2)
 GEN_READ_REG(cr3)
 GEN_WRITE_REG(cr3)
+GEN_READ_REG(cr4)
+GEN_WRITE_REG(cr4)

 GEN_READ_REG(dr0)
…
 extern uintptr_t int_63;

+extern void enable_l_apic_in_msr(void);
+
 #endif
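The net effect in asm.h is that read_rflags() and write_rflags() become the single primitives on which all interrupt-state helpers are built. A minimal usage sketch of the resulting API (the critical-section body is hypothetical, not from this changeset):

    #include <arch/asm.h>

    static void example_critical_section(void)
    {
        /* Save the current RFLAGS image and mask interrupts (cli). */
        ipl_t ipl = interrupts_disable();

        /* ... work that must not be preempted by an interrupt ... */

        /* Write the saved image back via write_rflags(), restoring IF. */
        interrupts_restore(ipl);
    }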
kernel/arch/amd64/include/arch/cpu.h
 #define KERN_amd64_CPU_H_

-#define RFLAGS_CF  (1 << 0)
-#define RFLAGS_PF  (1 << 2)
-#define RFLAGS_AF  (1 << 4)
-#define RFLAGS_ZF  (1 << 6)
-#define RFLAGS_SF  (1 << 7)
-#define RFLAGS_TF  (1 << 8)
-#define RFLAGS_IF  (1 << 9)
-#define RFLAGS_DF  (1 << 10)
-#define RFLAGS_OF  (1 << 11)
-#define RFLAGS_NT  (1 << 14)
-#define RFLAGS_RF  (1 << 16)
+#define RFLAGS_CF    (1 << 0)
+#define RFLAGS_PF    (1 << 2)
+#define RFLAGS_AF    (1 << 4)
+#define RFLAGS_ZF    (1 << 6)
+#define RFLAGS_SF    (1 << 7)
+#define RFLAGS_TF    (1 << 8)
+#define RFLAGS_IF    (1 << 9)
+#define RFLAGS_DF    (1 << 10)
+#define RFLAGS_OF    (1 << 11)
+#define RFLAGS_IOPL  (3 << 12)
+#define RFLAGS_NT    (1 << 14)
+#define RFLAGS_RF    (1 << 16)
+#define RFLAGS_ID    (1 << 21)

-#define EFER_MSR_NUM    0xc0000080
-#define AMD_SCE_FLAG    0
-#define AMD_LME_FLAG    8
-#define AMD_LMA_FLAG    10
-#define AMD_FFXSR_FLAG  14
-#define AMD_NXE_FLAG    11
+#define CR0_MP  (1 << 1)
+#define CR0_EM  (1 << 2)
+#define CR0_TS  (1 << 3)
+#define CR0_AM  (1 << 18)
+#define CR0_PG  (1 << 31)
+
+#define CR4_PAE     (1 << 5)
+#define CR4_OSFXSR  (1 << 9)
+
+/* EFER bits */
+#define AMD_SCE    (1 << 0)
+#define AMD_LME    (1 << 8)
+#define AMD_LMA    (1 << 10)
+#define AMD_NXE    (1 << 11)
+#define AMD_FFXSR  (1 << 14)
+
+#define AMD_APIC_BASE_GE  (1 << 11)

 /* MSR registers */
+#define AMD_MSR_APIC_BASE  0x0000001b
+#define AMD_MSR_EFER       0xc0000080
 #define AMD_MSR_STAR       0xc0000081
 #define AMD_MSR_LSTAR      0xc0000082
…
 };

-extern void set_efer_flag(int flag);
-extern uint64_t read_efer_flag(void);
 void cpu_setup_fpu(void);
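With the EFER, CR0, and CR4 bits now defined as masks instead of bit positions, privileged-register updates become plain read-modify-write expressions. A sketch of the idiom, using the NXE update this changeset makes in amd64.c (the wrapper function name is hypothetical):

    #include <arch/asm.h>
    #include <arch/cpu.h>

    static void example_enable_nx(void)
    {
        /* Set EFER.NXE while leaving all other EFER bits untouched. */
        write_msr(AMD_MSR_EFER, read_msr(AMD_MSR_EFER) | AMD_NXE);
    }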
kernel/arch/amd64/src/amd64.c
 #endif

-/** Disable I/O on non-privileged levels
- *
- * Clean IOPL(12,13) and NT(14) flags in EFLAGS register
- */
-static void clean_IOPL_NT_flags(void)
-{
-    asm volatile (
-        "pushfq\n"
-        "pop %%rax\n"
-        "and $~(0x7000), %%rax\n"
-        "pushq %%rax\n"
-        "popfq\n"
-        ::: "%rax"
-    );
-}
-
-/** Disable alignment check
- *
- * Clean AM(18) flag in CR0 register
- */
-static void clean_AM_flag(void)
-{
-    asm volatile (
-        "mov %%cr0, %%rax\n"
-        "and $~(0x40000), %%rax\n"
-        "mov %%rax, %%cr0\n"
-        ::: "%rax"
-    );
-}
-
 /** Perform amd64-specific initialization before main_bsp() is called.
  *
…
 {
     /* Enable no-execute pages */
-    set_efer_flag(AMD_NXE_FLAG);
+    write_msr(AMD_MSR_EFER, read_msr(AMD_MSR_EFER) | AMD_NXE);
     /* Enable FPU */
     cpu_setup_fpu();
…
     pm_init();

-    /* Disable I/O on nonprivileged levels
-     * clear the NT (nested-thread) flag
-     */
-    clean_IOPL_NT_flags();
+    /* Disable I/O on nonprivileged levels, clear the nested-thread flag */
+    write_rflags(read_rflags() & ~(RFLAGS_IOPL | RFLAGS_NT));
     /* Disable alignment check */
-    clean_AM_flag();
+    write_cr0(read_cr0() & ~CR0_AM);

     if (config.cpu_active == 1) {
kernel/arch/amd64/src/asm.S
 #include <arch/kseg_struct.h>
 #include <arch/cpu.h>
+#include <arch/smp/apic.h>

 .text
…
     /* Flip the ID bit */
-    btcl $21, %edx
+    xorl $RFLAGS_ID, %edx

     /* Store RFLAGS */
…
     /* Get the ID bit again */
     popq %rdx
-    andl $(1 << 21), %eax
-    andl $(1 << 21), %edx
+    andl $RFLAGS_ID, %eax
+    andl $RFLAGS_ID, %edx

     /* 0 if not supported, 1 if supported */
…
 FUNCTION_END(cpuid)

-FUNCTION_BEGIN(set_efer_flag)
-    movl $0xc0000080, %ecx
+/** Enable local APIC
+ *
+ * Enable local APIC in MSR.
+ *
+ */
+FUNCTION_BEGIN(enable_l_apic_in_msr)
+    movl $AMD_MSR_APIC_BASE, %ecx
     rdmsr
-    btsl %edi, %eax
+    orl $(L_APIC_BASE | AMD_APIC_BASE_GE), %eax
     wrmsr
     ret
-FUNCTION_END(set_efer_flag)
-
-FUNCTION_BEGIN(read_efer_flag)
-    movl $0xc0000080, %ecx
-    rdmsr
-    ret
-FUNCTION_END(read_efer_flag)
+FUNCTION_END(enable_l_apic_in_msr)

 /*
…
     ret
 FUNCTION_END(early_putchar)
-
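The cpuid hunks above rewrite the classic CPUID-support probe in terms of RFLAGS_ID. For orientation, a C rendering of what the assembly does (a sketch, not code from the changeset):

    #include <stdbool.h>
    #include <stdint.h>
    #include <arch/asm.h>
    #include <arch/cpu.h>

    /* CPUID exists iff bit 21 of RFLAGS (the ID bit) can be toggled. */
    static bool example_has_cpuid(void)
    {
        uint64_t before = read_rflags();

        write_rflags(before ^ RFLAGS_ID);  /* try to flip the ID bit */
        uint64_t after = read_rflags();

        write_rflags(before);              /* put the original flags back */
        return ((before ^ after) & RFLAGS_ID) != 0;
    }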
kernel/arch/amd64/src/boot/multiboot.S
     movl %cr4, %eax
-    btsl $5, %eax
+    orl $CR4_PAE, %eax
     movl %eax, %cr4
…
     /* Enable long mode */
-    movl $EFER_MSR_NUM, %ecx
+    movl $AMD_MSR_EFER, %ecx
     rdmsr                           /* read EFER */
-    btsl $AMD_LME_FLAG, %eax        /* set LME = 1 */
+    orl $AMD_LME, %eax              /* set LME = 1 */
     wrmsr

     /* Enable paging to activate long mode (set CR0.PG = 1) */
     movl %cr0, %eax
-    btsl $31, %eax
+    orl $CR0_PG, %eax
     movl %eax, %cr0
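The renamed constants spell out the standard three-step long-mode switch. A mock-register sketch of the sequence (illustrative only; the real code must run as 32-bit assembly, before any of the kernel's C helpers are usable):

    #include <stdint.h>
    #include <arch/cpu.h>

    /* Hypothetical stand-in for the real CR0/CR4/EFER registers. */
    typedef struct {
        uint32_t cr0;
        uint32_t cr4;
        uint64_t efer;
    } mock_regs_t;

    static void example_long_mode_switch(mock_regs_t *r)
    {
        r->cr4 |= CR4_PAE;    /* 1. enable PAE page translation */
        r->efer |= AMD_LME;   /* 2. set EFER.LME (long mode enable) */
        r->cr0 |= CR0_PG;     /* 3. enable paging; long mode becomes active */
    }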
kernel/arch/amd64/src/boot/multiboot2.S
     movl %cr4, %eax
-    btsl $5, %eax
+    orl $CR4_PAE, %eax
     movl %eax, %cr4
…
     /* Enable long mode */
-    movl $EFER_MSR_NUM, %ecx
+    movl $AMD_MSR_EFER, %ecx
     rdmsr                           /* read EFER */
-    btsl $AMD_LME_FLAG, %eax        /* set LME = 1 */
+    orl $AMD_LME, %eax              /* set LME = 1 */
     wrmsr

     /* Enable paging to activate long mode (set CR0.PG = 1) */
     movl %cr0, %eax
-    btsl $31, %eax
+    orl $CR0_PG, %eax
     movl %eax, %cr0
kernel/arch/amd64/src/cpu/cpu.c
 void cpu_setup_fpu(void)
 {
-    asm volatile (
-        "movq %%cr0, %%rax\n"
-        "btsq $1, %%rax\n"  /* cr0.mp */
-        "btrq $2, %%rax\n"  /* cr0.em */
-        "movq %%rax, %%cr0\n"
-
-        "movq %%cr4, %%rax\n"
-        "bts $9, %%rax\n"   /* cr4.osfxsr */
-        "movq %%rax, %%cr4\n"
-        ::: "%rax"
-    );
+    write_cr0((read_cr0() & ~CR0_EM) | CR0_MP);
+    write_cr4(read_cr4() | CR4_OSFXSR);
 }
…
 void fpu_disable(void)
 {
-    asm volatile (
-        "mov %%cr0, %%rax\n"
-        "bts $3, %%rax\n"
-        "mov %%rax, %%cr0\n"
-        ::: "%rax"
-    );
+    write_cr0(read_cr0() | CR0_TS);
 }

 void fpu_enable(void)
 {
-    asm volatile (
-        "mov %%cr0, %%rax\n"
-        "btr $3, %%rax\n"
-        "mov %%rax, %%cr0\n"
-        ::: "%rax"
-    );
+    write_cr0(read_cr0() & ~CR0_TS);
 }
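fpu_disable() and fpu_enable() now toggle CR0.TS through the generated accessors. Setting TS makes the next FPU instruction raise a #NM trap, which is the hook lazy FPU context switching relies on; a sketch of that pattern (the scheduler and trap wiring here are hypothetical):

    #include <arch/cpu.h>

    /* On a context switch, do not save FPU state eagerly. */
    static void example_after_context_switch(void)
    {
        fpu_disable();  /* set CR0.TS: first FPU use raises #NM */
    }

    /* Hypothetical #NM handler: give the FPU to the current thread. */
    static void example_on_nm_trap(void)
    {
        fpu_enable();   /* clear CR0.TS so FPU instructions proceed */
        /* ... restore this thread's FPU context here ... */
    }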
kernel/arch/amd64/src/smp/ap.S
     movl %cr4, %eax
-    btsl $5, %eax
+    orl $CR4_PAE, %eax
     movl %eax, %cr4
…
     # Enable long mode
-    movl $EFER_MSR_NUM, %ecx        # EFER MSR number
+    movl $AMD_MSR_EFER, %ecx        # EFER MSR number
     rdmsr                           # Read EFER
-    btsl $AMD_LME_FLAG, %eax        # Set LME=1
+    orl $AMD_LME, %eax              # Set LME=1
     wrmsr                           # Write EFER

     # Enable paging to activate long mode (set CR0.PG = 1)
     movl %cr0, %eax
-    btsl $31, %eax
+    orl $CR0_PG, %eax
     movl %eax, %cr0
kernel/arch/amd64/src/syscall.c
 {
     /* Enable SYSCALL/SYSRET */
-    set_efer_flag(AMD_SCE_FLAG);
+    write_msr(AMD_MSR_EFER, read_msr(AMD_MSR_EFER) | AMD_SCE);

     /* Setup syscall entry address */
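Here the same read-modify-write idiom replaces set_efer_flag(). A sketch showing how SCE could be toggled symmetrically (the function name is hypothetical; the changeset itself only enables it):

    #include <stdbool.h>
    #include <stdint.h>
    #include <arch/asm.h>
    #include <arch/cpu.h>

    static void example_set_sce(bool on)
    {
        uint64_t efer = read_msr(AMD_MSR_EFER);

        if (on)
            efer |= AMD_SCE;   /* enable SYSCALL/SYSRET */
        else
            efer &= ~AMD_SCE;  /* disable SYSCALL/SYSRET */

        write_msr(AMD_MSR_EFER, efer);
    }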
kernel/arch/amd64/src/userspace.c
 void userspace(uspace_arg_t *kernel_uarg)
 {
-    ipl_t ipl = interrupts_disable();
+    uint64_t rflags = read_rflags();

-    ipl &= ~(RFLAGS_CF | RFLAGS_PF | RFLAGS_AF | RFLAGS_ZF | RFLAGS_SF |
-        RFLAGS_DF | RFLAGS_OF);
+    rflags &= ~RFLAGS_NT;
+    rflags |= RFLAGS_IF;

     asm volatile (
         "pushq %[udata_des]\n"
         "pushq %[stack_top]\n"
-        "pushq %[ipl]\n"
+        "pushq %[rflags]\n"
         "pushq %[utext_des]\n"
         "pushq %[entry]\n"
…
         [stack_top] "r" ((uint8_t *) kernel_uarg->uspace_stack +
             kernel_uarg->uspace_stack_size),
-        [ipl] "r" (ipl),
+        [rflags] "r" (rflags),
         [utext_des] "i" (GDT_SELECTOR(UTEXT_DES) | PL_USER),
         [entry] "r" (kernel_uarg->uspace_entry),
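The five pushq instructions above build the frame that iretq pops on the way to userspace. For orientation, the layout from the lowest stack address up (an illustrative struct, not part of the source):

    #include <stdint.h>

    /* What iretq expects on the stack, lowest address first. */
    typedef struct {
        uint64_t rip;     /* [entry]     userspace entry point       */
        uint64_t cs;      /* [utext_des] user code segment selector  */
        uint64_t rflags;  /* [rflags]    IF set, NT cleared          */
        uint64_t rsp;     /* [stack_top] initial userspace stack top */
        uint64_t ss;      /* [udata_des] user data segment selector  */
    } example_iretq_frame_t;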