Changeset f24d300 in mainline
- Timestamp: 2009-03-03T15:52:55Z (16 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: e762b43
- Parents: add04f7
- Location: kernel/arch/amd64
- Files: 6 edited
kernel/arch/amd64/include/asm.h
--- kernel/arch/amd64/include/asm.h (radd04f7)
+++ kernel/arch/amd64/include/asm.h (rf24d300)
@@ -27,5 +27,5 @@
  */
 
-/** @addtogroup amd64 
+/** @addtogroup amd64
  * @{
  */
@@ -46,4 +46,5 @@
  * The stack is assumed to be STACK_SIZE bytes long.
  * The stack must start on page boundary.
+ *
  */
 static inline uintptr_t get_stack_base(void)
@@ -51,5 +52,9 @@
     uintptr_t v;
     
-    asm volatile ("andq %%rsp, %0\n" : "=r" (v) : "0" (~((uint64_t)STACK_SIZE-1)));
+    asm volatile (
+        "andq %%rsp, %[v]\n"
+        : [v] "=r" (v)
+        : "0" (~((uint64_t) STACK_SIZE-1))
+    );
     
     return v;
@@ -73,10 +78,16 @@
  * @param port Port to read from
  * @return Value read
+ *
  */
 static inline uint8_t pio_read_8(ioport8_t *port)
 {
     uint8_t val;
-    
-    asm volatile ("inb %w1, %b0 \n" : "=a" (val) : "d" (port));
+    
+    asm volatile (
+        "inb %w[port], %b[val]\n"
+        : [val] "=a" (val)
+        : [port] "d" (port)
+    );
+    
     return val;
 }
@@ -88,4 +99,5 @@
  * @param port Port to read from
  * @return Value read
+ *
  */
 static inline uint16_t pio_read_16(ioport16_t *port)
@@ -93,5 +105,10 @@
     uint16_t val;
     
-    asm volatile ("inw %w1, %w0 \n" : "=a" (val) : "d" (port));
+    asm volatile (
+        "inw %w[port], %w[val]\n"
+        : [val] "=a" (val)
+        : [port] "d" (port)
+    );
+    
     return val;
 }
@@ -103,4 +120,5 @@
  * @param port Port to read from
  * @return Value read
+ *
  */
 static inline uint32_t pio_read_32(ioport32_t *port)
@@ -108,5 +126,10 @@
     uint32_t val;
     
-    asm volatile ("inl %w1, %0 \n" : "=a" (val) : "d" (port));
+    asm volatile (
+        "inl %w[port], %[val]\n"
+        : [val] "=a" (val)
+        : [port] "d" (port)
+    );
+    
     return val;
 }
@@ -118,8 +141,12 @@
  * @param port Port to write to
  * @param val Value to write
+ *
  */
 static inline void pio_write_8(ioport8_t *port, uint8_t val)
 {
-    asm volatile ("outb %b0, %w1\n" : : "a" (val), "d" (port));
+    asm volatile (
+        "outb %b[val], %w[port]\n"
+        :: [val] "a" (val), [port] "d" (port)
+    );
 }
 
@@ -130,8 +157,12 @@
  * @param port Port to write to
  * @param val Value to write
+ *
  */
 static inline void pio_write_16(ioport16_t *port, uint16_t val)
 {
-    asm volatile ("outw %w0, %w1\n" : : "a" (val), "d" (port));
+    asm volatile (
+        "outw %w[val], %w[port]\n"
+        :: [val] "a" (val), [port] "d" (port)
+    );
 }
 
@@ -142,8 +173,12 @@
  * @param port Port to write to
  * @param val Value to write
+ *
  */
 static inline void pio_write_32(ioport32_t *port, uint32_t val)
 {
-    asm volatile ("outl %0, %w1\n" : : "a" (val), "d" (port));
+    asm volatile (
+        "outl %[val], %w[port]\n"
+        :: [val] "a" (val), [port] "d" (port)
+    );
 }
 
@@ -160,13 +195,16 @@
  *
  * @return Old interrupt priority level.
+ *
  */
 static inline ipl_t interrupts_enable(void) {
     ipl_t v;
-    __asm__ volatile (
+    
+    asm volatile (
         "pushfq\n"
-        "popq %0\n"
+        "popq %[v]\n"
         "sti\n"
-        : "=r" (v)
-    );
+        : [v] "=r" (v)
+    );
+    
     return v;
 }
@@ -178,13 +216,16 @@
  *
  * @return Old interrupt priority level.
+ *
  */
 static inline ipl_t interrupts_disable(void) {
     ipl_t v;
-    __asm__ volatile (
+    
+    asm volatile (
         "pushfq\n"
-        "popq %0\n"
+        "popq %[v]\n"
        "cli\n"
-        : "=r" (v)
-    );
+        : [v] "=r" (v)
+    );
+    
     return v;
 }
@@ -195,10 +236,11 @@
  *
  * @param ipl Saved interrupt priority level.
+ *
  */
 static inline void interrupts_restore(ipl_t ipl) {
-    __asm__ volatile (
-        "pushq %0\n"
+    asm volatile (
+        "pushq %[ipl]\n"
         "popfq\n"
-        : : "r" (ipl)
-    );
+        :: [ipl] "r" (ipl)
+    );
 }
@@ -209,12 +251,15 @@
  *
  * @return Current interrupt priority level.
+ *
  */
 static inline ipl_t interrupts_read(void) {
     ipl_t v;
-    __asm__ volatile (
+    
+    asm volatile (
         "pushfq\n"
-        "popq %0\n"
-        : "=r" (v)
-    );
+        "popq %[v]\n"
+        : [v] "=r" (v)
+    );
+    
     return v;
 }
@@ -223,8 +268,9 @@
 static inline void write_msr(uint32_t msr, uint64_t value)
 {
-    __asm__ volatile (
-        "wrmsr;" : : "c" (msr),
-        "a" ((uint32_t)(value)),
-        "d" ((uint32_t)(value >> 32))
-    );
+    asm volatile (
+        "wrmsr\n"
+        :: "c" (msr),
+           "a" ((uint32_t) (value)),
+           "d" ((uint32_t) (value >> 32))
+    );
 }
@@ -233,8 +279,11 @@
 {
     uint32_t ax, dx;
-    
-    __asm__ volatile (
-        "rdmsr;" : "=a" (ax), "=d" (dx) : "c" (msr)
-    );
-    return ((uint64_t)dx << 32) | ax;
+    
+    asm volatile (
+        "rdmsr\n"
+        : "=a" (ax), "=d" (dx)
+        : "c" (msr)
+    );
+    
+    return ((uint64_t) dx << 32) | ax;
 }
@@ -244,16 +293,15 @@
  *
  * Enable local APIC in MSR.
+ *
  */
 static inline void enable_l_apic_in_msr()
 {
-    __asm__ volatile (
+    asm volatile (
         "movl $0x1b, %%ecx\n"
         "rdmsr\n"
-        "orl $(1<<11),%%eax\n"
+        "orl $(1 << 11),%%eax\n"
         "orl $(0xfee00000),%%eax\n"
         "wrmsr\n"
-        :
-        :
-        :"%eax","%ecx","%edx"
-    );
+        ::: "%eax","%ecx","%edx"
+    );
 }
@@ -262,9 +310,10 @@
 {
     uintptr_t *ip;
-    
-    __asm__ volatile (
-        "mov %%rip, %0"
-        : "=r" (ip)
-    );
+    
+    asm volatile (
+        "mov %%rip, %[ip]"
+        : [ip] "=r" (ip)
+    );
+    
     return ip;
 }
@@ -273,7 +322,11 @@
  *
  * @param addr Address on a page whose TLB entry is to be invalidated.
+ *
  */
 static inline void invlpg(uintptr_t addr)
 {
-    __asm__ volatile ("invlpg %0\n" :: "m" (*((unative_t *)addr)));
+    asm volatile (
+        "invlpg %[addr]\n"
+        :: [addr] "m" (*((unative_t *) addr))
+    );
 }
@@ -282,7 +335,11 @@
  *
  * @param gdtr_reg Address of memory from where to load GDTR.
+ *
  */
 static inline void gdtr_load(struct ptr_16_64 *gdtr_reg)
 {
-    __asm__ volatile ("lgdtq %0\n" : : "m" (*gdtr_reg));
+    asm volatile (
+        "lgdtq %[gdtr_reg]\n"
+        :: [gdtr_reg] "m" (*gdtr_reg)
+    );
 }
@@ -291,7 +348,11 @@
  *
  * @param gdtr_reg Address of memory to where to load GDTR.
+ *
  */
 static inline void gdtr_store(struct ptr_16_64 *gdtr_reg)
 {
-    __asm__ volatile ("sgdtq %0\n" : : "m" (*gdtr_reg));
+    asm volatile (
+        "sgdtq %[gdtr_reg]\n"
+        :: [gdtr_reg] "m" (*gdtr_reg)
+    );
 }
@@ -300,7 +361,10 @@
  *
  * @param idtr_reg Address of memory from where to load IDTR.
+ *
  */
 static inline void idtr_load(struct ptr_16_64 *idtr_reg)
 {
-    __asm__ volatile ("lidtq %0\n" : : "m" (*idtr_reg));
+    asm volatile (
+        "lidtq %[idtr_reg]\n"
+        :: [idtr_reg] "m" (*idtr_reg));
 }
@@ -309,21 +373,31 @@
  *
  * @param sel Selector specifying descriptor of TSS segment.
+ *
  */
 static inline void tr_load(uint16_t sel)
 {
-    __asm__ volatile ("ltr %0" : : "r" (sel));
+    asm volatile (
+        "ltr %[sel]"
+        :: [sel] "r" (sel)
+    );
 }
 
 #define GEN_READ_REG(reg) static inline unative_t read_ ##reg (void) \
-    { \
-        unative_t res; \
-        __asm__ volatile ("movq %%" #reg ", %0" : "=r" (res) ); \
-        return res; \
-    }
+    { \
+        unative_t res; \
+        asm volatile ( \
+            "movq %%" #reg ", %[res]" \
+            : [res] "=r" (res) \
+        ); \
+        return res; \
+    }
 
 #define GEN_WRITE_REG(reg) static inline void write_ ##reg (unative_t regn) \
-    { \
-        __asm__ volatile ("movq %0, %%" #reg : : "r" (regn)); \
-    }
+    { \
+        asm volatile ( \
+            "movq %[regn], %%" #reg \
+            :: [regn] "r" (regn) \
+        ); \
+    }
 
 GEN_READ_REG(cr0)
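The change repeated throughout asm.h is the move from GCC's positional inline-asm operands (%0, %1, ...) to named operands: each constraint gets a [symbol] label and the template refers to it as %[symbol]. A minimal self-contained sketch of the idiom, with both spellings side by side (the ioport8_t typedef is an assumption of this sketch; HelenOS defines its own I/O port types elsewhere):

#include <stdint.h>

/* Assumed for this sketch only. */
typedef volatile uint8_t ioport8_t;

static inline uint8_t pio_read_8_positional(ioport8_t *port)
{
    uint8_t val;

    /* Positional: %0 is the first listed operand, %1 the second. */
    asm volatile ("inb %w1, %b0\n" : "=a" (val) : "d" (port));

    return val;
}

static inline uint8_t pio_read_8_named(ioport8_t *port)
{
    uint8_t val;

    /*
     * Named: [val] and [port] label the constraints and the template
     * uses %[val] / %[port]. Width modifiers still apply in front of
     * the name (%b selects the 8-bit register, %w the 16-bit one).
     */
    asm volatile (
        "inb %w[port], %b[val]\n"
        : [val] "=a" (val)
        : [port] "d" (port)
    );

    return val;
}

Both forms assemble to the same inb instruction; the named form simply cannot be broken by reordering the operand lists, which is the point of converting a file full of asm statements like this one.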
kernel/arch/amd64/include/atomic.h
--- kernel/arch/amd64/include/atomic.h (radd04f7)
+++ kernel/arch/amd64/include/atomic.h (rf24d300)
@@ -27,5 +27,5 @@
  */
 
-/** @addtogroup amd64 
+/** @addtogroup amd64
  * @{
  */
@@ -42,7 +42,13 @@
 static inline void atomic_inc(atomic_t *val) {
 #ifdef CONFIG_SMP
-    asm volatile ("lock incq %0\n" : "+m" (val->count));
+    asm volatile (
+        "lock incq %[count]\n"
+        : [count] "+m" (val->count)
+    );
 #else
-    asm volatile ("incq %0\n" : "+m" (val->count));
+    asm volatile (
+        "incq %[count]\n"
+        : [count] "+m" (val->count)
+    );
 #endif /* CONFIG_SMP */
 }
@@ -50,7 +56,13 @@
 static inline void atomic_dec(atomic_t *val) {
 #ifdef CONFIG_SMP
-    asm volatile ("lock decq %0\n" : "+m" (val->count));
+    asm volatile (
+        "lock decq %[count]\n"
+        : [count] "+m" (val->count)
+    );
 #else
-    asm volatile ("decq %0\n" : "+m" (val->count));
+    asm volatile (
+        "decq %[count]\n"
+        : [count] "+m" (val->count)
+    );
 #endif /* CONFIG_SMP */
 }
@@ -59,10 +71,10 @@
 {
     long r = 1;
     
     asm volatile (
-        "lock xaddq %1, %0\n"
-        : "+m" (val->count), "+r" (r)
+        "lock xaddq %[r], %[count]\n"
+        : [count] "+m" (val->count), [r] "+r" (r)
     );
     
     return r;
 }
@@ -73,6 +85,6 @@
 
     asm volatile (
-        "lock xaddq %1, %0\n"
-        : "+m" (val->count), "+r" (r)
+        "lock xaddq %[r], %[count]\n"
+        : [count] "+m" (val->count), [r] "+r" (r)
     );
 
@@ -80,6 +92,6 @@
 }
 
-#define atomic_preinc(val) (atomic_postinc(val) + 1)
-#define atomic_predec(val) (atomic_postdec(val) - 1)
+#define atomic_preinc(val)  (atomic_postinc(val) + 1)
+#define atomic_predec(val)  (atomic_postdec(val) - 1)
 
 static inline uint64_t test_and_set(atomic_t *val) {
@@ -87,6 +99,6 @@
 
     asm volatile (
-        "movq $1, %0\n"
-        "xchgq %0, %1\n"
-        : "=r" (v), "+m" (val->count)
+        "movq $1, %[v]\n"
+        "xchgq %[v], %[count]\n"
+        : [v] "=r" (v), [count] "+m" (val->count)
     );
@@ -100,20 +112,20 @@
 {
     uint64_t tmp;
     
     preemption_disable();
     asm volatile (
         "0:\n"
 #ifdef CONFIG_HT
         "pause\n"
 #endif
-        "mov %0, %1\n"
-        "testq %1, %1\n"
+        "mov %[count], %[tmp]\n"
+        "testq %[tmp], %[tmp]\n"
         "jnz 0b\n"  /* lightweight looping on locked spinlock */
         
-        "incq %1\n"  /* now use the atomic operation */
-        "xchgq %0, %1\n"
-        "testq %1, %1\n"
+        "incq %[tmp]\n"  /* now use the atomic operation */
+        "xchgq %[count], %[tmp]\n"
+        "testq %[tmp], %[tmp]\n"
         "jnz 0b\n"
-        : "+m" (val->count), "=&r" (tmp)
+        : [count] "+m" (val->count), [tmp] "=&r" (tmp)
     );
     /*
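The operand pair worth noting in atomic.h is [count] "+m" (val->count), [r] "+r" (r): lock xaddq atomically exchanges the register with the memory word and stores their sum in memory, so the register comes back holding the pre-increment value. A standalone sketch under the assumption that atomic_t is just a wrapped counter (the kernel type lives in its own headers):

#include <stdint.h>

/* Simplified stand-in for the kernel's atomic_t. */
typedef struct {
    volatile long count;
} atomic_t;

static inline long atomic_postinc_sketch(atomic_t *val)
{
    long r = 1;

    /*
     * After lock xaddq: count == old + 1 and r == old,
     * all in one atomic read-modify-write.
     */
    asm volatile (
        "lock xaddq %[r], %[count]\n"
        : [count] "+m" (val->count), [r] "+r" (r)
    );

    return r;
}

This is also why atomic_preinc needs no asm of its own: atomic_postinc(val) + 1 is the post-increment result corrected by one, exactly as the two macros in the diff define it.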
kernel/arch/amd64/src/amd64.c
--- kernel/arch/amd64/src/amd64.c (radd04f7)
+++ kernel/arch/amd64/src/amd64.c (rf24d300)
@@ -73,5 +73,5 @@
 static void clean_IOPL_NT_flags(void)
 {
-    asm (
+    asm volatile (
         "pushfq\n"
         "pop %%rax\n"
@@ -79,7 +79,5 @@
         "pushq %%rax\n"
         "popfq\n"
-        :
-        :
-        : "%rax"
+        ::: "%rax"
     );
 }
@@ -91,11 +89,9 @@
 static void clean_AM_flag(void)
 {
-    asm (
+    asm volatile (
         "mov %%cr0, %%rax\n"
         "and $~(0x40000), %%rax\n"
         "mov %%rax, %%cr0\n"
-        :
-        :
-        : "%rax"
+        ::: "%rax"
     );
 }
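Two deliberate touches here: asm becomes asm volatile, spelling out that these statements are executed purely for their side effects (GCC already treats an asm with no output operands as implicitly volatile, so the generated code is unchanged), and the empty output and input sections collapse into the ::: shorthand before the clobber list. A sketch of the equivalence, written out long-hand:

static void clean_AM_flag_sketch(void)
{
    asm volatile (
        "mov %%cr0, %%rax\n"
        "and $~(0x40000), %%rax\n"  /* clear CR0.AM (bit 18) */
        "mov %%rax, %%cr0\n"
        :           /* no outputs */
        :           /* no inputs */
        : "%rax"    /* rax is overwritten */
    );
}

The ::: "%rax" form in the diff is the same three separators with the two blank sections removed.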
kernel/arch/amd64/src/cpu/cpu.c
--- kernel/arch/amd64/src/cpu/cpu.c (radd04f7)
+++ kernel/arch/amd64/src/cpu/cpu.c (rf24d300)
@@ -78,15 +78,13 @@
 {
     asm volatile (
-        "movq %%cr0, %%rax;"
-        "btsq $1, %%rax;"  /* cr0.mp */
-        "btrq $2, %%rax;"  /* cr0.em */
-        "movq %%rax, %%cr0;"
-        
-        "movq %%cr4, %%rax;"
-        "bts $9, %%rax;"  /* cr4.osfxsr */
-        "movq %%rax, %%cr4;"
-        :
-        :
-        :"%rax"
-    );
+        "movq %%cr0, %%rax\n"
+        "btsq $1, %%rax\n"  /* cr0.mp */
+        "btrq $2, %%rax\n"  /* cr0.em */
+        "movq %%rax, %%cr0\n"
+        
+        "movq %%cr4, %%rax\n"
+        "bts $9, %%rax\n"  /* cr4.osfxsr */
+        "movq %%rax, %%cr4\n"
+        ::: "%rax"
+    );
 }
@@ -94,3 +92,3 @@
-/** Set the TS flag to 1. 
+/** Set the TS flag to 1.
  *
  * If a thread accesses coprocessor, exception is run, which
@@ -100,11 +98,9 @@
 void fpu_disable(void)
 {
-    asm volatile (
-        "mov %%cr0,%%rax;"
-        "bts $3,%%rax;"
-        "mov %%rax,%%cr0;"
-        :
-        :
-        :"%rax"
-    );
+    asm volatile (
+        "mov %%cr0, %%rax\n"
+        "bts $3, %%rax\n"
+        "mov %%rax, %%cr0\n"
+        ::: "%rax"
+    );
 }
@@ -112,11 +108,9 @@
 void fpu_enable(void)
 {
-    asm volatile (
-        "mov %%cr0,%%rax;"
-        "btr $3,%%rax;"
-        "mov %%rax,%%cr0;"
-        :
-        :
-        :"%rax"
-    );
+    asm volatile (
+        "mov %%cr0, %%rax\n"
+        "btr $3, %%rax\n"
+        "mov %%rax, %%cr0\n"
+        ::: "%rax"
+    );
 }
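fpu_disable() and fpu_enable() toggle CR0.TS (bit 3): with TS set, the first FPU/SSE instruction a thread executes raises a device-not-available exception (#NM), which is the hook for the lazy FPU context switching described in the comment. As an illustration only (the changeset itself keeps the explicit %rax), the same toggle can be written with a named scratch operand instead of a clobber:

#include <stdint.h>

static inline void fpu_disable_sketch(void)
{
    uint64_t cr0;

    asm volatile (
        "mov %%cr0, %[cr0]\n"
        "bts $3, %[cr0]\n"   /* set CR0.TS */
        "mov %[cr0], %%cr0\n"
        : [cr0] "=&r" (cr0)  /* early-clobber scratch register */
    );
}

Letting the compiler pick the scratch register avoids the hard-coded clobber, at the cost of being less literal about what the hardware sees.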
kernel/arch/amd64/src/fpu_context.c
--- kernel/arch/amd64/src/fpu_context.c (radd04f7)
+++ kernel/arch/amd64/src/fpu_context.c (rf24d300)
@@ -40,6 +40,6 @@
 {
     asm volatile (
-        "fxsave %0"
-        : "=m" (*fctx)
-        );
+        "fxsave %[fctx]\n"
+        : [fctx] "=m" (*fctx)
+    );
 }
@@ -49,6 +49,6 @@
 {
     asm volatile (
-        "fxrstor %0"
-        : "=m" (*fctx)
-        );
+        "fxrstor %[fctx]\n"
+        : [fctx] "=m" (*fctx)
+    );
 }
@@ -58,5 +58,5 @@
     /* TODO: Zero all SSE, MMX etc. registers */
     asm volatile (
-        "fninit;"
+        "fninit\n"
     );
 }
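fxsave and fxrstor move the complete x87/MMX/SSE state through a 512-byte memory image that must be 16-byte aligned; the memory constraint hands the address expression to the instruction. A self-contained sketch with an assumed layout (the real fpu_context_t is declared in the kernel's own headers):

#include <stdint.h>

/* Assumed shape: fxsave/fxrstor require 512 bytes, 16-byte aligned. */
typedef struct {
    uint8_t raw[512];
} __attribute__((aligned(16))) fpu_context_t;

static inline void fpu_context_save_sketch(fpu_context_t *fctx)
{
    asm volatile (
        "fxsave %[fctx]\n"
        : [fctx] "=m" (*fctx)  /* the 512-byte area is written */
    );
}

static inline void fpu_context_restore_sketch(fpu_context_t *fctx)
{
    /* Kept as "=m" to mirror the diff, although fxrstor only reads
       the area, so a plain "m" input would describe it better. */
    asm volatile (
        "fxrstor %[fctx]\n"
        : [fctx] "=m" (*fctx)
    );
}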
kernel/arch/amd64/src/userspace.c
--- kernel/arch/amd64/src/userspace.c (radd04f7)
+++ kernel/arch/amd64/src/userspace.c (rf24d300)
@@ -27,5 +27,5 @@
  */
 
-/** @addtogroup amd64 
+/** @addtogroup amd64
  * @{
  */
@@ -48,33 +48,30 @@
 void userspace(uspace_arg_t *kernel_uarg)
 {
-    ipl_t ipl;
+    ipl_t ipl = interrupts_disable();
     
-    ipl = interrupts_disable();
-    
-    /* Clear CF,PF,AF,ZF,SF,DF,OF */
+    /* Clear CF, PF, AF, ZF, SF, DF, OF */
     ipl &= ~(0xcd4);
-    
-    asm volatile (""
-        "pushq %0\n"
-        "pushq %1\n"
-        "pushq %2\n"
-        "pushq %3\n"
-        "pushq %4\n"
-        "movq %5, %%rax\n"
-        /* %rdi is defined to hold pcb_ptr - set it to 0 */
-        "xorq %%rdi, %%rdi\n"
-        "iretq\n"
-        : :
-        "i" (gdtselector(UDATA_DES) | PL_USER),
-        "r" (kernel_uarg->uspace_stack+THREAD_STACK_SIZE),
-        "r" (ipl),
-        "i" (gdtselector(UTEXT_DES) | PL_USER),
-        "r" (kernel_uarg->uspace_entry),
-        "r" (kernel_uarg->uspace_uarg)
-        : "rax"
-    );
+    
+    asm volatile (
+        "pushq %[udata_des]\n"
+        "pushq %[stack_size]\n"
+        "pushq %[ipl]\n"
+        "pushq %[utext_des]\n"
+        "pushq %[entry]\n"
+        "movq %[uarg], %%rax\n"
+        
+        /* %rdi is defined to hold pcb_ptr - set it to 0 */
+        "xorq %%rdi, %%rdi\n"
+        "iretq\n"
+        :: [udata_des] "i" (gdtselector(UDATA_DES) | PL_USER),
+           [stack_size] "r" (kernel_uarg->uspace_stack + THREAD_STACK_SIZE),
+           [ipl] "r" (ipl),
+           [utext_des] "i" (gdtselector(UTEXT_DES) | PL_USER),
+           [entry] "r" (kernel_uarg->uspace_entry),
+           [uarg] "r" (kernel_uarg->uspace_uarg)
+        : "rax"
+    );
     
     /* Unreachable */
-    for(;;)
-        ;
+    while (1);
 }
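The five pushq instructions build, from the top of the kernel stack downward, exactly the frame that iretq pops when dropping to ring 3, which is what makes the named operands so much more readable than %0 through %5 here. The frame consumed by iretq, in push order:

    pushq %[udata_des]   ->  SS      user data selector, RPL 3
    pushq %[stack_size]  ->  RSP     top of the new user stack
                                     (uspace_stack + THREAD_STACK_SIZE)
    pushq %[ipl]         ->  RFLAGS  the sanitized flags from above
    pushq %[utext_des]   ->  CS      user code selector, RPL 3
    pushq %[entry]       ->  RIP     userspace entry point

One naming nit the diff inherits: [stack_size] actually carries the stack-top address, not a size, since THREAD_STACK_SIZE has already been added to the base.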
Note: See TracChangeset for help on using the changeset viewer.