Changeset e7b7be3f in mainline
- Timestamp: 2007-01-22T13:10:08Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 0f3fc9b
- Parents: 62c63fc
- Files: 49 edited
Legend:
- "- N" removed (N = line number in parent 62c63fc)
- "+ N" added (N = line number in e7b7be3f)
- unmarked indented lines are unchanged context
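Every hunk below makes the same mechanical substitution: the double-underscore alternate keywords __asm__ and __volatile__ become the plain asm and volatile spellings. Both forms are GCC extensions that compile to identical code; the alternate keywords exist only so inline assembly can be written under strict -std=c89/-std=c99 ISO modes, where plain asm is not a keyword, so the rewrite assumes the tree is built in a GNU C mode. A minimal before/after sketch of the pattern (illustrative, not taken from the tree):

    /* Before: alternate keywords, accepted even under strict ISO modes. */
    static inline void relax_old(void)
    {
        __asm__ __volatile__ ("pause" ::: "memory");
    }

    /* After: plain keywords, accepted under -std=gnu99 and the GNU default. */
    static inline void relax_new(void)
    {
        asm volatile ("pause" ::: "memory");
    }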
boot/arch/sparc64/loader/ofwarch.c
- 72	__asm__ volatile ("ldxa [%1] %2, %0\n" : "=r" (current_mid) : "r" (0), "i" (ASI_UPA_CONFIG));
+ 72	asm volatile ("ldxa [%1] %2, %0\n" : "=r" (current_mid) : "r" (0), "i" (ASI_UPA_CONFIG));
kernel/arch/amd64/include/asm.h
- 55	__asm__ volatile ("andq %%rsp, %0\n" : "=r" (v) : "0" (~((uint64_t)STACK_SIZE-1)));
+ 55	asm volatile ("andq %%rsp, %0\n" : "=r" (v) : "0" (~((uint64_t)STACK_SIZE-1)));
kernel/arch/amd64/include/atomic.h
- 45	__asm__ volatile ("lock incq %0\n" : "=m" (val->count));
+ 45	asm volatile ("lock incq %0\n" : "=m" (val->count));
- 47	__asm__ volatile ("incq %0\n" : "=m" (val->count));
+ 47	asm volatile ("incq %0\n" : "=m" (val->count));
- 53	__asm__ volatile ("lock decq %0\n" : "=m" (val->count));
+ 53	asm volatile ("lock decq %0\n" : "=m" (val->count));
- 55	__asm__ volatile ("decq %0\n" : "=m" (val->count));
+ 55	asm volatile ("decq %0\n" : "=m" (val->count));
- 63	__asm__ volatile (
+ 63	asm volatile (
      	"lock xaddq %1, %0\n"
- 75	__asm__ volatile (
+ 75	asm volatile (
      	"lock xaddq %1, %0\n"
- 89	__asm__ volatile (
+ 89	asm volatile (
      	"movq $1, %0\n"
- 105	__asm__ volatile (
+ 105	asm volatile (
      	"0:;"
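The xaddq hunks above are fetch-and-add: the addend enters through a register, and lock xaddq writes the counter's previous value back into that same register. A self-contained user-space sketch of the pattern, assuming the kernel's atomic_t shape; the memory constraint is written here as read-write ("+m") rather than the file's "=m":

    #include <stdint.h>

    typedef struct {
        volatile long count;
    } atomic_t;

    /* Atomically add 1 to val->count and return the pre-increment value. */
    static inline long atomic_postinc(atomic_t *val)
    {
        long r = 1;	/* addend in, previous value out */

        asm volatile (
            "lock xaddq %1, %0\n"
            : "+m" (val->count), "+r" (r)
        );

        return r;
    }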
kernel/arch/amd64/include/memstr.h
- 54	__asm__ __volatile__(
+ 54	asm volatile(
      	"rep movsq\n\t"
- 85	__asm__(
+ 85	asm (
      	"repe cmpsb\n\t"
- 111	__asm__ __volatile__(
+ 111	asm volatile (
      	"rep stosw\n\t"
- 133	__asm__ __volatile__(
+ 133	asm volatile (
      	"rep stosb\n\t"
kernel/arch/amd64/src/cpu/cpu.c
- 79	__asm__ volatile (
+ 79	asm volatile (
      	"movq %%cr0, %%rax;"
- 102	__asm__ volatile (
+ 102	asm volatile (
      	"mov %%cr0,%%rax;"
- 114	__asm__ volatile (
+ 114	asm volatile (
      	"mov %%cr0,%%rax;"
kernel/arch/amd64/src/fpu_context.c
- 43	__asm__ volatile (
+ 43	asm volatile (
      	"fxsave %0"
- 52	__asm__ volatile (
+ 52	asm volatile (
      	"fxrstor %0"
- 61	__asm__ volatile (
+ 61	asm volatile (
      	"fninit;"
kernel/arch/amd64/src/userspace.c
- 57	__asm__ volatile (""
+ 57	asm volatile (""
      	"pushq %0\n"
kernel/arch/ia32/include/asm.h
- 60	static inline void cpu_halt(void) { __asm__("hlt\n"); };
- 61	static inline void cpu_sleep(void) { __asm__("hlt\n"); };
+ 60	static inline void cpu_halt(void)
+ 61	{
+ 62		asm("hlt\n");
+ 63	};
+ 64
+ 65	static inline void cpu_sleep(void)
+ 66	{
+ 67		asm("hlt\n");
+ 68	};
- 66	__asm__ volatile ("movl %%" #reg ", %0" : "=r" (res) ); \
+ 73	asm volatile ("movl %%" #reg ", %0" : "=r" (res) ); \
- 72	__asm__ volatile ("movl %0, %%" #reg : : "r" (regn)); \
+ 79	asm volatile ("movl %0, %%" #reg : : "r" (regn)); \
- 101	static inline void outb(uint16_t port, uint8_t val) { __asm__ volatile ("outb %b0, %w1\n" : : "a" (val), "d" (port) ); }
+ 108	static inline void outb(uint16_t port, uint8_t val)
+ 109	{
+ 110		asm volatile ("outb %b0, %w1\n" : : "a" (val), "d" (port) );
+ 111	}
- 110	static inline void outw(uint16_t port, uint16_t val) { __asm__ volatile ("outw %w0, %w1\n" : : "a" (val), "d" (port) ); }
+ 120	static inline void outw(uint16_t port, uint16_t val)
+ 121	{
+ 122		asm volatile ("outw %w0, %w1\n" : : "a" (val), "d" (port) );
+ 123	}
- 119	static inline void outl(uint16_t port, uint32_t val) { __asm__ volatile ("outl %l0, %w1\n" : : "a" (val), "d" (port) ); }
+ 132	static inline void outl(uint16_t port, uint32_t val)
+ 133	{
+ 134		asm volatile ("outl %l0, %w1\n" : : "a" (val), "d" (port) );
+ 135	}
- 128	static inline uint8_t inb(uint16_t port) { uint8_t val; __asm__ volatile ("inb %w1, %b0 \n" : "=a" (val) : "d" (port) ); return val; }
+ 144	static inline uint8_t inb(uint16_t port)
+ 145	{
+ 146		uint8_t val;
+ 147
+ 148		asm volatile ("inb %w1, %b0 \n" : "=a" (val) : "d" (port) );
+ 149		return val;
+ 150	}
- 137	static inline uint16_t inw(uint16_t port) { uint16_t val; __asm__ volatile ("inw %w1, %w0 \n" : "=a" (val) : "d" (port) ); return val; }
+ 159	static inline uint16_t inw(uint16_t port)
+ 160	{
+ 161		uint16_t val;
+ 162
+ 163		asm volatile ("inw %w1, %w0 \n" : "=a" (val) : "d" (port) );
+ 164		return val;
+ 165	}
- 146	static inline uint32_t inl(uint16_t port) { uint32_t val; __asm__ volatile ("inl %w1, %l0 \n" : "=a" (val) : "d" (port) ); return val; }
+ 174	static inline uint32_t inl(uint16_t port)
+ 175	{
+ 176		uint32_t val;
+ 177
+ 178		asm volatile ("inl %w1, %l0 \n" : "=a" (val) : "d" (port) );
+ 179		return val;
+ 180	}
- 158	__asm__ volatile (
+ 192	asm volatile (
      	"pushf\n\t"
- 177	__asm__ volatile (
+ 211	asm volatile (
      	"pushf\n\t"
- 194	__asm__ volatile (
+ 228	asm volatile (
      	"pushl %0\n\t"
- 208	__asm__ volatile (
+ 242	asm volatile (
      	"pushf\n\t"
- 226	__asm__ volatile ("andl %%esp, %0\n" : "=r" (v) : "0" (~(STACK_SIZE-1)));
+ 260	asm volatile ("andl %%esp, %0\n" : "=r" (v) : "0" (~(STACK_SIZE-1)));
- 236	__asm__ volatile (
+ 270	asm volatile (
      	"mov %%eip, %0"
- 249	__asm__ volatile ("invlpg %0\n" :: "m" (*(unative_t *)addr));
+ 283	asm volatile ("invlpg %0\n" :: "m" (*(unative_t *)addr));
- 258	__asm__ volatile ("lgdtl %0\n" : : "m" (*gdtr_reg));
+ 292	asm volatile ("lgdtl %0\n" : : "m" (*gdtr_reg));
- 267	__asm__ volatile ("sgdtl %0\n" : : "m" (*gdtr_reg));
+ 301	asm volatile ("sgdtl %0\n" : : "m" (*gdtr_reg));
- 276	__asm__ volatile ("lidtl %0\n" : : "m" (*idtr_reg));
+ 310	asm volatile ("lidtl %0\n" : : "m" (*idtr_reg));
- 285	__asm__ volatile ("ltr %0" : : "r" (sel));
+ 319	asm volatile ("ltr %0" : : "r" (sel));
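The outb/inb family above wraps x86 port I/O: the "a" constraint pins the value to AL/AX/EAX and "d" pins the port to DX, which is what the in/out instruction encodings require. A hypothetical caller, using the conventional PC CMOS/RTC index and data ports 0x70/0x71 as an example (those constants are not defined in this file):

    /* Read one CMOS register through the wrappers above (sketch). */
    static inline uint8_t cmos_read(uint8_t reg)
    {
        outb(0x70, reg);	/* select a CMOS register via the index port */
        return inb(0x71);	/* fetch its contents from the data port */
    }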
kernel/arch/ia32/include/atomic.h
- 45	__asm__ volatile ("lock incl %0\n" : "=m" (val->count));
+ 45	asm volatile ("lock incl %0\n" : "=m" (val->count));
- 47	__asm__ volatile ("incl %0\n" : "=m" (val->count));
+ 47	asm volatile ("incl %0\n" : "=m" (val->count));
- 53	__asm__ volatile ("lock decl %0\n" : "=m" (val->count));
+ 53	asm volatile ("lock decl %0\n" : "=m" (val->count));
- 55	__asm__ volatile ("decl %0\n" : "=m" (val->count));
+ 55	asm volatile ("decl %0\n" : "=m" (val->count));
- 63	__asm__ volatile (
+ 63	asm volatile (
      	"lock xaddl %1, %0\n"
- 75	__asm__ volatile (
+ 75	asm volatile (
      	"lock xaddl %1, %0\n"
- 89	__asm__ volatile (
+ 89	asm volatile (
      	"movl $1, %0\n"
- 104	__asm__ volatile (
+ 104	asm volatile (
      	"0:;"
kernel/arch/ia32/include/barrier.h
- 49	#define CS_ENTER_BARRIER() __asm__ volatile ("" ::: "memory")
- 50	#define CS_LEAVE_BARRIER() __asm__ volatile ("" ::: "memory")
+ 49	#define CS_ENTER_BARRIER() asm volatile ("" ::: "memory")
+ 50	#define CS_LEAVE_BARRIER() asm volatile ("" ::: "memory")
- 54	__asm__ volatile (
+ 54	asm volatile (
      	"xorl %%eax, %%eax\n"
- 62	# define memory_barrier() __asm__ volatile ("mfence\n" ::: "memory")
- 63	# define read_barrier() __asm__ volatile ("lfence\n" ::: "memory")
+ 62	# define memory_barrier() asm volatile ("mfence\n" ::: "memory")
+ 63	# define read_barrier() asm volatile ("lfence\n" ::: "memory")
- 65	# define write_barrier() __asm__ volatile ("sfence\n" ::: "memory")
+ 65	# define write_barrier() asm volatile ("sfence\n" ::: "memory")
- 67	# define write_barrier() __asm__ volatile( "" ::: "memory");
+ 67	# define write_barrier() asm volatile( "" ::: "memory");
- 73	# define write_barrier() __asm__ volatile ("sfence\n" ::: "memory")
+ 73	# define write_barrier() asm volatile ("sfence\n" ::: "memory")
- 75	# define write_barrier() __asm__ volatile( "" ::: "memory");
+ 75	# define write_barrier() asm volatile( "" ::: "memory");
- 83	# define write_barrier() __asm__ volatile( "" ::: "memory");
+ 83	# define write_barrier() asm volatile( "" ::: "memory");
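Depending on the configured CPU generation, write_barrier() above expands to sfence, a serializing cpuid, or a pure compiler barrier, but the contract is the same: stores issued before the macro become visible before stores issued after it. A minimal producer-side sketch (the flag protocol is illustrative, not from this file):

    static volatile int payload;
    static volatile int ready;

    static void publish(int value)
    {
        payload = value;
        write_barrier();	/* order the payload store before the flag store */
        ready = 1;		/* a consumer pairs this with read_barrier() */
    }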
kernel/arch/ia32/include/cpuid.h
- 79	__asm__ volatile (
+ 79	asm volatile (
      	"pushf\n" /* read flags */
- 102	__asm__ volatile (
+ 102	asm volatile (
      	"movl %4, %%eax\n"
kernel/arch/ia32/include/memstr.h
- 54	__asm__ __volatile__(
+ 54	asm volatile(
      	/* copy all full dwords */
- 91	__asm__(
+ 91	asm (
      	"repe cmpsb\n\t"
- 117	__asm__ __volatile__(
+ 117	asm volatile (
      	"rep stosw\n\t"
- 139	__asm__ __volatile__(
+ 139	asm volatile (
      	"rep stosb\n\t"
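The rep stos hunks above fill memory with the destination in EDI, the count in ECX and the fill value in AL/AX; the dummy outputs d0 and d1 tell GCC those registers are trashed. A self-contained sketch of the byte variant (the name and exact signature are illustrative):

    #include <stddef.h>
    #include <stdint.h>

    /* Fill cnt bytes at dst with x using rep stosb. */
    static inline void memsetb_sketch(void *dst, size_t cnt, uint8_t x)
    {
        uint32_t d0, d1;	/* receive the clobbered EDI and ECX */

        asm volatile (
            "rep stosb\n\t"
            : "=&D" (d0), "=&c" (d1)
            : "0" (dst), "1" (cnt), "a" (x)
            : "memory"
        );
    }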
kernel/arch/ia32/src/cpu/cpu.c
- 74	__asm__ volatile (
+ 74	asm volatile (
      	"mov %%cr0,%%eax;"
- 86	__asm__ volatile (
+ 86	asm volatile (
      	"mov %%cr0,%%eax;"
kernel/arch/ia32/src/fpu_context.c
- 46	__asm__ volatile (
+ 46	asm volatile (
      	"fnsave %0"
- 54	__asm__ volatile (
+ 54	asm volatile (
      	"frstor %0"
- 62	__asm__ volatile (
+ 62	asm volatile (
      	"fxsave %0"
- 70	__asm__ volatile (
+ 70	asm volatile (
      	"fxrstor %0"
- 106	__asm__ volatile (
+ 106	asm volatile (
      	"fninit;\n"
kernel/arch/ia32/src/pm.c
- 150	__asm__ volatile (
+ 150	asm volatile (
      	"pushfl\n"
- 163	__asm__ volatile (
+ 163	asm volatile (
      	"mov %%cr0, %%eax\n"
kernel/arch/ia32/src/userspace.c
- 54	__asm__ volatile (
+ 54	asm volatile (
      	/*
      	 * Clear nested task flag.
kernel/arch/ia32xen/include/asm.h
- 62	__asm__ volatile ("movl %%" #reg ", %0" : "=r" (res) ); \
+ 62	asm volatile ("movl %%" #reg ", %0" : "=r" (res) ); \
- 68	__asm__ volatile ("movl %0, %%" #reg : : "r" (regn)); \
+ 68	asm volatile ("movl %0, %%" #reg : : "r" (regn)); \
- 95	static inline void outb(uint16_t port, uint8_t val) { __asm__ volatile ("outb %b0, %w1\n" : : "a" (val), "d" (port) ); }
+ 95	static inline void outb(uint16_t port, uint8_t val) { asm volatile ("outb %b0, %w1\n" : : "a" (val), "d" (port) ); }
- 104	static inline void outw(uint16_t port, uint16_t val) { __asm__ volatile ("outw %w0, %w1\n" : : "a" (val), "d" (port) ); }
+ 104	static inline void outw(uint16_t port, uint16_t val) { asm volatile ("outw %w0, %w1\n" : : "a" (val), "d" (port) ); }
- 113	static inline void outl(uint16_t port, uint32_t val) { __asm__ volatile ("outl %l0, %w1\n" : : "a" (val), "d" (port) ); }
+ 113	static inline void outl(uint16_t port, uint32_t val) { asm volatile ("outl %l0, %w1\n" : : "a" (val), "d" (port) ); }
- 122	static inline uint8_t inb(uint16_t port) { uint8_t val; __asm__ volatile ("inb %w1, %b0 \n" : "=a" (val) : "d" (port) ); return val; }
+ 122	static inline uint8_t inb(uint16_t port) { uint8_t val; asm volatile ("inb %w1, %b0 \n" : "=a" (val) : "d" (port) ); return val; }
- 131	static inline uint16_t inw(uint16_t port) { uint16_t val; __asm__ volatile ("inw %w1, %w0 \n" : "=a" (val) : "d" (port) ); return val; }
+ 131	static inline uint16_t inw(uint16_t port) { uint16_t val; asm volatile ("inw %w1, %w0 \n" : "=a" (val) : "d" (port) ); return val; }
- 140	static inline uint32_t inl(uint16_t port) { uint32_t val; __asm__ volatile ("inl %w1, %l0 \n" : "=a" (val) : "d" (port) ); return val; }
+ 140	static inline uint32_t inl(uint16_t port) { uint32_t val; asm volatile ("inl %w1, %l0 \n" : "=a" (val) : "d" (port) ); return val; }
- 216	__asm__ volatile ("andl %%esp, %0\n" : "=r" (v) : "0" (~(STACK_SIZE-1)));
+ 216	asm volatile ("andl %%esp, %0\n" : "=r" (v) : "0" (~(STACK_SIZE-1)));
- 226	__asm__ volatile (
+ 226	asm volatile (
      	"mov %%eip, %0"
- 239	__asm__ volatile ("invlpg %0\n" :: "m" (*(unative_t *)addr));
+ 239	asm volatile ("invlpg %0\n" :: "m" (*(unative_t *)addr));
- 248	__asm__ volatile ("lgdtl %0\n" : : "m" (*gdtr_reg));
+ 248	asm volatile ("lgdtl %0\n" : : "m" (*gdtr_reg));
- 257	__asm__ volatile ("sgdtl %0\n" : : "m" (*gdtr_reg));
+ 257	asm volatile ("sgdtl %0\n" : : "m" (*gdtr_reg));
- 266	__asm__ volatile ("ltr %0" : : "r" (sel));
+ 266	asm volatile ("ltr %0" : : "r" (sel));
kernel/arch/ia32xen/src/pm.c
- 135	// __asm__ volatile (
+ 135	// asm volatile (
      	// "pushfl\n"
- 148	// __asm__ volatile (
+ 148	// asm volatile (
      	// "mov %%cr0, %%eax\n"
kernel/arch/ia64/include/asm.h
- 52	__asm__ volatile ("and %0 = %1, r12" : "=r" (v) : "r" (~(STACK_SIZE-1)));
+ 52	asm volatile ("and %0 = %1, r12" : "=r" (v) : "r" (~(STACK_SIZE-1)));
- 65	__asm__ volatile ("mov %0 = psr\n" : "=r" (v));
+ 65	asm volatile ("mov %0 = psr\n" : "=r" (v));
- 78	__asm__ volatile ("mov %0 = cr.iva\n" : "=r" (v));
+ 78	asm volatile ("mov %0 = cr.iva\n" : "=r" (v));
- 89	__asm__ volatile ("mov cr.iva = %0\n" : : "r" (v));
+ 89	asm volatile ("mov cr.iva = %0\n" : : "r" (v));
- 101	__asm__ volatile ("mov %0 = cr.ivr\n" : "=r" (v));
+ 101	asm volatile ("mov %0 = cr.ivr\n" : "=r" (v));
- 112	__asm__ volatile ("mov ar.itc = %0\n" : : "r" (v));
+ 112	asm volatile ("mov ar.itc = %0\n" : : "r" (v));
- 123	__asm__ volatile ("mov %0 = ar.itc\n" : "=r" (v));
+ 123	asm volatile ("mov %0 = ar.itc\n" : "=r" (v));
- 134	__asm__ volatile ("mov cr.itm = %0\n" : : "r" (v));
+ 134	asm volatile ("mov cr.itm = %0\n" : : "r" (v));
- 145	__asm__ volatile ("mov %0 = cr.itm\n" : "=r" (v));
+ 145	asm volatile ("mov %0 = cr.itm\n" : "=r" (v));
- 158	__asm__ volatile ("mov %0 = cr.itv\n" : "=r" (v));
+ 158	asm volatile ("mov %0 = cr.itv\n" : "=r" (v));
- 169	__asm__ volatile ("mov cr.itv = %0\n" : : "r" (v));
+ 169	asm volatile ("mov cr.itv = %0\n" : : "r" (v));
- 178	__asm__ volatile ("mov cr.eoi = %0\n" : : "r" (v));
+ 178	asm volatile ("mov cr.eoi = %0\n" : : "r" (v));
- 189	__asm__ volatile ("mov %0 = cr.tpr\n" : "=r" (v));
+ 189	asm volatile ("mov %0 = cr.tpr\n" : "=r" (v));
- 200	__asm__ volatile ("mov cr.tpr = %0\n" : : "r" (v));
+ 200	asm volatile ("mov cr.tpr = %0\n" : : "r" (v));
- 214	__asm__ volatile (
+ 214	asm volatile (
      	"mov %0 = psr\n"
      	"rsm %1\n"
- 235	__asm__ volatile (
+ 235	asm volatile (
      	"mov %0 = psr\n"
      	"ssm %1\n"
- 273	__asm__ volatile ("rsm %0\n" : : "i" (PSR_PK_MASK));
+ 273	asm volatile ("rsm %0\n" : : "i" (PSR_PK_MASK));
kernel/arch/ia64/include/atomic.h
- 49	__asm__ volatile ("fetchadd8.rel %0 = %1, %2\n" : "=r" (v), "+m" (val->count) : "i" (imm));
+ 49	asm volatile ("fetchadd8.rel %0 = %1, %2\n" : "=r" (v), "+m" (val->count) : "i" (imm));
kernel/arch/ia64/include/barrier.h
- 44	#define memory_barrier() __asm__ volatile ("mf\n" ::: "memory")
+ 44	#define memory_barrier() asm volatile ("mf\n" ::: "memory")
- 48	#define srlz_i() __asm__ volatile (";; srlz.i ;;\n" ::: "memory")
- 49	#define srlz_d() __asm__ volatile (";; srlz.d\n" ::: "memory")
+ 48	#define srlz_i() asm volatile (";; srlz.i ;;\n" ::: "memory")
+ 49	#define srlz_d() asm volatile (";; srlz.d\n" ::: "memory")
kernel/arch/ia64/include/cpu.h
- 61	__asm__ volatile ("mov %0 = cpuid[%1]\n" : "=r" (v) : "r" (n));
+ 61	asm volatile ("mov %0 = cpuid[%1]\n" : "=r" (v) : "r" (n));
kernel/arch/ia64/include/mm/page.h
- 197	__asm__ volatile ("thash %0 = %1\n" : "=r" (ret) : "r" (va));
+ 197	asm volatile ("thash %0 = %1\n" : "=r" (ret) : "r" (va));
- 215	__asm__ volatile ("ttag %0 = %1\n" : "=r" (ret) : "r" (va));
+ 215	asm volatile ("ttag %0 = %1\n" : "=r" (ret) : "r" (va));
- 230	__asm__ volatile ("mov %0 = rr[%1]\n" : "=r" (ret) : "r" (i << VRN_SHIFT));
+ 230	asm volatile ("mov %0 = rr[%1]\n" : "=r" (ret) : "r" (i << VRN_SHIFT));
- 242	__asm__ volatile (
+ 242	asm volatile (
      	"mov rr[%0] = %1\n"
- 257	__asm__ volatile ("mov %0 = cr.pta\n" : "=r" (ret));
+ 257	asm volatile ("mov %0 = cr.pta\n" : "=r" (ret));
- 268	__asm__ volatile ("mov cr.pta = %0\n" : : "r" (v));
+ 268	asm volatile ("mov cr.pta = %0\n" : : "r" (v));
kernel/arch/ia64/src/ia64.c
- 136	__asm__ volatile ("mov %0 = ar.rsc\n" : "=r" (rsc.value));
+ 136	asm volatile ("mov %0 = ar.rsc\n" : "=r" (rsc.value));
kernel/arch/ia64/src/mm/tlb.c
- 75	__asm__ volatile (
+ 75	asm volatile (
      	"ptc.e %0 ;;"
- 182	__asm__ volatile (
+ 182	asm volatile (
      	"ptc.l %0,%1;;"
- 247	__asm__ volatile (
+ 247	asm volatile (
      	"mov r8=psr;;\n"
      	"rsm %0;;\n" /* PSR_IC_MASK */
- 323	__asm__ volatile (
+ 323	asm volatile (
      	"mov r8=psr;;\n"
      	"rsm %0;;\n" /* PSR_IC_MASK */
- 385	__asm__ volatile ("ptr.d %0, %1\n" : : "r" (page), "r" (width<<2));
+ 385	asm volatile ("ptr.d %0, %1\n" : : "r" (page), "r" (width<<2));
kernel/arch/ia64/src/proc/scheduler.c
- 76	__asm__ volatile (
+ 76	asm volatile (
      	"bsw.0\n"
      	"mov r22 = %0\n"
kernel/arch/ia64/src/ski/ski.c
- 72	__asm__ volatile (
+ 72	asm volatile (
      	"mov r15 = %0\n"
- 98	__asm__ volatile (
+ 98	asm volatile (
      	"mov r15 = %1\n"
- 207	__asm__ volatile (
+ 207	asm volatile (
      	"mov r15 = %0\n"
kernel/arch/mips32/include/asm.h
- 46	/* __asm__ volatile ("wait"); */
+ 46	/* asm volatile ("wait"); */
- 59	__asm__ volatile ("and %0, $29, %1\n" : "=r" (v) : "r" (~(STACK_SIZE-1)));
+ 59	asm volatile ("and %0, $29, %1\n" : "=r" (v) : "r" (~(STACK_SIZE-1)));
kernel/arch/mips32/include/atomic.h
- 58	__asm__ volatile (
+ 58	asm volatile (
      	"1:\n"
      	"	ll %0, %1\n"
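The ll/sc pair above is MIPS's load-linked/store-conditional idiom: sc succeeds only if no other CPU wrote the line since the ll, and on failure it stores 0 into its source register so the loop retries. The tail of the loop is cut off in this view; a complete sketch of the shape, with the retry branch filled in as an assumption:

    /* Fetch-and-add built from ll/sc (sketch; atomic_t as in the kernel). */
    static inline long atomic_add_sketch(atomic_t *val, long i)
    {
        long tmp, v;

        asm volatile (
            "1:	ll %0, %1\n"		/* tmp = count, link the address */
            "	addu %0, %0, %3\n"
            "	move %2, %0\n"		/* v = the new value */
            "	sc %0, %1\n"		/* conditional store; %0 <- 0 on failure */
            "	beqz %0, 1b\n"		/* retry if another CPU intervened */
            "	nop\n"			/* branch delay slot */
            : "=&r" (tmp), "+m" (val->count), "=&r" (v)
            : "r" (i)
            : "memory"
        );

        return v;
    }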
kernel/arch/mips32/include/barrier.h
- 41	#define CS_ENTER_BARRIER() __asm__ volatile ("" ::: "memory")
- 42	#define CS_LEAVE_BARRIER() __asm__ volatile ("" ::: "memory")
+ 41	#define CS_ENTER_BARRIER() asm volatile ("" ::: "memory")
+ 42	#define CS_LEAVE_BARRIER() asm volatile ("" ::: "memory")
- 44	#define memory_barrier() __asm__ volatile ("" ::: "memory")
- 45	#define read_barrier() __asm__ volatile ("" ::: "memory")
- 46	#define write_barrier() __asm__ volatile ("" ::: "memory")
+ 44	#define memory_barrier() asm volatile ("" ::: "memory")
+ 45	#define read_barrier() asm volatile ("" ::: "memory")
+ 46	#define write_barrier() asm volatile ("" ::: "memory")
kernel/arch/mips32/include/mm/tlb.h
- 145	__asm__ volatile ("tlbp\n\t");
+ 145	asm volatile ("tlbp\n\t");
- 155	__asm__ volatile ("tlbr\n\t");
+ 155	asm volatile ("tlbr\n\t");
- 164	__asm__ volatile ("tlbwi\n\t");
+ 164	asm volatile ("tlbwi\n\t");
- 173	__asm__ volatile ("tlbwr\n\t");
+ 173	asm volatile ("tlbwr\n\t");
kernel/arch/sparc64/include/asm.h
- 54	__asm__ volatile ("rdpr %%pstate, %0\n" : "=r" (v));
+ 54	asm volatile ("rdpr %%pstate, %0\n" : "=r" (v));
- 65	__asm__ volatile ("wrpr %0, %1, %%pstate\n" : : "r" (v), "i" (0));
+ 65	asm volatile ("wrpr %0, %1, %%pstate\n" : : "r" (v), "i" (0));
- 76	__asm__ volatile ("rd %%tick_cmpr, %0\n" : "=r" (v));
+ 76	asm volatile ("rd %%tick_cmpr, %0\n" : "=r" (v));
- 87	__asm__ volatile ("wr %0, %1, %%tick_cmpr\n" : : "r" (v), "i" (0));
+ 87	asm volatile ("wr %0, %1, %%tick_cmpr\n" : : "r" (v), "i" (0));
- 98	__asm__ volatile ("rdpr %%tick, %0\n" : "=r" (v));
+ 98	asm volatile ("rdpr %%tick, %0\n" : "=r" (v));
- 109	__asm__ volatile ("wrpr %0, %1, %%tick\n" : : "r" (v), "i" (0));
+ 109	asm volatile ("wrpr %0, %1, %%tick\n" : : "r" (v), "i" (0));
- 120	__asm__ volatile ("rd %%fprs, %0\n" : "=r" (v));
+ 120	asm volatile ("rd %%fprs, %0\n" : "=r" (v));
- 131	__asm__ volatile ("wr %0, %1, %%fprs\n" : : "r" (v), "i" (0));
+ 131	asm volatile ("wr %0, %1, %%fprs\n" : : "r" (v), "i" (0));
- 142	__asm__ volatile ("rd %%softint, %0\n" : "=r" (v));
+ 142	asm volatile ("rd %%softint, %0\n" : "=r" (v));
- 153	__asm__ volatile ("wr %0, %1, %%softint\n" : : "r" (v), "i" (0));
+ 153	asm volatile ("wr %0, %1, %%softint\n" : : "r" (v), "i" (0));
- 164	__asm__ volatile ("wr %0, %1, %%clear_softint\n" : : "r" (v), "i" (0));
+ 164	asm volatile ("wr %0, %1, %%clear_softint\n" : : "r" (v), "i" (0));
- 175	__asm__ volatile ("wr %0, %1, %%set_softint\n" : : "r" (v), "i" (0));
+ 175	asm volatile ("wr %0, %1, %%set_softint\n" : : "r" (v), "i" (0));
- 250	__asm__ volatile ("add %%sp, %1, %0\n" : "=r" (unbiased_sp) : "i" (STACK_BIAS));
+ 250	asm volatile ("add %%sp, %1, %0\n" : "=r" (unbiased_sp) : "i" (STACK_BIAS));
- 263	__asm__ volatile ("rdpr %%ver, %0\n" : "=r" (v));
+ 263	asm volatile ("rdpr %%ver, %0\n" : "=r" (v));
- 276	__asm__ volatile ("rdpr %%tpc, %0\n" : "=r" (v));
+ 276	asm volatile ("rdpr %%tpc, %0\n" : "=r" (v));
- 289	__asm__ volatile ("rdpr %%tl, %0\n" : "=r" (v));
+ 289	asm volatile ("rdpr %%tl, %0\n" : "=r" (v));
- 302	__asm__ volatile ("rdpr %%tba, %0\n" : "=r" (v));
+ 302	asm volatile ("rdpr %%tba, %0\n" : "=r" (v));
- 313	__asm__ volatile ("wrpr %0, %1, %%tba\n" : : "r" (v), "i" (0));
+ 313	asm volatile ("wrpr %0, %1, %%tba\n" : : "r" (v), "i" (0));
- 327	__asm__ volatile ("ldxa [%1] %2, %0\n" : "=r" (v) : "r" (va), "i" ((unsigned) asi));
+ 327	asm volatile ("ldxa [%1] %2, %0\n" : "=r" (v) : "r" (va), "i" ((unsigned) asi));
- 340	__asm__ volatile ("stxa %0, [%1] %2\n" : : "r" (v), "r" (va), "i" ((unsigned) asi) : "memory");
+ 340	asm volatile ("stxa %0, [%1] %2\n" : : "r" (v), "r" (va), "i" ((unsigned) asi) : "memory");
- 346	__asm__ volatile ("flushw\n");
+ 346	asm volatile ("flushw\n");
- 352	__asm__ volatile ("wrpr %g0, 1, %tl\n");
+ 352	asm volatile ("wrpr %g0, 1, %tl\n");
- 358	__asm__ volatile ("wrpr %g0, %g0, %tl\n");
+ 358	asm volatile ("wrpr %g0, %g0, %tl\n");
kernel/arch/sparc64/include/atomic.h
- 60	__asm__ volatile ("casx %0, %2, %1\n" : "+m" (*((uint64_t *)x)), "+r" (b) : "r" (a));
+ 60	asm volatile ("casx %0, %2, %1\n" : "+m" (*((uint64_t *)x)), "+r" (b) : "r" (a));
- 101	__asm__ volatile ("casx %0, %2, %1\n" : "+m" (*((uint64_t *) x)), "+r" (v) : "r" (0));
+ 101	asm volatile ("casx %0, %2, %1\n" : "+m" (*((uint64_t *) x)), "+r" (v) : "r" (0));
- 113	__asm__ volatile (
+ 113	asm volatile (
      	"0:\n"
      	"casx %0, %3, %1\n"
kernel/arch/sparc64/include/barrier.h
- 42	__asm__ volatile ( \
+ 42	asm volatile ( \
      	"membar #LoadLoad | #LoadStore\n" \
- 47	__asm__ volatile ( \
+ 47	asm volatile ( \
      	"membar #StoreStore\n" \
- 54	__asm__ volatile ("membar #LoadLoad | #StoreStore\n" ::: "memory")
+ 54	asm volatile ("membar #LoadLoad | #StoreStore\n" ::: "memory")
- 56	__asm__ volatile ("membar #LoadLoad\n" ::: "memory")
+ 56	asm volatile ("membar #LoadLoad\n" ::: "memory")
- 58	__asm__ volatile ("membar #StoreStore\n" ::: "memory")
+ 58	asm volatile ("membar #StoreStore\n" ::: "memory")
- 73	__asm__ volatile ("flush %o7\n");
+ 73	asm volatile ("flush %o7\n");
- 79	__asm__ volatile ("membar #Sync\n");
+ 79	asm volatile ("membar #Sync\n");
kernel/arch/sparc64/src/fpu_context.c
- 42	__asm__ volatile (
+ 42	asm volatile (
      	"std %%f0, %0\n"
- 70	__asm__ volatile (
+ 70	asm volatile (
      	"std %%f32, %0\n"
- 93	__asm__ volatile ("stx %%fsr, %0\n" : "=m" (fctx->fsr));
+ 93	asm volatile ("stx %%fsr, %0\n" : "=m" (fctx->fsr));
- 98	__asm__ volatile (
+ 98	asm volatile (
      	"ldd %0, %%f0\n"
- 127	__asm__ volatile (
+ 127	asm volatile (
      	"ldd %0, %%f32\n"
- 151	__asm__ volatile ("ldx %0, %%fsr\n" : : "m" (fctx->fsr));
+ 151	asm volatile ("ldx %0, %%fsr\n" : : "m" (fctx->fsr));
uspace/libc/arch/amd64/include/atomic.h
- 41	__asm__ volatile ("lock incq %0\n" : "=m" (val->count));
+ 41	asm volatile ("lock incq %0\n" : "=m" (val->count));
- 45	__asm__ volatile ("lock decq %0\n" : "=m" (val->count));
+ 45	asm volatile ("lock decq %0\n" : "=m" (val->count));
- 52	__asm__ volatile (
+ 52	asm volatile (
      	"movq $1, %0\n"
      	"lock xaddq %0, %1\n"
- 65	__asm__ volatile (
+ 65	asm volatile (
      	"movq $-1, %0\n"
      	"lock xaddq %0, %1\n"
uspace/libc/arch/amd64/include/thread.h
- 54	__asm__("movq %%fs:0, %0" : "=r"(retval));
+ 54	asm ("movq %%fs:0, %0" : "=r"(retval));
uspace/libc/arch/ia32/include/atomic.h
- 39	__asm__ volatile ("lock incl %0\n" : "=m" (val->count));
+ 39	asm volatile ("lock incl %0\n" : "=m" (val->count));
- 43	__asm__ volatile ("lock decl %0\n" : "=m" (val->count));
+ 43	asm volatile ("lock decl %0\n" : "=m" (val->count));
- 50	__asm__ volatile (
+ 50	asm volatile (
      	"movl $1, %0\n"
      	"lock xaddl %0, %1\n"
- 63	__asm__ volatile (
+ 63	asm volatile (
      	"movl $-1, %0\n"
      	"lock xaddl %0, %1\n"
uspace/libc/arch/ia32/include/thread.h
- 54	__asm__("movl %%gs:0, %0" : "=r"(retval));
+ 54	asm ("movl %%gs:0, %0" : "=r"(retval));
uspace/libc/arch/ia64/include/atomic.h
- 49	__asm__ volatile ("fetchadd8.rel %0 = %1, %2\n" : "=r" (v), "+m" (val->count) : "i" (imm));
+ 49	asm volatile ("fetchadd8.rel %0 = %1, %2\n" : "=r" (v), "+m" (val->count) : "i" (imm));
uspace/libc/arch/ia64/include/thread.h
- 48	__asm__ volatile ("mov r13 = %0\n" : : "r" (tcb) : "r13");
+ 48	asm volatile ("mov r13 = %0\n" : : "r" (tcb) : "r13");
- 55	__asm__ volatile ("mov %0 = r13\n" : "=r" (retval));
+ 55	asm volatile ("mov %0 = r13\n" : "=r" (retval));
uspace/libc/arch/mips32/include/atomic.h
- 59	__asm__ volatile (
+ 59	asm volatile (
      	"1:\n"
      	"	ll %0, %1\n"
uspace/libc/arch/mips32/include/thread.h
- 64	__asm__ volatile ("add $27, %0, $0" : : "r"(tp)); /* Move tls to K1 */
+ 64	asm volatile ("add $27, %0, $0" : : "r"(tp)); /* Move tls to K1 */
- 71	__asm__ volatile("add %0, $27, $0" : "=r"(retval));
+ 71	asm volatile("add %0, $27, $0" : "=r"(retval));
uspace/libc/arch/ppc32/include/atomic.h
- 42	asm __volatile__(
+ 42	asm volatile (
      	"1:\n"
      	"lwarx %0, 0, %2\n"
- 57	asm __volatile__(
+ 57	asm volatile (
      	"1:\n"
      	"lwarx %0, 0, %2\n"
uspace/libc/arch/ppc64/include/atomic.h
- 42	asm __volatile__(
+ 42	asm volatile (
      	"1:\n"
      	"lwarx %0, 0, %2\n"
- 57	asm __volatile__(
+ 57	asm volatile (
      	"1:\n"
      	"lwarx %0, 0, %2\n"
uspace/libc/arch/sparc64/include/atomic.h
- 56	__asm__ volatile ("casx %0, %2, %1\n" : "+m" (*val), "+r" (b) : "r" (a));
+ 56	asm volatile ("casx %0, %2, %1\n" : "+m" (*val), "+r" (b) : "r" (a));
uspace/libc/arch/sparc64/include/syscall.h
- 49	__asm__ volatile (
+ 49	asm volatile (
      	"ta %5\n"
      	: "=r" (a1)
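The stub above uses GCC's explicit-register variables to pin each syscall argument to a SPARC out register before ta (trap always) enters the kernel; the result comes back in o0. A trimmed two-argument sketch of the mechanism; the trap number here is illustrative, not the value libc actually uses:

    #include <stdint.h>

    #define SYSCALL_TRAP 0x08	/* hypothetical trap number */

    static inline uint64_t syscall2_sketch(uint64_t p1, uint64_t p2)
    {
        register uint64_t a1 asm("o0") = p1;
        register uint64_t a2 asm("o1") = p2;

        asm volatile (
            "ta %2\n"	/* kernel reads o0/o1, replies in o0 */
            : "=r" (a1)
            : "r" (a2), "i" (SYSCALL_TRAP), "0" (a1)
            : "memory"
        );

        return a1;
    }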
uspace/libc/arch/sparc64/include/thread.h
- 48	__asm__ volatile ("mov %0, %%g7\n" : : "r" (tcb) : "g7");
+ 48	asm volatile ("mov %0, %%g7\n" : : "r" (tcb) : "g7");
- 55	__asm__ volatile ("mov %%g7, %0\n" : "=r" (retval));
+ 55	asm volatile ("mov %%g7, %0\n" : "=r" (retval));
uspace/libc/malloc/malloc.c
- 1572	__asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm" (X));\
+ 1572	asm("bsrl %1,%0\n\t" : "=r" (K) : "rm" (X));\
- 1631	__asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\
+ 1631	asm("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\
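The bsrl/bsfl hunks above are dlmalloc's bin-index helpers: bsr yields the position of the highest set bit, bsf the lowest. Pulled out of the macro soup, the pattern is simply the following (a sketch; current GCC code would use __builtin_clz/__builtin_ctz instead):

    /* Highest set bit index; undefined for x == 0, like the instruction. */
    static inline unsigned int bsr32(unsigned int x)
    {
        unsigned int k;

        asm ("bsrl %1, %0\n\t" : "=r" (k) : "rm" (x));
        return k;
    }

    /* Lowest set bit index; undefined for x == 0. */
    static inline unsigned int bsf32(unsigned int x)
    {
        unsigned int j;

        asm ("bsfl %1, %0\n\t" : "=r" (j) : "rm" (x));
        return j;
    }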