Changes in / [7f1d897:ecbd287d] in mainline
Files:
- 17 deleted
- 33 edited
HelenOS.config
  ! CONFIG_OPTIMIZE_FOR_SIZE (n/y)
  
- % Bare boned build with essential binaries only
- ! CONFIG_BARE_BONED (n/y)
+ % Barebone build with essential binaries only
+ ! CONFIG_BAREBONE (n/y)
boot/Makefile.common
  	$(USPACE_PATH)/app/top/top
  
- ifneq ($(CONFIG_BARE_BONED),y)
+ ifneq ($(CONFIG_BAREBONE),y)
  	NET_CFG = \
  		$(USPACE_PATH)/srv/net/cfg/general \
…
  -include arch/$(BARCH)/Makefile.inc
  
- ifeq ($(CONFIG_BARE_BONED),y)
+ ifeq ($(CONFIG_BAREBONE),y)
  	RD_SRVS = $(RD_SRVS_ESSENTIAL)
  	RD_APPS = $(RD_APPS_ESSENTIAL)
boot/arch/sparc64/src/main.c
  	 * an unknown value of the "compatible" property is encountered.
  	 */
- 		printf("Warning: Unknown architecture, assuming sun4u.\n");
+ 		if (str_cmp(compatible, "sun4u") != 0)
+ 			printf("Warning: Unknown architecture, assuming sun4u.\n");
  		arch = ARCH_SUN4U;
  	} else
…
  	printf(" %p|%p: boot info structure\n", &bootinfo, bootinfo_pa);
  	printf(" %p|%p: kernel entry point\n", KERNEL_ADDRESS, kernel_address_pa);
- 	printf(" %p|%p: loader entry pount\n", LOADER_ADDRESS, loader_address_pa);
+ 	printf(" %p|%p: loader entry point\n", LOADER_ADDRESS, loader_address_pa);
  
  	size_t i;
defaults/sparc64/serengeti/Makefile.config
  CONFIG_STRIP_BINARIES = y
  CONFIG_OPTIMIZE_FOR_SIZE = y
- CONFIG_BARE_BONED = y
+ CONFIG_BAREBONE = y
kernel/arch/abs32le/include/asm.h
  }
  
+ static inline bool interrupts_disabled(void)
+ {
+ 	/* On real hardware the return value is true iff interrupts are
+ 	   disabled. */
+ 	return false;
+ }
+ 
  static inline uintptr_t get_stack_base(void)
  {
kernel/arch/amd64/include/asm.h
  #include <config.h>
  #include <typedefs.h>
+ #include <arch/cpu.h>
  
  extern void asm_delay_loop(uint32_t t);
…
  	return v;
  }
  
+ /** Check interrupts state.
+  *
+  * @return True if interrupts are disabled.
+  *
+  */
+ static inline bool interrupts_disabled(void)
+ {
+ 	ipl_t v;
+ 	
+ 	asm volatile (
+ 		"pushfq\n"
+ 		"popq %[v]\n"
+ 		: [v] "=r" (v)
+ 	);
+ 	
+ 	return ((v & RFLAGS_IF) == 0);
+ }
+ 
  
  /** Write to MSR */
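The amd64 helper above reads RFLAGS off the stack and tests the interrupt-enable
bit (the ia32 hunk below does the same with EFLAGS). A minimal user-space sketch
of the same technique, compilable with GCC or Clang on x86-64; RFLAGS_IF (bit 9)
is architectural, but the program itself is illustrative and not part of the
changeset:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define RFLAGS_IF  (1 << 9)  /* interrupt enable flag in RFLAGS */

    /* Same pushfq/popq trick as the kernel helper above. */
    static inline bool interrupts_disabled(void)
    {
        uint64_t v;

        asm volatile (
            "pushfq\n"
            "popq %[v]\n"
            : [v] "=r" (v)
        );

        return ((v & RFLAGS_IF) == 0);
    }

    int main(void)
    {
        /* In user space IF is practically always set, so this prints "false". */
        printf("interrupts disabled: %s\n",
            interrupts_disabled() ? "true" : "false");
        return 0;
    }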
kernel/arch/arm32/include/interrupt.h
  extern void interrupts_restore(ipl_t ipl);
  extern ipl_t interrupts_read(void);
+ extern bool interrupts_disabled(void);
  
kernel/arch/arm32/src/interrupt.c
  }
  
+ /** Check interrupts state.
+  *
+  * @return True if interrupts are disabled.
+  *
+  */
+ bool interrupts_disabled(void)
+ {
+ 	return current_status_reg_read() & STATUS_REG_IRQ_DISABLED_BIT;
+ }
+ 
  /** Initialize basic tables for exception dispatching
   * and starts the timer.
kernel/arch/ia32/include/asm.h
  #include <arch/pm.h>
+ #include <arch/cpu.h>
  #include <typedefs.h>
  #include <config.h>
…
  }
  
+ /** Check interrupts state.
+  *
+  * @return True if interrupts are disabled.
+  *
+  */
+ static inline bool interrupts_disabled(void)
+ {
+ 	ipl_t v;
+ 	
+ 	asm volatile (
+ 		"pushf\n"
+ 		"popl %[v]\n"
+ 		: [v] "=r" (v)
+ 	);
+ 	
+ 	return ((v & EFLAGS_IF) == 0);
+ }
+ 
  /** Write to MSR */
  static inline void write_msr(uint32_t msr, uint64_t value)
kernel/arch/ia32/src/mm/frame.c
(Mostly whitespace and blank-line cleanup; the logic is unchanged.)

  {
  	unsigned int i;
  
+ 	for (i = 0; i < e820counter; i++) {
  		uint64_t base = e820table[i].base_address;
…
  #ifdef __32_BITS__
- 
  		/* Ignore physical memory above 4 GB */
  		if ((base >> 32) != 0)
…
  		if (((base + size) >> 32) != 0)
  			size = 0xffffffff - base;
- 
  #endif
+ 
  		pfn_t pfn;
  		size_t count;
…
  };
  
- 
  void physmem_print(void)
  {
…
  		printf("%#18llx %#18llx %s\n", e820table[i].base_address,
  		    e820table[i].size, name);
  	}
  }
…
  #ifdef CONFIG_SMP
  	minconf = max(minconf,
  	    ADDR2PFN(AP_BOOT_OFFSET + hardcoded_unmapped_ktext_size +
  	    hardcoded_unmapped_kdata_size));
  #endif
+ 
  	init_e820_memory(minconf);
…
  	/* Reserve AP real mode bootstrap memory */
  	frame_mark_unavailable(AP_BOOT_OFFSET >> FRAME_WIDTH,
  	    (hardcoded_unmapped_ktext_size +
  	    hardcoded_unmapped_kdata_size) >> FRAME_WIDTH);
  #endif
  }
kernel/arch/ia32/src/smp/apic.c
  	/* Program Logical Destination Register. */
- 	ASSERT(CPU->id < 8)
+ 	ASSERT(CPU->id < 8);
  	ldr.value = l_apic[LDR];
  	ldr.id = (uint8_t) (1 << CPU->id);
kernel/arch/ia64/include/asm.h
  }
  
+ /** Check interrupts state.
+  *
+  * @return True if interrupts are disabled.
+  *
+  */
+ static inline bool interrupts_disabled(void)
+ {
+ 	return !(psr_read() & PSR_I_MASK);
+ }
+ 
  /** Disable protection key checking. */
  static inline void pk_disable(void)
kernel/arch/mips32/include/asm.h
  extern void interrupts_restore(ipl_t ipl);
  extern ipl_t interrupts_read(void);
- extern void asm_delay_loop(uint32_t t);
+ extern bool interrupts_disabled(void);
  
  static inline void pio_write_8(ioport8_t *port, uint8_t v)
kernel/arch/mips32/src/interrupt.c
  }
  
+ /** Check interrupts state.
+  *
+  * @return True if interrupts are disabled.
+  *
+  */
+ bool interrupts_disabled(void)
+ {
+ 	return !(cp0_status_read() & cp0_status_ie_enabled_bit);
+ }
+ 
  /* TODO: This is SMP unsafe!!! */
  uint32_t count_hi = 0;
kernel/arch/ppc32/include/asm.h
  #include <typedefs.h>
  #include <config.h>
+ #include <arch/cpu.h>
+ 
+ static inline uint32_t msr_read(void)
+ {
+ 	uint32_t msr;
+ 	
+ 	asm volatile (
+ 		"mfmsr %[msr]\n"
+ 		: [msr] "=r" (msr)
+ 	);
+ 	
+ 	return msr;
+ }
+ 
+ static inline void msr_write(uint32_t msr)
+ {
+ 	asm volatile (
+ 		"mtmsr %[msr]\n"
+ 		:: [msr] "r" (msr)
+ 	);
+ }
  
  /** Enable interrupts.
…
   *
   * @return Old interrupt priority level.
+  *
   */
  static inline ipl_t interrupts_enable(void)
  {
- 	ipl_t v;
- 	ipl_t tmp;
- 	
- 	asm volatile (
- 		"mfmsr %0\n"
- 		"mfmsr %1\n"
- 		"ori %1, %1, 1 << 15\n"
- 		"mtmsr %1\n"
- 		: "=r" (v), "=r" (tmp)
- 	);
- 	return v;
+ 	ipl_t ipl = msr_read();
+ 	msr_write(ipl | MSR_EE);
+ 	return ipl;
  }
…
   *
   * @return Old interrupt priority level.
+  *
   */
  static inline ipl_t interrupts_disable(void)
  {
- 	ipl_t v;
- 	ipl_t tmp;
- 	
- 	asm volatile (
- 		"mfmsr %0\n"
- 		"mfmsr %1\n"
- 		"rlwinm %1, %1, 0, 17, 15\n"
- 		"mtmsr %1\n"
- 		: "=r" (v), "=r" (tmp)
- 	);
- 	return v;
+ 	ipl_t ipl = msr_read();
+ 	msr_write(ipl & (~MSR_EE));
+ 	return ipl;
  }
…
   *
   * @param ipl Saved interrupt priority level.
+  *
   */
  static inline void interrupts_restore(ipl_t ipl)
  {
- 	ipl_t tmp;
- 	
- 	asm volatile (
- 		"mfmsr %1\n"
- 		"rlwimi %0, %1, 0, 17, 15\n"
- 		"cmpw 0, %0, %1\n"
- 		"beq 0f\n"
- 		"mtmsr %0\n"
- 		"0:\n"
- 		: "=r" (ipl), "=r" (tmp)
- 		: "0" (ipl)
- 		: "cr0"
- 	);
+ 	msr_write((msr_read() & (~MSR_EE)) | (ipl & MSR_EE));
  }
…
   *
   * @return Current interrupt priority level.
+  *
   */
  static inline ipl_t interrupts_read(void)
  {
- 	ipl_t v;
- 	
- 	asm volatile (
- 		"mfmsr %0\n"
- 		: "=r" (v)
- 	);
- 	return v;
+ 	return msr_read();
+ }
+ 
+ /** Check whether interrupts are disabled.
+  *
+  * @return True if interrupts are disabled.
+  *
+  */
+ static inline bool interrupts_disabled(void)
+ {
+ 	return ((msr_read() & MSR_EE) == 0);
  }
…
   *
   * The stack is assumed to be STACK_SIZE bytes long.
   * The stack must start on page boundary.
+  *
   */
  static inline uintptr_t get_stack_base(void)
  {
- 	uintptr_t v;
+ 	uintptr_t base;
  
  	asm volatile (
- 		"and %0, %%sp, %1\n"
- 		: "=r" (v)
- 		: "r" (~(STACK_SIZE - 1))
+ 		"and %[base], %%sp, %[mask]\n"
+ 		: [base] "=r" (base)
+ 		: [mask] "r" (~(STACK_SIZE - 1))
  	);
- 	return v;
+ 
+ 	return base;
  }
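The rewrite above replaces four hand-written mfmsr/mtmsr sequences with two tiny
accessors plus plain C bit operations, which makes the EE (external interrupt
enable) handling much easier to audit. The merge done by the new
interrupts_restore() can be modelled in isolation; in this stand-alone sketch the
MSR value is hypothetical and only the arithmetic mirrors the changeset:

    #include <stdint.h>
    #include <stdio.h>

    #define MSR_EE  (1 << 15)  /* external interrupt enable bit */

    /* Model of the new interrupts_restore(): only the EE bit of the
       saved level is merged back; all other MSR bits are preserved. */
    static uint32_t restore_model(uint32_t msr, uint32_t ipl)
    {
        return (msr & ~MSR_EE) | (ipl & MSR_EE);
    }

    int main(void)
    {
        uint32_t msr = 0x0000d032;       /* hypothetical MSR, EE currently set */
        uint32_t saved = msr & ~MSR_EE;  /* level saved while EE was clear */

        /* Prints 0x5032: EE is cleared again, the other bits survive. */
        printf("restored MSR: %#x\n", (unsigned) restore_model(msr, saved));
        return 0;
    }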
kernel/arch/ppc32/include/cpu.h
  #define KERN_ppc32_CPU_H_
  
- #include <arch/asm.h>
+ /* MSR bits */
+ #define MSR_DR  (1 << 4)
+ #define MSR_IR  (1 << 5)
+ #define MSR_PR  (1 << 14)
+ #define MSR_EE  (1 << 15)
+ 
+ /* HID0 bits */
+ #define HID0_STEN  (1 << 24)
+ #define HID0_ICE   (1 << 15)
+ #define HID0_DCE   (1 << 14)
+ #define HID0_ICFI  (1 << 11)
+ #define HID0_DCI   (1 << 10)
+ 
+ #ifndef __ASM__
+ 
+ #include <typedefs.h>
  
  typedef struct {
- 	int version;
- 	int revision;
- } cpu_arch_t;
+ 	uint16_t version;
+ 	uint16_t revision;
+ } __attribute__ ((packed)) cpu_arch_t;
+ 
+ static inline void cpu_version(cpu_arch_t *info)
+ {
+ 	asm volatile (
+ 		"mfpvr %[cpu_info]\n"
+ 		: [cpu_info] "=r" (*info)
+ 	);
+ }
+ 
+ #endif /* __ASM__ */
  
  #endif
kernel/arch/ppc32/src/cpu/cpu.c
  #include <arch/cpu.h>
- #include <arch/cpuid.h>
  #include <cpu.h>
  #include <arch.h>
…
  void cpu_identify(void)
  {
- 	cpu_info_t info;
- 	
- 	cpu_version(&info);
- 	CPU->arch.version = info.version;
- 	CPU->arch.revision = info.revision;
+ 	cpu_version(&CPU->arch);
  }
  
- void cpu_print_report(cpu_t *m)
+ void cpu_print_report(cpu_t *cpu)
  {
  	const char *name;
  
- 	switch (m->arch.version) {
+ 	switch (cpu->arch.version) {
  	case 8:
- 		name = " (PowerPC 750)";
+ 		name = "PowerPC 750";
  		break;
  	case 9:
- 		name = " (PowerPC 604e)";
+ 		name = "PowerPC 604e";
  		break;
  	case 0x81:
- 		name = " (PowerPC 8260)";
+ 		name = "PowerPC 8260";
  		break;
  	case 0x8081:
- 		name = " (PowerPC 826xA)";
+ 		name = "PowerPC 826xA";
  		break;
  	default:
- 		name = " ";
+ 		name = "unknown";
  	}
  
- 	printf("cpu%d: version=%d%s, revision=%d\n", m->id, m->arch.version, name, m->arch.revision);
+ 	printf("cpu%" PRIs ": version=%" PRIu16 " (%s), revision=%" PRIu16 "\n", cpu->id,
+ 	    cpu->arch.version, name, cpu->arch.revision);
  }
kernel/arch/ppc32/src/debug/stacktrace.c
  #include <typedefs.h>
  
+ #define FRAME_OFFSET_FP_PREV  0
+ #define FRAME_OFFSET_RA       1
+ 
  bool kernel_frame_pointer_validate(uintptr_t fp)
  {
- 	return false;
+ 	return fp != 0;
  }
  
  bool kernel_frame_pointer_prev(uintptr_t fp, uintptr_t *prev)
  {
- 	return false;
+ 	uint32_t *stack = (void *) fp;
+ 	*prev = stack[FRAME_OFFSET_FP_PREV];
+ 	return true;
  }
  
  bool kernel_return_address_get(uintptr_t fp, uintptr_t *ra)
  {
- 	return false;
+ 	uint32_t *stack = (void *) fp;
+ 	*ra = stack[FRAME_OFFSET_RA];
+ 	return true;
  }
  
  bool uspace_frame_pointer_validate(uintptr_t fp)
  {
- 	return false;
+ 	return fp != 0;
  }
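The two offsets encode the frame layout the walker now relies on: the word at the
frame pointer is the back chain (previous stack pointer) and the following word
holds the saved return address. A toy user-space walk over a hand-built chain
(purely illustrative, using host-width words instead of the kernel's uint32_t)
shows how the accessors above compose:

    #include <stdint.h>
    #include <stdio.h>

    #define FRAME_OFFSET_FP_PREV  0  /* back chain: previous frame pointer */
    #define FRAME_OFFSET_RA       1  /* saved return address */

    int main(void)
    {
        /* Two fake frames; the chain ends at a zero back pointer, which
           is exactly what kernel_frame_pointer_validate() checks. */
        uintptr_t outer[2] = { 0, 0x1000 };
        uintptr_t inner[2] = { (uintptr_t) outer, 0x2000 };

        uintptr_t fp = (uintptr_t) inner;
        while (fp != 0) {
            uintptr_t *frame = (uintptr_t *) fp;
            printf("fp=%#lx ra=%#lx\n", (unsigned long) fp,
                (unsigned long) frame[FRAME_OFFSET_RA]);
            fp = frame[FRAME_OFFSET_FP_PREV];
        }
        return 0;
    }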
kernel/arch/ppc32/src/debug/stacktrace_asm.S
  #
  
+ #include <arch/asm/regname.h>
+ 
  .text
…
  frame_pointer_get:
+ 	mr r3, sp
  	blr
  
  program_counter_get:
+ 	mflr r3
  	blr
kernel/arch/sparc64/include/asm.h
  }
  
+ /** Check interrupts state.
+  *
+  * @return True if interrupts are disabled.
+  *
+  */
+ static inline bool interrupts_disabled(void)
+ {
+ 	pstate_reg_t pstate;
+ 	
+ 	pstate.value = pstate_read();
+ 	return !pstate.ie;
+ }
+ 
  /** Return base address of current stack.
   *
kernel/doc/synchronization
  | spinlock_t |
  +------------+
+       |
+       +------------------------------+
+                                      |
+ INTERRUPTS-DISABLED SPINNING LOCKS   |
+ irq_spinlock_lock, irq_spinlock_trylock, irq_spinlock_unlock |
+ +----------------+                   |
+ | irq_spinlock_t |<------------------+
+ +----------------+
+ 
+ 
  
  WAIT QUEUES
…
(The rest of the hunk only realigns the existing diagram:)
  +---------+
  | waitq_t |
  +---------+
      /  \
  SEMAPHORES  /  \  CONDITION VARIABLES
  semaphore_down_timeout, semaphore_up    condvar_wait_timeout, condvar_signal
…
  +--------------+        +-----------+
        |                       ^
        |                       |
        |                   +------+
        V                  /
  MUTEXES                 /   READERS/WRITERS LOCKS
  mutex_lock_timeout, mutex_unlock    rwlock_reader/writer_lock_timeout, rwlock_unlock
  +---------+            /    +----------+
  | mutex_t |------------------------------->| rwlock_t |
  +---------+           /     +----------+
        |              /
        +------------------------+
kernel/generic/include/arch.h
(Trailing whitespace removed from the group tag:)
- /** @addtogroup generic 
+ /** @addtogroup generic
   * @{
   */
…
  #include <mm/as.h>
  
(The macro definitions are only realigned; the values are unchanged:)
  #define DEFAULT_CONTEXT  0
  
  #define CPU                  THE->cpu
  #define THREAD               THE->thread
  #define TASK                 THE->task
  #define AS                   THE->as
  #define CONTEXT              (THE->task ? THE->task->context : DEFAULT_CONTEXT)
  #define PREEMPTION_DISABLED  THE->preemption_disabled
  
  #define context_check(ctx1, ctx2)  ((ctx1) == (ctx2))
…
  typedef struct {
- 	size_t preemption_disabled;
- 	thread_t *thread;
- 	task_t *task;
- 	cpu_t *cpu;
- 	as_t *as;
+ 	size_t preemption_disabled;  /**< Preemption disabled counter. */
+ 	thread_t *thread;            /**< Current thread. */
+ 	task_t *task;                /**< Current task. */
+ 	cpu_t *cpu;                  /**< Executing cpu. */
+ 	as_t *as;                    /**< Current address space. */
  } the_t;
  
  #define THE  ((the_t * )(get_stack_base()))
  
- extern void the_initialize(the_t *the);
- extern void the_copy(the_t *src, the_t *dst);
+ extern void the_initialize(the_t *);
+ extern void the_copy(the_t *, the_t *);
  
  extern void arch_pre_mm_init(void);
…
  extern void reboot(void);
  extern void arch_reboot(void);
- extern void *arch_construct_function(fncptr_t *fptr, void *addr, void *caller);
+ extern void *arch_construct_function(fncptr_t *, void *, void *);
  
  #endif
kernel/generic/include/debug.h
  #include <panic.h>
- #include <arch/debug.h>
+ #include <symtab.h>
  
  #define CALLER  ((uintptr_t) __builtin_return_address(0))
  
- #ifndef HERE
- /** Current Instruction Pointer address */
- #	define HERE ((uintptr_t *) 0)
- #endif
+ #ifdef CONFIG_DEBUG
  
  /** Debugging ASSERT macro
…
   *
   */
- #ifdef CONFIG_DEBUG
- #	define ASSERT(expr) \
- 		if (!(expr)) { \
- 			panic("Assertion failed (%s), caller=%p.", #expr, CALLER); \
- 		}
- #else
- #	define ASSERT(expr)
- #endif
+ #define ASSERT(expr) \
+ 	do { \
+ 		if (!(expr)) \
+ 			panic("Assertion failed (%s)", #expr); \
+ 	} while (0)
+ 
+ /** Debugging verbose ASSERT macro
+  *
+  * If CONFIG_DEBUG is set, the ASSERT() macro
+  * evaluates expr and if it is false raises
+  * kernel panic. The panic message contains also
+  * the supplied message.
+  *
+  * @param expr Expression which is expected to be true.
+  * @param msg  Additional message to show (string).
+  *
+  */
+ #define ASSERT_VERBOSE(expr, msg) \
+ 	do { \
+ 		if (!(expr)) \
+ 			panic("Assertion failed (%s, %s)", #expr, msg); \
+ 	} while (0)
+ 
+ #else /* CONFIG_DEBUG */
+ 
+ #define ASSERT(expr)
+ #define ASSERT_VERBOSE(expr, msg)
+ 
+ #endif /* CONFIG_DEBUG */
+ 
+ #ifdef CONFIG_LOG
  
  /** Extensive logging output macro
…
   *
   */
- #ifdef CONFIG_LOG
- #	define LOG(format, ...) \
- 		printf("%s() at %s:%u: " format "\n", __func__, __FILE__, \
- 		    __LINE__, ##__VA_ARGS__);
- #else
- #	define LOG(format, ...)
- #endif
+ #define LOG(format, ...) \
+ 	do { \
+ 		printf("%s->%s() at %s:%u: " format "\n", symtab_fmt_name_lookup(CALLER), \
+ 		    __func__, __FILE__, __LINE__, ##__VA_ARGS__); \
+ 	} while (0)
  
  /** Extensive logging execute macro
…
   *
   */
+ #define LOG_EXEC(fnc) \
+ 	do { \
+ 		printf("%s->%s() at %s:%u: " #fnc "\n", symtab_fmt_name_lookup(CALLER), \
+ 		    __func__, __FILE__, __LINE__); \
+ 		fnc; \
+ 	} while (0)
  
- #ifdef CONFIG_LOG
- #	define LOG_EXEC(fnc) \
- 		{ \
- 			printf("%s() at %s:%u: " #fnc "\n", __func__, __FILE__, \
- 			    __LINE__); \
- 			fnc; \
- 		}
- #else
- #	define LOG_EXEC(fnc) fnc
- #endif
+ #else /* CONFIG_LOG */
  
+ #define LOG(format, ...)
+ #define LOG_EXEC(fnc) fnc
+ 
+ #endif /* CONFOG_LOG */
  
  #endif
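Two details of the reworked macros are worth spelling out. Wrapping each body in
do { ... } while (0) turns the macro into a single statement, so it can no longer
break an enclosing if/else chain the way the old bare-if ASSERT could, and
ASSERT_VERBOSE threads one extra string into the panic message. A user-space
approximation (panic() stubbed out with abort(); not the kernel's implementation)
demonstrates both:

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for the kernel's panic() so the macros run in user space. */
    #define panic(fmt, ...) \
        do { \
            fprintf(stderr, fmt "\n", ##__VA_ARGS__); \
            abort(); \
        } while (0)

    #define ASSERT(expr) \
        do { \
            if (!(expr)) \
                panic("Assertion failed (%s)", #expr); \
        } while (0)

    #define ASSERT_VERBOSE(expr, msg) \
        do { \
            if (!(expr)) \
                panic("Assertion failed (%s, %s)", #expr, msg); \
        } while (0)

    int main(void)
    {
        int owner = 0;

        if (owner == 0)
            ASSERT(owner == 0);   /* a single statement: safe before an else */
        else
            ASSERT_VERBOSE(owner != 0, "run queue lock");

        return 0;
    }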
kernel/generic/include/panic.h
  #ifdef CONFIG_DEBUG
+ 
- #	define panic(format, ...) \
- 	do { \
- 		silent = false; \
- 		printf("Kernel panic in %s() at %s:%u.\n", \
- 		    __func__, __FILE__, __LINE__); \
- 		stack_trace(); \
- 		panic_printf("Panic message: " format "\n", \
- 		    ##__VA_ARGS__);\
- 	} while (0)
- #else
- #	define panic(format, ...) \
- 	do { \
- 		silent = false; \
- 		panic_printf("Kernel panic: " format "\n", ##__VA_ARGS__); \
- 	} while (0)
- #endif
+ #define panic(format, ...) \
+ 	do { \
+ 		silent = false; \
+ 		printf("Kernel panic in %s() at %s:%u\n", \
+ 		    __func__, __FILE__, __LINE__); \
+ 		stack_trace(); \
+ 		panic_printf("Panic message: " format "\n", \
+ 		    ##__VA_ARGS__);\
+ 	} while (0)
+ 
+ #else /* CONFIG_DEBUG */
+ 
+ #define panic(format, ...) \
+ 	do { \
+ 		silent = false; \
+ 		panic_printf("Kernel panic: " format "\n", ##__VA_ARGS__); \
+ 		stack_trace(); \
+ 	} while (0)
+ 
+ #endif /* CONFIG_DEBUG */
  
  extern bool silent;
kernel/generic/include/preemption.h
(Trailing whitespace removed from the group tag:)
- /** @addtogroup generic 
+ /** @addtogroup generic
   * @{
   */
kernel/generic/include/synch/spinlock.h
  #include <atomic.h>
  #include <debug.h>
+ #include <arch/asm.h>
  
  #ifdef CONFIG_SMP
…
  #ifdef CONFIG_DEBUG_SPINLOCK
  	const char *name;
- #endif
+ #endif /* CONFIG_DEBUG_SPINLOCK */
  } spinlock_t;
  
…
  /*
- * SPINLOCK_INITIALIZE is to be used for statically allocated spinlocks.
- * It declares and initializes the lock.
+ * SPINLOCK_INITIALIZE and SPINLOCK_STATIC_INITIALIZE are to be used
+ * for statically allocated spinlocks. They declare (either as global
+ * or static) symbol and initialize the lock.
  */
  #ifdef CONFIG_DEBUG_SPINLOCK
…
  }
  
- #define spinlock_lock(lock) spinlock_lock_debug((lock))
- #define spinlock_unlock(lock) spinlock_unlock_debug((lock))
- 
- #else
+ #define ASSERT_SPINLOCK(expr, lock) \
+ 	ASSERT_VERBOSE(expr, (lock)->name)
+ 
+ #define spinlock_lock(lock) spinlock_lock_debug((lock))
+ #define spinlock_unlock(lock) spinlock_unlock_debug((lock))
+ 
+ #else /* CONFIG_DEBUG_SPINLOCK */
  
  #define SPINLOCK_INITIALIZE_NAME(lock_name, desc_name) \
…
  }
  
- #define spinlock_lock(lock) atomic_lock_arch(&(lock)->val)
- #define spinlock_unlock(lock) spinlock_unlock_nondebug((lock))
- 
- #endif
+ #define ASSERT_SPINLOCK(expr, lock) \
+ 	ASSERT(expr)
+ 
+ #define spinlock_lock(lock) atomic_lock_arch(&(lock)->val)
+ #define spinlock_unlock(lock) spinlock_unlock_nondebug((lock))
+ 
+ #endif /* CONFIG_DEBUG_SPINLOCK */
  
  #define SPINLOCK_INITIALIZE(lock_name) \
…
  	SPINLOCK_STATIC_INITIALIZE_NAME(lock_name, #lock_name)
  
- extern void spinlock_initialize(spinlock_t *lock, const char *name);
- extern int spinlock_trylock(spinlock_t *lock);
- extern void spinlock_lock_debug(spinlock_t *lock);
- extern void spinlock_unlock_debug(spinlock_t *lock);
+ extern void spinlock_initialize(spinlock_t *, const char *);
+ extern int spinlock_trylock(spinlock_t *);
+ extern void spinlock_lock_debug(spinlock_t *);
+ extern void spinlock_unlock_debug(spinlock_t *);
  
  /** Unlock spinlock
…
   *
   * @param sl Pointer to spinlock_t structure.
+  *
   */
  static inline void spinlock_unlock_nondebug(spinlock_t *lock)
…
  }
  
- #else
+ #else /* CONFIG_DEBUG_SPINLOCK */
  
  #define DEADLOCK_PROBE_INIT(pname)
  #define DEADLOCK_PROBE(pname, value)
  
- #endif
+ #endif /* CONFIG_DEBUG_SPINLOCK */
  
  #else /* CONFIG_SMP */
…
  #define SPINLOCK_INITIALIZE_NAME(name, desc_name)
  #define SPINLOCK_STATIC_INITIALIZE_NAME(name, desc_name)
+ 
+ #define ASSERT_SPINLOCK(expr, lock)
  
  #define spinlock_initialize(lock, name)
…
  #define DEADLOCK_PROBE(pname, value)
  
+ #endif /* CONFIG_SMP */
+ 
+ typedef struct {
+ 	SPINLOCK_DECLARE(lock);  /**< Spinlock */
+ 	bool guard;              /**< Flag whether ipl is valid */
+ 	ipl_t ipl;               /**< Original interrupt level */
+ } irq_spinlock_t;
+ 
+ #define IRQ_SPINLOCK_DECLARE(lock_name)  irq_spinlock_t lock_name
+ #define IRQ_SPINLOCK_EXTERN(lock_name)   extern irq_spinlock_t lock_name
+ 
+ #ifdef CONFIG_SMP
+ 
+ #define ASSERT_IRQ_SPINLOCK(expr, irq_lock) \
+ 	ASSERT_SPINLOCK(expr, &((irq_lock)->lock))
+ 
+ /*
+  * IRQ_SPINLOCK_INITIALIZE and IRQ_SPINLOCK_STATIC_INITIALIZE are to be used
+  * for statically allocated interrupts-disabled spinlocks. They declare (either
+  * as global or static symbol) and initialize the lock.
+  */
+ #ifdef CONFIG_DEBUG_SPINLOCK
+ 
+ #define IRQ_SPINLOCK_INITIALIZE_NAME(lock_name, desc_name) \
+ 	irq_spinlock_t lock_name = { \
+ 		.lock = { \
+ 			.name = desc_name, \
+ 			.val = { 0 } \
+ 		}, \
+ 		.guard = false, \
+ 		.ipl = 0 \
+ 	}
+ 
+ #define IRQ_SPINLOCK_STATIC_INITIALIZE_NAME(lock_name, desc_name) \
+ 	static irq_spinlock_t lock_name = { \
+ 		.lock = { \
+ 			.name = desc_name, \
+ 			.val = { 0 } \
+ 		}, \
+ 		.guard = false, \
+ 		.ipl = 0 \
+ 	}
+ 
+ #else /* CONFIG_DEBUG_SPINLOCK */
+ 
+ #define IRQ_SPINLOCK_INITIALIZE_NAME(lock_name, desc_name) \
+ 	irq_spinlock_t lock_name = { \
+ 		.lock = { \
+ 			.val = { 0 } \
+ 		}, \
+ 		.guard = false, \
+ 		.ipl = 0 \
+ 	}
+ 
+ #define IRQ_SPINLOCK_STATIC_INITIALIZE_NAME(lock_name, desc_name) \
+ 	static irq_spinlock_t lock_name = { \
+ 		.lock = { \
+ 			.val = { 0 } \
+ 		}, \
+ 		.guard = false, \
+ 		.ipl = 0 \
+ 	}
+ 
+ #endif /* CONFIG_DEBUG_SPINLOCK */
+ 
+ #else /* CONFIG_SMP */
+ 
+ /*
+  * Since the spinlocks are void on UP systems, we also need
+  * to have a special variant of interrupts-disabled spinlock
+  * macros which take this into account.
+  */
+ 
+ #define ASSERT_IRQ_SPINLOCK(expr, irq_lock) \
+ 	ASSERT_SPINLOCK(expr, NULL)
+ 
+ #define IRQ_SPINLOCK_INITIALIZE_NAME(lock_name, desc_name) \
+ 	irq_spinlock_t lock_name = { \
+ 		.guard = false, \
+ 		.ipl = 0 \
+ 	}
+ 
+ #define IRQ_SPINLOCK_STATIC_INITIALIZE_NAME(lock_name, desc_name) \
+ 	static irq_spinlock_t lock_name = { \
+ 		.guard = false, \
+ 		.ipl = 0 \
+ 	}
+ 
+ #endif /* CONFIG_SMP */
+ 
+ #define IRQ_SPINLOCK_INITIALIZE(lock_name) \
+ 	IRQ_SPINLOCK_INITIALIZE_NAME(lock_name, #lock_name)
+ 
+ #define IRQ_SPINLOCK_STATIC_INITIALIZE(lock_name) \
+ 	IRQ_SPINLOCK_STATIC_INITIALIZE_NAME(lock_name, #lock_name)
+ 
+ /** Initialize interrupts-disabled spinlock
+  *
+  * @param lock IRQ spinlock to be initialized.
+  * @param name IRQ spinlock name.
+  *
+  */
+ static inline void irq_spinlock_initialize(irq_spinlock_t *lock, const char *name)
+ {
+ 	spinlock_initialize(&(lock->lock), name);
+ 	lock->guard = false;
+ 	lock->ipl = 0;
+ }
+ 
+ /** Lock interrupts-disabled spinlock
+  *
+  * Lock a spinlock which requires disabled interrupts.
+  *
+  * @param lock    IRQ spinlock to be locked.
+  * @param irq_dis If true, interrupts are actually disabled
+  *                prior locking the spinlock. If false, interrupts
+  *                are expected to be already disabled.
+  *
+  */
+ static inline void irq_spinlock_lock(irq_spinlock_t *lock, bool irq_dis)
+ {
+ 	if (irq_dis) {
+ 		ipl_t ipl = interrupts_disable();
+ 		spinlock_lock(&(lock->lock));
+ 		
+ 		lock->guard = true;
+ 		lock->ipl = ipl;
+ 	} else {
+ 		ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);
+ 		
+ 		spinlock_lock(&(lock->lock));
+ 		ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
+ 	}
+ }
+ 
+ /** Unlock interrupts-disabled spinlock
+  *
+  * Unlock a spinlock which requires disabled interrupts.
+  *
+  * @param lock    IRQ spinlock to be unlocked.
+  * @param irq_res If true, interrupts are restored to previously
+  *                saved interrupt level.
+  *
+  */
+ static inline void irq_spinlock_unlock(irq_spinlock_t *lock, bool irq_res)
+ {
+ 	ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);
+ 	
+ 	if (irq_res) {
+ 		ASSERT_IRQ_SPINLOCK(lock->guard, lock);
+ 		
+ 		lock->guard = false;
+ 		ipl_t ipl = lock->ipl;
+ 		
+ 		spinlock_unlock(&(lock->lock));
+ 		interrupts_restore(ipl);
+ 	} else {
+ 		ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
+ 		spinlock_unlock(&(lock->lock));
+ 	}
+ }
+ 
+ /** Lock interrupts-disabled spinlock
+  *
+  * Lock an interrupts-disabled spinlock conditionally. If the
+  * spinlock is not available at the moment, signal failure.
+  * Interrupts are expected to be already disabled.
+  *
+  * @param lock IRQ spinlock to be locked conditionally.
+  *
+  * @return Zero on failure, non-zero otherwise.
+  *
+  */
+ static inline int irq_spinlock_trylock(irq_spinlock_t *lock)
+ {
+ 	ASSERT_IRQ_SPINLOCK(interrupts_disabled(), lock);
+ 	int rc = spinlock_trylock(&(lock->lock));
+ 	
+ 	ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
+ 	return rc;
+ }
+ 
+ /** Pass lock from one interrupts-disabled spinlock to another
+  *
+  * Pass lock from one IRQ spinlock to another IRQ spinlock
+  * without enabling interrupts during the process.
+  *
+  * The first IRQ spinlock is supposed to be locked.
+  *
+  * @param unlock IRQ spinlock to be unlocked.
+  * @param lock   IRQ spinlock to be locked.
+  *
+  */
+ static inline void irq_spinlock_pass(irq_spinlock_t *unlock,
+     irq_spinlock_t *lock)
+ {
+ 	ASSERT_IRQ_SPINLOCK(interrupts_disabled(), unlock);
+ 	
+ 	/* Pass guard from unlock to lock */
+ 	bool guard = unlock->guard;
+ 	ipl_t ipl = unlock->ipl;
+ 	unlock->guard = false;
+ 	
+ 	spinlock_unlock(&(unlock->lock));
+ 	spinlock_lock(&(lock->lock));
+ 	
+ 	ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
+ 	
+ 	if (guard) {
+ 		lock->guard = true;
+ 		lock->ipl = ipl;
+ 	}
+ }
+ 
+ /** Hand-over-hand locking of interrupts-disabled spinlocks
+  *
+  * Implement hand-over-hand locking between two interrupts-disabled
+  * spinlocks without enabling interrupts during the process.
+  *
+  * The first IRQ spinlock is supposed to be locked.
+  *
+  * @param unlock IRQ spinlock to be unlocked.
+  * @param lock   IRQ spinlock to be locked.
+  *
+  */
+ static inline void irq_spinlock_exchange(irq_spinlock_t *unlock,
+     irq_spinlock_t *lock)
+ {
+ 	ASSERT_IRQ_SPINLOCK(interrupts_disabled(), unlock);
+ 	
+ 	spinlock_lock(&(lock->lock));
+ 	ASSERT_IRQ_SPINLOCK(!lock->guard, lock);
+ 	
+ 	/* Pass guard from unlock to lock */
+ 	if (unlock->guard) {
+ 		lock->guard = true;
+ 		lock->ipl = unlock->ipl;
+ 		unlock->guard = false;
+ 	}
+ 	
+ 	spinlock_unlock(&(unlock->lock));
+ }
+ 
  #endif
  
- #endif
- 
  /** @}
   */
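The guard/ipl pair is the heart of the new lock type: the holder that actually
disabled interrupts (irq_dis == true) sets the guard and later restores the saved
level, while callers already running with interrupts off leave it alone. The
following user-space mock-up traces that hand-off; every name in it is a stand-in
for the kernel primitive of the same role, and the spinning itself is reduced to
a flag:

    #include <stdbool.h>
    #include <stdio.h>

    typedef int ipl_t;
    static ipl_t current_ipl = 1;  /* 1 = interrupts enabled */

    static ipl_t interrupts_disable(void)
    {
        ipl_t old = current_ipl;
        current_ipl = 0;
        return old;
    }

    static void interrupts_restore(ipl_t ipl)
    {
        current_ipl = ipl;
    }

    typedef struct {
        bool locked;  /* spinlock_t stand-in */
        bool guard;   /* flag whether ipl below is valid */
        ipl_t ipl;    /* original interrupt level */
    } irq_spinlock_t;

    static void irq_spinlock_lock(irq_spinlock_t *lock, bool irq_dis)
    {
        if (irq_dis) {
            ipl_t ipl = interrupts_disable();
            lock->locked = true;  /* spinlock_lock() in the kernel */
            lock->guard = true;
            lock->ipl = ipl;
        } else
            lock->locked = true;  /* interrupts assumed already disabled */
    }

    static void irq_spinlock_unlock(irq_spinlock_t *lock, bool irq_res)
    {
        if (irq_res) {
            lock->guard = false;
            ipl_t ipl = lock->ipl;
            lock->locked = false;
            interrupts_restore(ipl);
        } else
            lock->locked = false;  /* someone up the stack holds the guard */
    }

    int main(void)
    {
        irq_spinlock_t lock = { false, false, 0 };

        irq_spinlock_lock(&lock, true);
        printf("in critical section, ipl=%d\n", current_ipl);  /* prints 0 */
        irq_spinlock_unlock(&lock, true);
        printf("after unlock, ipl=%d\n", current_ipl);         /* prints 1 */
        return 0;
    }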
kernel/generic/src/ipc/kbox.c
  void ipc_kbox_cleanup(void)
  {
- 	ipl_t ipl;
  	bool have_kb_thread;
…
  	 * kbox thread to clean it up since sender != debugger.
  	 */
- 	ipl = interrupts_disable();
- 	spinlock_lock(&TASK->lock);
+ 	mutex_lock(&TASK->udebug.lock);
  	udebug_task_cleanup(TASK);
- 	spinlock_unlock(&TASK->lock);
- 	interrupts_restore(ipl);
- 	
+ 	mutex_unlock(&TASK->udebug.lock);
+ 	
  	if (have_kb_thread) {
  		LOG("Join kb.thread.");
kernel/generic/src/mm/as.c
  	 * No need to check for overlaps.
  	 */
+ 
+ 	page_table_lock(as, false);
  
  	/*
…
  				pte_t *pte;
  
- 				page_table_lock(as, false);
  				pte = page_mapping_find(as, b +
  				    i * PAGE_SIZE);
…
  				page_mapping_remove(as, b +
  				    i * PAGE_SIZE);
- 				page_table_unlock(as, false);
  			}
  		}
…
  		tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE,
  		    area->pages - pages);
+ 
  		/*
  		 * Invalidate software translation caches (e.g. TSB on sparc64).
…
  		    pages * PAGE_SIZE, area->pages - pages);
  		tlb_shootdown_finalize();
+ 
+ 		page_table_unlock(as, false);
  
  	} else {
…
  	base = area->base;
+ 
+ 	page_table_lock(as, false);
  
  	/*
…
  		for (j = 0; j < (size_t) node->value[i]; j++) {
- 			page_table_lock(as, false);
  			pte = page_mapping_find(as, b + j * PAGE_SIZE);
  			ASSERT(pte && PTE_VALID(pte) &&
…
  			}
  			page_mapping_remove(as, b + j * PAGE_SIZE);
- 			page_table_unlock(as, false);
  		}
  	}
…
  	tlb_invalidate_pages(as->asid, area->base, area->pages);
+ 
  	/*
  	 * Invalidate potential software translation caches (e.g. TSB on
…
  	as_invalidate_translation_cache(as, area->base, area->pages);
  	tlb_shootdown_finalize();
+ 
+ 	page_table_unlock(as, false);
  
  	btree_destroy(&area->used_space);
…
  	old_frame = malloc(used_pages * sizeof(uintptr_t), 0);
  
+ 	page_table_lock(as, false);
+ 
  	/*
  	 * Start TLB shootdown sequence.
…
  		for (j = 0; j < (size_t) node->value[i]; j++) {
- 			page_table_lock(as, false);
  			pte = page_mapping_find(as, b + j * PAGE_SIZE);
  			ASSERT(pte && PTE_VALID(pte) &&
…
  			/* Remove old mapping */
  			page_mapping_remove(as, b + j * PAGE_SIZE);
- 			page_table_unlock(as, false);
  		}
  	}
…
  	as_invalidate_translation_cache(as, area->base, area->pages);
  	tlb_shootdown_finalize();
+ 
+ 	page_table_unlock(as, false);
  
  	/*
kernel/generic/src/preempt/preemption.c
(Trailing whitespace removed from the group tag:)
- /** @addtogroup generic 
+ /** @addtogroup generic
   * @{
   */
  
  /**
-  * @file
-  * @brief
+  * @file preemption.c
+  * @brief Preemption control.
   */
  
  #include <preemption.h>
  #include <arch.h>
…
  void preemption_enable(void)
  {
- 	ASSERT(THE->preemption_disabled);
+ 	ASSERT(PREEMPTION_DISABLED);
  	memory_barrier();
  	THE->preemption_disabled--;
kernel/generic/src/synch/mutex.c
(The doxygen comments are only realigned; the parenthesization of the conditions
is the substantive change:)

  /**
   * @file
   * @brief Mutexes.
   */
…
  /** Initialize mutex.
   *
   * @param mtx  Mutex.
   * @param type Type of the mutex.
   */
  void mutex_initialize(mutex_t *mtx, mutex_type_t type)
…
   * Timeout mode and non-blocking mode can be requested.
   *
   * @param mtx   Mutex.
   * @param usec  Timeout in microseconds.
   * @param flags Specify mode of operation.
   *
   * For exact description of possible combinations of
   * usec and flags, see comment for waitq_sleep_timeout().
   *
   * @return See comment for waitq_sleep_timeout().
+  *
   */
  int _mutex_lock_timeout(mutex_t *mtx, uint32_t usec, int flags)
  {
  	int rc;
  
- 	if (mtx->type == MUTEX_PASSIVE && THREAD) {
+ 	if ((mtx->type == MUTEX_PASSIVE) && (THREAD)) {
  		rc = _semaphore_down_timeout(&mtx->sem, usec, flags);
  	} else {
- 		ASSERT(mtx->type == MUTEX_ACTIVE || !THREAD);
+ 		ASSERT((mtx->type == MUTEX_ACTIVE) || (!THREAD));
  		ASSERT(usec == SYNCH_NO_TIMEOUT);
  		ASSERT(!(flags & SYNCH_FLAGS_INTERRUPTIBLE));
+ 		
  		do {
  			rc = semaphore_trydown(&mtx->sem);
…
  /** Release mutex.
   *
   * @param mtx Mutex.
   */
  void mutex_unlock(mutex_t *mtx)
kernel/generic/src/synch/spinlock.c
  void spinlock_unlock_debug(spinlock_t *lock)
  {
- 	ASSERT(atomic_get(&lock->val) != 0);
+ 	ASSERT_SPINLOCK(atomic_get(&lock->val) != 0, lock);
  	
  	/*
…
  /** Lock spinlock conditionally
   *
-  * Lock spinlock conditionally.
-  * If the spinlock is not available at the moment,
-  * signal failure.
+  * Lock spinlock conditionally. If the spinlock is not available
+  * at the moment, signal failure.
   *
   * @param lock Pointer to spinlock_t structure.
kernel/generic/src/synch/waitq.c
  	int rc;
  
- 	ASSERT(!PREEMPTION_DISABLED || PARAM_NON_BLOCKING(flags, usec));
+ 	ASSERT((!PREEMPTION_DISABLED) || (PARAM_NON_BLOCKING(flags, usec)));
  	
  	ipl = waitq_sleep_prepare(wq);
kernel/generic/src/time/clock.c
  	spinlock_unlock(&THREAD->lock);
  	
- 	if (!ticks && !PREEMPTION_DISABLED) {
+ 	if ((!ticks) && (!PREEMPTION_DISABLED)) {
  #ifdef CONFIG_UDEBUG
  		istate_t *istate;