Changeset 5f85c91 in mainline
- Timestamp: 2005-11-08T12:22:35Z (19 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 36a140b
- Parents: 389f41e
- Files: 30 edited
Legend:
- unmarked lines: unmodified
- lines prefixed with +: added
- lines prefixed with -: removed
Makefile
r389f41e → r5f85c91

@@ lines 63-66 @@
 endif
 ifeq ($(CONFIG_DEBUG_SPINLOCK),y)
-DEFS += -DDEBUG_SPINLOCK
+DEFS += -DCONFIG_DEBUG_SPINLOCK
 endif
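Across all 30 files, this changeset replaces the ad-hoc preprocessor symbols (SMP, HT, FPU_LAZY, DEBUG_SPINLOCK in the Makefiles; __SMP__, __HT__, FPU_LAZY, DEBUG_SPINLOCK in the sources) with uniformly prefixed CONFIG_* names, so the -D flags emitted by the build match the #ifdef tests in the code. As a minimal illustration of the mechanism (not part of the changeset; the file name and build command below are only examples), a symbol passed with -D selects code at preprocessing time:

    /* demo.c, illustrative only; build with: cc -DCONFIG_DEBUG_SPINLOCK demo.c */
    #include <stdio.h>

    int main(void)
    {
    #ifdef CONFIG_DEBUG_SPINLOCK
        puts("spinlock debugging compiled in");
    #else
        puts("spinlock debugging compiled out");
    #endif
        return 0;
    }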
arch/amd64/Makefile.inc
r389f41e → r5f85c91

@@ lines 62-70 @@
 ifeq ($(CONFIG_SMP),y)
-DEFS += -DSMP
+DEFS += -DCONFIG_SMP
 endif
 ifeq ($(CONFIG_HT),y)
-DEFS += -DHT
+DEFS += -DCONFIG_HT
 endif
 ifeq ($(CONFIG_FPU_LAZY),y)
-DEFS += -DFPU_LAZY
+DEFS += -DCONFIG_FPU_LAZY
 endif
arch/amd64/src/amd64.c
r389f41e → r5f85c91

@@ lines 76-83 @@
 trap_register(VECTOR_SYSCALL, syscall);

-#ifdef __SMP__
+#ifdef CONFIG_SMP
 trap_register(VECTOR_TLB_SHOOTDOWN_IPI, tlb_shootdown_ipi);
 trap_register(VECTOR_WAKEUP_IPI, wakeup_ipi);
-#endif /* __SMP__ */
+#endif /* CONFIG_SMP */
 }
 }

@@ lines 95-101 @@
 memory_print_map();

-#ifdef __SMP__
+#ifdef CONFIG_SMP
 acpi_init();
-#endif /* __SMP__ */
+#endif /* CONFIG_SMP */
 }
 }
arch/amd64/src/interrupt.c
r389f41e → r5f85c91

@@ lines 140-144 @@
 void nm_fault(__u8 n, __native stack[])
 {
-#ifdef FPU_LAZY
+#ifdef CONFIG_FPU_LAZY
 scheduler_fpu_lazy_request();
 #else
arch/amd64/src/smp/ap.S
r389f41e → r5f85c91

@@ lines 40-44 @@
 .section K_TEXT_START_2, "ax"

-#ifdef __SMP__
+#ifdef CONFIG_SMP

 .global ap_boot

@@ line 103 @@
-#endif /* __SMP__ */
+#endif /* CONFIG_SMP */
arch/ia32/Makefile.inc
r389f41e → r5f85c91

@@ lines 81-89 @@
 ifeq ($(CONFIG_SMP),y)
-DEFS += -DSMP
+DEFS += -DCONFIG_SMP
 endif
 ifeq ($(CONFIG_HT),y)
-DEFS += -DHT
+DEFS += -DCONFIG_HT
 endif
 ifeq ($(CONFIG_FPU_LAZY),y)
-DEFS += -DFPU_LAZY
+DEFS += -DCONFIG_FPU_LAZY
 endif
arch/ia32/include/atomic.h
r389f41e → r5f85c91

@@ lines 34-48 @@
 static inline void atomic_inc(volatile int *val) {
-#ifdef __SMP__
+#ifdef CONFIG_SMP
 __asm__ volatile ("lock incl %0\n" : "=m" (*val));
 #else
 __asm__ volatile ("incl %0\n" : "=m" (*val));
-#endif /* __SMP__ */
+#endif /* CONFIG_SMP */
 }

 static inline void atomic_dec(volatile int *val) {
-#ifdef __SMP__
+#ifdef CONFIG_SMP
 __asm__ volatile ("lock decl %0\n" : "=m" (*val));
 #else
 __asm__ volatile ("decl %0\n" : "=m" (*val));
-#endif /* __SMP__ */
+#endif /* CONFIG_SMP */
 }
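The atomic_inc/atomic_dec pair above differs between the two builds only in the lock prefix: on an SMP kernel the prefix makes the read-modify-write atomic with respect to other processors, while a uniprocessor kernel can use the plain instruction. A user-space sketch of the same idea (assumes GCC inline assembly on ia32/amd64; the demo_ names are illustrative, not the kernel's):

    #include <stdio.h>

    /* The "lock" prefix is what guarantees atomicity across CPUs; without
     * CONFIG_SMP no other processor can race on the location, so the plain
     * single instruction is enough. */
    static inline void demo_atomic_inc(volatile int *val)
    {
    #ifdef CONFIG_SMP
        __asm__ volatile ("lock incl %0" : "+m" (*val));
    #else
        __asm__ volatile ("incl %0" : "+m" (*val));
    #endif
    }

    int main(void)
    {
        volatile int counter = 0;
        demo_atomic_inc(&counter);
        printf("counter = %d\n", counter);
        return 0;
    }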
arch/ia32/src/atomic.S
r389f41e → r5f85c91

@@ lines 29-33 @@
 .text

-#ifdef __SMP__
+#ifdef CONFIG_SMP

 .global spinlock_arch

@@ lines 44-47 @@
 0:
-#ifdef __HT__
+#ifdef CONFIG_HT
 pause # Pentium 4's with HT love this instruction
 #endif
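The spin loop above emits pause only when CONFIG_HT is enabled; the instruction tells a hyper-threaded core to back off while the sibling thread may hold the lock, saving pipeline bandwidth and power. A rough C rendering of such a test-and-test-and-set loop (illustrative sketch using GCC builtins on x86, not the kernel's spinlock_arch):

    #include <stdio.h>

    static void demo_spin_lock(volatile int *lock)
    {
        /* Try to grab the lock; while it is busy, spin reading it. */
        while (__sync_lock_test_and_set(lock, 1)) {
            while (*lock) {
    #ifdef CONFIG_HT
                __asm__ volatile ("pause");  /* back-off hint for HT siblings */
    #endif
            }
        }
    }

    static void demo_spin_unlock(volatile int *lock)
    {
        __sync_lock_release(lock);
    }

    int main(void)
    {
        volatile int lock = 0;
        demo_spin_lock(&lock);
        puts("in critical section");
        demo_spin_unlock(&lock);
        return 0;
    }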
arch/ia32/src/ia32.c
r389f41e → r5f85c91

@@ lines 63-70 @@
 trap_register(VECTOR_SYSCALL, syscall);

-#ifdef __SMP__
+#ifdef CONFIG_SMP
 trap_register(VECTOR_TLB_SHOOTDOWN_IPI, tlb_shootdown_ipi);
 trap_register(VECTOR_WAKEUP_IPI, wakeup_ipi);
-#endif /* __SMP__ */
+#endif /* CONFIG_SMP */
 }
 }

@@ lines 82-88 @@
 memory_print_map();

-#ifdef __SMP__
+#ifdef CONFIG_SMP
 acpi_init();
-#endif /* __SMP__ */
+#endif /* CONFIG_SMP */
 }
 }
arch/ia32/src/interrupt.c
r389f41e → r5f85c91

@@ lines 111-115 @@
 void nm_fault(__u8 n, __native stack[])
 {
-#ifdef FPU_LAZY
+#ifdef CONFIG_FPU_LAZY
 scheduler_fpu_lazy_request();
 #else
arch/ia32/src/smp/ap.S
r389f41e → r5f85c91

@@ lines 33-37 @@
 .section K_TEXT_START_2, "ax"

-#ifdef __SMP__
+#ifdef CONFIG_SMP

 .global ap_boot

@@ lines 74-76 @@
 jmpl $KTEXT, $main_ap

-#endif /* __SMP__ */
+#endif /* CONFIG_SMP */
arch/ia32/src/smp/apic.c
r389f41e → r5f85c91

@@ lines 38-42 @@
 #include <arch.h>

-#ifdef __SMP__
+#ifdef CONFIG_SMP

 /*

@@ lines 417-419 @@
 }

-#endif /* __SMP__ */
+#endif /* CONFIG_SMP */
arch/ia32/src/smp/ipi.c
r389f41e → r5f85c91

@@ lines 27-31 @@
 */

-#ifdef __SMP__
+#ifdef CONFIG_SMP

 #include <smp/ipi.h>

@@ lines 37-39 @@
 }

-#endif /* __SMP__ */
+#endif /* CONFIG_SMP */
arch/ia32/src/smp/mps.c
r389f41e → r5f85c91

@@ lines 27-31 @@
 */

-#ifdef __SMP__
+#ifdef CONFIG_SMP

 #include <config.h>

@@ lines 423-425 @@
 }

-#endif /* __SMP__ */
+#endif /* CONFIG_SMP */
arch/ia32/src/smp/smp.c
r389f41e → r5f85c91

@@ lines 48-52 @@
 #include <arch/i8259.h>

-#ifdef __SMP__
+#ifdef CONFIG_SMP

 static struct smp_config_operations *ops = NULL;

@@ lines 166-168 @@
 }

-#endif /* __SMP__ */
+#endif /* CONFIG_SMP */
arch/mips32/Makefile.inc
r389f41e → r5f85c91

@@ lines 104-106 @@
 ifeq ($(CONFIG_FPU_LAZY),y)
-DEFS += -DFPU_LAZY
+DEFS += -DCONFIG_FPU_LAZY
 endif
arch/mips32/src/exception.c
r389f41e → r5f85c91

@@ lines 74-78 @@
 break;
 case EXC_CpU:
-#ifdef FPU_LAZY
+#ifdef CONFIG_FPU_LAZY
 if (cp0_cause_coperr(cause) == fpu_cop_id)
 scheduler_fpu_lazy_request();
genarch/src/acpi/matd.c
r389f41e → r5f85c91

@@ lines 43-47 @@
 struct acpi_madt *acpi_madt = NULL;

-#ifdef __SMP__
+#ifdef CONFIG_SMP

 static void madt_l_apic_entry(struct madt_l_apic *la, __u32 index);

@@ line 213 @@
-#endif /* __SMP__ */
+#endif /* CONFIG_SMP */
generic/include/cpu.h
r389f41e → r5f85c91

@@ lines 53-60 @@
 link_t timeout_active_head;

-#ifdef __SMP__
+#ifdef CONFIG_SMP
 int kcpulbstarted;
 waitq_t kcpulb_wq;
-#endif /* __SMP__ */
+#endif /* CONFIG_SMP */

 int id;
generic/include/mm/tlb.h
r389f41e → r5f85c91

@@ lines 34-38 @@
 extern void tlb_init(void);

-#ifdef __SMP__
+#ifdef CONFIG_SMP
 extern void tlb_shootdown_start(void);
 extern void tlb_shootdown_finalize(void);

@@ lines 42-46 @@
 # define tlb_shootdown_finalize() ;
 # define tlb_shootdown_ipi_recv() ;
-#endif /* __SMP__ */
+#endif /* CONFIG_SMP */

 /* Export TLB interface that each architecture must implement. */
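This header keeps callers free of conditionals: with CONFIG_SMP the TLB shootdown hooks are real functions, without it they expand to empty statements. The same stub pattern in a self-contained sketch (hypothetical demo_ names, not the kernel's API):

    #include <stdio.h>

    #ifdef CONFIG_SMP
    void demo_shootdown_start(void)    { puts("IPI the other CPUs and wait"); }
    void demo_shootdown_finalize(void) { puts("let the other CPUs continue"); }
    #else
    /* On a uniprocessor build the hooks collapse to empty statements,
     * so call sites need no #ifdefs of their own. */
    #  define demo_shootdown_start()    ;
    #  define demo_shootdown_finalize() ;
    #endif

    int main(void)
    {
        demo_shootdown_start();
        puts("invalidate local TLB entries");
        demo_shootdown_finalize();
        return 0;
    }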
generic/include/smp/ipi.h
r389f41e → r5f85c91

@@ lines 30-39 @@
 #define __IPI_H__

-#ifdef __SMP__
+#ifdef CONFIG_SMP
 extern void ipi_broadcast(int ipi);
 extern void ipi_broadcast_arch(int ipi);
 #else
 #define ipi_broadcast(x) ;
-#endif /* __SMP__ */
+#endif /* CONFIG_SMP */

 #endif
generic/include/smp/smp.h
r389f41e → r5f85c91

@@ lines 30-38 @@
 #define __SMP_H__

-#ifdef __SMP__
+#ifdef CONFIG_SMP
 extern void smp_init(void);
 #else
 #define smp_init() ;
-#endif /* __SMP__ */
+#endif /* CONFIG_SMP */

 #endif /* __SMP_H__ */
generic/include/synch/spinlock.h
r389f41e → r5f85c91

@@ lines 34-38 @@
 #include <preemption.h>

-#ifdef __SMP__
+#ifdef CONFIG_SMP
 struct spinlock {
 int val;
generic/src/cpu/cpu.c
r389f41e → r5f85c91

@@ lines 50-56 @@
 int i, j;

-#ifdef __SMP__
+#ifdef CONFIG_SMP
 if (config.cpu_active == 1) {
-#endif /* __SMP__ */
+#endif /* CONFIG_SMP */
 cpus = (cpu_t *) malloc(sizeof(cpu_t) * config.cpu_count);
 if (!cpus)

@@ lines 67-71 @@
 cpus[i].id = i;

-#ifdef __SMP__
+#ifdef CONFIG_SMP
 waitq_initialize(&cpus[i].kcpulb_wq);
 #endif /* __SMP */

@@ lines 76-82 @@
 }

-#ifdef __SMP__
+#ifdef CONFIG_SMP
 }
-#endif /* __SMP__ */
+#endif /* CONFIG_SMP */

 CPU = &cpus[config.cpu_active-1];
generic/src/main/kinit.c
r389f41e → r5f85c91

@@ lines 44-50 @@
 #include <memstr.h>

-#ifdef __SMP__
+#ifdef CONFIG_SMP
 #include <arch/smp/mps.h>
-#endif /* __SMP__ */
+#endif /* CONFIG_SMP */

 #include <synch/waitq.h>

@@ lines 67-71 @@
 interrupts_disable();

-#ifdef __SMP__
+#ifdef CONFIG_SMP
 if (config.cpu_count > 1) {
 /*

@@ lines 85-89 @@
 else panic("thread_create/kmp");
 }
-#endif /* __SMP__ */
+#endif /* CONFIG_SMP */
 /*
 * Now that all CPUs are up, we can report what we've found.

@@ lines 96-100 @@
 }

-#ifdef __SMP__
+#ifdef CONFIG_SMP
 if (config.cpu_count > 1) {
 /*

@@ lines 114-118 @@
 }
 }
-#endif /* __SMP__ */
+#endif /* CONFIG_SMP */

 interrupts_enable();
generic/src/main/main.c
r389f41e → r5f85c91

@@ lines 40-47 @@
 #include <align.h>

-#ifdef __SMP__
+#ifdef CONFIG_SMP
 #include <arch/smp/apic.h>
 #include <arch/smp/mps.h>
-#endif /* __SMP__ */
+#endif /* CONFIG_SMP */

 #include <smp/smp.h>

@@ lines 205-207 @@
-#ifdef __SMP__
+#ifdef CONFIG_SMP
 /** Application CPUs main kernel routine
 *

@@ lines 269-271 @@
 /* not reached */
 }
-#endif /* __SMP__ */
+#endif /* CONFIG_SMP */
generic/src/mm/tlb.c
r389f41e → r5f85c91

@@ lines 37-41 @@
 #include <arch.h>

-#ifdef __SMP__
+#ifdef CONFIG_SMP
 static spinlock_t tlblock;
 #endif

@@ lines 49-53 @@
 }

-#ifdef __SMP__
+#ifdef CONFIG_SMP
 /* must be called with interrupts disabled */
 void tlb_shootdown_start(void)

@@ lines 85-87 @@
 CPU->tlb_active = 1;
 }
-#endif /* __SMP__ */
+#endif /* CONFIG_SMP */
generic/src/proc/scheduler.c
r389f41e → r5f85c91

@@ lines 62-66 @@
 {
 before_thread_runs_arch();
-#ifdef FPU_LAZY
+#ifdef CONFIG_FPU_LAZY
 if(THREAD==CPU->fpu_owner)
 fpu_enable();

@@ lines 78-82 @@
 }

-#ifdef FPU_LAZY
+#ifdef CONFIG_FPU_LAZY
 void scheduler_fpu_lazy_request(void)
 {

@@ lines 136-139 @@
 if (n == 0) {
-#ifdef __SMP__
+#ifdef CONFIG_SMP
 /*
 * If the load balancing thread is not running, wake it up and

@@ lines 144-148 @@
 goto loop;
 }
-#endif /* __SMP__ */
+#endif /* CONFIG_SMP */

 /*

@@ lines 413-417 @@
 if (THREAD) {
 spinlock_lock(&THREAD->lock);
-#ifndef FPU_LAZY
+#ifndef CONFIG_FPU_LAZY
 fpu_context_save(&(THREAD->saved_fpu_context));
 #endif

@@ lines 465-467 @@
-#ifdef __SMP__
+#ifdef CONFIG_SMP
 /** Load balancing thread
 *

@@ lines 624-626 @@
 }

-#endif /* __SMP__ */
+#endif /* CONFIG_SMP */
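CONFIG_FPU_LAZY switches the scheduler from eager FPU context saving (the #ifndef branch around fpu_context_save above) to a lazy scheme: the FPU stays disabled across a context switch, the first FPU instruction of the incoming thread traps (nm_fault on ia32/amd64, EXC_CpU on mips32), and only then does the kernel save the previous owner's state and restore the new owner's. A toy model of that hand-off (hypothetical types and names, prints instead of real context switching):

    #include <stdio.h>

    typedef struct thread { const char *name; } thread_t;

    static thread_t *fpu_owner;   /* whose registers currently live in the FPU */

    /* Stands in for scheduler_fpu_lazy_request(): called from the
     * "FPU unavailable" trap taken on the thread's first FPU instruction. */
    static void demo_fpu_lazy_request(thread_t *current)
    {
        /* A real kernel would re-enable the FPU here before touching it. */
        if (fpu_owner != current) {
            if (fpu_owner)
                printf("save FPU context of %s\n", fpu_owner->name);
            printf("restore FPU context of %s\n", current->name);
            fpu_owner = current;
        }
    }

    int main(void)
    {
        thread_t a = { "A" }, b = { "B" };
        demo_fpu_lazy_request(&a);   /* A's first FPU instruction traps */
        demo_fpu_lazy_request(&b);   /* B traps: save A, restore B */
        demo_fpu_lazy_request(&b);   /* B already owns the FPU: nothing to do */
        return 0;
    }

Threads that never touch the FPU never pay for a save or restore, which is the point of the lazy scheme.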
generic/src/smp/ipi.c
r389f41e → r5f85c91

@@ lines 27-31 @@
 */

-#ifdef __SMP__
+#ifdef CONFIG_SMP

 #include <smp/ipi.h>

@@ lines 45-47 @@
 * Provisions must be made to avoid sending IPI:
 * - before all CPU's were configured to accept the IPI
-* - if there is only one CPU but the kernel was compiled with __SMP__
+* - if there is only one CPU but the kernel was compiled with CONFIG_SMP

@@ lines 52-54 @@
 }

-#endif /* __SMP__ */
+#endif /* CONFIG_SMP */
generic/src/synch/spinlock.c
r389f41e → r5f85c91

@@ lines 35-39 @@
 #include <debug.h>

-#ifdef __SMP__
+#ifdef CONFIG_SMP

 /** Initialize spinlock

@@ lines 48-52 @@
 }

-#ifdef DEBUG_SPINLOCK
+#ifdef CONFIG_DEBUG_SPINLOCK
 /** Lock spinlock
 *
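The diff is cut off above, but the renamed CONFIG_DEBUG_SPINLOCK guard marks the debug variant of spinlock_lock. A typical debug spin path, sketched here purely for illustration (hypothetical names and threshold, not the code hidden by the truncation), counts its iterations and reports a suspected deadlock instead of hanging silently:

    #include <stdio.h>

    #define DEMO_DEADLOCK_THRESHOLD 100000000UL

    static void demo_debug_spin_lock(volatile int *lock, const char *name)
    {
        unsigned long spins = 0;

        while (__sync_lock_test_and_set(lock, 1)) {
            if (++spins > DEMO_DEADLOCK_THRESHOLD) {
                printf("possible deadlock on spinlock %s\n", name);
                spins = 0;   /* keep spinning, keep reporting */
            }
        }
    }

    int main(void)
    {
        volatile int lock = 0;
        demo_debug_spin_lock(&lock, "demo");
        puts("lock acquired");
        return 0;
    }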