Changeset 39cea6a in mainline
- Timestamp:
- 2006-04-13T17:38:03Z
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- e185136
- Parents:
- 897ad60
- Files:
- 17 edited
arch/amd64/include/pm.h
 #define DPL_USER (PL_USER<<5)
 
-#define IO_MAP_BASE (104)
+#define TSS_BASIC_SIZE 104
 
 #ifndef __ASM__
…
     unsigned base_24_31: 8;
 } __attribute__ ((packed));
+typedef struct descriptor descriptor_t;
 
 struct tss_descriptor {
…
     unsigned base_16_23: 8;
     unsigned type: 4;
-    unsigned      : 1;
+    unsigned : 1;
     unsigned dpl : 2;
     unsigned present : 1;
…
     unsigned : 32;
 } __attribute__ ((packed));
+typedef struct tss_descriptor tss_descriptor_t;
 
 struct idescriptor {
…
     unsigned : 32;
 } __attribute__ ((packed));
+typedef struct idescriptor idescriptor_t;
 
 struct ptr_16_64 {
…
     __u64 base;
 } __attribute__ ((packed));
+typedef struct ptr_16_64 ptr_16_64_t;
 
 struct ptr_16_32 {
…
     __u32 base;
 } __attribute__ ((packed));
+typedef struct ptr_16_32 ptr_16_32_t;
 
 struct tss {
…
     __u8 iomap[0x10000 + 1];    /* 64K + 1 terminating byte */
 } __attribute__ ((packed));
+typedef struct tss tss_t;
 
-extern struct tss *tss_p;
+extern tss_t *tss_p;
 
-extern struct descriptor gdt[];
-extern struct idescriptor idt[];
+extern descriptor_t gdt[];
+extern idescriptor_t idt[];
 
-extern struct ptr_16_64 gdtr;
-extern struct ptr_16_32 bootstrap_gdtr;
-extern struct ptr_16_32 protected_ap_gdtr;
+extern ptr_16_64_t gdtr;
+extern ptr_16_32_t bootstrap_gdtr;
+extern ptr_16_32_t protected_ap_gdtr;
 
 extern void pm_init(void);
 
-extern void gdt_tss_setbase(struct descriptor *d, __address base);
-extern void gdt_tss_setlimit(struct descriptor *d, __u32 limit);
+extern void gdt_tss_setbase(descriptor_t *d, __address base);
+extern void gdt_tss_setlimit(descriptor_t *d, __u32 limit);
 
 extern void idt_init(void);
-extern void idt_setoffset(struct idescriptor *d, __address offset);
+extern void idt_setoffset(idescriptor_t *d, __address offset);
 
-extern void tss_initialize(struct tss *t);
+extern void tss_initialize(tss_t *t);
 
 #endif /* __ASM__ */
arch/amd64/src/cpu/cpu.c
 {
     CPU->arch.tss = tss_p;
-    CPU->fpu_owner=NULL;
+    CPU->arch.tss->iomap_base = &CPU->arch.tss->iomap[0] - ((__u8 *) CPU->arch.tss);
+    CPU->fpu_owner = NULL;
 }
 
-
 void cpu_identify(void)
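The new assignment stores the byte offset of the iomap array within the TSS; the processor reads this 16-bit field (at byte 102 of the TSS) to locate the I/O Permission Bitmap. A minimal sketch of the same computation, using a simplified stand-in for the kernel's tss_t (the field names and grouping here are illustrative, not the kernel's exact declaration):

    #include <stddef.h>
    
    /* Simplified stand-in for tss_t: 102 bytes of fixed fields, the
     * 16-bit I/O map base field, then the bitmap at offset 104. */
    typedef struct tss {
        unsigned char fixed[102];
        unsigned short iomap_base;          /* read by the CPU at offset 102 */
        unsigned char iomap[0x10000 + 1];   /* 64K + 1 terminating byte, as in pm.h */
    } __attribute__ ((packed)) tss_t;
    
    void tss_iomap_base_init(tss_t *t)
    {
        /* The pointer difference is the offset of iomap inside the TSS,
         * i.e. offsetof(tss_t, iomap) == TSS_BASIC_SIZE == 104. */
        t->iomap_base = (unsigned short) (&t->iomap[0] - (unsigned char *) t);
    }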
arch/amd64/src/pm.c
  */
 
-struct descriptor gdt[GDT_ITEMS] = {
+descriptor_t gdt[GDT_ITEMS] = {
     /* NULL descriptor */
     { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
…
 };
 
-struct idescriptor idt[IDT_ITEMS];
-
-struct ptr_16_64 gdtr = {.limit = sizeof(gdt), .base= (__u64) gdt };
-struct ptr_16_64 idtr = {.limit = sizeof(idt), .base= (__u64) idt };
-
-static struct tss tss;
-struct tss *tss_p = NULL;
-
-void gdt_tss_setbase(struct descriptor *d, __address base)
-{
-    struct tss_descriptor *td = (struct tss_descriptor *) d;
+idescriptor_t idt[IDT_ITEMS];
+
+ptr_16_64_t gdtr = {.limit = sizeof(gdt), .base= (__u64) gdt };
+ptr_16_64_t idtr = {.limit = sizeof(idt), .base= (__u64) idt };
+
+static tss_t tss;
+tss_t *tss_p = NULL;
+
+void gdt_tss_setbase(descriptor_t *d, __address base)
+{
+    tss_descriptor_t *td = (tss_descriptor_t *) d;
 
     td->base_0_15 = base & 0xffff;
…
 }
 
-void gdt_tss_setlimit(struct descriptor *d, __u32 limit)
-{
-    struct tss_descriptor *td = (struct tss_descriptor *) d;
+void gdt_tss_setlimit(descriptor_t *d, __u32 limit)
+{
+    struct tss_descriptor *td = (tss_descriptor_t *) d;
 
     td->limit_0_15 = limit & 0xffff;
…
 }
 
-void idt_setoffset(struct idescriptor *d, __address offset)
+void idt_setoffset(idescriptor_t *d, __address offset)
 {
     /*
…
 }
 
-void tss_initialize(struct tss *t)
-{
-    memsetb((__address) t, sizeof(struct tss), 0);
+void tss_initialize(tss_t *t)
+{
+    memsetb((__address) t, sizeof(tss_t), 0);
 }
…
 void idt_init(void)
 {
-    struct idescriptor *d;
+    idescriptor_t *d;
     int i;
 
…
 void pm_init(void)
 {
-    struct descriptor *gdt_p = (struct descriptor *) gdtr.base;
-    struct tss_descriptor *tss_desc;
+    descriptor_t *gdt_p = (struct descriptor *) gdtr.base;
+    tss_descriptor_t *tss_desc;
 
     /*
…
     }
     else {
-        tss_p = (struct tss *) malloc(sizeof(struct tss),FRAME_ATOMIC);
+        tss_p = (struct tss *) malloc(sizeof(tss_t), FRAME_ATOMIC);
         if (!tss_p)
             panic("could not allocate TSS\n");
…
 
     tss_initialize(tss_p);
 
-    tss_desc = (struct tss_descriptor *) (&gdt_p[TSS_DES]);
+    tss_desc = (tss_descriptor_t *) (&gdt_p[TSS_DES]);
     tss_desc->present = 1;
     tss_desc->type = AR_TSS;
…
 
     gdt_tss_setbase(&gdt_p[TSS_DES], (__address) tss_p);
-    gdt_tss_setlimit(&gdt_p[TSS_DES], sizeof(struct tss) - 1);
+    gdt_tss_setlimit(&gdt_p[TSS_DES], sizeof(tss_t) - 1);
 
     gdtr_load(&gdtr);
arch/amd64/src/proc/scheduler.c
 #include <proc/scheduler.h>
 #include <cpu.h>
+#include <proc/task.h>
 #include <proc/thread.h>
 #include <arch.h>
…
 #include <arch/debugger.h>
 #include <print.h>
+#include <arch/pm.h>
 
+/** Perform amd64 specific tasks needed before the new task is run. */
+void before_task_runs_arch(void)
+{
+}
+
+/** Perform amd64 specific tasks needed before the new thread is scheduled. */
 void before_thread_runs_arch(void)
 {
+    size_t iomap_size;
+    ptr_16_64_t cpugdtr;
+    descriptor_t *gdt_p;
+
     CPU->arch.tss->rsp0 = (__address) &THREAD->kstack[THREAD_STACK_SIZE-SP_DELTA];
 
…
      * hidden part of gs */
     swapgs();
-    write_msr(AMD_MSR_GS,
-        (__u64)&THREAD->kstack);
+    write_msr(AMD_MSR_GS, (__u64)&THREAD->kstack);
     swapgs();
 
     /* TLS support - set FS to thread local storage */
     write_msr(AMD_MSR_FS, THREAD->arch.tls);
+
+    /*
+     * Switch the I/O Permission Bitmap, if necessary.
+     *
+     * First, copy the I/O Permission Bitmap.
+     * This needs to be changed so that the
+     * copying is avoided if the same task
+     * was already running and the iomap did
+     * not change.
+     */
+    spinlock_lock(&TASK->lock);
+    iomap_size = TASK->arch.iomap_size;
+    if (iomap_size) {
+        ASSERT(TASK->arch.iomap);
+        memcpy(CPU->arch.tss->iomap, TASK->arch.iomap, iomap_size);
+        CPU->arch.tss->iomap[iomap_size] = 0xff;    /* terminating byte */
+    }
+    spinlock_unlock(&TASK->lock);
+
+    /* Second, adjust TSS segment limit. */
+    gdtr_store(&cpugdtr);
+    gdt_p = (descriptor_t *) cpugdtr.base;
+    gdt_tss_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE + iomap_size - 1);
+    gdtr_load(&cpugdtr);
 
 #ifdef CONFIG_DEBUG_AS_WATCHPOINT
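For context (standard x86/amd64 behavior, not code from this changeset): each bit in the I/O Permission Bitmap governs one port, a cleared bit grants access at any privilege level, and the processor requires a terminating byte of all ones after the bitmap, which is why the copy above appends 0xff. A hypothetical helper that builds a task's iomap granting one port range might look like this:

    #include <string.h>
    
    /* Hypothetical helper, not part of this changeset: mark ports
     * [port, port + count) as accessible in a task's iomap. A cleared
     * bit means the port may be used; every other port stays denied. */
    void iomap_grant_range(unsigned char *iomap, size_t iomap_size,
                           unsigned port, unsigned count)
    {
        unsigned i;
    
        memset(iomap, 0xff, iomap_size);         /* deny all ports by default */
        for (i = port; i < port + count; i++)
            iomap[i / 8] &= ~(1 << (i % 8));     /* clear bit = allow */
    }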
arch/ia32/include/asm.h
  * @param gdtr_reg Address of memory from where to load GDTR.
  */
-static inline void gdtr_load(struct ptr_16_32 *gdtr_reg)
+static inline void gdtr_load(ptr_16_32_t *gdtr_reg)
 {
     __asm__ volatile ("lgdt %0\n" : : "m" (*gdtr_reg));
…
  * @param gdtr_reg Address of memory to where to load GDTR.
  */
-static inline void gdtr_store(struct ptr_16_32 *gdtr_reg)
+static inline void gdtr_store(ptr_16_32_t *gdtr_reg)
 {
     __asm__ volatile ("sgdt %0\n" : : "m" (*gdtr_reg));
…
  * @param idtr_reg Address of memory from where to load IDTR.
  */
-static inline void idtr_load(struct ptr_16_32 *idtr_reg)
+static inline void idtr_load(ptr_16_32_t *idtr_reg)
 {
     __asm__ volatile ("lidt %0\n" : : "m" (*idtr_reg));
arch/ia32/include/pm.h
 #define DPL_USER (PL_USER<<5)
 
-#define IO_MAP_BASE (104)
+#define TSS_BASIC_SIZE 104
 
 #ifndef __ASM__
…
     __u32 base;
 } __attribute__ ((packed));
+typedef struct ptr_16_32 ptr_16_32_t;
 
 struct descriptor {
…
     unsigned base_24_31: 8;
 } __attribute__ ((packed));
+typedef struct descriptor descriptor_t;
 
 struct idescriptor {
…
     unsigned offset_16_31: 16;
 } __attribute__ ((packed));
+typedef struct idescriptor idescriptor_t;
 
 struct tss {
…
     __u8 iomap[0x10000+1];    /* 64K + 1 terminating byte */
 } __attribute__ ((packed));
+typedef struct tss tss_t;
 
-extern struct ptr_16_32 gdtr;
-extern struct ptr_16_32 bootstrap_gdtr;
-extern struct ptr_16_32 protected_ap_gdtr;
+extern ptr_16_32_t gdtr;
+extern ptr_16_32_t bootstrap_gdtr;
+extern ptr_16_32_t protected_ap_gdtr;
 extern struct tss *tss_p;
 
-extern struct descriptor gdt[];
+extern descriptor_t gdt[];
 
 extern void pm_init(void);
 
-extern void gdt_setbase(struct descriptor *d, __address base);
-extern void gdt_setlimit(struct descriptor *d, __u32 limit);
+extern void gdt_setbase(descriptor_t *d, __address base);
+extern void gdt_setlimit(descriptor_t *d, __u32 limit);
 
 extern void idt_init(void);
-extern void idt_setoffset(struct idescriptor *d, __address offset);
+extern void idt_setoffset(idescriptor_t *d, __address offset);
 
-extern void tss_initialize(struct tss *t);
+extern void tss_initialize(tss_t *t);
 extern void set_tls_desc(__address tls);
arch/ia32/src/cpu/cpu.c
 }
 
-
-
-
 void cpu_arch_init(void)
 {
-    __u32 help=0;
+    cpuid_feature_info fi;
+    cpuid_extended_feature_info efi;
+    cpu_info_t info;
+    __u32 help = 0;
 
     CPU->arch.tss = tss_p;
-    CPU->fpu_owner=NULL;
+    CPU->arch.tss->iomap_base = &CPU->arch.tss->iomap[0] - ((__u8 *) CPU->arch.tss);
 
-    cpuid_feature_info fi;
-    cpuid_extended_feature_info efi;
+    CPU->fpu_owner = NULL;
 
-    cpu_info_t info;
     cpuid(1, &info);
 
-    fi.word =info.cpuid_edx;
-    efi.word =info.cpuid_ecx;
+    fi.word = info.cpuid_edx;
+    efi.word = info.cpuid_ecx;
 
-    if(fi.bits.fxsr) fpu_fxsr();
-    else fpu_fsr();
+    if (fi.bits.fxsr)
+        fpu_fxsr();
+    else
+        fpu_fsr();
 
-    if(fi.bits.sse) asm volatile (
-            "mov %%cr4,%0;\n"
-            "or %1,%0;\n"
-            "mov %0,%%cr4;\n"
-            :"+r"(help)
-            :"i"(CR4_OSFXSR_MASK|(1<<10))
-            );
-
+    if (fi.bits.sse) {
+        asm volatile (
+            "mov %%cr4,%0\n"
+            "or %1,%0\n"
+            "mov %0,%%cr4\n"
+            : "+r" (help)
+            : "i" (CR4_OSFXSR_MASK|(1<<10))
+        );
+    }
 }
 
-
 void cpu_identify(void)
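A note on the constants in the SSE branch, as a reference comment (the exact value of CR4_OSFXSR_MASK is not shown in this diff and is assumed to be the conventional bit 9):

    /* Illustrative breakdown of the CR4 bits set above:
     *
     *   bit 9  (OSFXSR)     - OS uses FXSAVE/FXRSTOR; required before
     *                         SSE instructions may execute
     *   bit 10 (OSXMMEXCPT) - OS handles #XM, the unmasked SIMD
     *                         floating-point exception
     */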
arch/ia32/src/pm.c
  * structure in it's base.
  */
-struct descriptor gdt[GDT_ITEMS] = {
+descriptor_t gdt[GDT_ITEMS] = {
     /* NULL descriptor */
     { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
…
 };
 
-static struct idescriptor idt[IDT_ITEMS];
-
-static struct tss tss;
-
-struct tss *tss_p = NULL;
+static idescriptor_t idt[IDT_ITEMS];
+
+static tss_t tss;
+
+tss_t *tss_p = NULL;
 
 /* gdtr is changed by kmp before next CPU is initialized */
-struct ptr_16_32 bootstrap_gdtr = { .limit = sizeof(gdt), .base = KA2PA((__address) gdt) };
-struct ptr_16_32 gdtr = { .limit = sizeof(gdt), .base = (__address) gdt };
-
-void gdt_setbase(struct descriptor *d, __address base)
+ptr_16_32_t bootstrap_gdtr = { .limit = sizeof(gdt), .base = KA2PA((__address) gdt) };
+ptr_16_32_t gdtr = { .limit = sizeof(gdt), .base = (__address) gdt };
+
+void gdt_setbase(descriptor_t *d, __address base)
 {
     d->base_0_15 = base & 0xffff;
…
 }
 
-void gdt_setlimit(struct descriptor *d, __u32 limit)
+void gdt_setlimit(descriptor_t *d, __u32 limit)
 {
     d->limit_0_15 = limit & 0xffff;
…
 }
 
-void idt_setoffset(struct idescriptor *d, __address offset)
+void idt_setoffset(idescriptor_t *d, __address offset)
 {
     /*
…
 }
 
-void tss_initialize(struct tss *t)
+void tss_initialize(tss_t *t)
 {
     memsetb((__address) t, sizeof(struct tss), 0);
…
 void idt_init(void)
 {
-    struct idescriptor *d;
+    idescriptor_t *d;
     int i;
 
…
 static void clean_IOPL_NT_flags(void)
 {
-    asm
-    (
-        "pushfl;"
-        "pop %%eax;"
-        "and $0xffff8fff,%%eax;"
-        "push %%eax;"
-        "popfl;"
-        :
-        :
-        :"%eax"
+    __asm__ volatile (
+        "pushfl\n"
+        "pop %%eax\n"
+        "and $0xffff8fff, %%eax\n"
+        "push %%eax\n"
+        "popfl\n"
+        : : : "eax"
     );
 }
…
 static void clean_AM_flag(void)
 {
-    asm
-    (
-        "mov %%cr0,%%eax;"
-        "and $0xFFFBFFFF,%%eax;"
-        "mov %%eax,%%cr0;"
-        :
-        :
-        :"%eax"
+    __asm__ volatile (
+        "mov %%cr0, %%eax\n"
+        "and $0xfffbffff, %%eax\n"
+        "mov %%eax, %%cr0\n"
+        : : : "eax"
     );
 }
…
 void pm_init(void)
 {
-    struct descriptor *gdt_p = (struct descriptor *) gdtr.base;
-    struct ptr_16_32 idtr;
+    descriptor_t *gdt_p = (descriptor_t *) gdtr.base;
+    ptr_16_32_t idtr;
 
     /*
…
     }
     else {
-        tss_p = (struct tss *) malloc(sizeof(struct tss),FRAME_ATOMIC);
+        tss_p = (tss_t *) malloc(sizeof(tss_t), FRAME_ATOMIC);
         if (!tss_p)
             panic("could not allocate TSS\n");
…
 
     gdt_setbase(&gdt_p[TSS_DES], (__address) tss_p);
-    gdt_setlimit(&gdt_p[TSS_DES], sizeof(struct tss) - 1);
+    gdt_setlimit(&gdt_p[TSS_DES], sizeof(tss_t) - 1);
 
     /*
…
 void set_tls_desc(__address tls)
 {
-    struct ptr_16_32 cpugdtr;
-    struct descriptor *gdt_p = (struct descriptor *) cpugdtr.base;
+    ptr_16_32_t cpugdtr;
+    descriptor_t *gdt_p = (descriptor_t *) cpugdtr.base;
 
     gdtr_store(&cpugdtr);
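The two magic masks in the rewritten asm blocks decode as follows (standard EFLAGS and CR0 bit positions):

    /* Worked out, for reference:
     *
     *   EFLAGS: IOPL is bits 12-13, NT is bit 14
     *       ~((3 << 12) | (1 << 14)) = ~0x00007000 = 0xffff8fff
     *
     *   CR0: AM (Alignment Mask) is bit 18
     *       ~(1 << 18) = 0xfffbffff
     */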
arch/ia32/src/proc/scheduler.c
 #include <proc/scheduler.h>
 #include <cpu.h>
+#include <proc/task.h>
 #include <proc/thread.h>
 #include <arch.h>
…
 #include <arch/debugger.h>
 #include <arch/pm.h>
+#include <arch/asm.h>
 
+/** Perform ia32 specific tasks needed before the new task is run. */
+void before_task_runs_arch(void)
+{
+}
+
+/** Perform ia32 specific tasks needed before the new thread is scheduled.
+ *
+ * THREAD is locked and interrupts are disabled.
+ */
 void before_thread_runs_arch(void)
 {
+    size_t iomap_size;
+    ptr_16_32_t cpugdtr;
+    descriptor_t *gdt_p;
+
     CPU->arch.tss->esp0 = (__address) &THREAD->kstack[THREAD_STACK_SIZE-SP_DELTA];
     CPU->arch.tss->ss0 = selector(KDATA_DES);
 
     /* Set up TLS in GS register */
     set_tls_desc(THREAD->arch.tls);
+
+    /*
+     * Switch the I/O Permission Bitmap, if necessary.
+     *
+     * First, copy the I/O Permission Bitmap.
+     * This needs to be changed so that the
+     * copying is avoided if the same task
+     * was already running and the iomap did
+     * not change.
+     */
+    spinlock_lock(&TASK->lock);
+    iomap_size = TASK->arch.iomap_size;
+    if (iomap_size) {
+        ASSERT(TASK->arch.iomap);
+        memcpy(CPU->arch.tss->iomap, TASK->arch.iomap, iomap_size);
+        CPU->arch.tss->iomap[iomap_size] = 0xff;    /* terminating byte */
+    }
+    spinlock_unlock(&TASK->lock);
+
+    /* Second, adjust TSS segment limit. */
+    gdtr_store(&cpugdtr);
+    gdt_p = (descriptor_t *) cpugdtr.base;
+    gdt_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE + iomap_size - 1);
+    gdtr_load(&cpugdtr);
 
 #ifdef CONFIG_DEBUG_AS_WATCHPOINT
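The limit arithmetic, shared by the ia32 and amd64 paths, is worth spelling out. Segment limits are inclusive, so a task with no iomap gets limit TSS_BASIC_SIZE - 1 = 103, covering exactly the 104-byte fixed part of the TSS; the bitmap then lies entirely beyond the limit and every user-level port access faults. A worked example with illustrative numbers:

    /* A task granted ports 0x000 .. 0x3ff (1024 ports):
     *
     *   iomap_size = 1024 / 8 = 128 bytes
     *   TSS limit  = TSS_BASIC_SIZE + 128 - 1 = 104 + 127 = 231
     *
     * Ports >= 0x400 fall past the segment limit and always fault,
     * regardless of the bitmap contents.
     */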
arch/ia64/src/proc/scheduler.c
 #include <align.h>
 
+/** Perform ia64 specific tasks needed before the new task is run. */
+void before_task_runs_arch(void)
+{
+}
+
 /** Prepare kernel stack pointers in bank 0 r22 and r23 and make sure the stack is mapped in DTR. */
 void before_thread_runs_arch(void)
arch/mips32/src/mips32.c
 }
 
+/** Perform mips32 specific tasks needed before the new task is run. */
+void before_task_runs_arch(void)
+{
+}
+
+/** Perform mips32 specific tasks needed before the new thread is scheduled. */
 void before_thread_runs_arch(void)
 {
arch/ppc32/src/proc/scheduler.c
 __address supervisor_sp_physical;
 
+/** Perform ppc32 specific tasks needed before the new task is run. */
+void before_task_runs_arch(void)
+{
+}
+
+/** Perform ppc32 specific tasks needed before the new thread is scheduled. */
 void before_thread_runs_arch(void)
 {
arch/ppc64/src/proc/scheduler.c
 __address supervisor_sp_physical;
 
+/** Perform ppc64 specific tasks needed before the new task is run. */
+void before_task_runs_arch(void)
+{
+}
+
+/** Perform ppc64 specific tasks needed before the new thread is scheduled. */
 void before_thread_runs_arch(void)
 {
arch/sparc64/src/proc/scheduler.c
 #include <align.h>
 
+/** Perform sparc64 specific tasks needed before the new task is run. */
+void before_task_runs_arch(void)
+{
+}
+
 /** Ensure that thread's kernel stack is locked in TLB. */
 void before_thread_runs_arch(void)
generic/include/proc/scheduler.h
 extern void kcpulb(void *arg);
 
-extern void before_thread_runs(void);
-extern void after_thread_ran(void);
-
 extern void sched_print_list(void);
…
  * To be defined by architectures:
  */
+extern void before_task_runs_arch(void);
 extern void before_thread_runs_arch(void);
 extern void after_thread_ran_arch(void);
generic/include/proc/task.h
     atomic_t active_calls;    /**< Active asynchronous messages */
 
-    task_arch_t arch;
+    task_arch_t arch;    /**< Architecture specific task data. */
 };
generic/src/proc/scheduler.c
 #include <debug.h>
 
+static void before_task_runs(void);
+static void before_thread_runs(void);
+static void after_thread_ran(void);
 static void scheduler_separated_stack(void);
 
 atomic_t nrdy;    /**< Number of ready threads in the system. */
+
+/** Carry out actions before new task runs. */
+void before_task_runs(void)
+{
+    before_task_runs_arch();
+}
 
 /** Take actions before new thread runs.
…
         }
         TASK = THREAD->task;
+        before_task_runs();
     }
 