Changeset a3eeceb6 in mainline
- Timestamp: 2006-02-09T23:29:57Z (19 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: d0a0f12
- Parents: 8f00329
- Files: 9 edited
arch/amd64/src/fpu_context.c (r8f00329 → ra3eeceb6)

     }
 
-    void fpu_init(void)
+    void fpu_init(fpu_context_t *fctx)
     {
         /* TODO: Zero all SSE, MMX etc. registers */
arch/ia32/src/fpu_context.c (r8f00329 → ra3eeceb6)

     }
 
-    void fpu_init(void)
+    void fpu_init(fpu_context_t *fctx)
     {
         __asm__ volatile (
arch/mips32/src/fpu_context.c (r8f00329 → ra3eeceb6)

     }
 
-    void fpu_init(void)
+    void fpu_init(fpu_context_t *fctx)
     {
         /* TODO: Zero all registers */
generic/include/fpu_context.h (r8f00329 → ra3eeceb6)

     extern void fpu_context_save(fpu_context_t *);
     extern void fpu_context_restore(fpu_context_t *);
-    extern void fpu_init(void);
+    extern void fpu_init(fpu_context_t *);
     extern void fpu_enable(void);
     extern void fpu_disable(void);
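
Taken together, the four hunks above change fpu_init() from a parameterless call into one that receives a pointer to the thread's saved FPU context, so the caller decides which save area is handed to the architecture-specific initialization on a thread's first FPU use. The call pattern on the scheduler side (copied from the before_thread_runs() hunk further down; THREAD, saved_fpu_context and fpu_context_exists are the kernel's existing names) looks like this:

    /* First FPU use by this thread: there is nothing to restore yet,
     * so hand the thread's own save area to fpu_init() and mark the
     * context as existing. */
    if (THREAD->fpu_context_exists)
        fpu_context_restore(&THREAD->saved_fpu_context);
    else {
        fpu_init(&THREAD->saved_fpu_context);
        THREAD->fpu_context_exists = 1;
    }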
generic/include/mm/frame.h (r8f00329 → ra3eeceb6)

     __address e1 = s1+sz1;
     __address e2 = s2+sz2;
-    if (s1 >= s2 && s1 < e2)
-        return 1;
-    if (e1 >= s2 && e1 < e2)
-        return 1;
-    if ((s1 < s2) && (e1 >= e2))
-        return 1;
-    return 0;
+
+    return s1 < e2 && s2 < e1;
 }
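
The simplification above is the standard overlap test for half-open address ranges: [s1, e1) and [s2, e2) intersect exactly when each range starts before the other one ends. Unlike the old three-case test, it also reports no overlap when the first region merely ends where the second begins (e1 == s2). A stand-alone sketch of the same predicate, using uintptr_t in place of the kernel's __address type, with a few illustrative checks:

    #include <assert.h>
    #include <stdint.h>

    /* Hypothetical user-space copy of the new overlap test. */
    static int overlaps(uintptr_t s1, uintptr_t sz1, uintptr_t s2, uintptr_t sz2)
    {
        uintptr_t e1 = s1 + sz1;
        uintptr_t e2 = s2 + sz2;

        return s1 < e2 && s2 < e1;
    }

    int main(void)
    {
        assert(overlaps(0, 16, 8, 16));    /* partial overlap */
        assert(overlaps(0, 32, 8, 8));     /* containment */
        assert(!overlaps(0, 16, 16, 16));  /* adjacent regions do not overlap */
        assert(!overlaps(0, 8, 32, 8));    /* disjoint */
        return 0;
    }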
generic/src/proc/scheduler.c (r8f00329 → ra3eeceb6)

      * tread is passed control.
      *
+     * THREAD->lock is locked on entry
+     *
      */
     void before_thread_runs(void)
…
             fpu_context_restore(&(THREAD->saved_fpu_context));
         else {
-            fpu_init();
+            fpu_init(&(THREAD->saved_fpu_context));
             THREAD->fpu_context_exists=1;
         }
…
     {
         fpu_enable();
+        spinlock_lock(&CPU->lock);
+
+        /* Save old context */
         if (CPU->fpu_owner != NULL) {
+            spinlock_lock(&CPU->fpu_owner->lock);
             fpu_context_save(&CPU->fpu_owner->saved_fpu_context);
             /* don't prevent migration */
             CPU->fpu_owner->fpu_context_engaged=0;
-        }
+            spinlock_unlock(&CPU->fpu_owner->lock);
+        }
+
+        spinlock_lock(&THREAD->lock);
         if (THREAD->fpu_context_exists)
             fpu_context_restore(&THREAD->saved_fpu_context);
         else {
-            fpu_init();
+            fpu_init(&(THREAD->saved_fpu_context));
             THREAD->fpu_context_exists=1;
         }
         CPU->fpu_owner=THREAD;
         THREAD->fpu_context_engaged = 1;
+
+        spinlock_unlock(&THREAD->lock);
+        spinlock_unlock(&CPU->lock);
     }
     #endif
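
The scheduler changes above make the lazy FPU switch explicit about locking: CPU->lock guards CPU->fpu_owner for the whole operation, the previous owner's thread lock is held only while its context is being saved, and the incoming thread's lock is held while its context is restored or freshly initialized (before_thread_runs() now documents that THREAD->lock is already held on entry). A condensed sketch of the resulting lock ordering, using the names from the diff; the enclosing function name is assumed, since it is not visible in this hunk:

    /* Assumed to be the scheduler's lazy FPU request path; the name is
     * not shown in the hunk above. */
    void scheduler_fpu_lazy_request(void)
    {
        fpu_enable();
        spinlock_lock(&CPU->lock);                   /* protects CPU->fpu_owner */

        if (CPU->fpu_owner != NULL) {
            /* Lock the previous owner only long enough to save its state. */
            spinlock_lock(&CPU->fpu_owner->lock);
            fpu_context_save(&CPU->fpu_owner->saved_fpu_context);
            CPU->fpu_owner->fpu_context_engaged = 0; /* don't prevent migration */
            spinlock_unlock(&CPU->fpu_owner->lock);
        }

        /* Lock the incoming thread while its context is restored or created. */
        spinlock_lock(&THREAD->lock);
        if (THREAD->fpu_context_exists)
            fpu_context_restore(&THREAD->saved_fpu_context);
        else {
            fpu_init(&THREAD->saved_fpu_context);
            THREAD->fpu_context_exists = 1;
        }
        CPU->fpu_owner = THREAD;
        THREAD->fpu_context_engaged = 1;
        spinlock_unlock(&THREAD->lock);

        spinlock_unlock(&CPU->lock);                 /* released last, reverse order */
    }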
test/fpu/fpu1/test.c (r8f00329 → ra3eeceb6)

     waitq_wakeup(&can_start, WAKEUP_ALL);
 
-    while (threads_ok != THREADS)
+    while (atomic_get(&threads_ok) != THREADS)
         ;
test/fpu/mips1/test.c (r8f00329 → ra3eeceb6)

     {
         int i;
-        volatile long long j;
-        double e,d,le,f;
         int arg __attribute__((aligned(16))) = (int)((__native) data);
         int after_arg __attribute__((aligned(16)));
…
     {
         int i;
-        volatile long long j;
-        double e,d,le,f;
         int arg __attribute__((aligned(16))) = (int)((__native) data);
         int after_arg __attribute__((aligned(16)));
…
     waitq_wakeup(&can_start, WAKEUP_ALL);
 
-    while (threads_ok != THREADS)
+    while (atomic_get(&threads_ok) != THREADS)
         ;
test/fpu/sse1/test.c (r8f00329 → ra3eeceb6)

     {
         int i;
-        volatile long long j;
-        double e,d,le,f;
         int arg __attribute__((aligned(16))) = (int)((__native) data);
         int after_arg __attribute__((aligned(16)));
…
     {
         int i;
-        volatile long long j;
-        double e,d,le,f;
         int arg __attribute__((aligned(16))) = (int)((__native) data);
         int after_arg __attribute__((aligned(16)));
…
     waitq_wakeup(&can_start, WAKEUP_ALL);
 
-    while (threads_ok != THREADS)
+    while (atomic_get(&threads_ok) != THREADS)
         ;
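
All three FPU tests now read the completion counter through atomic_get() instead of a plain variable access, so each pass of the busy-wait loop performs a fresh atomic load rather than a read the compiler is free to hoist out of the loop; the unused locals dropped from the thread bodies are a cleanup of the same tests. A minimal sketch of the pairing this implies, assuming the workers bump the counter with atomic_inc() (the worker-side increment is not part of these hunks):

    static atomic_t threads_ok;                 /* shared completion counter */

    static void worker(void *data)
    {
        /* ... exercise the FPU/SSE registers ... */
        atomic_inc(&threads_ok);                /* assumed worker-side signal */
    }

    static void wait_for_workers(void)
    {
        waitq_wakeup(&can_start, WAKEUP_ALL);   /* release the workers */

        /* Spin until every worker has incremented the counter. */
        while (atomic_get(&threads_ok) != THREADS)
            ;
    }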