Changeset 78de83de in mainline
- Timestamp:
- 2018-09-07T15:41:29Z (6 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 077842c
- Parents:
- 508b0df1
- Location:
- kernel/generic
- Files:
-
- 1 added
- 5 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/generic/include/adt/cht.h
kernel/generic/include/adt/cht.h: r508b0df1 → r78de83de

  36  36      #define KERN_CONC_HASH_TABLE_H_
  37  37
      38  +   #include <atomic.h>
  38  39      #include <stdint.h>
  39  40      #include <adt/list.h>
kernel/generic/include/mm/as.h
kernel/generic/include/mm/as.h: r508b0df1 → r78de83de

  48  48      #include <lib/elf.h>
  49  49      #include <arch.h>
      50  +   #include <lib/refcount.h>
  50  51
  51  52      #define AS THE->as
    … …
 111 112
 112 113      /** Number of references (i.e. tasks that reference this as). */
 113      -       atomic_t refcount;
     114  +       atomic_refcount_t refcount;
 114 115
 115 116          mutex_t lock;
kernel/generic/include/synch/spinlock.h
kernel/generic/include/synch/spinlock.h: r508b0df1 → r78de83de

  36  36      #define KERN_SPINLOCK_H_
  37  37
      38  +   #include <assert.h>
      39  +   #include <stdatomic.h>
  38  40      #include <stdbool.h>
  39      -   #include <barrier.h>
  40      -   #include <assert.h>
  41  41      #include <preemption.h>
  42      -   #include <atomic.h>
  43  42      #include <arch/asm.h>
    … …
  46  45
  47  46      typedef struct spinlock {
  48      -       atomic_t val;
      47  +       atomic_flag flag;
  49  48
  50  49      #ifdef CONFIG_DEBUG_SPINLOCK
    … …
  70  69          spinlock_t lock_name = { \
  71  70              .name = desc_name, \
  72      -           .val = { 0 } \
      71  +           .flag = ATOMIC_FLAG_INIT \
  73  72          }
  74  73
    … …
  76  75          static spinlock_t lock_name = { \
  77  76              .name = desc_name, \
  78      -           .val = { 0 } \
      77  +           .flag = ATOMIC_FLAG_INIT \
  79  78          }
  80  79
    … …
  89  88      #define SPINLOCK_INITIALIZE_NAME(lock_name, desc_name) \
  90  89          spinlock_t lock_name = { \
  91      -           .val = { 0 } \
      90  +           .flag = ATOMIC_FLAG_INIT \
  92  91          }
  93  92
  94  93      #define SPINLOCK_STATIC_INITIALIZE_NAME(lock_name, desc_name) \
  95  94          static spinlock_t lock_name = { \
  96      -           .val = { 0 } \
      95  +           .flag = ATOMIC_FLAG_INIT \
  97  96          }
  98  97
    … …
 126 125      NO_TRACE static inline void spinlock_unlock_nondebug(spinlock_t *lock)
 127 126      {
 128      -       /*
 129      -        * Prevent critical section code from bleeding out this way down.
 130      -        */
 131      -       CS_LEAVE_BARRIER();
 132      -
 133      -       atomic_set(&lock->val, 0);
     127  +       atomic_flag_clear_explicit(&lock->flag, memory_order_release);
 134 128          preemption_enable();
 135 129      }
    … …
 215 209          .lock = { \
 216 210              .name = desc_name, \
 217      -           .val = { 0 } \
     211  +           .flag = ATOMIC_FLAG_INIT \
 218 212          }, \
 219 213          .guard = false, \
    … …
 225 219          .lock = { \
 226 220              .name = desc_name, \
 227      -           .val = { 0 } \
     221  +           .flag = ATOMIC_FLAG_INIT \
 228 222          }, \
 229 223          .guard = false, \
    … …
 236 230          irq_spinlock_t lock_name = { \
 237 231              .lock = { \
 238      -           .val = { 0 } \
     232  +           .flag = ATOMIC_FLAG_INIT \
 239 233              }, \
 240 234          .guard = false, \
    … …
 245 239          static irq_spinlock_t lock_name = { \
 246 240              .lock = { \
 247      -           .val = { 0 } \
     241  +           .flag = ATOMIC_FLAG_INIT \
 248 242              }, \
 249 243          .guard = false, \
kernel/generic/src/mm/as.c
kernel/generic/src/mm/as.c: r508b0df1 → r78de83de

 163 163          as->asid = ASID_INVALID;
 164 164
 165      -       atomic_set(&as->refcount, 0);
     165  +       refcount_init(&as->refcount);
 166 166          as->cpu_refcount = 0;
 167 167
    … …
 190 190
 191 191          assert(as != AS);
 192      -       assert(atomic_get(&as->refcount) == 0);
     192  +       assert(refcount_unique(&as->refcount));
 193 193
 194 194          /*
    … …
 267 267      NO_TRACE void as_hold(as_t *as)
 268 268      {
 269      -       atomic_inc(&as->refcount);
     269  +       refcount_up(&as->refcount);
 270 270      }
 271 271
    … …
 275 275       * destroys the address space.
 276 276       *
 277      -    * @param as Address space to be released.
     277  +    * @param as Address space to be released.     (whitespace-only change)
 278 278       *
 279 279       */
 280 280      NO_TRACE void as_release(as_t *as)
 281 281      {
 282      -       if (atomic_predec(&as->refcount) == 0)
     282  +       if (refcount_down(&as->refcount))
 283 283              as_destroy(as);
 284 284      }
kernel/generic/src/synch/spinlock.c
kernel/generic/src/synch/spinlock.c: r508b0df1 → r78de83de

  56  56      void spinlock_initialize(spinlock_t *lock, const char *name)
  57  57      {
  58      -       atomic_set(&lock->val, 0);
      58  +       atomic_flag_clear_explicit(&lock->flag, memory_order_relaxed);
  59  59      #ifdef CONFIG_DEBUG_SPINLOCK
  60  60          lock->name = name;
    … …
  79  79
  80  80          preemption_disable();
  81      -       while (test_and_set(&lock->val)) {
      81  +       while (atomic_flag_test_and_set_explicit(&lock->flag, memory_order_acquire)) {
  82  82              /*
  83  83               * We need to be careful about particular locks
    … …
 115 115          if (deadlock_reported)
 116 116              printf("cpu%u: not deadlocked\n", CPU->id);
 117      -
 118      -       /*
 119      -        * Prevent critical section code from bleeding out this way up.
 120      -        */
 121      -       CS_ENTER_BARRIER();
 122 117      }
 123 118
    … …
 132 127          ASSERT_SPINLOCK(spinlock_locked(lock), lock);
 133 128
 134      -       /*
 135      -        * Prevent critical section code from bleeding out this way down.
 136      -        */
 137      -       CS_LEAVE_BARRIER();
 138      -
 139      -       atomic_set(&lock->val, 0);
     129  +       atomic_flag_clear_explicit(&lock->flag, memory_order_release);
 140 130          preemption_enable();
 141 131      }
    … …
 156 146      {
 157 147          preemption_disable();
 158      -       bool ret = !test_and_set(&lock->val);
 159      -
 160      -       /*
 161      -        * Prevent critical section code from bleeding out this way up.
 162      -        */
 163      -       CS_ENTER_BARRIER();
     148  +       bool ret = !atomic_flag_test_and_set_explicit(&lock->flag, memory_order_acquire);
 164 149
 165 150          if (!ret)
    … …
 176 161      bool spinlock_locked(spinlock_t *lock)
 177 162      {
 178      -       return atomic_get(&lock->val) != 0;
     163  +       // XXX: Atomic flag doesn't support simple atomic read (by design),
     164  +       // so instead we test_and_set and then clear if necessary.
     165  +       // This function is only used inside assert, so we don't need
     166  +       // any preemption_disable/enable here.
     167  +
     168  +       bool ret = atomic_flag_test_and_set_explicit(&lock->flag, memory_order_relaxed);
     169  +       if (!ret)
     170  +           atomic_flag_clear_explicit(&lock->flag, memory_order_relaxed);
     171  +       return ret;
 179 172      }
 180 173
Note:
See TracChangeset
for help on using the changeset viewer.