Changes in kernel/arch/sparc64/include/barrier.h [7a0359b:723060a] in mainline
File: kernel/arch/sparc64/include/barrier.h (1 edited)
Legend:
- Unmodified (context lines, prefixed with a space)
- Added (prefixed with +)
- Removed (prefixed with -)
kernel/arch/sparc64/include/barrier.h
--- kernel/arch/sparc64/include/barrier.h    (r7a0359b)
+++ kernel/arch/sparc64/include/barrier.h    (r723060a)
@@ -27,5 +27,5 @@
  */
 
-/** @addtogroup sparc64
+/** @addtogroup sparc64
  * @{
  */
@@ -36,62 +36,32 @@
 #define KERN_sparc64_BARRIER_H_
 
-#include <trace.h>
-
-#ifdef KERNEL
-
-#include <typedefs.h>
-
-#else
-
-#include <stdint.h>
-
-#endif
-
 /*
  * Our critical section barriers are prepared for the weakest RMO memory model.
  */
-#define CS_ENTER_BARRIER() \
-    asm volatile ( \
-        "membar #LoadLoad | #LoadStore\n" \
-        ::: "memory" \
+#define CS_ENTER_BARRIER() \
+    asm volatile ( \
+        "membar #LoadLoad | #LoadStore\n" \
+        ::: "memory" \
+    )
+#define CS_LEAVE_BARRIER() \
+    asm volatile ( \
+        "membar #StoreStore\n" \
+        "membar #LoadStore\n" \
+        ::: "memory" \
     )
 
-#define CS_LEAVE_BARRIER() \
-    asm volatile ( \
-        "membar #StoreStore\n" \
-        "membar #LoadStore\n" \
-        ::: "memory" \
-    )
+#define memory_barrier() \
+    asm volatile ("membar #LoadLoad | #StoreStore\n" ::: "memory")
+#define read_barrier() \
+    asm volatile ("membar #LoadLoad\n" ::: "memory")
+#define write_barrier() \
+    asm volatile ("membar #StoreStore\n" ::: "memory")
 
-#define memory_barrier() \
-    asm volatile ( \
-        "membar #LoadLoad | #StoreStore\n" \
-        ::: "memory" \
-    )
-
-#define read_barrier() \
-    asm volatile ( \
-        "membar #LoadLoad\n" \
-        ::: "memory" \
-    )
-
-#define write_barrier() \
-    asm volatile ( \
-        "membar #StoreStore\n" \
-        ::: "memory" \
-    )
-
-#define flush(a) \
-    asm volatile ( \
-        "flush %[reg]\n" \
-        :: [reg] "r" ((a)) \
-        : "memory" \
-    )
+#define flush(a) \
+    asm volatile ("flush %0\n" :: "r" ((a)) : "memory")
 
 /** Flush Instruction pipeline. */
-NO_TRACE static inline void flush_pipeline(void)
+static inline void flush_pipeline(void)
 {
-    uint64_t pc;
-
     /*
      * The FLUSH instruction takes address parameter.
@@ -100,58 +70,49 @@
      * The entire kernel text is mapped by a locked ITLB and
      * DTLB entries. Therefore, when this function is called,
-     * the %pc register will always be in the range mapped by
+     * the %o7 register will always be in the range mapped by
      * DTLB.
-     *
      */
 
-    asm volatile (
-        "rd %%pc, %[pc]\n"
-        "flush %[pc]\n"
-        : [pc] "=&r" (pc)
-    );
+    asm volatile ("flush %o7\n");
 }
 
 /** Memory Barrier instruction. */
-NO_TRACE static inline void membar(void)
+static inline void membar(void)
 {
-    asm volatile (
-        "membar #Sync\n"
-    );
+    asm volatile ("membar #Sync\n");
 }
 
 #if defined (US)
 
-#define FLUSH_INVAL_MIN 4
+#define smc_coherence(a) \
+{ \
+    write_barrier(); \
+    flush((a)); \
+}
 
-#define smc_coherence(a) \
-    do { \
-        write_barrier(); \
-        flush((a)); \
-    } while (0)
-
-#define smc_coherence_block(a, l) \
-    do { \
-        unsigned long i; \
-        write_barrier(); \
-        \
-        for (i = 0; i < (l); i += FLUSH_INVAL_MIN) \
-            flush((void *)(a) + i); \
-    } while (0)
+#define FLUSH_INVAL_MIN 4
+#define smc_coherence_block(a, l) \
+{ \
+    unsigned long i; \
+    write_barrier(); \
+    for (i = 0; i < (l); i += FLUSH_INVAL_MIN) \
+        flush((void *)(a) + i); \
+}
 
 #elif defined (US3)
 
-#define smc_coherence(a) \
-    do { \
-        write_barrier(); \
-        flush_pipeline(); \
-    } while (0)
+#define smc_coherence(a) \
+{ \
+    write_barrier(); \
+    flush_pipeline(); \
+}
 
-#define smc_coherence_block(a, l) \
-    do { \
-        write_barrier(); \
-        flush_pipeline(); \
-    } while (0)
+#define smc_coherence_block(a, l) \
+{ \
+    write_barrier(); \
+    flush_pipeline(); \
+}
 
-#endif
+#endif /* defined(US3) */
 
 #endif
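Both revisions keep the same semantics for the critical-section barriers: under the SPARC RMO memory model, CS_ENTER_BARRIER() acts as an acquire fence and CS_LEAVE_BARRIER() as a release fence. A minimal usage sketch of how the pair would bracket a spinlock-protected region (lock_t and test_and_set() are hypothetical placeholders, not part of barrier.h):

    /*
     * Sketch only: lock_t and test_and_set() are hypothetical
     * placeholders, not part of the changeset.
     */
    typedef volatile unsigned int lock_t;

    extern unsigned int test_and_set(lock_t *lock);  /* returns old value */

    static inline void cs_lock(lock_t *lock)
    {
        while (test_and_set(lock) != 0)
            ;  /* spin until the old value was 0, i.e. the lock was free */

        /*
         * membar #LoadLoad | #LoadStore: the load that observed the free
         * lock is ordered before every access in the critical section.
         */
        CS_ENTER_BARRIER();
    }

    static inline void cs_unlock(lock_t *lock)
    {
        /*
         * membar #StoreStore + #LoadStore: every access in the critical
         * section completes before the releasing store below.
         */
        CS_LEAVE_BARRIER();
        *lock = 0;
    }

Under RMO, #LoadLoad | #LoadStore is the weakest fence that keeps critical-section accesses from floating above the acquiring load, and #StoreStore plus #LoadStore is the weakest that keeps them from sinking below the releasing store.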
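The smc_coherence() and smc_coherence_block() macros exist for self-modifying code: after new instruction words have been stored, write_barrier() orders the stores and flush() (under US) or flush_pipeline() (under US3) makes instruction fetch see them. FLUSH_INVAL_MIN is 4 because SPARC instruction words are 4 bytes wide, so flushing at 4-byte strides covers every patched instruction. A sketch of the intended call pattern (patch_code, patch_site, new_insn and count are illustrative names, not part of the changeset):

    #include <stdint.h>
    #include <stddef.h>

    /* Sketch only: patch_site, new_insn and count are illustrative names. */
    static void patch_code(uint32_t *patch_site, const uint32_t *new_insn,
        size_t count)
    {
        size_t i;

        /* store the new 4-byte instruction words */
        for (i = 0; i < count; i++)
            patch_site[i] = new_insn[i];

        /*
         * Order the stores above, then flush the patched range (US) or
         * the whole pipeline (US3) so stale instructions are not executed.
         */
        smc_coherence_block(patch_site, count * sizeof(uint32_t));
    }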
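The plain read_barrier() and write_barrier() fences pair in the usual producer/consumer pattern under RMO: the writer orders its data store before the flag store, and the reader orders the flag load before the data load. A sketch with hypothetical data and ready variables:

    /* Sketch only: data and ready are hypothetical shared variables. */
    static volatile int data;
    static volatile int ready;

    static void producer(void)
    {
        data = 42;        /* publish the payload first */
        write_barrier();  /* membar #StoreStore: data visible before ready */
        ready = 1;
    }

    static int consumer(void)
    {
        while (!ready)
            ;             /* wait for the flag */
        read_barrier();   /* membar #LoadLoad: ready read before data */
        return data;
    }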