Changeset e08162b in mainline
- Timestamp:
- 2016-01-07T13:41:38Z (9 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- f4582c6
- Parents:
- 7254df6
- Location:
- kernel/arch/sparc64
- Files:
- 6 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/arch/sparc64/include/arch/mm/sun4u/tsb.h
r7254df6 re08162b 43 43 * in TLBs - only one TLB entry will do. 44 44 */ 45 #define TSB_SIZE 2 /* when changing this, change 46 * as.c as well */ 47 #define ITSB_ENTRY_COUNT (512 * (1 << TSB_SIZE)) 48 #define DTSB_ENTRY_COUNT (512 * (1 << TSB_SIZE)) 45 #define TSB_BASE_REG_SIZE 2 /* keep in sync with as.c */ 46 #define ITSB_ENTRY_COUNT (512 * (1 << TSB_BASE_REG_SIZE)) 47 #define DTSB_ENTRY_COUNT (512 * (1 << TSB_BASE_REG_SIZE)) 48 49 #define ITSB_ENTRY_MASK (ITSB_ENTRY_COUNT - 1) 50 #define DTSB_ENTRY_MASK (DTSB_ENTRY_COUNT - 1) 51 52 #define TSB_ENTRY_COUNT (ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) 53 #define TSB_SIZE (TSB_ENTRY_COUNT * sizeof(tsb_entry_t)) 54 #define TSB_FRAMES SIZE2FRAMES(TSB_SIZE) 49 55 50 56 #define TSB_TAG_TARGET_CONTEXT_SHIFT 48 -
kernel/arch/sparc64/include/arch/mm/sun4v/tsb.h
r7254df6 re08162b 44 44 * in TLBs - only one TLB entry will do. 45 45 */ 46 #define TSB_SIZE 3 /* when changing this, change 47 * as.c as well */ 48 #define TSB_ENTRY_COUNT (512 * (1 << TSB_SIZE)) 46 #define TSB_ENTRY_COUNT 4096 47 #define TSB_ENTRY_MASK (TSB_ENTRY_COUNT - 1) 48 #define TSB_SIZE (TSB_ENTRY_COUNT * sizeof(tsb_entry_t)) 49 #define TSB_FRAMES SIZE2FRAMES(TSB_SIZE) 49 50 50 51 #ifndef __ASM__ -
kernel/arch/sparc64/src/mm/sun4u/as.c
r7254df6 re08162b 63 63 { 64 64 #ifdef CONFIG_TSB 65 uintptr_t tsb_phys = 66 frame_alloc(SIZE2FRAMES((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * 67 sizeof(tsb_entry_t)), flags, 0); 68 if (!tsb_phys) 65 uintptr_t tsb_base = frame_alloc(TSB_FRAMES, flags, TSB_SIZE - 1); 66 if (!tsb_base) 69 67 return -1; 70 71 tsb_entry_t *tsb = (tsb_entry_t *) PA2KA(tsb_phys); 68 69 tsb_entry_t *tsb = (tsb_entry_t *) PA2KA(tsb_base); 70 memsetb(tsb, TSB_SIZE, 0); 72 71 73 72 as->arch.itsb = tsb; 74 73 as->arch.dtsb = tsb + ITSB_ENTRY_COUNT; 75 76 memsetb(as->arch.itsb, (ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *77 sizeof(tsb_entry_t), 0);78 74 #endif 79 75 … … 84 80 { 85 81 #ifdef CONFIG_TSB 86 size_t frames = SIZE2FRAMES((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * 87 sizeof(tsb_entry_t)); 88 frame_free(KA2PA((uintptr_t) as->arch.itsb), frames); 89 90 return frames; 82 frame_free(KA2PA((uintptr_t) as->arch.itsb), TSB_FRAMES); 83 84 return TSB_FRAMES; 91 85 #else 92 86 return 0; … … 136 130 uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH); 137 131 138 ASSERT(as->arch.itsb && as->arch.dtsb); 132 ASSERT(as->arch.itsb); 133 ASSERT(as->arch.dtsb); 139 134 140 135 uintptr_t tsb = (uintptr_t) as->arch.itsb; 141 136 142 if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {137 if (!overlaps(tsb, TSB_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) { 143 138 /* 144 139 * TSBs were allocated from memory not covered … … 155 150 * 156 151 */ 157 tsb_base_reg_t tsb_base ;158 159 tsb_base .value = 0;160 tsb_base .size = TSB_SIZE;161 tsb_base .split = 0;162 163 tsb_base .base = ((uintptr_t) as->arch.itsb) >> MMU_PAGE_WIDTH;164 itsb_base_write(tsb_base .value);165 tsb_base .base = ((uintptr_t) as->arch.dtsb) >> MMU_PAGE_WIDTH;166 dtsb_base_write(tsb_base .value);152 tsb_base_reg_t tsb_base_reg; 153 154 tsb_base_reg.value = 0; 155 tsb_base_reg.size = TSB_BASE_REG_SIZE; 156 tsb_base_reg.split = 0; 157 158 tsb_base_reg.base = ((uintptr_t) as->arch.itsb) >> MMU_PAGE_WIDTH; 159 
itsb_base_write(tsb_base_reg.value); 160 tsb_base_reg.base = ((uintptr_t) as->arch.dtsb) >> MMU_PAGE_WIDTH; 161 dtsb_base_write(tsb_base_reg.value); 167 162 168 163 #if defined (US3) … … 207 202 uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH); 208 203 209 ASSERT(as->arch.itsb && as->arch.dtsb); 204 ASSERT(as->arch.itsb); 205 ASSERT(as->arch.dtsb); 210 206 211 207 uintptr_t tsb = (uintptr_t) as->arch.itsb; 212 208 213 if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {209 if (!overlaps(tsb, TSB_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) { 214 210 /* 215 211 * TSBs were allocated from memory not covered -
kernel/arch/sparc64/src/mm/sun4u/tsb.c
r7254df6 re08162b 42 42 #include <debug.h> 43 43 44 #define TSB_INDEX_MASK ((1 << (21 + 1 + TSB_SIZE - MMU_PAGE_WIDTH)) - 1)45 46 44 /** Invalidate portion of TSB. 47 45 * … … 60 58 size_t cnt; 61 59 62 ASSERT(as->arch.itsb && as->arch.dtsb); 60 ASSERT(as->arch.itsb); 61 ASSERT(as->arch.dtsb); 63 62 64 i0 = (page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK; 65 ASSERT(i0 < ITSB_ENTRY_COUNT && i0 < DTSB_ENTRY_COUNT); 63 i0 = (page >> MMU_PAGE_WIDTH) & ITSB_ENTRY_MASK; 66 64 67 65 if (pages == (size_t) -1 || (pages * 2) > ITSB_ENTRY_COUNT) … … 71 69 72 70 for (i = 0; i < cnt; i++) { 73 as->arch.itsb[(i0 + i) & (ITSB_ENTRY_COUNT - 1)].tag.invalid = 74 true; 75 as->arch.dtsb[(i0 + i) & (DTSB_ENTRY_COUNT - 1)].tag.invalid = 76 true; 71 as->arch.itsb[(i0 + i) & ITSB_ENTRY_MASK].tag.invalid = true; 72 as->arch.dtsb[(i0 + i) & DTSB_ENTRY_MASK].tag.invalid = true; 77 73 } 78 74 } … … 86 82 { 87 83 as_t *as; 88 tsb_entry_t *t sb;84 tsb_entry_t *tte; 89 85 size_t entry; 90 86 … … 92 88 93 89 as = t->as; 94 entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK; 95 ASSERT(entry < ITSB_ENTRY_COUNT); 96 tsb = &as->arch.itsb[entry]; 90 entry = ((t->page >> MMU_PAGE_WIDTH) + index) & ITSB_ENTRY_MASK; 91 tte = &as->arch.itsb[entry]; 97 92 98 93 /* … … 102 97 */ 103 98 104 t sb->tag.invalid = true; /* invalidate the entry99 tte->tag.invalid = true; /* invalidate the entry 105 100 * (tag target has this 106 101 * set to 0) */ … … 108 103 write_barrier(); 109 104 110 t sb->tag.context = as->asid;105 tte->tag.context = as->asid; 111 106 /* the shift is bigger than PAGE_WIDTH, do not bother with index */ 112 t sb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;113 t sb->data.value = 0;114 t sb->data.size = PAGESIZE_8K;115 t sb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;116 t sb->data.cp = t->c; /* cp as cache in phys.-idxed, c as cacheable */117 t sb->data.p = t->k; /* p as privileged, k as kernel */118 t sb->data.v = t->p; /* v as valid, p as present */107 tte->tag.va_tag = t->page >> 
VA_TAG_PAGE_SHIFT; 108 tte->data.value = 0; 109 tte->data.size = PAGESIZE_8K; 110 tte->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index; 111 tte->data.cp = t->c; /* cp as cache in phys.-idxed, c as cacheable */ 112 tte->data.p = t->k; /* p as privileged, k as kernel */ 113 tte->data.v = t->p; /* v as valid, p as present */ 119 114 120 115 write_barrier(); 121 116 122 t sb->tag.invalid = false; /* mark the entry as valid */117 tte->tag.invalid = false; /* mark the entry as valid */ 123 118 } 124 119 … … 132 127 { 133 128 as_t *as; 134 tsb_entry_t *t sb;129 tsb_entry_t *tte; 135 130 size_t entry; 136 131 … … 138 133 139 134 as = t->as; 140 entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK; 141 ASSERT(entry < DTSB_ENTRY_COUNT); 142 tsb = &as->arch.dtsb[entry]; 135 entry = ((t->page >> MMU_PAGE_WIDTH) + index) & DTSB_ENTRY_MASK; 136 tte = &as->arch.dtsb[entry]; 143 137 144 138 /* … … 148 142 */ 149 143 150 t sb->tag.invalid = true; /* invalidate the entry144 tte->tag.invalid = true; /* invalidate the entry 151 145 * (tag target has this 152 146 * set to 0) */ … … 154 148 write_barrier(); 155 149 156 t sb->tag.context = as->asid;150 tte->tag.context = as->asid; 157 151 /* the shift is bigger than PAGE_WIDTH, do not bother with index */ 158 t sb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;159 t sb->data.value = 0;160 t sb->data.size = PAGESIZE_8K;161 t sb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;162 t sb->data.cp = t->c;152 tte->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT; 153 tte->data.value = 0; 154 tte->data.size = PAGESIZE_8K; 155 tte->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index; 156 tte->data.cp = t->c; 163 157 #ifdef CONFIG_VIRT_IDX_DCACHE 164 t sb->data.cv = t->c;158 tte->data.cv = t->c; 165 159 #endif /* CONFIG_VIRT_IDX_DCACHE */ 166 t sb->data.p = t->k; /* p as privileged */167 t sb->data.w = ro ? false : t->w;168 t sb->data.v = t->p;160 tte->data.p = t->k; /* p as privileged */ 161 tte->data.w = ro ? 
false : t->w; 162 tte->data.v = t->p; 169 163 170 164 write_barrier(); 171 165 172 t sb->tag.invalid = false; /* mark the entry as valid */166 tte->tag.invalid = false; /* mark the entry as valid */ 173 167 } 174 168 -
kernel/arch/sparc64/src/mm/sun4v/as.c
r7254df6 re08162b 66 66 { 67 67 #ifdef CONFIG_TSB 68 uintptr_t tsb = 69 frame_alloc(SIZE2FRAMES(TSB_ENTRY_COUNT * sizeof(tsb_entry_t)), 70 flags, 0); 71 if (!tsb) 68 uintptr_t tsb_base = frame_alloc(TSB_FRAMES, flags, TSB_SIZE - 1); 69 if (!tsb_base) 72 70 return -1; 73 71 72 tsb_entry_t *tsb = (tsb_entry_t *) PA2KA(tsb_base); 73 74 74 as->arch.tsb_description.page_size = PAGESIZE_8K; 75 75 as->arch.tsb_description.associativity = 1; 76 76 as->arch.tsb_description.num_ttes = TSB_ENTRY_COUNT; 77 77 as->arch.tsb_description.pgsize_mask = 1 << PAGESIZE_8K; 78 as->arch.tsb_description.tsb_base = tsb ;78 as->arch.tsb_description.tsb_base = tsb_base; 79 79 as->arch.tsb_description.reserved = 0; 80 80 as->arch.tsb_description.context = 0; 81 81 82 memsetb((void *) PA2KA(as->arch.tsb_description.tsb_base), 83 TSB_ENTRY_COUNT * sizeof(tsb_entry_t), 0); 82 memsetb(tsb, TSB_SIZE, 0); 84 83 #endif 85 84 … … 90 89 { 91 90 #ifdef CONFIG_TSB 92 size_t frames = SIZE2FRAMES(TSB_ENTRY_COUNT * sizeof(tsb_entry_t)); 93 frame_free(as->arch.tsb_description.tsb_base, frames); 91 frame_free(as->arch.tsb_description.tsb_base, TSB_FRAMES); 94 92 95 return frames;93 return TSB_FRAMES; 96 94 #else 97 95 return 0; … … 126 124 uintptr_t tsb = PA2KA(as->arch.tsb_description.tsb_base); 127 125 128 if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {126 if (!overlaps(tsb, TSB_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) { 129 127 /* 130 128 * TSBs were allocated from memory not covered … … 137 135 } 138 136 139 __hypercall_fast2(MMU_TSB_CTXNON0, 1, KA2PA(& (as->arch.tsb_description)));137 __hypercall_fast2(MMU_TSB_CTXNON0, 1, KA2PA(&as->arch.tsb_description)); 140 138 #endif 141 139 } … … 166 164 uintptr_t tsb = PA2KA(as->arch.tsb_description.tsb_base); 167 165 168 if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {166 if (!overlaps(tsb, TSB_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) { 169 167 /* 170 168 * TSBs were allocated from memory not covered -
kernel/arch/sparc64/src/mm/sun4v/tsb.c
r7254df6 re08162b 44 44 #include <debug.h> 45 45 46 #define TSB_INDEX_MASK ((1 << (21 + 1 + TSB_SIZE - MMU_PAGE_WIDTH)) - 1)47 48 46 /** Invalidate portion of TSB. 49 47 * … … 58 56 void tsb_invalidate(as_t *as, uintptr_t page, size_t pages) 59 57 { 58 tsb_entry_t *tsb; 60 59 size_t i0, i; 61 60 size_t cnt; … … 63 62 ASSERT(as->arch.tsb_description.tsb_base); 64 63 65 i0 = (page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK; 66 ASSERT(i0 < TSB_ENTRY_COUNT); 64 i0 = (page >> MMU_PAGE_WIDTH) & TSB_ENTRY_MASK; 67 65 68 if (pages == (size_t) - 1 || (pages)> TSB_ENTRY_COUNT)66 if (pages == (size_t) -1 || pages > TSB_ENTRY_COUNT) 69 67 cnt = TSB_ENTRY_COUNT; 70 68 else 71 69 cnt = pages; 72 70 73 for (i = 0; i < cnt; i++) { 74 ((tsb_entry_t *) PA2KA(as->arch.tsb_description.tsb_base))[ 75 (i0 + i) & (TSB_ENTRY_COUNT - 1)].data.v = false; 76 } 71 tsb = (tsb_entry_t *) PA2KA(as->arch.tsb_description.tsb_base); 72 for (i = 0; i < cnt; i++) 73 tsb[(i0 + i) & TSB_ENTRY_MASK].data.v = false; 77 74 } 78 75 … … 85 82 as_t *as; 86 83 tsb_entry_t *tsb; 87 size_t entry; 84 tsb_entry_t *tte; 85 size_t index; 88 86 89 87 as = t->as; 90 entry = (t->page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK; 91 ASSERT(entry < TSB_ENTRY_COUNT); 92 tsb = &((tsb_entry_t *) PA2KA(as->arch.tsb_description.tsb_base))[entry]; 88 index = (t->page >> MMU_PAGE_WIDTH) & TSB_ENTRY_MASK; 89 90 tsb = (tsb_entry_t *) PA2KA(as->arch.tsb_description.tsb_base); 91 tte = &tsb[index]; 93 92 94 93 /* … … 98 97 */ 99 98 100 t sb->data.v = false;99 tte->data.v = false; 101 100 102 101 write_barrier(); 103 102 104 t sb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;103 tte->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT; 105 104 106 t sb->data.value = 0;107 t sb->data.nfo = false;108 t sb->data.ra = t->frame >> MMU_FRAME_WIDTH;109 t sb->data.ie = false;110 t sb->data.e = false;111 t sb->data.cp = t->c; /* cp as cache in phys.-idxed, c as cacheable */112 t sb->data.cv = false;113 t sb->data.p = t->k; /* p as privileged, k as kernel */114 t 
sb->data.x = true;115 t sb->data.w = false;116 t sb->data.size = PAGESIZE_8K;105 tte->data.value = 0; 106 tte->data.nfo = false; 107 tte->data.ra = t->frame >> MMU_FRAME_WIDTH; 108 tte->data.ie = false; 109 tte->data.e = false; 110 tte->data.cp = t->c; /* cp as cache in phys.-idxed, c as cacheable */ 111 tte->data.cv = false; 112 tte->data.p = t->k; /* p as privileged, k as kernel */ 113 tte->data.x = true; 114 tte->data.w = false; 115 tte->data.size = PAGESIZE_8K; 117 116 118 117 write_barrier(); 119 118 120 t sb->data.v = t->p; /* v as valid, p as present */119 tte->data.v = t->p; /* v as valid, p as present */ 121 120 } 122 121 … … 130 129 as_t *as; 131 130 tsb_entry_t *tsb; 132 size_t entry; 131 tsb_entry_t *tte; 132 size_t index; 133 133 134 134 as = t->as; 135 entry = (t->page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK;136 ASSERT(entry < TSB_ENTRY_COUNT);137 t sb = &((tsb_entry_t *) PA2KA(as->arch.tsb_description.tsb_base))[entry];135 index = (t->page >> MMU_PAGE_WIDTH) & TSB_ENTRY_MASK; 136 tsb = (tsb_entry_t *) PA2KA(as->arch.tsb_description.tsb_base); 137 tte = &tsb[index]; 138 138 139 139 /* … … 143 143 */ 144 144 145 t sb->data.v = false;145 tte->data.v = false; 146 146 147 147 write_barrier(); 148 148 149 t sb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;149 tte->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT; 150 150 151 t sb->data.value = 0;152 t sb->data.nfo = false;153 t sb->data.ra = t->frame >> MMU_FRAME_WIDTH;154 t sb->data.ie = false;155 t sb->data.e = false;156 t sb->data.cp = t->c; /* cp as cache in phys.-idxed, c as cacheable */151 tte->data.value = 0; 152 tte->data.nfo = false; 153 tte->data.ra = t->frame >> MMU_FRAME_WIDTH; 154 tte->data.ie = false; 155 tte->data.e = false; 156 tte->data.cp = t->c; /* cp as cache in phys.-idxed, c as cacheable */ 157 157 #ifdef CONFIG_VIRT_IDX_DCACHE 158 t sb->data.cv = t->c;158 tte->data.cv = t->c; 159 159 #endif /* CONFIG_VIRT_IDX_DCACHE */ 160 t sb->data.p = t->k; /* p as privileged, k as kernel */161 t sb->data.x = 
true;162 t sb->data.w = ro ? false : t->w;163 t sb->data.size = PAGESIZE_8K;160 tte->data.p = t->k; /* p as privileged, k as kernel */ 161 tte->data.x = true; 162 tte->data.w = ro ? false : t->w; 163 tte->data.size = PAGESIZE_8K; 164 164 165 165 write_barrier(); 166 166 167 t sb->data.v = t->p; /* v as valid, p as present */167 tte->data.v = t->p; /* v as valid, p as present */ 168 168 } 169 169
Note: See TracChangeset for help on using the changeset viewer.