Changeset a0d74fd in mainline
- Timestamp: 2006-03-01T11:07:04Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 9ad03fe
- Parents: 2c49fbbe
- Location: arch
- Files: 1 added, 10 edited
Legend:
- Lines beginning with + were added, lines beginning with - were removed, other lines are unmodified context.
- "… …" marks an elided run of unmodified lines.
arch/ia64/Makefile.inc
      arch/$(ARCH)/src/mm/page.c \
      arch/$(ARCH)/src/mm/tlb.c \
  +   arch/$(ARCH)/src/proc/scheduler.c \
      arch/$(ARCH)/src/drivers/it.c
arch/ia64/include/mm/asid.h
      
      typedef __u16 asid_t;
  +   typedef __u32 rid_t;
  +   
  +   #endif /* __ASM__ */
      
      /**
  … …
       * but those extra bits are not used by the kernel.
       */
  -   #endif
  -   
      #define RIDS_PER_ASID 7
  +   
      #define RID_MAX 262143 /* 2^18 - 1 */
  +   #define RID_KERNEL 0
  +   #define RID_INVALID 1
      
  -   #define ASID2RID(asid, vrn) (((asid)*RIDS_PER_ASID)+(vrn))
  +   #define ASID2RID(asid, vrn) (((asid)>RIDS_PER_ASID)?(((asid)*RIDS_PER_ASID)+(vrn)):(asid))
      #define RID2ASID(rid) ((rid)/RIDS_PER_ASID)
  -   
  -   #ifndef __ASM__
  -   
  -   
  -   typedef __u32 rid_t;
  -   
  -   #endif
  -   
      #define ASID_MAX_ARCH (RID_MAX/RIDS_PER_ASID)
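With RIDS_PER_ASID equal to 7, the revised ASID2RID leaves small reserved identifiers (ASIDs up to RIDS_PER_ASID, which covers RID_KERNEL and RID_INVALID) mapped to themselves, while every larger ASID fans out into its own block of seven RIDs, one per virtual region number. A standalone sketch of that arithmetic (hosted C, illustrative ASID values; not part of the changeset):

    #include <stdio.h>

    #define RIDS_PER_ASID 7
    #define ASID2RID(asid, vrn) (((asid)>RIDS_PER_ASID)?(((asid)*RIDS_PER_ASID)+(vrn)):(asid))
    #define RID2ASID(rid) ((rid)/RIDS_PER_ASID)

    int main(void)
    {
        /* Reserved ASIDs map to themselves, independent of the region. */
        printf("ASID 0, VRN 7 -> RID %d\n", ASID2RID(0, 7));    /* 0, i.e. RID_KERNEL */

        /* An ordinary address space gets seven consecutive RIDs, one per VRN. */
        printf("ASID 8, VRN 0 -> RID %d\n", ASID2RID(8, 0));    /* 56 */
        printf("ASID 8, VRN 6 -> RID %d\n", ASID2RID(8, 6));    /* 62 */

        /* RID2ASID inverts the mapping for the non-reserved range. */
        printf("RID 59 -> ASID %d\n", RID2ASID(59));            /* 8 */
        return 0;
    }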
arch/ia64/include/mm/page.h
      #define __ia64_PAGE_H__
      
  +   #define PAGE_SIZE FRAME_SIZE
  +   #define PAGE_WIDTH FRAME_WIDTH
  +   
  +   /** Bit width of the TLB-locked portion of kernel address space. */
  +   #define KERNEL_PAGE_WIDTH 28 /* 256M */
  +   
  +   #define SET_PTL0_ADDRESS_ARCH(x) /**< To be removed as situation permits. */
  +   
  +   #define PPN_SHIFT 12
  +   
  +   #define VRN_SHIFT 61
  +   #define VRN_MASK (7LL << VRN_SHIFT)
  +   #define VA2VRN(va) ((va)>>VRN_SHIFT)
  +   
  +   #ifdef __ASM__
  +   #define VRN_KERNEL 7
  +   #else
  +   #define VRN_KERNEL 7LL
  +   #endif
  +   
  +   #define REGION_REGISTERS 8
  +   
  +   #define KA2PA(x) ((__address) (x-(VRN_KERNEL<<VRN_SHIFT)))
  +   #define PA2KA(x) ((__address) (x+(VRN_KERNEL<<VRN_SHIFT)))
  +   
  +   #define VHPT_WIDTH 20 /* 1M */
  +   #define VHPT_SIZE (1 << VHPT_WIDTH)
  +   #define VHPT_BASE 0 /* Must be aligned to VHPT_SIZE */
  +   
  +   #define PTA_BASE_SHIFT 15
  +   
  +   /** Memory Attributes. */
  +   #define MA_WRITEBACK 0x0
  +   #define MA_UNCACHEABLE 0x4
  +   
  +   /** Privilege Levels. Only the most and the least privileged ones are ever used. */
  +   #define PL_KERNEL 0x0
  +   #define PL_USER 0x3
  +   
  +   /* Access Rigths. Only certain combinations are used by the kernel. */
  +   #define AR_READ 0x0
  +   #define AR_EXECUTE 0x1
  +   #define AR_WRITE 0x2
  +   
      #ifndef __ASM__
  -   
      
      #include <arch/mm/frame.h>
  … …
      #include <typedefs.h>
      #include <debug.h>
  -   
  -   #endif
  -   
  -   #define PAGE_SIZE FRAME_SIZE
  -   #define PAGE_WIDTH FRAME_WIDTH
  -   #define KERNEL_PAGE_WIDTH 28
  -   
  -   
  -   
  -   #define SET_PTL0_ADDRESS_ARCH(x) /**< To be removed as situation permits. */
  -   
  -   #define PPN_SHIFT 12
  -   
  -   #define VRN_SHIFT 61
  -   #define VRN_MASK (7LL << VRN_SHIFT)
  -   
  -   #ifdef __ASM__
  -   #define VRN_KERNEL 7
  -   #else
  -   #define VRN_KERNEL 7LL
  -   #endif
  -   
  -   #define REGION_REGISTERS 8
  -   
  -   #define KA2PA(x) ((__address) (x-(VRN_KERNEL<<VRN_SHIFT)))
  -   #define PA2KA(x) ((__address) (x+(VRN_KERNEL<<VRN_SHIFT)))
  -   
  -   
  -   #define VHPT_WIDTH 20 /* 1M */
  -   #define VHPT_SIZE (1 << VHPT_WIDTH)
  -   #define VHPT_BASE 0 /* Must be aligned to VHPT_SIZE */
  -   
  -   #define PTA_BASE_SHIFT 15
  -   
  -   /** Memory Attributes. */
  -   #define MA_WRITEBACK 0x0
  -   #define MA_UNCACHEABLE 0x4
  -   
  -   /** Privilege Levels. Only the most and the least privileged ones are ever used. */
  -   #define PL_KERNEL 0x0
  -   #define PL_USER 0x3
  -   
  -   /* Access Rigths. Only certain combinations are used by the kernel. */
  -   #define AR_READ 0x0
  -   #define AR_EXECUTE 0x1
  -   #define AR_WRITE 0x2
  -   
  -   
  -   #define VA_REGION_INDEX 61
  -   
  -   #define VA_REGION(va) (va>>VA_REGION_INDEX)
  -   
  -   #ifndef __ASM__
      
      struct vhpt_tag_info {
  … …
      } vhpt_entry_t;
      
  -   typedef vhpt_entry_t tlb_entry_t;
  -   
      struct region_register_map {
          unsigned ve : 1;
  … …
          __u64 ret;
          ASSERT(i < REGION_REGISTERS);
  -       i=i<<VRN_SHIFT;
  -       __asm__ volatile ("mov %0 = rr[%1]\n" : "=r" (ret) : "r" (i));
  -   
  -       return ret;
  -   }
  -   
  +       __asm__ volatile ("mov %0 = rr[%1]\n" : "=r" (ret) : "r" (i << VRN_SHIFT));
  +       return ret;
  +   }
      
      /** Write Region Register.
  … …
      {
          ASSERT(i < REGION_REGISTERS);
  -       i=i<<VRN_SHIFT;
          __asm__ volatile (
  -           "mov rr[%0] = %1;;\n"
  -           :
  -           : "r" (i), "r" (v));
  +           "mov rr[%0] = %1\n"
  +           :
  +           : "r" (i << VRN_SHIFT), "r" (v)
  +       );
      
      
  … …
      extern void vhpt_set_record(vhpt_entry_t *v, __address page, asid_t asid, __address frame, int flags);
      
  -   
  -   
      #endif
      
      #endif
  -   
  -   
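Because VRN_KERNEL occupies the top three address bits (VRN_SHIFT = 61), KA2PA and PA2KA reduce to subtracting or adding the constant 7 << 61 = 0xe000000000000000, and VA2VRN just extracts those bits. A quick hosted-C check of the arithmetic (uint64_t standing in for the kernel's __address; not part of the changeset):

    #include <stdio.h>
    #include <stdint.h>

    #define VRN_SHIFT   61
    #define VRN_KERNEL  7ULL
    #define VA2VRN(va)  ((va) >> VRN_SHIFT)
    #define KA2PA(x)    ((uint64_t) ((x) - (VRN_KERNEL << VRN_SHIFT)))
    #define PA2KA(x)    ((uint64_t) ((x) + (VRN_KERNEL << VRN_SHIFT)))

    int main(void)
    {
        uint64_t pa = 0x4000;       /* an arbitrary physical address */
        uint64_t ka = PA2KA(pa);    /* 0xe000000000004000 */

        printf("ka  = 0x%llx\n", (unsigned long long) ka);
        printf("vrn = %llu\n", (unsigned long long) VA2VRN(ka));    /* 7 */
        printf("pa  = 0x%llx\n", (unsigned long long) KA2PA(ka));   /* 0x4000 */
        return 0;
    }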
arch/ia64/include/mm/tlb.h
      #include <typedefs.h>
      
  -   extern void tc_mapping_insert(__address va, asid_t asid, vhpt_entry_t entry, bool dtc);
  -   extern void dtc_mapping_insert(__address va, asid_t asid, vhpt_entry_t entry);
  -   extern void itc_mapping_insert(__address va, asid_t asid, vhpt_entry_t entry);
  +   /** Data and instruction Translation Register indices. */
  +   #define DTR_KERNEL 0
  +   #define ITR_KERNEL 0
  +   #define DTR_KSTACK 1
  +   
  +   /** Portion of TLB insertion format data structure. */
  +   union tlb_entry {
  +       __u64 word[2];
  +       struct {
  +           /* Word 0 */
  +           unsigned p : 1;              /**< Present. */
  +           unsigned : 1;
  +           unsigned ma : 3;             /**< Memory attribute. */
  +           unsigned a : 1;              /**< Accessed. */
  +           unsigned d : 1;              /**< Dirty. */
  +           unsigned pl : 2;             /**< Privilege level. */
  +           unsigned ar : 3;             /**< Access rights. */
  +           unsigned long long ppn : 38; /**< Physical Page Number, a.k.a. PFN. */
  +           unsigned : 2;
  +           unsigned ed : 1;
  +           unsigned ig1 : 11;
  +   
  +           /* Word 1 */
  +           unsigned : 2;
  +           unsigned ps : 6;             /**< Page size will be 2^ps. */
  +           unsigned key : 24;           /**< Protection key, unused. */
  +           unsigned : 32;
  +       } __attribute__ ((packed));
  +   } __attribute__ ((packed));
  +   typedef union tlb_entry tlb_entry_t;
  +   
  +   extern void tc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtc);
  +   extern void dtc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry);
  +   extern void itc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry);
      
      extern void tr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr);
      extern void dtr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr);
      extern void itr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr);
  +   
  +   extern void dtlb_mapping_insert(__address page, __address frame, bool dtr, index_t tr);
      
      extern void alternate_instruction_tlb_fault(__u64 vector, struct exception_regdump *pstate);
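The packed union overlays the named bitfields onto the two 64-bit words that the ia64 TLB insertion format expects, so the fields of each word must sum to exactly 64 bits. A hosted-C sketch of that layout check (standard fixed-width types replacing __u64; whether mixed-type packed bitfields lay out this way is GCC behavior the kernel relies on, so treat this as an assumption, not part of the changeset):

    #include <assert.h>
    #include <stdint.h>

    /* Mirror of union tlb_entry from arch/ia64/include/mm/tlb.h. */
    typedef union {
        uint64_t word[2];
        struct {
            /* Word 0: 1+1+3+1+1+2+3+38+2+1+11 = 64 bits. */
            unsigned p : 1;
            unsigned : 1;
            unsigned ma : 3;
            unsigned a : 1;
            unsigned d : 1;
            unsigned pl : 2;
            unsigned ar : 3;
            unsigned long long ppn : 38;
            unsigned : 2;
            unsigned ed : 1;
            unsigned ig1 : 11;
            /* Word 1: 2+6+24+32 = 64 bits. */
            unsigned : 2;
            unsigned ps : 6;
            unsigned key : 24;
            unsigned : 32;
        } __attribute__ ((packed));
    } __attribute__ ((packed)) tlb_entry_t;

    int main(void)
    {
        /* The union must occupy exactly the two insertion-format words. */
        assert(sizeof(tlb_entry_t) == 2 * sizeof(uint64_t));
        return 0;
    }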
arch/ia64/src/dummy.s
      .global asm_delay_loop
      .global userspace
  -   .global before_thread_runs_arch
  -   .global after_thread_ran_arch
      .global cpu_sleep
      .global dummy
  … …
      .global fpu_init
      
  -   before_thread_runs_arch:
  -   after_thread_ran_arch:
      userspace:
      calibrate_delay_loop:
arch/ia64/src/mm/page.c
      void set_environment(void)
      {
  -   
          region_register rr;
          pta_register pta;
  … …
          /*
           * First set up kernel region register.
  -        * This action is redundand (see start.S) but I would to keep it to make sure that
  -        * no unexpected changes will be made.
  +        * This is redundant (see start.S) but we keep it here just for sure.
           */
          rr.word = rr_read(VRN_KERNEL);
          rr.map.ve = 0;                  /* disable VHPT walker */
          rr.map.ps = PAGE_WIDTH;
  -       rr.map.rid = ASID2RID(ASID_KERNEL,VRN_KERNEL);
  +       rr.map.rid = ASID2RID(ASID_KERNEL, VRN_KERNEL);
          rr_write(VRN_KERNEL, rr.word);
          srlz_i();
          srlz_d();
      
          /*
           * And invalidate the rest of region register.
  … …
              rr.word == rr_read(i);
              rr.map.ve = 0;              /* disable VHPT walker */
  -           rr.map.rid = ASID2RID(ASID_INVALID,i);
  +           rr.map.rid = RID_INVALID;
              rr_write(i, rr.word);
              srlz_i();
  … …
          srlz_i();
          srlz_d();
  -   
  -   
  -       return ;
  -   
      }
      
arch/ia64/src/mm/tlb.c
      
      #include <mm/tlb.h>
  +   #include <mm/asid.h>
      #include <arch/mm/tlb.h>
  +   #include <arch/mm/page.h>
      #include <arch/barrier.h>
      #include <arch/interrupt.h>
      #include <typedefs.h>
      #include <panic.h>
  +   #include <print.h>
      
      /** Invalidate all TLB entries. */
  … …
          bool restore_rr = false;
      
  -       if (!(entry.not_present.p))
  +       if (!(entry.p))
              return;
      
  -       rr.word = rr_read(VA_REGION(va));
  -       if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA_REGION(va))))) {
  +       rr.word = rr_read(VA2VRN(va));
  +       if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
              /*
               * The selected region register does not contain required RID.
  … …
              rr0 = rr;
  -           rr0.map.rid = ASID2RID(asid, VA_REGION(va));
  -           rr_write(VA_REGION(va), rr0.word);
  +           rr0.map.rid = ASID2RID(asid, VA2VRN(va));
  +           rr_write(VA2VRN(va), rr0.word);
              srlz_d();
              srlz_i();
  … …
      
          if (restore_rr) {
  -           rr_write(VA_REGION(va),rr.word);
  +           rr_write(VA2VRN(va), rr.word);
              srlz_d();
              srlz_i();
  … …
          bool restore_rr = false;
      
  -       if (!(entry.not_present.p))
  +       if (!(entry.p))
              return;
      
  -       rr.word = rr_read(VA_REGION(va));
  -       if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA_REGION(va))))) {
  +       rr.word = rr_read(VA2VRN(va));
  +       if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
              /*
               * The selected region register does not contain required RID.
  … …
              rr0 = rr;
  -           rr0.map.rid = ASID2RID(asid, VA_REGION(va));
  -           rr_write(VA_REGION(va), rr0.word);
  +           rr0.map.rid = ASID2RID(asid, VA2VRN(va));
  +           rr_write(VA2VRN(va), rr0.word);
              srlz_d();
              srlz_i();
  … …
      
          if (restore_rr) {
  -           rr_write(VA_REGION(va),rr.word);
  -           srlz_d();
  -           srlz_i();
  -       }
  +           rr_write(VA2VRN(va), rr.word);
  +           srlz_d();
  +           srlz_i();
  +       }
  +   }
  +   
  +   /** Insert data into DTLB.
  +    *
  +    * @param va Virtual page address.
  +    * @param asid Address space identifier.
  +    * @param entry The rest of TLB entry as required by TLB insertion format.
  +    * @param dtr If true, insert into data translation register, use data translation cache otherwise.
  +    * @param tr Translation register if dtr is true, ignored otherwise.
  +    */
  +   void dtlb_mapping_insert(__address page, __address frame, bool dtr, index_t tr)
  +   {
  +       tlb_entry_t entry;
  +   
  +       entry.word[0] = 0;
  +       entry.word[1] = 0;
  +   
  +       entry.p = true;                 /* present */
  +       entry.ma = MA_WRITEBACK;
  +       entry.a = true;                 /* already accessed */
  +       entry.d = true;                 /* already dirty */
  +       entry.pl = PL_KERNEL;
  +       entry.ar = AR_READ | AR_WRITE;
  +       entry.ppn = frame >> PPN_SHIFT;
  +       entry.ps = PAGE_WIDTH;
  +   
  +       if (dtr)
  +           dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
  +       else
  +           dtc_mapping_insert(page, ASID_KERNEL, entry);
      }
  … …
      }
      
  +   /** Data TLB fault with VHPT turned off.
  +    *
  +    * @param vector Interruption vector.
  +    * @param pstate Structure with saved interruption state.
  +    */
      void alternate_data_tlb_fault(__u64 vector, struct exception_regdump *pstate)
      {
  -       panic("%s: %P\n", __FUNCTION__, pstate->cr_ifa);
  +       region_register rr;
  +       rid_t rid;
  +       __address va;
  +   
  +       va = pstate->cr_ifa;    /* faulting address */
  +       rr.word = rr_read(VA2VRN(va));
  +       rid = rr.map.rid;
  +       if (RID2ASID(rid) == ASID_KERNEL) {
  +           if (VA2VRN(va) == VRN_KERNEL) {
  +               /*
  +                * Provide KA2PA(identity) mapping for faulting piece of
  +                * kernel address space.
  +                */
  +               dtlb_mapping_insert(va, KA2PA(va), false, 0);
  +               return;
  +           }
  +       }
  +       panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
      }
      
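The rewritten alternate_data_tlb_fault turns a previously fatal miss on a kernel (region 7) address into lazy population of the identity mapping: the faulting page is simply mapped onto the frame at KA2PA(va). A standalone model of the region check (hosted C, uint64_t for __address, hypothetical helper name; the real handler additionally verifies that the RID belongs to ASID_KERNEL; not part of the changeset):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define VRN_SHIFT   61
    #define VRN_KERNEL  7ULL
    #define VA2VRN(va)  ((va) >> VRN_SHIFT)
    #define KA2PA(va)   ((va) - (VRN_KERNEL << VRN_SHIFT))

    /* Hypothetical helper: true if the fault can be satisfied lazily. */
    static bool kernel_identity_fault(uint64_t va, uint64_t *frame)
    {
        if (VA2VRN(va) == VRN_KERNEL) {
            *frame = KA2PA(va); /* identity mapping, modulo the VRN bits */
            return true;
        }
        return false;           /* anything else still ends in panic() */
    }

    int main(void)
    {
        uint64_t frame;
        uint64_t va = (VRN_KERNEL << VRN_SHIFT) + 0x12345000ULL;

        if (kernel_identity_fault(va, &frame))
            printf("va = 0x%llx -> frame = 0x%llx\n",
                (unsigned long long) va, (unsigned long long) frame);
        return 0;
    }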
arch/ia64/src/start.S
      movl r10=(RR_MASK)
      and r9=r10,r9
  -   movl r10=((ASID2RID(ASID_KERNEL,VRN_KERNEL)<<RID_SHIFT)|(KERNEL_PAGE_WIDTH<<PS_SHIFT))
  +   movl r10=((RID_KERNEL<<RID_SHIFT)|(KERNEL_PAGE_WIDTH<<PS_SHIFT))
      or r9=r10,r9
      mov rr[r8]=r9
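The constant loaded into r10 packs the kernel RID and the kernel page size into the region register format. Assuming the usual ia64 layout with the page-size field at bit 2 and the RID field at bit 8 (PS_SHIFT = 2, RID_SHIFT = 8; both are defined elsewhere in the tree, so treat these values as assumptions), the value can be checked in hosted C (not part of the changeset):

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed field offsets; RID_SHIFT and PS_SHIFT come from kernel headers. */
    #define RID_SHIFT           8
    #define PS_SHIFT            2
    #define RID_KERNEL          0
    #define KERNEL_PAGE_WIDTH   28  /* 256M */

    int main(void)
    {
        uint64_t r10 = ((uint64_t) RID_KERNEL << RID_SHIFT)
            | ((uint64_t) KERNEL_PAGE_WIDTH << PS_SHIFT);
        printf("r10 = 0x%llx\n", (unsigned long long) r10); /* 0x70: rid=0, ps=28 */
        return 0;
    }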
arch/sparc64/include/mm/tlb.h
      #define PAGESIZE_512K 2
      #define PAGESIZE_4M 3
  +   
  +   /** Bit width of the TLB-locked portion of kernel address space. */
  +   #define KERNEL_PAGE_WIDTH 22 /* 4M */
      
      union tlb_context_reg {
arch/sparc64/src/proc/scheduler.c
      #include <arch.h>
      #include <arch/mm/tlb.h>
  +   #include <arch/mm/page.h>
      #include <config.h>
      #include <align.h>
  … …
          __address base;
      
  -       base = ALIGN_DOWN(config.base, 4*1024*1024);
  +       base = ALIGN_DOWN(config.base, 1<<KERNEL_PAGE_WIDTH);
      
  -       if ((__address) THREAD->kstack < base || (__address) THREAD->kstack > base + 4*1024*1024) {
  +       if ((__address) THREAD->kstack < base || (__address) THREAD->kstack > base + (1<<KERNEL_PAGE_WIDTH)) {
              /*
               * Kernel stack of this thread is not locked in DTLB.
  … …
               */
              dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (__address) THREAD->kstack);
  -           dtlb_insert_mapping((__address) THREAD->kstack, (__address) THREAD->kstack, PAGESIZE_8K, true, true);
  +           dtlb_insert_mapping((__address) THREAD->kstack, KA2PA(THREAD->kstack), PAGESIZE_8K, true, true);
          }
      }
  … …
          __address base;
      
  -       base = ALIGN_DOWN(config.base, 4*1024*1024);
  +       base = ALIGN_DOWN(config.base, 1<<KERNEL_PAGE_WIDTH);
      
  -       if ((__address) THREAD->kstack < base || (__address) THREAD->kstack > base + 4*1024*1024) {
  +       if ((__address) THREAD->kstack < base || (__address) THREAD->kstack > base + (1<<KERNEL_PAGE_WIDTH)) {
              /*
               * Kernel stack of this thread is locked in DTLB.
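Both hooks guard against remapping the stack of a thread that already lives inside the 4M (1 << KERNEL_PAGE_WIDTH) region locked in DTLB at boot. A standalone sketch of the range test with the constants from this changeset (hosted C, hypothetical helper name; the kernel's ALIGN_DOWN is approximated for power-of-two sizes; not part of the changeset):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define KERNEL_PAGE_WIDTH   22  /* 4M, from arch/sparc64/include/mm/tlb.h */
    #define ALIGN_DOWN(a, s)    ((a) & ~((uint64_t)(s) - 1))

    /* True if the stack lies outside the 4M region locked in DTLB. */
    static bool kstack_needs_mapping(uint64_t config_base, uint64_t kstack)
    {
        uint64_t base = ALIGN_DOWN(config_base, 1ULL << KERNEL_PAGE_WIDTH);
        return kstack < base || kstack > base + (1ULL << KERNEL_PAGE_WIDTH);
    }

    int main(void)
    {
        printf("%d\n", kstack_needs_mapping(0x400000, 0x500000));   /* 0: inside */
        printf("%d\n", kstack_needs_mapping(0x400000, 0x900000));   /* 1: outside */
        return 0;
    }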