Changeset 38a1a84 in mainline
- Timestamp:
- 2005-10-05T21:29:16Z (19 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- bca1b47
- Parents:
- 49c1f93
- Location:
- arch/mips32
- Files:
-
- 4 edited
Legend:
- Unmodified
- Added
- Removed
-
arch/mips32/include/mm/page.h
r49c1f93 r38a1a84 47 47 * - 32-bit virtual addresses 48 48 * - Offset is 14 bits => pages are 16K long 49 * - PTE's use the same format as CP0 EntryLo[01] registers => PTE is therefore 4 bytes long 49 * - PTE's use similar format as CP0 EntryLo[01] registers => PTE is therefore 4 bytes long 50 * - PTE's make use of CP0 EntryLo's two-bit reserved field for bit W (writable) and bit A (accessed) 50 51 * - PTL0 has 64 entries (6 bits) 51 52 * - PTL1 is not used … … 57 58 #define PTL1_INDEX_ARCH(vaddr) 0 58 59 #define PTL2_INDEX_ARCH(vaddr) 0 59 #define PTL3_INDEX_ARCH(vaddr) (((vaddr)>>1 2)&0xfff)60 #define PTL3_INDEX_ARCH(vaddr) (((vaddr)>>14)&0x3fff) 60 61 61 62 #define GET_PTL0_ADDRESS_ARCH() (PTL0) … … 98 99 (1<<PAGE_USER_SHIFT) | 99 100 (1<<PAGE_READ_SHIFT) | 100 ((p-> d)<<PAGE_WRITE_SHIFT) |101 ((p->w)<<PAGE_WRITE_SHIFT) | 101 102 (1<<PAGE_EXEC_SHIFT) 102 103 ); … … 110 111 p->c = (flags & PAGE_CACHEABLE) != 0 ? PAGE_CACHEABLE_EXC_WRITE : PAGE_UNCACHED; 111 112 p->v = !(flags & PAGE_NOT_PRESENT); 112 p-> d= (flags & PAGE_WRITE) != 0;113 p->w = (flags & PAGE_WRITE) != 0; 113 114 } 114 115 -
arch/mips32/include/mm/tlb.h
r49c1f93 r38a1a84 48 48 unsigned c : 3; /* cache coherency attribute */ 49 49 unsigned pfn : 24; /* frame number */ 50 unsigned : 2; 50 unsigned zero: 2; /* zero */ 51 } __attribute__ ((packed)); 52 53 struct pte { 54 unsigned g : 1; /* global bit */ 55 unsigned v : 1; /* valid bit */ 56 unsigned d : 1; /* dirty/write-protect bit */ 57 unsigned c : 3; /* cache coherency attribute */ 58 unsigned pfn : 24; /* frame number */ 59 unsigned w : 1; /* writable */ 60 unsigned a : 1; /* accessed */ 51 61 } __attribute__ ((packed)); 52 62 … … 63 73 } __attribute__ ((packed)); 64 74 65 struct tlb_entry { 66 struct entry_lo lo0; 67 struct entry_lo lo1; 68 struct entry_hi hi; 69 struct page_mask mask; 75 struct index { 76 unsigned index : 4; 77 unsigned : 27; 78 unsigned p : 1; 70 79 } __attribute__ ((packed)); 80 81 /** Probe TLB for Matching Entry 82 * 83 * Probe TLB for Matching Entry. 84 */ 85 static inline void tlbp(void) 86 { 87 __asm__ volatile ("tlbp\n\t"); 88 } 71 89 72 90 -
arch/mips32/include/types.h
r49c1f93 r38a1a84 50 50 typedef __u32 __native; 51 51 52 typedef struct entry_lopte_t;52 typedef struct pte pte_t; 53 53 54 54 #endif -
arch/mips32/src/mm/tlb.c
r49c1f93 r38a1a84 43 43 static void tlb_modified_fail(struct exception_regdump *pstate); 44 44 45 static pte_t *find_mapping_and_check(__address badvaddr); 46 static void prepare_entry_lo(struct entry_lo *lo, bool g, bool v, bool d, bool c, __address pfn); 47 45 48 /** Initialize TLB 46 49 * … … 80 83 void tlb_refill(struct exception_regdump *pstate) 81 84 { 82 struct entry_ hi hi;85 struct entry_lo lo; 83 86 __address badvaddr; 84 87 pte_t *pte; 85 88 86 *((__u32 *) &hi) = cp0_entry_hi_read();87 89 badvaddr = cp0_badvaddr_read(); 88 90 89 spinlock_lock(&VM->lock); 90 91 /* 92 * Refill cannot succeed if the ASIDs don't match. 93 */ 94 if (hi.asid != VM->asid) 95 goto fail; 96 97 /* 98 * Refill cannot succeed if badvaddr is not 99 * associated with any mapping. 100 */ 101 pte = find_mapping(badvaddr, 0); 91 spinlock_lock(&VM->lock); 92 pte = find_mapping_and_check(badvaddr); 102 93 if (!pte) 103 94 goto fail; 104 105 /* 106 * Refill cannot succeed if the mapping is marked as invalid. 107 */ 108 if (!pte->v) 109 goto fail; 95 96 /* 97 * Record access to PTE. 98 */ 99 pte->a = 1; 100 101 prepare_entry_lo(&lo, pte->g, pte->v, pte->d, pte->c, pte->pfn); 110 102 111 103 /* 112 104 * New entry is to be inserted into TLB 113 105 */ 114 cp0_pagemask_write(TLB_PAGE_MASK_16K);115 106 if ((badvaddr/PAGE_SIZE) % 2 == 0) { 116 cp0_entry_lo0_write(*((__u32 *) pte));107 cp0_entry_lo0_write(*((__u32 *) &lo)); 117 108 cp0_entry_lo1_write(0); 118 109 } 119 110 else { 120 111 cp0_entry_lo0_write(0); 121 cp0_entry_lo1_write(*((__u32 *) pte));112 cp0_entry_lo1_write(*((__u32 *) &lo)); 122 113 } 123 114 tlbwr(); … … 131 122 } 132 123 124 /** Process TLB Invalid Exception 125 * 126 * Process TLB Invalid Exception. 127 * 128 * @param pstate Interrupted register context.
129 */ 133 130 void tlb_invalid(struct exception_regdump *pstate) 134 131 { 132 struct index index; 133 __address badvaddr; 134 struct entry_lo lo; 135 pte_t *pte; 136 137 badvaddr = cp0_badvaddr_read(); 138 139 /* 140 * Locate the faulting entry in TLB. 141 */ 142 tlbp(); 143 *((__u32 *) &index) = cp0_index_read(); 144 145 spinlock_lock(&VM->lock); 146 147 /* 148 * Fail if the entry is not in TLB. 149 */ 150 if (index.p) 151 goto fail; 152 153 pte = find_mapping_and_check(badvaddr); 154 if (!pte) 155 goto fail; 156 157 /* 158 * Read the faulting TLB entry. 159 */ 160 tlbr(); 161 162 /* 163 * Record access to PTE. 164 */ 165 pte->a = 1; 166 167 prepare_entry_lo(&lo, pte->g, pte->v, pte->d, pte->c, pte->pfn); 168 169 /* 170 * The entry is to be updated in TLB. 171 */ 172 if ((badvaddr/PAGE_SIZE) % 2 == 0) 173 cp0_entry_lo0_write(*((__u32 *) &lo)); 174 else 175 cp0_entry_lo1_write(*((__u32 *) &lo)); 176 tlbwi(); 177 178 spinlock_unlock(&VM->lock); 179 return; 180 181 fail: 182 spinlock_unlock(&VM->lock); 135 183 tlb_invalid_fail(pstate); 136 184 } 137 185 186 /** Process TLB Modified Exception 187 * 188 * Process TLB Modified Exception. 189 * 190 * @param pstate Interrupted register context. 191 */ 192 138 193 void tlb_modified(struct exception_regdump *pstate) 139 194 { 195 struct index index; 196 __address badvaddr; 197 struct entry_lo lo; 198 pte_t *pte; 199 200 badvaddr = cp0_badvaddr_read(); 201 202 /* 203 * Locate the faulting entry in TLB. 204 */ 205 tlbp(); 206 *((__u32 *) &index) = cp0_index_read(); 207 208 spinlock_lock(&VM->lock); 209 210 /* 211 * Fail if the entry is not in TLB. 212 */ 213 if (index.p) 214 goto fail; 215 216 pte = find_mapping_and_check(badvaddr); 217 if (!pte) 218 goto fail; 219 220 /* 221 * Fail if the page is not writable. 222 */ 223 if (!pte->w) 224 goto fail; 225 226 /* 227 * Read the faulting TLB entry. 228 */ 229 tlbr(); 230 231 /* 232 * Record access and write to PTE.
233 */ 234 pte->a = 1; 235 pte->d = 1; 236 237 prepare_entry_lo(&lo, pte->g, pte->v, pte->w, pte->c, pte->pfn); 238 239 /* 240 * The entry is to be updated in TLB. 241 */ 242 if ((badvaddr/PAGE_SIZE) % 2 == 0) 243 cp0_entry_lo0_write(*((__u32 *) &lo)); 244 else 245 cp0_entry_lo1_write(*((__u32 *) &lo)); 246 tlbwi(); 247 248 spinlock_unlock(&VM->lock); 249 return; 250 251 fail: 252 spinlock_unlock(&VM->lock); 140 253 tlb_modified_fail(pstate); 141 254 } … … 163 276 if (s) 164 277 symbol = s; 165 panic("%X: TLB Invalid Exception at %X(%s)\n", cp0_badvaddr_read(), 166 pstate->epc, symbol); 278 panic("%X: TLB Invalid Exception at %X(%s)\n", cp0_badvaddr_read(), pstate->epc, symbol); 167 279 } 168 280 … … 174 286 if (s) 175 287 symbol = s; 176 panic("%X: TLB Modified Exception at %X(%s)\n", cp0_badvaddr_read(), 177 pstate->epc, symbol); 288 panic("%X: TLB Modified Exception at %X(%s)\n", cp0_badvaddr_read(), pstate->epc, symbol); 178 289 } 179 290 … … 189 300 cpu_priority_restore(pri); 190 301 } 302 303 /** Try to find PTE for faulting address 304 * 305 * Try to find PTE for faulting address. 306 * The VM->lock must be held on entry to this function. 307 * 308 * @param badvaddr Faulting virtual address. 309 * 310 * @return PTE on success, NULL otherwise. 311 */ 312 pte_t *find_mapping_and_check(__address badvaddr) 313 { 314 struct entry_hi hi; 315 pte_t *pte; 316 317 *((__u32 *) &hi) = cp0_entry_hi_read(); 318 319 /* 320 * Handler cannot succeed if the ASIDs don't match. 321 */ 322 if (hi.asid != VM->asid) 323 return NULL; 324 325 /* 326 * Handler cannot succeed if badvaddr has no mapping. 327 */ 328 pte = find_mapping(badvaddr, 0); 329 if (!pte) 330 return NULL; 331 332 /* 333 * Handler cannot succeed if the mapping is marked as invalid.
334 */ 335 if (!pte->v) 336 return NULL; 337 338 return pte; 339 } 340 341 void prepare_entry_lo(struct entry_lo *lo, bool g, bool v, bool d, bool c, __address pfn) 342 { 343 lo->g = g; 344 lo->v = v; 345 lo->d = d; 346 lo->c = c; 347 lo->pfn = pfn; 348 lo->zero = 0; 349 }
Note:
See TracChangeset
for help on using the changeset viewer.