Changeset 9ad03fe in mainline
- Timestamp: 2006-03-01T12:58:13Z (19 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 03427d0
- Parents: a0d74fd
- Files: 7 edited
Legend:
- Unmodified (context lines, prefixed with a space)
- Added (prefixed with +)
- Removed (prefixed with -)
arch/ia64/include/mm/tlb.h
 extern void itr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr);

-extern void dtlb_mapping_insert(__address page, __address frame, bool dtr, index_t tr);
+extern void dtlb_kernel_mapping_insert(__address page, __address frame, bool dtr, index_t tr);
+
+extern void dtc_pte_copy(pte_t *t);
+extern void itc_pte_copy(pte_t *t);

 extern void alternate_instruction_tlb_fault(__u64 vector, struct exception_regdump *pstate);
arch/ia64/src/mm/page.c
     /*
-     * And invalidate the rest of region register.
+     * And setup the rest of region register.
      */
     for(i = 0; i < REGION_REGISTERS; i++) {
…
         rr.word == rr_read(i);
         rr.map.ve = 0;              /* disable VHPT walker */
-        rr.map.rid = RID_INVALID;
+        rr.map.rid = RID_KERNEL;
+        rr.map.ps = PAGE_WIDTH;
         rr_write(i, rr.word);
         srlz_i();
arch/ia64/src/mm/tlb.c
 #include <mm/tlb.h>
 #include <mm/asid.h>
+#include <mm/page.h>
+#include <mm/as.h>
 #include <arch/mm/tlb.h>
 #include <arch/mm/page.h>
…
 #include <typedefs.h>
 #include <panic.h>
-#include <print.h>
+#include <arch.h>

 /** Invalidate all TLB entries. */
…
     region_register rr;
     bool restore_rr = false;
-
-    if (!(entry.p))
-        return;

     rr.word = rr_read(VA2VRN(va));
…
     bool restore_rr = false;

-    if (!(entry.p))
-        return;
-
     rr.word = rr_read(VA2VRN(va));
     if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
…
  * @param tr Translation register if dtr is true, ignored otherwise.
  */
-void dtlb_mapping_insert(__address page, __address frame, bool dtr, index_t tr)
+void dtlb_kernel_mapping_insert(__address page, __address frame, bool dtr, index_t tr)
 {
     tlb_entry_t entry;
…
 }

+/** Copy content of PTE into data translation cache.
+ *
+ * @param t PTE.
+ */
+void dtc_pte_copy(pte_t *t)
+{
+    tlb_entry_t entry;
+
+    entry.word[0] = 0;
+    entry.word[1] = 0;
+
+    entry.p = t->p;
+    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
+    entry.a = t->a;
+    entry.d = t->d;
+    entry.pl = t->k ? PL_KERNEL : PL_USER;
+    entry.ar = t->w ? AR_WRITE : AR_READ;
+    entry.ppn = t->frame >> PPN_SHIFT;
+    entry.ps = PAGE_WIDTH;
+
+    dtc_mapping_insert(t->page, t->as->asid, entry);
+}
+
+/** Copy content of PTE into instruction translation cache.
+ *
+ * @param t PTE.
+ */
+void itc_pte_copy(pte_t *t)
+{
+    tlb_entry_t entry;
+
+    entry.word[0] = 0;
+    entry.word[1] = 0;
+
+    ASSERT(t->x);
+
+    entry.p = t->p;
+    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
+    entry.a = t->a;
+    entry.pl = t->k ? PL_KERNEL : PL_USER;
+    entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
+    entry.ppn = t->frame >> PPN_SHIFT;
+    entry.ps = PAGE_WIDTH;
+
+    itc_mapping_insert(t->page, t->as->asid, entry);
+}
+
+/** Instruction TLB fault handler for faults with VHPT turned off.
+ *
+ * @param vector Interruption vector.
+ * @param pstate Structure with saved interruption state.
+ */
 void alternate_instruction_tlb_fault(__u64 vector, struct exception_regdump *pstate)
 {
-    panic("%s\n", __FUNCTION__);
-}
-
-/** Data TLB fault with VHPT turned off.
+    region_register rr;
+    __address va;
+    pte_t *t;
+
+    va = pstate->cr_ifa;    /* faulting address */
+    t = page_mapping_find(AS, va);
+    if (t) {
+        /*
+         * The mapping was found in software page hash table.
+         * Insert it into data translation cache.
+         */
+        itc_pte_copy(t);
+    } else {
+        /*
+         * Forward the page fault to address space page fault handler.
+         */
+        if (!as_page_fault(va)) {
+            panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
+        }
+    }
+}
+
+/** Data TLB fault handler for faults with VHPT turned off.
  *
  * @param vector Interruption vector.
…
     rid_t rid;
     __address va;
+    pte_t *t;

     va = pstate->cr_ifa;    /* faulting address */
…
             * kernel address space.
             */
-            dtlb_mapping_insert(va, KA2PA(va), false, 0);
+            dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
             return;
         }
     }
-    panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
-}

+    t = page_mapping_find(AS, va);
+    if (t) {
+        /*
+         * The mapping was found in software page hash table.
+         * Insert it into data translation cache.
+         */
+        dtc_pte_copy(t);
+    } else {
+        /*
+         * Forward the page fault to address space page fault handler.
+         */
+        if (!as_page_fault(va)) {
+            panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
+        }
+    }
+}
+
+/** Data nested TLB fault handler.
+ *
+ * This fault should not occur.
+ *
+ * @param vector Interruption vector.
+ * @param pstate Structure with saved interruption state.
+ */
 void data_nested_tlb_fault(__u64 vector, struct exception_regdump *pstate)
 {
…
 }

+/** Data Dirty bit fault handler.
+ *
+ * @param vector Interruption vector.
+ * @param pstate Structure with saved interruption state.
+ */
 void data_dirty_bit_fault(__u64 vector, struct exception_regdump *pstate)
 {
-    panic("%s\n", __FUNCTION__);
-}
+    pte_t *t;
+
+    t = page_mapping_find(AS, pstate->cr_ifa);
+    ASSERT(t && t->p);
+    if (t && t->p) {
+        /*
+         * Update the Dirty bit in page tables and reinsert
+         * the mapping into DTC.
+         */
+        t->d = true;
+        dtc_pte_copy(t);
+    }
+}

+/** Instruction access bit fault handler.
+ *
+ * @param vector Interruption vector.
+ * @param pstate Structure with saved interruption state.
+ */
 void instruction_access_bit_fault(__u64 vector, struct exception_regdump *pstate)
 {
-    panic("%s\n", __FUNCTION__);
-}
+    pte_t *t;
+
+    t = page_mapping_find(AS, pstate->cr_ifa);
+    ASSERT(t && t->p);
+    if (t && t->p) {
+        /*
+         * Update the Accessed bit in page tables and reinsert
+         * the mapping into ITC.
+         */
+        t->a = true;
+        itc_pte_copy(t);
+    }
+}

+/** Data access bit fault handler.
+ *
+ * @param vector Interruption vector.
+ * @param pstate Structure with saved interruption state.
+ */
 void data_access_bit_fault(__u64 vector, struct exception_regdump *pstate)
 {
-    panic("%s\n", __FUNCTION__);
-}
+    pte_t *t;
+
+    t = page_mapping_find(AS, pstate->cr_ifa);
+    ASSERT(t && t->p);
+    if (t && t->p) {
+        /*
+         * Update the Accessed bit in page tables and reinsert
+         * the mapping into DTC.
+         */
+        t->a = true;
+        dtc_pte_copy(t);
+    }
+}

+/** Page not present fault handler.
+ *
+ * @param vector Interruption vector.
+ * @param pstate Structure with saved interruption state.
+ */
 void page_not_present(__u64 vector, struct exception_regdump *pstate)
 {
-    panic("%s\n", __FUNCTION__);
-}
+    region_register rr;
+    __address va;
+    pte_t *t;
+
+    va = pstate->cr_ifa;    /* faulting address */
+    t = page_mapping_find(AS, va);
+    ASSERT(t);
+
+    if (t->p) {
+        /*
+         * If the Present bit is set in page hash table, just copy it
+         * and update ITC/DTC.
+         */
+        if (t->x)
+            itc_pte_copy(t);
+        else
+            dtc_pte_copy(t);
+    } else {
+        if (!as_page_fault(va)) {
+            panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
+        }
+    }
+}
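The heart of the new refill path is the translation of software pte_t flags into hardware TLB entry fields done by dtc_pte_copy()/itc_pte_copy(). Below is a minimal, self-contained sketch of that mapping for the data path; pte_demo_t, tlb_entry_demo_t, the MA_/PL_/AR_ constant values and PPN_SHIFT are simplified stand-ins invented for the demo, not the kernel's actual definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's pte_t and tlb_entry_t; the field
 * names follow the changeset, the constant values below are made up. */
typedef struct {
    bool p, x, w, k, c, a, d;
    uintptr_t frame;
} pte_demo_t;

typedef struct {
    bool p, a, d;
    unsigned ma, pl, ar;
    uintptr_t ppn;
} tlb_entry_demo_t;

enum { MA_WRITEBACK = 0, MA_UNCACHEABLE = 4 };      /* assumed encodings */
enum { PL_KERNEL = 0, PL_USER = 3 };
enum { AR_READ = 0, AR_WRITE = 1, AR_EXECUTE = 2 };
enum { PPN_SHIFT = 12 };                            /* assumed 4K frames */

/* Mirrors the logic of dtc_pte_copy(): derive TLB entry fields from PTE flags. */
static tlb_entry_demo_t pte_to_dtc_entry(const pte_demo_t *t)
{
    tlb_entry_demo_t entry = {0};

    entry.p = t->p;                                     /* present */
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;    /* cacheable -> write-back */
    entry.a = t->a;                                     /* accessed */
    entry.d = t->d;                                     /* dirty */
    entry.pl = t->k ? PL_KERNEL : PL_USER;              /* privilege level */
    entry.ar = t->w ? AR_WRITE : AR_READ;               /* access rights */
    entry.ppn = t->frame >> PPN_SHIFT;                  /* physical page number */
    return entry;
}

int main(void)
{
    pte_demo_t pte = { .p = true, .w = true, .c = true, .frame = 0x1234000 };
    tlb_entry_demo_t e = pte_to_dtc_entry(&pte);

    printf("ppn=%#lx ar=%u pl=%u ma=%u\n", (unsigned long) e.ppn, e.ar, e.pl, e.ma);
    return 0;
}

The instruction-cache variant differs only in asserting the execute bit and picking (AR_EXECUTE | AR_READ) instead of the write/read choice.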
arch/ia64/src/proc/scheduler.c
          * If not, fill respective tranlsation register.
          */
-        dtlb_mapping_insert((__address) THREAD->kstack, KA2PA(THREAD->kstack), true, DTR_KSTACK);
+        dtlb_kernel_mapping_insert((__address) THREAD->kstack, KA2PA(THREAD->kstack), true, DTR_KSTACK);
     }
 }
genarch/include/mm/page_ht.h
     __address page;     /**< Virtual memory page. */
     __address frame;    /**< Physical memory frame. */
-    int flags;
+    unsigned g : 1;     /**< Global page. */
+    unsigned x : 1;     /**< Execute. */
+    unsigned w : 1;     /**< Writable. */
+    unsigned k : 1;     /**< Kernel privileges required. */
+    unsigned c : 1;     /**< Cacheable. */
     unsigned a : 1;     /**< Accessed. */
     unsigned d : 1;     /**< Dirty. */
genarch/src/mm/page_ht.c
         t = (pte_t *) malloc(sizeof(pte_t), FRAME_ATOMIC);
         ASSERT(t != NULL);

+        t->g = (flags & PAGE_GLOBAL) != 0;
+        t->x = (flags & PAGE_EXEC) != 0;
+        t->w = (flags & PAGE_WRITE) != 0;
+        t->k = !(flags & PAGE_USER);
+        t->c = (flags & PAGE_CACHEABLE) != 0;
+        t->p = !(flags & PAGE_NOT_PRESENT);
+
+        t->as = as;
+        t->page = page;
+        t->frame = frame;
+
         hash_table_insert(&page_ht, key, &t->link);
generic/src/adt/hash_table.c
 void hash_table_create(hash_table_t *h, count_t m, count_t max_keys, hash_table_operations_t *op)
 {
+    int i;
+
     ASSERT(h);
     ASSERT(op && op->hash && op->compare);
…
     }
     memsetb((__address) h->entry, m * sizeof(link_t *), 0);
+
+    for (i = 0; i < m; i++)
+        list_initialize(&h->entry[i]);

     h->entries = m;
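The hash_table_create() change matters because each bucket is a circular doubly linked list head that must point to itself before the first insertion; zeroing the array alone leaves NULL pointers. A small self-contained illustration follows; link_demo_t and the helper functions only approximate the kernel's link_t/list_initialize()/list_append() and are not the actual HelenOS list API.

#include <stdio.h>
#include <stdlib.h>

/* Rough approximation of a circular doubly linked list head/item. */
typedef struct link_demo {
    struct link_demo *prev, *next;
} link_demo_t;

static void list_initialize_demo(link_demo_t *head)
{
    /* An empty circular list is a head that points to itself. */
    head->prev = head;
    head->next = head;
}

static void list_append_demo(link_demo_t *item, link_demo_t *head)
{
    /* Insert before the head (i.e. at the tail).  This dereferences
     * head->prev, which is why a merely zeroed bucket head would crash. */
    item->prev = head->prev;
    item->next = head;
    head->prev->next = item;
    head->prev = item;
}

int main(void)
{
    size_t m = 4;
    link_demo_t *buckets = calloc(m, sizeof(link_demo_t));  /* zeroed, like memsetb() */
    link_demo_t item;

    if (!buckets)
        return 1;

    /* The step the changeset adds: turn each zeroed bucket into a valid
     * empty list head before any insertion. */
    for (size_t i = 0; i < m; i++)
        list_initialize_demo(&buckets[i]);

    list_append_demo(&item, &buckets[1]);
    printf("bucket 1 non-empty: %d\n", buckets[1].next != &buckets[1]);

    free(buckets);
    return 0;
}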
Note: See TracChangeset for help on using the changeset viewer.