Changeset 567807b1 in mainline
- Timestamp:
- 2006-05-24T17:03:29Z (19 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 8d6bc2d5
- Parents:
- 82da5f5
- Files:
-
- 14 edited
Legend:
- Unmodified
- Added
- Removed
-
arch/amd64/include/mm/page.h
r82da5f5 r567807b1 107 107 #ifndef __ASM__ 108 108 109 /* Page fault error codes. */ 110 111 /** When bit on this position is 0, the page fault was caused by a not-present page. */ 112 #define PFERR_CODE_P (1<<0) 113 114 /** When bit on this position is 1, the page fault was caused by a write. */ 115 #define PFERR_CODE_RW (1<<1) 116 117 /** When bit on this position is 1, the page fault was caused in user mode. */ 118 #define PFERR_CODE_US (1<<2) 119 120 /** When bit on this position is 1, a reserved bit was set in page directory. */ 121 #define PFERR_CODE_RSVD (1<<3) 122 123 /** When bit on this position is 1, the page fault was caused during instruction fetch. */ 124 #define PFERR_CODE_ID (1<<4) 125 109 126 /** Page Table Entry. */ 110 127 struct page_specifier { -
arch/amd64/src/mm/page.c
r82da5f5 r567807b1 169 169 { 170 170 __address page; 171 pf_access_t access; 171 172 172 173 page = read_cr2(); 173 if (as_page_fault(page, istate) == AS_PF_FAULT) { 174 175 if (istate->error_word & PFERR_CODE_RSVD) 176 panic("Reserved bit set in page table entry.\n"); 177 178 if (istate->error_word & PFERR_CODE_RW) 179 access = PF_ACCESS_WRITE; 180 else if (istate->error_word & PFERR_CODE_ID) 181 access = PF_ACCESS_EXEC; 182 else 183 access = PF_ACCESS_READ; 184 185 if (as_page_fault(page, access, istate) == AS_PF_FAULT) { 174 186 print_info_errcode(n, istate); 175 187 printf("Page fault address: %llX\n", page); -
arch/ia32/include/interrupt.h
r82da5f5 r567807b1 93 93 extern void (* eoi_function)(void); 94 94 95 extern void PRINT_INFO_ERRCODE(istate_t *istate); 95 96 extern void null_interrupt(int n, istate_t *istate); 96 97 extern void gp_fault(int n, istate_t *istate); … … 98 99 extern void ss_fault(int n, istate_t *istate); 99 100 extern void simd_fp_exception(int n, istate_t *istate); 100 extern void page_fault(int n, istate_t *istate);101 101 extern void syscall(int n, istate_t *istate); 102 102 extern void tlb_shootdown_ipi(int n, istate_t *istate); -
arch/ia32/include/mm/page.h
r82da5f5 r567807b1 91 91 #include <typedefs.h> 92 92 93 /* Page fault error codes. */ 94 95 /** When bit on this position is 0, the page fault was caused by a not-present page. */ 96 #define PFERR_CODE_P (1<<0) 97 98 /** When bit on this position is 1, the page fault was caused by a write. */ 99 #define PFERR_CODE_RW (1<<1) 100 101 /** When bit on this position is 1, the page fault was caused in user mode. */ 102 #define PFERR_CODE_US (1<<2) 103 104 /** When bit on this position is 1, a reserved bit was set in page directory. */ 105 #define PFERR_CODE_RSVD (1<<3) 106 93 107 /** Page Table Entry. */ 94 108 struct page_specifier { … … 139 153 140 154 extern void page_arch_init(void); 155 extern void page_fault(int n, istate_t *istate); 141 156 142 157 #endif /* __ASM__ */ -
arch/ia32/src/interrupt.c
r82da5f5 r567807b1 55 55 void (* eoi_function)(void) = NULL; 56 56 57 staticvoid PRINT_INFO_ERRCODE(istate_t *istate)57 void PRINT_INFO_ERRCODE(istate_t *istate) 58 58 { 59 59 char *symbol = get_symtab_entry(istate->eip); … … 140 140 } 141 141 142 void page_fault(int n, istate_t *istate)143 {144 __address page;145 146 page = read_cr2();147 if (as_page_fault(page, istate) == AS_PF_FAULT) {148 PRINT_INFO_ERRCODE(istate);149 printf("page fault address: %#x\n", page);150 panic("page fault\n");151 }152 }153 154 142 void syscall(int n, istate_t *istate) 155 143 { -
arch/ia32/src/mm/page.c
r82da5f5 r567807b1 44 44 #include <interrupt.h> 45 45 46 47 46 void page_arch_init(void) 48 47 { … … 88 87 return virtaddr; 89 88 } 89 90 void page_fault(int n, istate_t *istate) 91 { 92 __address page; 93 pf_access_t access; 94 95 page = read_cr2(); 96 97 if (istate->error_word & PFERR_CODE_RSVD) 98 panic("Reserved bit set in page directory.\n"); 99 100 if (istate->error_word & PFERR_CODE_RW) 101 access = PF_ACCESS_WRITE; 102 else 103 access = PF_ACCESS_READ; 104 105 if (as_page_fault(page, access, istate) == AS_PF_FAULT) { 106 PRINT_INFO_ERRCODE(istate); 107 printf("page fault address: %#x\n", page); 108 panic("page fault\n"); 109 } 110 } -
arch/ia64/src/mm/tlb.c
r82da5f5 r567807b1 430 430 { 431 431 region_register rr; 432 rid_t rid; 432 433 __address va; 433 434 pte_t *t; 434 435 435 436 va = istate->cr_ifa; /* faulting address */ 437 rr.word = rr_read(VA2VRN(va)); 438 rid = rr.map.rid; 439 436 440 page_table_lock(AS, true); 437 441 t = page_mapping_find(AS, va); … … 448 452 */ 449 453 page_table_unlock(AS, true); 450 if (as_page_fault(va, istate) == AS_PF_FAULT) {451 panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, istate->cr_ifa, rr.map.rid, istate->cr_iip);454 if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) { 455 panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip); 452 456 } 453 457 } … … 494 498 */ 495 499 page_table_unlock(AS, true); 496 if (as_page_fault(va, istate) == AS_PF_FAULT) {500 if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) { 497 501 panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip); 498 502 } … … 519 523 void data_dirty_bit_fault(__u64 vector, istate_t *istate) 520 524 { 525 region_register rr; 526 rid_t rid; 527 __address va; 521 528 pte_t *t; 529 530 va = istate->cr_ifa; /* faulting address */ 531 rr.word = rr_read(VA2VRN(va)); 532 rid = rr.map.rid; 522 533 523 534 page_table_lock(AS, true); 524 t = page_mapping_find(AS, istate->cr_ifa); 535 t = page_mapping_find(AS, va); 536 ASSERT(t && t->p); 537 if (t && t->p && t->w) { 538 /* 539 * Update the Dirty bit in page tables and reinsert 540 * the mapping into DTC. 541 */ 542 t->d = true; 543 dtc_pte_copy(t); 544 } else { 545 if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) { 546 panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip); 547 t->d = true; 548 dtc_pte_copy(t); 549 } 550 } 551 page_table_unlock(AS, true); 552 } 553 554 /** Instruction access bit fault handler. 555 * 556 * @param vector Interruption vector. 557 * @param istate Structure with saved interruption state. 
558 */ 559 void instruction_access_bit_fault(__u64 vector, istate_t *istate) 560 { 561 region_register rr; 562 rid_t rid; 563 __address va; 564 pte_t *t; 565 566 va = istate->cr_ifa; /* faulting address */ 567 rr.word = rr_read(VA2VRN(va)); 568 rid = rr.map.rid; 569 570 page_table_lock(AS, true); 571 t = page_mapping_find(AS, va); 572 ASSERT(t && t->p); 573 if (t && t->p && t->x) { 574 /* 575 * Update the Accessed bit in page tables and reinsert 576 * the mapping into ITC. 577 */ 578 t->a = true; 579 itc_pte_copy(t); 580 } else { 581 if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) { 582 panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip); 583 t->a = true; 584 itc_pte_copy(t); 585 } 586 } 587 page_table_unlock(AS, true); 588 } 589 590 /** Data access bit fault handler. 591 * 592 * @param vector Interruption vector. 593 * @param istate Structure with saved interruption state. 594 */ 595 void data_access_bit_fault(__u64 vector, istate_t *istate) 596 { 597 region_register rr; 598 rid_t rid; 599 __address va; 600 pte_t *t; 601 602 va = istate->cr_ifa; /* faulting address */ 603 rr.word = rr_read(VA2VRN(va)); 604 rid = rr.map.rid; 605 606 page_table_lock(AS, true); 607 t = page_mapping_find(AS, va); 525 608 ASSERT(t && t->p); 526 609 if (t && t->p) { 527 610 /* 528 * Update the Dirtybit in page tables and reinsert611 * Update the Accessed bit in page tables and reinsert 529 612 * the mapping into DTC. 530 613 */ 531 t-> d= true;614 t->a = true; 532 615 dtc_pte_copy(t); 616 } else { 617 if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) { 618 panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip); 619 t->a = true; 620 itc_pte_copy(t); 621 } 533 622 } 534 623 page_table_unlock(AS, true); 535 624 } 536 625 537 /** Instruction access bit fault handler.626 /** Page not present fault handler. 538 627 * 539 628 * @param vector Interruption vector. 
540 629 * @param istate Structure with saved interruption state. 541 630 */ 542 void instruction_access_bit_fault(__u64 vector, istate_t *istate)543 {544 pte_t *t;545 546 page_table_lock(AS, true);547 t = page_mapping_find(AS, istate->cr_ifa);548 ASSERT(t && t->p);549 if (t && t->p) {550 /*551 * Update the Accessed bit in page tables and reinsert552 * the mapping into ITC.553 */554 t->a = true;555 itc_pte_copy(t);556 }557 page_table_unlock(AS, true);558 }559 560 /** Data access bit fault handler.561 *562 * @param vector Interruption vector.563 * @param istate Structure with saved interruption state.564 */565 void data_access_bit_fault(__u64 vector, istate_t *istate)566 {567 pte_t *t;568 569 page_table_lock(AS, true);570 t = page_mapping_find(AS, istate->cr_ifa);571 ASSERT(t && t->p);572 if (t && t->p) {573 /*574 * Update the Accessed bit in page tables and reinsert575 * the mapping into DTC.576 */577 t->a = true;578 dtc_pte_copy(t);579 }580 page_table_unlock(AS, true);581 }582 583 /** Page not present fault handler.584 *585 * @param vector Interruption vector.586 * @param istate Structure with saved interruption state.587 */588 631 void page_not_present(__u64 vector, istate_t *istate) 589 632 { 590 633 region_register rr; 634 rid_t rid; 591 635 __address va; 592 636 pte_t *t; 593 637 594 638 va = istate->cr_ifa; /* faulting address */ 639 rr.word = rr_read(VA2VRN(va)); 640 rid = rr.map.rid; 641 595 642 page_table_lock(AS, true); 596 643 t = page_mapping_find(AS, va); … … 609 656 } else { 610 657 page_table_unlock(AS, true); 611 if (as_page_fault(va, istate) == AS_PF_FAULT) {612 panic("%s: va=%p, rid=%d\n", __FUNCTION__, va, r r.map.rid);613 } 614 } 615 } 658 if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) { 659 panic("%s: va=%p, rid=%d\n", __FUNCTION__, va, rid); 660 } 661 } 662 } -
arch/mips32/src/mm/tlb.c
r82da5f5 r567807b1 45 45 static void tlb_modified_fail(istate_t *istate); 46 46 47 static pte_t *find_mapping_and_check(__address badvaddr, i state_t *istate, int *pfrc);47 static pte_t *find_mapping_and_check(__address badvaddr, int access, istate_t *istate, int *pfrc); 48 48 49 49 static void prepare_entry_lo(entry_lo_t *lo, bool g, bool v, bool d, bool cacheable, __address pfn); … … 102 102 page_table_lock(AS, true); 103 103 104 pte = find_mapping_and_check(badvaddr, istate, &pfrc);104 pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ, istate, &pfrc); 105 105 if (!pte) { 106 106 switch (pfrc) { … … 187 187 } 188 188 189 pte = find_mapping_and_check(badvaddr, istate, &pfrc);189 pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ, istate, &pfrc); 190 190 if (!pte) { 191 191 switch (pfrc) { … … 271 271 } 272 272 273 pte = find_mapping_and_check(badvaddr, istate, &pfrc);273 pte = find_mapping_and_check(badvaddr, PF_ACCESS_WRITE, istate, &pfrc); 274 274 if (!pte) { 275 275 switch (pfrc) { … … 367 367 * 368 368 * @param badvaddr Faulting virtual address. 369 * @param access Access mode that caused the fault. 369 370 * @param istate Pointer to interrupted state. 370 371 * @param pfrc Pointer to variable where as_page_fault() return code will be stored. … … 372 373 * @return PTE on success, NULL otherwise. 373 374 */ 374 pte_t *find_mapping_and_check(__address badvaddr, i state_t *istate, int *pfrc)375 pte_t *find_mapping_and_check(__address badvaddr, int access, istate_t *istate, int *pfrc) 375 376 { 376 377 entry_hi_t hi; … … 405 406 */ 406 407 page_table_unlock(AS, true); 407 switch (rc = as_page_fault(badvaddr, istate)) {408 switch (rc = as_page_fault(badvaddr, access, istate)) { 408 409 case AS_PF_OK: 409 410 /* -
arch/ppc32/src/mm/page.c
r82da5f5 r567807b1 54 54 * @param lock Lock/unlock the address space. 55 55 * @param badvaddr Faulting virtual address. 56 * @param access Access mode that caused the fault. 56 57 * @param istate Pointer to interrupted state. 57 58 * @param pfrc Pointer to variable where as_page_fault() return code will be stored. … … 59 60 * 60 61 */ 61 static pte_t *find_mapping_and_check(as_t *as, bool lock, __address badvaddr, istate_t *istate, int *pfcr) 62 static pte_t *find_mapping_and_check(as_t *as, bool lock, __address badvaddr, int access, 63 istate_t *istate, int *pfcr) 62 64 { 63 65 /* … … 79 81 */ 80 82 page_table_unlock(as, lock); 81 switch (rc = as_page_fault(badvaddr, istate)) {83 switch (rc = as_page_fault(badvaddr, access, istate)) { 82 84 case AS_PF_OK: 83 85 /* … … 212 214 page_table_lock(as, lock); 213 215 214 pte = find_mapping_and_check(as, lock, badvaddr, istate, &pfcr);216 pte = find_mapping_and_check(as, lock, badvaddr, PF_ACCESS_READ /* FIXME */, istate, &pfcr); 215 217 if (!pte) { 216 218 switch (pfcr) { -
arch/ppc64/src/mm/page.c
r82da5f5 r567807b1 54 54 * @param lock Lock/unlock the address space. 55 55 * @param badvaddr Faulting virtual address. 56 * @param access Access mode that caused the fault. 56 57 * @param istate Pointer to interrupted state. 57 58 * @param pfrc Pointer to variable where as_page_fault() return code will be stored. … … 59 60 * 60 61 */ 61 static pte_t *find_mapping_and_check(as_t *as, bool lock, __address badvaddr, istate_t *istate, int *pfcr) 62 static pte_t *find_mapping_and_check(as_t *as, bool lock, __address badvaddr, int access, 63 istate_t *istate, int *pfcr) 62 64 { 63 65 /* … … 79 81 */ 80 82 page_table_unlock(as, lock); 81 switch (rc = as_page_fault(badvaddr, istate)) {83 switch (rc = as_page_fault(badvaddr, access, istate)) { 82 84 case AS_PF_OK: 83 85 /* … … 212 214 page_table_lock(as, lock); 213 215 214 pte = find_mapping_and_check(as, lock, badvaddr, istate, &pfcr);216 pte = find_mapping_and_check(as, lock, badvaddr, PF_ACCESS_READ /* FIXME */, istate, &pfcr); 215 217 if (!pte) { 216 218 switch (pfcr) { -
generic/include/mm/as.h
r82da5f5 r567807b1 128 128 /** Address space area backend structure. */ 129 129 struct mem_backend { 130 int (* backend_page_fault)(as_area_t *area, __address addr );130 int (* backend_page_fault)(as_area_t *area, __address addr, pf_access_t access); 131 131 void (* backend_frame_free)(as_area_t *area, __address page, __address frame); 132 132 }; … … 146 146 extern int as_area_get_flags(as_area_t *area); 147 147 extern void as_set_mapping(as_t *as, __address page, __address frame); 148 extern int as_page_fault(__address page, istate_t *istate);148 extern int as_page_fault(__address page, pf_access_t access, istate_t *istate); 149 149 extern void as_switch(as_t *old, as_t *new); 150 150 extern void as_free(as_t *as); -
generic/include/mm/page.h
r82da5f5 r567807b1 61 61 #define PAGE_GLOBAL (1<<PAGE_GLOBAL_SHIFT) 62 62 63 /** Page fault access type. */ 64 enum pf_access { 65 PF_ACCESS_READ, 66 PF_ACCESS_WRITE, 67 PF_ACCESS_EXEC 68 }; 69 typedef enum pf_access pf_access_t; 70 63 71 /** Operations to manipulate page mappings. */ 64 72 struct page_mapping_operations { -
generic/src/lib/elf.c
r82da5f5 r567807b1 57 57 static int load_segment(elf_segment_header_t *entry, elf_header_t *elf, as_t *as); 58 58 59 static int elf_page_fault(as_area_t *area, __address addr );59 static int elf_page_fault(as_area_t *area, __address addr, pf_access_t access); 60 60 static void elf_frame_free(as_area_t *area, __address page, __address frame); 61 61 … … 226 226 * @param area Pointer to the address space area. 227 227 * @param addr Faulting virtual address. 228 * @param access Access mode that caused the fault (i.e. read/write/exec). 228 229 * 229 230 * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced). 230 231 */ 231 int elf_page_fault(as_area_t *area, __address addr )232 int elf_page_fault(as_area_t *area, __address addr, pf_access_t access) 232 233 { 233 234 elf_header_t *elf = (elf_header_t *) area->backend_data[0]; -
generic/src/mm/as.c
r82da5f5 r567807b1 376 376 __address base; 377 377 ipl_t ipl; 378 bool cond; 378 379 379 380 ipl = interrupts_disable(); … … 388 389 389 390 base = area->base; 390 if (!(area->flags & AS_AREA_DEVICE)) { 391 bool cond; 392 393 /* 394 * Releasing physical memory. 395 * Areas mapping memory-mapped devices are treated differently than 396 * areas backing frame_alloc()'ed memory. 397 */ 398 399 /* 400 * Visit only the pages mapped by used_space B+tree. 401 * Note that we must be very careful when walking the tree 402 * leaf list and removing used space as the leaf list changes 403 * unpredictibly after each remove. The solution is to actually 404 * not walk the tree at all, but to remove items from the head 405 * of the leaf list until there are some keys left. 406 */ 407 for (cond = true; cond;) { 408 btree_node_t *node; 391 392 /* 393 * Visit only the pages mapped by used_space B+tree. 394 * Note that we must be very careful when walking the tree 395 * leaf list and removing used space as the leaf list changes 396 * unpredictibly after each remove. The solution is to actually 397 * not walk the tree at all, but to remove items from the head 398 * of the leaf list until there are some keys left. 
399 */ 400 for (cond = true; cond;) { 401 btree_node_t *node; 409 402 410 411 412 413 414 415 403 ASSERT(!list_empty(&area->used_space.leaf_head)); 404 node = list_get_instance(area->used_space.leaf_head.next, btree_node_t, leaf_link); 405 if ((cond = (bool) node->keys)) { 406 __address b = node->key[0]; 407 count_t i; 408 pte_t *pte; 416 409 417 for (i = 0; i < (count_t) node->value[0]; i++) { 418 page_table_lock(as, false); 419 pte = page_mapping_find(as, b + i*PAGE_SIZE); 420 ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte)); 421 if (area->backend && area->backend->backend_frame_free) { 422 area->backend->backend_frame_free(area, 423 b + i*PAGE_SIZE, PTE_GET_FRAME(pte)); 424 } 425 page_mapping_remove(as, b + i*PAGE_SIZE); 426 page_table_unlock(as, false); 410 for (i = 0; i < (count_t) node->value[0]; i++) { 411 page_table_lock(as, false); 412 pte = page_mapping_find(as, b + i*PAGE_SIZE); 413 ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte)); 414 if (area->backend && area->backend->backend_frame_free) { 415 area->backend->backend_frame_free(area, 416 b + i*PAGE_SIZE, PTE_GET_FRAME(pte)); 427 417 } 428 if (!used_space_remove(area, b, i))429 panic("Could not remove used space.\n");418 page_mapping_remove(as, b + i*PAGE_SIZE); 419 page_table_unlock(as, false); 430 420 } 421 if (!used_space_remove(area, b, i)) 422 panic("Could not remove used space.\n"); 431 423 } 432 424 } … … 624 616 * 625 617 * @param page Faulting page. 618 * @param access Access mode that caused the fault (i.e. read/write/exec). 626 619 * @param istate Pointer to interrupted state. 627 620 * … … 629 622 * fault was caused by copy_to_uspace() or copy_from_uspace(). 630 623 */ 631 int as_page_fault(__address page, istate_t *istate)624 int as_page_fault(__address page, pf_access_t access, istate_t *istate) 632 625 { 633 626 pte_t *pte; … … 689 682 * Resort to the backend page fault handler. 
690 683 */ 691 if (area->backend->backend_page_fault(area, page ) != AS_PF_OK) {684 if (area->backend->backend_page_fault(area, page, access) != AS_PF_OK) { 692 685 page_table_unlock(AS, false); 693 686 mutex_unlock(&area->lock); … … 1451 1444 } 1452 1445 1453 static int anon_page_fault(as_area_t *area, __address addr );1446 static int anon_page_fault(as_area_t *area, __address addr, pf_access_t access); 1454 1447 static void anon_frame_free(as_area_t *area, __address page, __address frame); 1455 1448 … … 1468 1461 * @param area Pointer to the address space area. 1469 1462 * @param addr Faulting virtual address. 1463 * @param access Access mode that caused the fault (i.e. read/write/exec). 1470 1464 * 1471 1465 * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced). 1472 1466 */ 1473 int anon_page_fault(as_area_t *area, __address addr )1467 int anon_page_fault(as_area_t *area, __address addr, pf_access_t access) 1474 1468 { 1475 1469 __address frame;
Note:
See TracChangeset
for help on using the changeset viewer.