Changeset 567807b1 in mainline


Ignore:
Timestamp:
2006-05-24T17:03:29Z (19 years ago)
Author:
Jakub Jermar <jakub@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
8d6bc2d5
Parents:
82da5f5
Message:

Modify the hierarchy of page fault handlers to pass access mode that caused the fault.
Architectures are required to pass either PF_ACCESS_READ, PF_ACCESS_WRITE or PF_ACCESS_EXEC
to as_page_fault(), depending on the cause of the fault.

Files:
14 edited

Legend:

Unmodified
Added
Removed
  • arch/amd64/include/mm/page.h

    r82da5f5 r567807b1  
    107107#ifndef __ASM__
    108108
     109/* Page fault error codes. */
     110
     111/** When bit on this position is 0, the page fault was caused by a not-present page. */
     112#define PFERR_CODE_P            (1<<0) 
     113
     114/** When bit on this position is 1, the page fault was caused by a write. */
     115#define PFERR_CODE_RW           (1<<1)
     116
     117/** When bit on this position is 1, the page fault was caused in user mode. */
     118#define PFERR_CODE_US           (1<<2)
     119
     120/** When bit on this position is 1, a reserved bit was set in page directory. */
     121#define PFERR_CODE_RSVD         (1<<3)
     122
     123/** When bit on this position is 1, the page fault was caused during instruction fetch. */
     124#define PFERR_CODE_ID           (1<<4)
     125
    109126/** Page Table Entry. */
    110127struct page_specifier {
  • arch/amd64/src/mm/page.c

    r82da5f5 r567807b1  
    169169{
    170170        __address page;
     171        pf_access_t access;
    171172       
    172173        page = read_cr2();
    173         if (as_page_fault(page, istate) == AS_PF_FAULT) {
     174       
     175        if (istate->error_word & PFERR_CODE_RSVD)
     176                panic("Reserved bit set in page table entry.\n");
     177       
     178        if (istate->error_word & PFERR_CODE_RW)
     179                access = PF_ACCESS_WRITE;
     180        else if (istate->error_word & PFERR_CODE_ID)
     181                access = PF_ACCESS_EXEC;
     182        else
     183                access = PF_ACCESS_READ;
     184       
     185        if (as_page_fault(page, access, istate) == AS_PF_FAULT) {
    174186                print_info_errcode(n, istate);
    175187                printf("Page fault address: %llX\n", page);
  • arch/ia32/include/interrupt.h

    r82da5f5 r567807b1  
    9393extern void (* eoi_function)(void);
    9494
     95extern void PRINT_INFO_ERRCODE(istate_t *istate);
    9596extern void null_interrupt(int n, istate_t *istate);
    9697extern void gp_fault(int n, istate_t *istate);
     
    9899extern void ss_fault(int n, istate_t *istate);
    99100extern void simd_fp_exception(int n, istate_t *istate);
    100 extern void page_fault(int n, istate_t *istate);
    101101extern void syscall(int n, istate_t *istate);
    102102extern void tlb_shootdown_ipi(int n, istate_t *istate);
  • arch/ia32/include/mm/page.h

    r82da5f5 r567807b1  
    9191#include <typedefs.h>
    9292
     93/* Page fault error codes. */
     94
     95/** When bit on this position is 0, the page fault was caused by a not-present page. */
     96#define PFERR_CODE_P            (1<<0)
     97
     98/** When bit on this position is 1, the page fault was caused by a write. */
     99#define PFERR_CODE_RW           (1<<1)
     100
     101/** When bit on this position is 1, the page fault was caused in user mode. */
     102#define PFERR_CODE_US           (1<<2)
     103
     104/** When bit on this position is 1, a reserved bit was set in page directory. */
     105#define PFERR_CODE_RSVD         (1<<3) 
     106
    93107/** Page Table Entry. */
    94108struct page_specifier {
     
    139153
    140154extern void page_arch_init(void);
     155extern void page_fault(int n, istate_t *istate);
    141156
    142157#endif /* __ASM__ */
  • arch/ia32/src/interrupt.c

    r82da5f5 r567807b1  
    5555void (* eoi_function)(void) = NULL;
    5656
    57 static void PRINT_INFO_ERRCODE(istate_t *istate)
     57void PRINT_INFO_ERRCODE(istate_t *istate)
    5858{
    5959        char *symbol = get_symtab_entry(istate->eip);
     
    140140}
    141141
    142 void page_fault(int n, istate_t *istate)
    143 {
    144         __address page;
    145 
    146         page = read_cr2();
    147         if (as_page_fault(page, istate) == AS_PF_FAULT) {
    148                 PRINT_INFO_ERRCODE(istate);
    149                 printf("page fault address: %#x\n", page);
    150                 panic("page fault\n");
    151         }
    152 }
    153 
    154142void syscall(int n, istate_t *istate)
    155143{
  • arch/ia32/src/mm/page.c

    r82da5f5 r567807b1  
    4444#include <interrupt.h>
    4545
    46 
    4746void page_arch_init(void)
    4847{
     
    8887        return virtaddr;
    8988}
     89
     90void page_fault(int n, istate_t *istate)
     91{
     92        __address page;
     93        pf_access_t access;
     94       
     95        page = read_cr2();
     96               
     97        if (istate->error_word & PFERR_CODE_RSVD)
     98                panic("Reserved bit set in page directory.\n");
     99
     100        if (istate->error_word & PFERR_CODE_RW)
     101                access = PF_ACCESS_WRITE;
     102        else
     103                access = PF_ACCESS_READ;
     104
     105        if (as_page_fault(page, access, istate) == AS_PF_FAULT) {
     106                PRINT_INFO_ERRCODE(istate);
     107                printf("page fault address: %#x\n", page);
     108                panic("page fault\n");
     109        }
     110}
  • arch/ia64/src/mm/tlb.c

    r82da5f5 r567807b1  
    430430{
    431431        region_register rr;
     432        rid_t rid;
    432433        __address va;
    433434        pte_t *t;
    434435       
    435436        va = istate->cr_ifa;    /* faulting address */
     437        rr.word = rr_read(VA2VRN(va));
     438        rid = rr.map.rid;
     439
    436440        page_table_lock(AS, true);
    437441        t = page_mapping_find(AS, va);
     
    448452                 */
    449453                page_table_unlock(AS, true);
    450                 if (as_page_fault(va, istate) == AS_PF_FAULT) {
    451                         panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, istate->cr_ifa, rr.map.rid, istate->cr_iip);
     454                if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
     455                        panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
    452456                }
    453457        }
     
    494498                 */
    495499                page_table_unlock(AS, true);
    496                 if (as_page_fault(va, istate) == AS_PF_FAULT) {
     500                if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
    497501                        panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
    498502                }
     
    519523void data_dirty_bit_fault(__u64 vector, istate_t *istate)
    520524{
     525        region_register rr;
     526        rid_t rid;
     527        __address va;
    521528        pte_t *t;
     529       
     530        va = istate->cr_ifa;    /* faulting address */
     531        rr.word = rr_read(VA2VRN(va));
     532        rid = rr.map.rid;
    522533
    523534        page_table_lock(AS, true);
    524         t = page_mapping_find(AS, istate->cr_ifa);
     535        t = page_mapping_find(AS, va);
     536        ASSERT(t && t->p);
     537        if (t && t->p && t->w) {
     538                /*
     539                 * Update the Dirty bit in page tables and reinsert
     540                 * the mapping into DTC.
     541                 */
     542                t->d = true;
     543                dtc_pte_copy(t);
     544        } else {
     545                if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
     546                        panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
     547                        t->d = true;
     548                        dtc_pte_copy(t);
     549                }
     550        }
     551        page_table_unlock(AS, true);
     552}
     553
     554/** Instruction access bit fault handler.
     555 *
     556 * @param vector Interruption vector.
     557 * @param istate Structure with saved interruption state.
     558 */
     559void instruction_access_bit_fault(__u64 vector, istate_t *istate)
     560{
     561        region_register rr;
     562        rid_t rid;
     563        __address va;
     564        pte_t *t;       
     565
     566        va = istate->cr_ifa;    /* faulting address */
     567        rr.word = rr_read(VA2VRN(va));
     568        rid = rr.map.rid;
     569
     570        page_table_lock(AS, true);
     571        t = page_mapping_find(AS, va);
     572        ASSERT(t && t->p);
     573        if (t && t->p && t->x) {
     574                /*
     575                 * Update the Accessed bit in page tables and reinsert
     576                 * the mapping into ITC.
     577                 */
     578                t->a = true;
     579                itc_pte_copy(t);
     580        } else {
     581                if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
     582                        panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
     583                        t->a = true;
     584                        itc_pte_copy(t);
     585                }
     586        }
     587        page_table_unlock(AS, true);
     588}
     589
     590/** Data access bit fault handler.
     591 *
     592 * @param vector Interruption vector.
     593 * @param istate Structure with saved interruption state.
     594 */
     595void data_access_bit_fault(__u64 vector, istate_t *istate)
     596{
     597        region_register rr;
     598        rid_t rid;
     599        __address va;
     600        pte_t *t;
     601
     602        va = istate->cr_ifa;    /* faulting address */
     603        rr.word = rr_read(VA2VRN(va));
     604        rid = rr.map.rid;
     605
     606        page_table_lock(AS, true);
     607        t = page_mapping_find(AS, va);
    525608        ASSERT(t && t->p);
    526609        if (t && t->p) {
    527610                /*
    528                  * Update the Dirty bit in page tables and reinsert
     611                 * Update the Accessed bit in page tables and reinsert
    529612                 * the mapping into DTC.
    530613                 */
    531                 t->d = true;
     614                t->a = true;
    532615                dtc_pte_copy(t);
     616        } else {
     617                if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
     618                        panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
     619                        t->a = true;
     620                        itc_pte_copy(t);
     621                }
    533622        }
    534623        page_table_unlock(AS, true);
    535624}
    536625
    537 /** Instruction access bit fault handler.
     626/** Page not present fault handler.
    538627 *
    539628 * @param vector Interruption vector.
    540629 * @param istate Structure with saved interruption state.
    541630 */
    542 void instruction_access_bit_fault(__u64 vector, istate_t *istate)
    543 {
    544         pte_t *t;
    545 
    546         page_table_lock(AS, true);
    547         t = page_mapping_find(AS, istate->cr_ifa);
    548         ASSERT(t && t->p);
    549         if (t && t->p) {
    550                 /*
    551                  * Update the Accessed bit in page tables and reinsert
    552                  * the mapping into ITC.
    553                  */
    554                 t->a = true;
    555                 itc_pte_copy(t);
    556         }
    557         page_table_unlock(AS, true);
    558 }
    559 
    560 /** Data access bit fault handler.
    561  *
    562  * @param vector Interruption vector.
    563  * @param istate Structure with saved interruption state.
    564  */
    565 void data_access_bit_fault(__u64 vector, istate_t *istate)
    566 {
    567         pte_t *t;
    568 
    569         page_table_lock(AS, true);
    570         t = page_mapping_find(AS, istate->cr_ifa);
    571         ASSERT(t && t->p);
    572         if (t && t->p) {
    573                 /*
    574                  * Update the Accessed bit in page tables and reinsert
    575                  * the mapping into DTC.
    576                  */
    577                 t->a = true;
    578                 dtc_pte_copy(t);
    579         }
    580         page_table_unlock(AS, true);
    581 }
    582 
    583 /** Page not present fault handler.
    584  *
    585  * @param vector Interruption vector.
    586  * @param istate Structure with saved interruption state.
    587  */
    588631void page_not_present(__u64 vector, istate_t *istate)
    589632{
    590633        region_register rr;
     634        rid_t rid;
    591635        __address va;
    592636        pte_t *t;
    593637       
    594638        va = istate->cr_ifa;    /* faulting address */
     639        rr.word = rr_read(VA2VRN(va));
     640        rid = rr.map.rid;
     641
    595642        page_table_lock(AS, true);
    596643        t = page_mapping_find(AS, va);
     
    609656        } else {
    610657                page_table_unlock(AS, true);
    611                 if (as_page_fault(va, istate) == AS_PF_FAULT) {
    612                         panic("%s: va=%p, rid=%d\n", __FUNCTION__, va, rr.map.rid);
    613                 }
    614         }
    615 }
     658                if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
     659                        panic("%s: va=%p, rid=%d\n", __FUNCTION__, va, rid);
     660                }
     661        }
     662}
  • arch/mips32/src/mm/tlb.c

    r82da5f5 r567807b1  
    4545static void tlb_modified_fail(istate_t *istate);
    4646
    47 static pte_t *find_mapping_and_check(__address badvaddr, istate_t *istate, int *pfrc);
     47static pte_t *find_mapping_and_check(__address badvaddr, int access, istate_t *istate, int *pfrc);
    4848
    4949static void prepare_entry_lo(entry_lo_t *lo, bool g, bool v, bool d, bool cacheable, __address pfn);
     
    102102        page_table_lock(AS, true);
    103103
    104         pte = find_mapping_and_check(badvaddr, istate, &pfrc);
     104        pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ, istate, &pfrc);
    105105        if (!pte) {
    106106                switch (pfrc) {
     
    187187        }
    188188
    189         pte = find_mapping_and_check(badvaddr, istate, &pfrc);
     189        pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ, istate, &pfrc);
    190190        if (!pte) {
    191191                switch (pfrc) {
     
    271271        }
    272272
    273         pte = find_mapping_and_check(badvaddr, istate, &pfrc);
     273        pte = find_mapping_and_check(badvaddr, PF_ACCESS_WRITE, istate, &pfrc);
    274274        if (!pte) {
    275275                switch (pfrc) {
     
    367367 *
    368368 * @param badvaddr Faulting virtual address.
     369 * @param access Access mode that caused the fault.
    369370 * @param istate Pointer to interrupted state.
    370371 * @param pfrc Pointer to variable where as_page_fault() return code will be stored.
     
    372373 * @return PTE on success, NULL otherwise.
    373374 */
    374 pte_t *find_mapping_and_check(__address badvaddr, istate_t *istate, int *pfrc)
     375pte_t *find_mapping_and_check(__address badvaddr, int access, istate_t *istate, int *pfrc)
    375376{
    376377        entry_hi_t hi;
     
    405406                 */
    406407                page_table_unlock(AS, true);
    407                 switch (rc = as_page_fault(badvaddr, istate)) {
     408                switch (rc = as_page_fault(badvaddr, access, istate)) {
    408409                case AS_PF_OK:
    409410                        /*
  • arch/ppc32/src/mm/page.c

    r82da5f5 r567807b1  
    5454 * @param lock     Lock/unlock the address space.
    5555 * @param badvaddr Faulting virtual address.
     56 * @param access   Access mode that caused the fault.
    5657 * @param istate   Pointer to interrupted state.
    5758 * @param pfrc     Pointer to variable where as_page_fault() return code will be stored.
     
    5960 *
    6061 */
    61 static pte_t *find_mapping_and_check(as_t *as, bool lock, __address badvaddr, istate_t *istate, int *pfcr)
     62static pte_t *find_mapping_and_check(as_t *as, bool lock, __address badvaddr, int access,
     63                                     istate_t *istate, int *pfcr)
    6264{
    6365        /*
     
    7981                 */
    8082                page_table_unlock(as, lock);
    81                 switch (rc = as_page_fault(badvaddr, istate)) {
     83                switch (rc = as_page_fault(badvaddr, access, istate)) {
    8284                        case AS_PF_OK:
    8385                                /*
     
    212214        page_table_lock(as, lock);
    213215       
    214         pte = find_mapping_and_check(as, lock, badvaddr, istate, &pfcr);
     216        pte = find_mapping_and_check(as, lock, badvaddr, PF_ACCESS_READ /* FIXME */, istate, &pfcr);
    215217        if (!pte) {
    216218                switch (pfcr) {
  • arch/ppc64/src/mm/page.c

    r82da5f5 r567807b1  
    5454 * @param lock     Lock/unlock the address space.
    5555 * @param badvaddr Faulting virtual address.
     56 * @param access   Access mode that caused the fault.
    5657 * @param istate   Pointer to interrupted state.
    5758 * @param pfrc     Pointer to variable where as_page_fault() return code will be stored.
     
    5960 *
    6061 */
    61 static pte_t *find_mapping_and_check(as_t *as, bool lock, __address badvaddr, istate_t *istate, int *pfcr)
     62static pte_t *find_mapping_and_check(as_t *as, bool lock, __address badvaddr, int access,
     63                                     istate_t *istate, int *pfcr)
    6264{
    6365        /*
     
    7981                 */
    8082                page_table_unlock(as, lock);
    81                 switch (rc = as_page_fault(badvaddr, istate)) {
     83                switch (rc = as_page_fault(badvaddr, access, istate)) {
    8284                        case AS_PF_OK:
    8385                                /*
     
    212214        page_table_lock(as, lock);
    213215       
    214         pte = find_mapping_and_check(as, lock, badvaddr, istate, &pfcr);
     216        pte = find_mapping_and_check(as, lock, badvaddr, PF_ACCESS_READ /* FIXME */, istate, &pfcr);
    215217        if (!pte) {
    216218                switch (pfcr) {
  • generic/include/mm/as.h

    r82da5f5 r567807b1  
    128128/** Address space area backend structure. */
    129129struct mem_backend {
    130         int (* backend_page_fault)(as_area_t *area, __address addr);
     130        int (* backend_page_fault)(as_area_t *area, __address addr, pf_access_t access);
    131131        void (* backend_frame_free)(as_area_t *area, __address page, __address frame);
    132132};
     
    146146extern int as_area_get_flags(as_area_t *area);
    147147extern void as_set_mapping(as_t *as, __address page, __address frame);
    148 extern int as_page_fault(__address page, istate_t *istate);
     148extern int as_page_fault(__address page, pf_access_t access, istate_t *istate);
    149149extern void as_switch(as_t *old, as_t *new);
    150150extern void as_free(as_t *as);
  • generic/include/mm/page.h

    r82da5f5 r567807b1  
    6161#define PAGE_GLOBAL             (1<<PAGE_GLOBAL_SHIFT)
    6262
     63/** Page fault access type. */
     64enum pf_access {
     65        PF_ACCESS_READ,
     66        PF_ACCESS_WRITE,
     67        PF_ACCESS_EXEC
     68};
     69typedef enum pf_access pf_access_t;
     70
    6371/** Operations to manipulate page mappings. */
    6472struct page_mapping_operations {
  • generic/src/lib/elf.c

    r82da5f5 r567807b1  
    5757static int load_segment(elf_segment_header_t *entry, elf_header_t *elf, as_t *as);
    5858
    59 static int elf_page_fault(as_area_t *area, __address addr);
     59static int elf_page_fault(as_area_t *area, __address addr, pf_access_t access);
    6060static void elf_frame_free(as_area_t *area, __address page, __address frame);
    6161
     
    226226 * @param area Pointer to the address space area.
    227227 * @param addr Faulting virtual address.
     228 * @param access Access mode that caused the fault (i.e. read/write/exec).
    228229 *
    229230 * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
    230231 */
    231 int elf_page_fault(as_area_t *area, __address addr)
     232int elf_page_fault(as_area_t *area, __address addr, pf_access_t access)
    232233{
    233234        elf_header_t *elf = (elf_header_t *) area->backend_data[0];
  • generic/src/mm/as.c

    r82da5f5 r567807b1  
    376376        __address base;
    377377        ipl_t ipl;
     378        bool cond;
    378379
    379380        ipl = interrupts_disable();
     
    388389
    389390        base = area->base;
    390         if (!(area->flags & AS_AREA_DEVICE)) {
    391                 bool cond;     
    392        
    393                 /*
    394                  * Releasing physical memory.
    395                  * Areas mapping memory-mapped devices are treated differently than
    396                  * areas backing frame_alloc()'ed memory.
    397                  */
    398 
    399                 /*
    400                  * Visit only the pages mapped by used_space B+tree.
    401                  * Note that we must be very careful when walking the tree
    402                  * leaf list and removing used space as the leaf list changes
     403                  * unpredictably after each remove. The solution is to actually
    404                  * not walk the tree at all, but to remove items from the head
    405                  * of the leaf list until there are some keys left.
    406                  */
    407                 for (cond = true; cond;) {
    408                         btree_node_t *node;
     391
     392        /*
     393         * Visit only the pages mapped by used_space B+tree.
     394         * Note that we must be very careful when walking the tree
     395         * leaf list and removing used space as the leaf list changes
      396         * unpredictably after each remove. The solution is to actually
     397         * not walk the tree at all, but to remove items from the head
     398         * of the leaf list until there are some keys left.
     399         */
     400        for (cond = true; cond;) {
     401                btree_node_t *node;
    409402               
    410                         ASSERT(!list_empty(&area->used_space.leaf_head));
    411                         node = list_get_instance(area->used_space.leaf_head.next, btree_node_t, leaf_link);
    412                         if ((cond = (bool) node->keys)) {
    413                                 __address b = node->key[0];
    414                                 count_t i;
    415                                 pte_t *pte;
     403                ASSERT(!list_empty(&area->used_space.leaf_head));
     404                node = list_get_instance(area->used_space.leaf_head.next, btree_node_t, leaf_link);
     405                if ((cond = (bool) node->keys)) {
     406                        __address b = node->key[0];
     407                        count_t i;
     408                        pte_t *pte;
    416409                       
    417                                 for (i = 0; i < (count_t) node->value[0]; i++) {
    418                                         page_table_lock(as, false);
    419                                         pte = page_mapping_find(as, b + i*PAGE_SIZE);
    420                                         ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
    421                                         if (area->backend && area->backend->backend_frame_free) {
    422                                                 area->backend->backend_frame_free(area,
    423                                                         b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
    424                                         }
    425                                         page_mapping_remove(as, b + i*PAGE_SIZE);
    426                                         page_table_unlock(as, false);
     410                        for (i = 0; i < (count_t) node->value[0]; i++) {
     411                                page_table_lock(as, false);
     412                                pte = page_mapping_find(as, b + i*PAGE_SIZE);
     413                                ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
     414                                if (area->backend && area->backend->backend_frame_free) {
     415                                        area->backend->backend_frame_free(area,
     416                                                b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
    427417                                }
    428                                 if (!used_space_remove(area, b, i))
    429                                         panic("Could not remove used space.\n");
     418                                page_mapping_remove(as, b + i*PAGE_SIZE);
     419                                page_table_unlock(as, false);
    430420                        }
     421                        if (!used_space_remove(area, b, i))
     422                                panic("Could not remove used space.\n");
    431423                }
    432424        }
     
    624616 *
    625617 * @param page Faulting page.
     618 * @param access Access mode that caused the fault (i.e. read/write/exec).
    626619 * @param istate Pointer to interrupted state.
    627620 *
     
    629622 *         fault was caused by copy_to_uspace() or copy_from_uspace().
    630623 */
    631 int as_page_fault(__address page, istate_t *istate)
     624int as_page_fault(__address page, pf_access_t access, istate_t *istate)
    632625{
    633626        pte_t *pte;
     
    689682         * Resort to the backend page fault handler.
    690683         */
    691         if (area->backend->backend_page_fault(area, page) != AS_PF_OK) {
     684        if (area->backend->backend_page_fault(area, page, access) != AS_PF_OK) {
    692685                page_table_unlock(AS, false);
    693686                mutex_unlock(&area->lock);
     
    14511444}
    14521445
    1453 static int anon_page_fault(as_area_t *area, __address addr);
     1446static int anon_page_fault(as_area_t *area, __address addr, pf_access_t access);
    14541447static void anon_frame_free(as_area_t *area, __address page, __address frame);
    14551448
     
    14681461 * @param area Pointer to the address space area.
    14691462 * @param addr Faulting virtual address.
     1463 * @param access Access mode that caused the fault (i.e. read/write/exec).
    14701464 *
    14711465 * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
    14721466 */
    1473 int anon_page_fault(as_area_t *area, __address addr)
     1467int anon_page_fault(as_area_t *area, __address addr, pf_access_t access)
    14741468{
    14751469        __address frame;
Note: See TracChangeset for help on using the changeset viewer.