Changeset 0ee077ee in mainline


Timestamp:
2006-05-27T17:50:30Z
Author:
Jakub Jermar <jakub@…>
Branches:
lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children:
127c957b
Parents:
fb84455
Message:

Move the sharing functionality to address space area backends.
Add a backend for contiguous regions of physical memory.
Sharing for these areas works automagically now.
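
The new backend_phys.c itself is not shown in this changeset view. As a rough sketch of what the commit message describes, inferred only from the hooks declared in as.h and the call site in generic/src/ddi/ddi.c below (the function names and bodies here are illustrative, not the actual file contents):

    /* Hypothetical sketch of the physical memory backend.
     * d1 carries the physical base and d2 the page count, matching
     * the backend_data initialized in generic/src/ddi/ddi.c below. */
    static int phys_page_fault(as_area_t *area, __address addr, pf_access_t access)
    {
            __address base = (__address) area->backend_data.d1;

            if (!as_area_check_access(area, access))
                    return AS_PF_FAULT;

            ASSERT(addr - area->base < area->backend_data.d2 * FRAME_SIZE);
            page_mapping_insert(AS, addr, base + (addr - area->base),
                    as_area_get_flags(area));
            if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
                    panic("Could not insert used space.\n");

            return AS_PF_OK;
    }

    /* Sharing needs no pagemap: the physical base travels in the
     * area's backend_data, so each clone of the area can resolve
     * its own faults. This is why sharing works "automagically". */
    static void phys_share(as_area_t *area)
    {
    }

    mem_backend_t phys_backend = {
            .page_fault = phys_page_fault,
            .frame_free = NULL,     /* physical frames are never freed here */
            .share = phys_share
    };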

Files:
3 added
7 edited

  • Makefile

    --- Makefile (rfb84455)
    +++ Makefile (r0ee077ee)
    @@ -142,4 +142,7 @@
             generic/src/mm/tlb.c \
             generic/src/mm/as.c \
    +        generic/src/mm/backend_anon.c \
    +        generic/src/mm/backend_elf.c \
    +        generic/src/mm/backend_phys.c \
             generic/src/mm/slab.c \
             generic/src/lib/func.c \
  • genarch/src/mm/page_ht.c

    --- genarch/src/mm/page_ht.c (rfb84455)
    +++ genarch/src/mm/page_ht.c (r0ee077ee)
    @@ -187,6 +187,6 @@
     
                     t->as = as;
    -                t->page = page;
    -                t->frame = frame;
    +                t->page = ALIGN_DOWN(page, PAGE_SIZE);
    +                t->frame = ALIGN_DOWN(frame, FRAME_SIZE);
     
                     hash_table_insert(&page_ht, key, &t->link);
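
Aligning both addresses at insertion time means callers of the page hash table no longer have to pre-align them; the backend page fault handlers pass the raw faulting address straight through. For example, with 4 KiB pages, ALIGN_DOWN(0x12345, PAGE_SIZE) stores the mapping under 0x12000.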
  • generic/include/mm/as.h

    --- generic/include/mm/as.h (rfb84455)
    +++ generic/include/mm/as.h (r0ee077ee)
    @@ -31,9 +31,8 @@
     
     /** Address space area flags. */
    -#define AS_AREA_READ    1
    -#define AS_AREA_WRITE   2
    -#define AS_AREA_EXEC    4
    -#define AS_AREA_DEVICE  8
    -#define AS_AREA_ANON    16
    +#define AS_AREA_READ            1
    +#define AS_AREA_WRITE           2
    +#define AS_AREA_EXEC            4
    +#define AS_AREA_CACHEABLE       8
     
     #ifdef KERNEL

    @@ -105,6 +104,23 @@
                                          or memcpy_to_uspace(). */
     
    -typedef struct share_info share_info_t;
    -typedef struct mem_backend mem_backend_t;
    +/** This structure contains information associated with the shared address space area. */
    +typedef struct {
    +        mutex_t lock;           /**< This lock must be acquired only when the as_area lock is held. */
    +        count_t refcount;       /**< This structure can be deallocated if refcount drops to 0. */
    +        btree_t pagemap;        /**< B+tree containing complete map of anonymous pages of the shared area. */
    +} share_info_t;
    +
    +/** Address space area backend structure. */
    +typedef struct {
    +        int (* page_fault)(as_area_t *area, __address addr, pf_access_t access);
    +        void (* frame_free)(as_area_t *area, __address page, __address frame);
    +        void (* share)(as_area_t *area);
    +} mem_backend_t;
    +
    +/** Backend data stored in address space area. */
    +typedef struct backend_data {
    +        __native d1;
    +        __native d2;
    +} mem_backend_data_t;
     
     /** Address space area structure.

    @@ -115,4 +131,5 @@
     struct as_area {
             mutex_t lock;
    +        as_t *as;               /**< Containing address space. */
             int flags;              /**< Flags related to the memory represented by the address space area. */
             int attributes;         /**< Attributes related to the address space area itself. */

    @@ -121,13 +138,9 @@
             btree_t used_space;     /**< Map of used space. */
             share_info_t *sh_info;  /**< If the address space area has been shared, this pointer will
    -                                     reference the share info structure. */
    +                                    reference the share info structure. */
             mem_backend_t *backend; /**< Memory backend backing this address space area. */
    -        void *backend_data[2];  /**< Data to be used by the backend. */
    -};
     
    -/** Address space area backend structure. */
    -struct mem_backend {
    -        int (* backend_page_fault)(as_area_t *area, __address addr, pf_access_t access);
    -        void (* backend_frame_free)(as_area_t *area, __address page, __address frame);
    +        /** Data to be used by the backend. */
    +        mem_backend_data_t backend_data;
     };

    @@ -141,9 +154,8 @@
     extern as_t *as_create(int flags);
     extern as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs,
    -        mem_backend_t *backend, void **backend_data);
    +        mem_backend_t *backend, mem_backend_data_t *backend_data);
     extern int as_area_resize(as_t *as, __address address, size_t size, int flags);
     extern int as_area_destroy(as_t *as, __address address);
     extern int as_area_get_flags(as_area_t *area);
    -extern void as_set_mapping(as_t *as, __address page, __address frame);
     extern bool as_area_check_access(as_area_t *area, pf_access_t access);
     extern int as_page_fault(__address page, pf_access_t access, istate_t *istate);

    @@ -164,4 +176,5 @@
     extern mem_backend_t anon_backend;
     extern mem_backend_t elf_backend;
    +extern mem_backend_t phys_backend;
     
     /* Address space area related syscalls. */
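
Taken together, the new types split a backend into shared behaviour (mem_backend_t, one instance per backend) and per-area state (mem_backend_data_t, embedded in each as_area). A minimal usage sketch based on the elf.c hunks below; the elf_backend initializer is assumed, since its handlers now live in the added backend_elf.c:

    /* One shared descriptor per backend (assumed wiring; the handlers
     * themselves moved into the added backend_elf.c). */
    mem_backend_t elf_backend = {
            .page_fault = elf_page_fault,
            .frame_free = elf_frame_free,
            .share = NULL   /* assumed: ELF areas are not yet shareable */
    };

    /* Per-area state rides along in two native-sized slots. */
    mem_backend_data_t backend_data = {
            .d1 = (__native) elf,   /* elf_header_t * */
            .d2 = (__native) entry  /* elf_segment_header_t * */
    };
    a = as_area_create(as, flags, entry->p_memsz, entry->p_vaddr,
            AS_AREA_ATTR_NONE, &elf_backend, &backend_data);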
  • generic/src/ddi/ddi.c

    --- generic/src/ddi/ddi.c (rfb84455)
    +++ generic/src/ddi/ddi.c (r0ee077ee)
    @@ -66,5 +66,5 @@
             task_t *t;
             int flags;
    -        count_t i;
    +        mem_backend_data_t backend_data = { .d1 = (__native) pf, .d2 = (__native) pages };
     
             /*

    @@ -99,8 +99,9 @@
             spinlock_unlock(&tasks_lock);
     
    -        flags = AS_AREA_DEVICE | AS_AREA_READ;
    +        flags = AS_AREA_READ;
             if (writable)
                     flags |= AS_AREA_WRITE;
    -        if (!as_area_create(t->as, flags, pages * PAGE_SIZE, vp, AS_AREA_ATTR_NONE, NULL, NULL)) {
    +        if (!as_area_create(t->as, flags, pages * PAGE_SIZE, vp, AS_AREA_ATTR_NONE,
    +                &phys_backend, &backend_data)) {
                     /*
                      * The address space area could not have been created.

    @@ -112,8 +113,8 @@
             }
     
    -        /* Initialize page tables. */
    -        for (i = 0; i < pages; i++)
    -                as_set_mapping(t->as, vp + i * PAGE_SIZE, pf + i * FRAME_SIZE);
    -
    +        /*
    +         * Mapping is created on-demand during page fault.
    +         */
    +
             spinlock_unlock(&t->lock);
             interrupts_restore(ipl);
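
With the phys backend attached, the eager as_set_mapping() loop becomes unnecessary: the first access to each page faults into the backend's page_fault hook (see the sketch under the commit message), which installs the translation on demand. Because the physical base and page count are captured in backend_data, the area also carries everything a future clone needs.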
  • generic/src/lib/elf.c

    --- generic/src/lib/elf.c (rfb84455)
    +++ generic/src/lib/elf.c (r0ee077ee)
    @@ -1,4 +1,5 @@
     /*
      * Copyright (C) 2006 Sergey Bondari
    + * Copyright (C) 2006 Jakub Jermar
      * All rights reserved.
      *

    @@ -57,12 +58,4 @@
     static int load_segment(elf_segment_header_t *entry, elf_header_t *elf, as_t *as);
     
    -static int elf_page_fault(as_area_t *area, __address addr, pf_access_t access);
    -static void elf_frame_free(as_area_t *area, __address page, __address frame);
    -
    -mem_backend_t elf_backend = {
    -        .backend_page_fault = elf_page_fault,
    -        .backend_frame_free = elf_frame_free
    -};
    -
     /** ELF loader
      *

    @@ -170,5 +163,5 @@
             as_area_t *a;
             int flags = 0;
    -        void *backend_data[2] = { elf, entry };
    +        mem_backend_data_t backend_data = { .d1 = (__native) elf, .d2 = (__native) entry };
     
             if (entry->p_align > 1) {

    @@ -184,4 +177,5 @@
             if (entry->p_flags & PF_R)
                     flags |= AS_AREA_READ;
    +        flags |= AS_AREA_CACHEABLE;
     
             /*

    @@ -191,5 +185,6 @@
                     return EE_UNSUPPORTED;
     
    -        a = as_area_create(as, flags, entry->p_memsz, entry->p_vaddr, AS_AREA_ATTR_NONE, &elf_backend, backend_data);
    +        a = as_area_create(as, flags, entry->p_memsz, entry->p_vaddr, AS_AREA_ATTR_NONE,
    +                &elf_backend, &backend_data);
             if (!a)
                     return EE_MEMORY;

    @@ -219,109 +214,2 @@
             return EE_OK;
     }
    -
    -/** Service a page fault in the ELF backend address space area.
    - *
    - * The address space area and page tables must be already locked.
    - *
    - * @param area Pointer to the address space area.
    - * @param addr Faulting virtual address.
    - * @param access Access mode that caused the fault (i.e. read/write/exec).
    - *
    - * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
    - */
    -int elf_page_fault(as_area_t *area, __address addr, pf_access_t access)
    -{
    -        elf_header_t *elf = (elf_header_t *) area->backend_data[0];
    -        elf_segment_header_t *entry = (elf_segment_header_t *) area->backend_data[1];
    -        __address base, frame;
    -        index_t i;
    -
    -        if (!as_area_check_access(area, access))
    -                return AS_PF_FAULT;
    -
    -        ASSERT((addr >= entry->p_vaddr) && (addr < entry->p_vaddr + entry->p_memsz));
    -        i = (addr - entry->p_vaddr) >> PAGE_WIDTH;
    -        base = (__address) (((void *) elf) + entry->p_offset);
    -        ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);
    -
    -        if (ALIGN_DOWN(addr, PAGE_SIZE) + PAGE_SIZE < entry->p_vaddr + entry->p_filesz) {
    -                /*
    -                 * Initialized portion of the segment. The memory is backed
    -                 * directly by the content of the ELF image. Pages are
    -                 * only copied if the segment is writable so that there
    -                 * can be more instantions of the same memory ELF image
    -                 * used at a time. Note that this could be later done
    -                 * as COW.
    -                 */
    -                if (entry->p_flags & PF_W) {
    -                        frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
    -                        memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), FRAME_SIZE);
    -                } else {
    -                        frame = KA2PA(base + i*FRAME_SIZE);
    -                }
    -        } else if (ALIGN_DOWN(addr, PAGE_SIZE) >= ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
    -                /*
    -                 * This is the uninitialized portion of the segment.
    -                 * It is not physically present in the ELF image.
    -                 * To resolve the situation, a frame must be allocated
    -                 * and cleared.
    -                 */
    -                frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
    -                memsetb(PA2KA(frame), FRAME_SIZE, 0);
    -        } else {
    -                size_t size;
    -                /*
    -                 * The mixed case.
    -                 * The lower part is backed by the ELF image and
    -                 * the upper part is anonymous memory.
    -                 */
    -                size = entry->p_filesz - (i<<PAGE_WIDTH);
    -                frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
    -                memsetb(PA2KA(frame) + size, FRAME_SIZE - size, 0);
    -                memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), size);
    -        }
    -
    -        page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
    -        if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
    -                panic("Could not insert used space.\n");
    -
    -        return AS_PF_OK;
    -}
    -
    -/** Free a frame that is backed by the ELF backend.
    - *
    - * The address space area and page tables must be already locked.
    - *
    - * @param area Pointer to the address space area.
    - * @param page Page that is mapped to frame. Must be aligned to PAGE_SIZE.
    - * @param frame Frame to be released.
    - *
    - */
    -void elf_frame_free(as_area_t *area, __address page, __address frame)
    -{
    -        elf_header_t *elf = (elf_header_t *) area->backend_data[0];
    -        elf_segment_header_t *entry = (elf_segment_header_t *) area->backend_data[1];
    -        __address base;
    -        index_t i;
    -
    -        ASSERT((page >= entry->p_vaddr) && (page < entry->p_vaddr + entry->p_memsz));
    -        i = (page - entry->p_vaddr) >> PAGE_WIDTH;
    -        base = (__address) (((void *) elf) + entry->p_offset);
    -        ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);
    -
    -        if (page + PAGE_SIZE < ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
    -                if (entry->p_flags & PF_W) {
    -                        /*
    -                         * Free the frame with the copy of writable segment data.
    -                         */
    -                        frame_free(ADDR2PFN(frame));
    -                }
    -        } else {
    -                /*
    -                 * The frame is either anonymous memory or the mixed case (i.e. lower
    -                 * part is backed by the ELF image and the upper is anonymous).
    -                 * In any case, a frame needs to be freed.
    -                 */
    -                frame_free(ADDR2PFN(frame));
    -        }
    -}
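
elf_page_fault() and elf_frame_free() are not deleted, only relocated: per the commit message they move into the added backend_elf.c under the shortened hook names page_fault and frame_free, presumably reading elf and entry out of backend_data.d1/.d2 instead of the old void *backend_data[2] array (the new initialization in load_segment() above matches that layout).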
  • generic/src/mm/as.c

    --- generic/src/mm/as.c (rfb84455)
    +++ generic/src/mm/as.c (r0ee077ee)
    @@ -75,11 +75,4 @@
     #include <arch/interrupt.h>
     
    -/** This structure contains information associated with the shared address space area. */
    -struct share_info {
    -        mutex_t lock;           /**< This lock must be acquired only when the as_area lock is held. */
    -        count_t refcount;       /**< This structure can be deallocated if refcount drops to 0. */
    -        btree_t pagemap;        /**< B+tree containing complete map of anonymous pages of the shared area. */
    -};
    -
     as_operations_t *as_operations = NULL;
     

    @@ -160,5 +153,5 @@
      */
     as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs,
    -                mem_backend_t *backend, void **backend_data)
    +                mem_backend_t *backend, mem_backend_data_t *backend_data)
     {
             ipl_t ipl;

    @@ -188,4 +181,5 @@
             mutex_initialize(&a->lock);
     
    +        a->as = as;
             a->flags = flags;
             a->attributes = attrs;

    @@ -194,8 +188,9 @@
             a->sh_info = NULL;
             a->backend = backend;
    -        if (backend_data) {
    -                a->backend_data[0] = backend_data[0];
    -                a->backend_data[1] = backend_data[1];
    -        }
    +        if (backend_data)
    +                a->backend_data = *backend_data;
    +        else
    +                memsetb((__address) &a->backend_data, sizeof(a->backend_data), 0);
    +
             btree_create(&a->used_space);
     

    @@ -236,5 +231,5 @@
             }
     
    -        if (area->flags & AS_AREA_DEVICE) {
    +        if (area->backend == &phys_backend) {
                     /*
                      * Remapping of address space areas associated

    @@ -327,6 +322,6 @@
                                             pte = page_mapping_find(as, b + i*PAGE_SIZE);
                                             ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
    -                                        if (area->backend && area->backend->backend_frame_free) {
    -                                                area->backend->backend_frame_free(area,
    +                                        if (area->backend && area->backend->frame_free) {
    +                                                area->backend->frame_free(area,
                                                             b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
                                             }

    @@ -412,6 +407,6 @@
                                     pte = page_mapping_find(as, b + i*PAGE_SIZE);
                                     ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
    -                                if (area->backend && area->backend->backend_frame_free) {
    -                                        area->backend->backend_frame_free(area,
    +                                if (area->backend && area->backend->frame_free) {
    +                                        area->backend->frame_free(area,
                                                     b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
                                     }

    @@ -453,9 +448,9 @@
     /** Share address space area with another or the same address space.
      *
    - * Address space area of anonymous memory is shared with a new address
    - * space area. If the source address space area has not been shared so
    - * far, a new sh_info is created and the original mapping is duplicated
    - * in its pagemap B+tree. The new address space area simply gets the
    - * sh_info of the source area.
    + * Address space area mapping is shared with a new address space area.
    + * If the source address space area has not been shared so far,
    + * a new sh_info is created. The new address space area simply gets the
    + * sh_info of the source area. The process of duplicating the
    + * mapping is done through the backend share function.
      *
      * @param src_as Pointer to source address space.

    @@ -480,5 +475,6 @@
             as_area_t *src_area, *dst_area;
             share_info_t *sh_info;
    -        link_t *cur;
    +        mem_backend_t *src_backend;
    +        mem_backend_data_t src_backend_data;
     
             ipl = interrupts_disable();

    @@ -494,7 +490,8 @@
             }
     
    -        if (!src_area->backend || src_area->backend != &anon_backend) {
    -                /*
    -                 * As of now, only anonymous address space areas can be shared.
    +        if (!src_area->backend || !src_area->backend->share) {
    +                /*
    +                 * There is no backend or the backend does not
    +                 * know how to share the area.
                      */
                     mutex_unlock(&src_area->lock);

    @@ -506,4 +503,6 @@
             src_size = src_area->pages * PAGE_SIZE;
             src_flags = src_area->flags;
    +        src_backend = src_area->backend;
    +        src_backend_data = src_area->backend_data;
     
             if (src_size != acc_size) {

    @@ -532,32 +531,5 @@
             }
     
    -        /*
    -         * Copy used portions of the area to sh_info's page map.
    -         */
    -        mutex_lock(&sh_info->lock);
    -        for (cur = src_area->used_space.leaf_head.next; cur != &src_area->used_space.leaf_head; cur = cur->next) {
    -                btree_node_t *node;
    -                int i;
    -
    -                node = list_get_instance(cur, btree_node_t, leaf_link);
    -                for (i = 0; i < node->keys; i++) {
    -                        __address base = node->key[i];
    -                        count_t count = (count_t) node->value[i];
    -                        int j;
    -
    -                        for (j = 0; j < count; j++) {
    -                                pte_t *pte;
    -
    -                                page_table_lock(src_as, false);
    -                                pte = page_mapping_find(src_as, base + j*PAGE_SIZE);
    -                                ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
    -                                btree_insert(&sh_info->pagemap, (base + j*PAGE_SIZE) - src_area->base,
    -                                        (void *) PTE_GET_FRAME(pte), NULL);
    -                                page_table_unlock(src_as, false);
    -                        }
    -
    -                }
    -        }
    -        mutex_unlock(&sh_info->lock);
    +        src_area->backend->share(src_area);
     
             mutex_unlock(&src_area->lock);

    @@ -573,5 +545,5 @@
              */
             dst_area = as_area_create(AS, src_flags & dst_flags_mask, src_size, dst_base,
    -                                  AS_AREA_ATTR_PARTIAL, &anon_backend, NULL);
    +                                  AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
             if (!dst_area) {
                     /*

    @@ -599,38 +571,4 @@
     }
     
    -/** Initialize mapping for one page of address space.
    - *
    - * This function maps 'page' to 'frame' according
    - * to attributes of the address space area to
    - * which 'page' belongs.
    - *
    - * @param as Target address space.
    - * @param page Virtual page within the area.
    - * @param frame Physical frame to which page will be mapped.
    - */
    -void as_set_mapping(as_t *as, __address page, __address frame)
    -{
    -        as_area_t *area;
    -        ipl_t ipl;
    -
    -        ipl = interrupts_disable();
    -        page_table_lock(as, true);
    -
    -        area = find_area_and_lock(as, page);
    -        if (!area) {
    -                panic("Page not part of any as_area.\n");
    -        }
    -
    -        ASSERT(!area->backend);
    -
    -        page_mapping_insert(as, page, frame, as_area_get_flags(area));
    -        if (!used_space_insert(area, page, 1))
    -                panic("Could not insert used space.\n");
    -
    -        mutex_unlock(&area->lock);
    -        page_table_unlock(as, true);
    -        interrupts_restore(ipl);
    -}
    -
     /** Check access mode for address space area.
      *

    @@ -703,5 +641,5 @@
             }
     
    -        if (!area->backend || !area->backend->backend_page_fault) {
    +        if (!area->backend || !area->backend->page_fault) {
                     /*
                      * The address space area is not backed by any backend

    @@ -736,5 +674,5 @@
              * Resort to the backend page fault handler.
              */
    -        if (area->backend->backend_page_fault(area, page, access) != AS_PF_OK) {
    +        if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
                     page_table_unlock(AS, false);
                     mutex_unlock(&area->lock);

    @@ -855,5 +793,5 @@
                     flags |= PAGE_EXEC;
     
    -        if (!(aflags & AS_AREA_DEVICE))
    +        if (aflags & AS_AREA_CACHEABLE)
                     flags |= PAGE_CACHEABLE;
     

    @@ -1498,114 +1436,4 @@
     }
     
    -static int anon_page_fault(as_area_t *area, __address addr, pf_access_t access);
    -static void anon_frame_free(as_area_t *area, __address page, __address frame);
    -
    -/*
    - * Anonymous memory backend.
    - */
    -mem_backend_t anon_backend = {
    -        .backend_page_fault = anon_page_fault,
    -        .backend_frame_free = anon_frame_free
    -};
    -
    -/** Service a page fault in the anonymous memory address space area.
    - *
    - * The address space area and page tables must be already locked.
    - *
    - * @param area Pointer to the address space area.
    - * @param addr Faulting virtual address.
    - * @param access Access mode that caused the fault (i.e. read/write/exec).
    - *
    - * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
    - */
    -int anon_page_fault(as_area_t *area, __address addr, pf_access_t access)
    -{
    -        __address frame;
    -
    -        if (!as_area_check_access(area, access))
    -                return AS_PF_FAULT;
    -
    -        if (area->sh_info) {
    -                btree_node_t *leaf;
    -
    -                /*
    -                 * The area is shared, chances are that the mapping can be found
    -                 * in the pagemap of the address space area share info structure.
    -                 * In the case that the pagemap does not contain the respective
    -                 * mapping, a new frame is allocated and the mapping is created.
    -                 */
    -                mutex_lock(&area->sh_info->lock);
    -                frame = (__address) btree_search(&area->sh_info->pagemap,
    -                        ALIGN_DOWN(addr, PAGE_SIZE) - area->base, &leaf);
    -                if (!frame) {
    -                        bool allocate = true;
    -                        int i;
    -
    -                        /*
    -                         * Zero can be returned as a valid frame address.
    -                         * Just a small workaround.
    -                         */
    -                        for (i = 0; i < leaf->keys; i++) {
    -                                if (leaf->key[i] == ALIGN_DOWN(addr, PAGE_SIZE)) {
    -                                        allocate = false;
    -                                        break;
    -                                }
    -                        }
    -                        if (allocate) {
    -                                frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
    -                                memsetb(PA2KA(frame), FRAME_SIZE, 0);
    -
    -                                /*
    -                                 * Insert the address of the newly allocated frame to the pagemap.
    -                                 */
    -                                btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base, (void *) frame, leaf);
    -                        }
    -                }
    -                mutex_unlock(&area->sh_info->lock);
    -        } else {
    -
    -                /*
    -                 * In general, there can be several reasons that
    -                 * can have caused this fault.
    -                 *
    -                 * - non-existent mapping: the area is an anonymous
    -                 *   area (e.g. heap or stack) and so far has not been
    -                 *   allocated a frame for the faulting page
    -                 *
    -                 * - non-present mapping: another possibility,
    -                 *   currently not implemented, would be frame
    -                 *   reuse; when this becomes a possibility,
    -                 *   do not forget to distinguish between
    -                 *   the different causes
    -                 */
    -                frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
    -                memsetb(PA2KA(frame), FRAME_SIZE, 0);
    -        }
    -
    -        /*
    -         * Map 'page' to 'frame'.
    -         * Note that TLB shootdown is not attempted as only new information is being
    -         * inserted into page tables.
    -         */
    -        page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
    -        if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
    -                panic("Could not insert used space.\n");
    -
    -        return AS_PF_OK;
    -}
    -
    -/** Free a frame that is backed by the anonymous memory backend.
    - *
    - * The address space area and page tables must be already locked.
    - *
    - * @param area Ignored.
    - * @param page Ignored.
    - * @param frame Frame to be released.
    - */
    -void anon_frame_free(as_area_t *area, __address page, __address frame)
    -{
    -        frame_free(ADDR2PFN(frame));
    -}
    -
     /*
      * Address space related syscalls.

    @@ -1615,5 +1443,5 @@
     __native sys_as_area_create(__address address, size_t size, int flags)
     {
    -        if (as_area_create(AS, flags, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL))
    +        if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL))
                     return (__native) address;
             else
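
The pagemap-duplication loop removed from as_area_share() above moves into the anonymous backend's share hook in the added backend_anon.c. A plausible reconstruction from the removed code (hypothetical placement and name; note how the new as_area.as back-pointer introduced in this changeset lets the hook recover the owning address space without the old src_as argument):

    /* Copy used portions of the area to sh_info's page map
     * (body lifted from the loop deleted above; assumes the
     * area lock is held, as the old in-line code did). */
    void anon_share(as_area_t *area)
    {
            link_t *cur;

            mutex_lock(&area->sh_info->lock);
            for (cur = area->used_space.leaf_head.next;
                cur != &area->used_space.leaf_head; cur = cur->next) {
                    btree_node_t *node;
                    int i;

                    node = list_get_instance(cur, btree_node_t, leaf_link);
                    for (i = 0; i < node->keys; i++) {
                            __address base = node->key[i];
                            count_t count = (count_t) node->value[i];
                            int j;

                            for (j = 0; j < count; j++) {
                                    pte_t *pte;

                                    page_table_lock(area->as, false);
                                    pte = page_mapping_find(area->as, base + j*PAGE_SIZE);
                                    ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
                                    btree_insert(&area->sh_info->pagemap,
                                            (base + j*PAGE_SIZE) - area->base,
                                            (void *) PTE_GET_FRAME(pte), NULL);
                                    page_table_unlock(area->as, false);
                            }
                    }
            }
            mutex_unlock(&area->sh_info->lock);
    }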
  • generic/src/proc/task.c

    --- generic/src/proc/task.c (rfb84455)
    +++ generic/src/proc/task.c (r0ee077ee)
    @@ -154,4 +154,5 @@
              * Create the data as_area.
              */
    -        a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE, LOADED_PROG_STACK_PAGES_NO*PAGE_SIZE,
    +        a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
    +                LOADED_PROG_STACK_PAGES_NO*PAGE_SIZE,
                     USTACK_ADDRESS, AS_AREA_ATTR_NONE, &anon_backend, NULL);
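
Note the flag inversion that runs through all of these call sites: AS_AREA_DEVICE and AS_AREA_ANON are gone, and callers now opt in to caching with AS_AREA_CACHEABLE instead of opting out via the device flag. "Is this a device area?" is now answered by the backend pointer (area->backend == &phys_backend in as.c) rather than by a flag bit.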
    158159
Note: See TracChangeset for help on using the changeset viewer.