Changeset 0ee077ee in mainline for generic/src/mm/as.c


Timestamp: 2006-05-27T17:50:30Z
Author: Jakub Jermar <jakub@…>
Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children: 127c957b
Parents: fb84455
Message:

Move the sharing functionality to address space area backends.
Add backend for contiguous regions of physical memory.
Sharing for these areas works automagically now.
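
Judging from the member names used throughout the diff below (page_fault, frame_free, and the new share hook), the backend interface after this change presumably looks like the following sketch. The declaration itself is not part of this diff, so member order and exact typing are assumptions inferred from the call sites:

    /* Sketch of the assumed backend interface; only the three hooks
     * visible in this changeset are shown. */
    typedef struct mem_backend {
            int (* page_fault)(as_area_t *area, __address addr, pf_access_t access);
            void (* frame_free)(as_area_t *area, __address page, __address frame);
            void (* share)(as_area_t *area);
    } mem_backend_t;

Moving sharing behind the share hook lets each backend decide what "duplicate this mapping" means, which is what allows sharing of the new physical-memory areas to work without extra bookkeeping.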

File: 1 edited

  • generic/src/mm/as.c

--- generic/src/mm/as.c (rfb84455)
+++ generic/src/mm/as.c (r0ee077ee)
 #include <arch/interrupt.h>
 
-/** This structure contains information associated with the shared address space area. */
-struct share_info {
-        mutex_t lock;           /**< This lock must be acquired only when the as_area lock is held. */
-        count_t refcount;       /**< This structure can be deallocated if refcount drops to 0. */
-        btree_t pagemap;        /**< B+tree containing complete map of anonymous pages of the shared area. */
-};
-
 as_operations_t *as_operations = NULL;
 
…
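The share_info definition disappears from as.c even though share_info_t is still used below, so it was presumably relocated to a header where backend code can reach it (which header is an assumption; this diff only shows the removal). Its shape, restated from the removed lines:

    /* As removed above; presumably now lives in a header so that backend
     * code can access the pagemap. */
    typedef struct share_info {
            mutex_t lock;      /**< Must be acquired only while the as_area lock is held. */
            count_t refcount;  /**< The structure may be deallocated when this drops to 0. */
            btree_t pagemap;   /**< B+tree mapping area offsets to frames of the shared area. */
    } share_info_t;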
  */
 as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs,
-        mem_backend_t *backend, void **backend_data)
+        mem_backend_t *backend, mem_backend_data_t *backend_data)
 {
         ipl_t ipl;
…
         mutex_initialize(&a->lock);
 
+        a->as = as;
         a->flags = flags;
         a->attributes = attrs;
…
         a->sh_info = NULL;
         a->backend = backend;
-        if (backend_data) {
-                a->backend_data[0] = backend_data[0];
-                a->backend_data[1] = backend_data[1];
-        }
+        if (backend_data)
+                a->backend_data = *backend_data;
+        else
+                memsetb((__address) &a->backend_data, sizeof(a->backend_data), 0);
+
         btree_create(&a->used_space);
 
…
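backend_data changes from an opaque two-pointer array into a typed mem_backend_data_t that as_area_create() copies by value, zeroing it when the caller passes NULL. The new a->as back-pointer is also worth noting: it lets backend code reach the owning address space from the area alone. The type's definition is not shown in this diff; the members below are purely illustrative, sized for a contiguous physical region:

    /* Hypothetical layout; the real definition is not part of this diff. */
    typedef struct mem_backend_data {
            __address base;   /* first physical address of the region */
            count_t frames;   /* number of frames in the region */
    } mem_backend_data_t;

    /* Plausible call site mapping a device region; dev_phys_addr, dev_size
     * and dev_virt_addr are hypothetical. The area comes out uncacheable
     * simply because AS_AREA_CACHEABLE is not passed. */
    mem_backend_data_t backend_data = {
            .base = dev_phys_addr,
            .frames = SIZE2FRAMES(dev_size)
    };
    (void) as_area_create(AS_KERNEL, AS_AREA_READ | AS_AREA_WRITE, dev_size,
            dev_virt_addr, AS_AREA_ATTR_NONE, &phys_backend, &backend_data);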
         }
 
-        if (area->flags & AS_AREA_DEVICE) {
+        if (area->backend == &phys_backend) {
                 /*
                  * Remapping of address space areas associated
…
                                         pte = page_mapping_find(as, b + i*PAGE_SIZE);
                                         ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
-                                        if (area->backend && area->backend->backend_frame_free) {
-                                                area->backend->backend_frame_free(area,
+                                        if (area->backend && area->backend->frame_free) {
+                                                area->backend->frame_free(area,
                                                         b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
                                         }
…
                                 pte = page_mapping_find(as, b + i*PAGE_SIZE);
                                 ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
-                                if (area->backend && area->backend->backend_frame_free) {
-                                        area->backend->backend_frame_free(area,
+                                if (area->backend && area->backend->frame_free) {
+                                        area->backend->frame_free(area,
                                                 b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
                                 }
…
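Both teardown paths now test the shorter frame_free member. The guard matters because a backend may legitimately leave the hook NULL: frames of a hardware-mapped physical region were never taken from the frame allocator, so there is nothing to hand back. A hypothetical registration for such a backend (the handler names are assumptions; only phys_backend itself is confirmed by this diff):

    /* Hypothetical: the physical-memory backend leaves frame_free NULL,
     * so the guards above simply skip frame deallocation for its areas. */
    mem_backend_t phys_backend = {
            .page_fault = phys_page_fault,  /* assumed name; sketched further below */
            .frame_free = NULL,
            .share = phys_share             /* assumed name */
    };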
 /** Share address space area with another or the same address space.
  *
- * Address space area of anonymous memory is shared with a new address
- * space area. If the source address space area has not been shared so
- * far, a new sh_info is created and the original mapping is duplicated
- * in its pagemap B+tree. The new address space area simply gets the
- * sh_info of the source area.
+ * Address space area mapping is shared with a new address space area.
+ * If the source address space area has not been shared so far,
+ * a new sh_info is created. The new address space area simply gets the
+ * sh_info of the source area. The process of duplicating the
+ * mapping is done through the backend share function.
  *
  * @param src_as Pointer to source address space.
…
         as_area_t *src_area, *dst_area;
         share_info_t *sh_info;
-        link_t *cur;
+        mem_backend_t *src_backend;
+        mem_backend_data_t src_backend_data;
 
         ipl = interrupts_disable();
…
         }
 
-        if (!src_area->backend || src_area->backend != &anon_backend) {
-                /*
-                 * As of now, only anonymous address space areas can be shared.
+        if (!src_area->backend || !src_area->backend->share) {
+                /*
+                 * There is no backend or the backend does not
+                 * know how to share the area.
                  */
                 mutex_unlock(&src_area->lock);
…
         src_size = src_area->pages * PAGE_SIZE;
         src_flags = src_area->flags;
+        src_backend = src_area->backend;
+        src_backend_data = src_area->backend_data;
 
         if (src_size != acc_size) {
…
         }
 
-        /*
-         * Copy used portions of the area to sh_info's page map.
-         */
-        mutex_lock(&sh_info->lock);
-        for (cur = src_area->used_space.leaf_head.next; cur != &src_area->used_space.leaf_head; cur = cur->next) {
-                btree_node_t *node;
-                int i;
-
-                node = list_get_instance(cur, btree_node_t, leaf_link);
-                for (i = 0; i < node->keys; i++) {
-                        __address base = node->key[i];
-                        count_t count = (count_t) node->value[i];
-                        int j;
-
-                        for (j = 0; j < count; j++) {
-                                pte_t *pte;
-
-                                page_table_lock(src_as, false);
-                                pte = page_mapping_find(src_as, base + j*PAGE_SIZE);
-                                ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
-                                btree_insert(&sh_info->pagemap, (base + j*PAGE_SIZE) - src_area->base,
-                                        (void *) PTE_GET_FRAME(pte), NULL);
-                                page_table_unlock(src_as, false);
-                        }
-
-                }
-        }
-        mutex_unlock(&sh_info->lock);
+        src_area->backend->share(src_area);
 
         mutex_unlock(&src_area->lock);
…
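The generic pagemap-duplication loop is replaced by a single call through the backend's share hook. The removed code presumably reappears, nearly verbatim, as the anonymous backend's implementation of that hook; the sketch below restates it against the hook's single area parameter, with the new area->as back-pointer standing in for src_as (the function name and its new location are assumptions):

    /* Hypothetical relocation of the loop removed above into the anonymous
     * backend's share() hook. */
    void anon_share(as_area_t *area)
    {
            link_t *cur;

            /*
             * Copy used portions of the area to sh_info's page map.
             */
            mutex_lock(&area->sh_info->lock);
            for (cur = area->used_space.leaf_head.next; cur != &area->used_space.leaf_head; cur = cur->next) {
                    btree_node_t *node;
                    int i;

                    node = list_get_instance(cur, btree_node_t, leaf_link);
                    for (i = 0; i < node->keys; i++) {
                            __address base = node->key[i];
                            count_t count = (count_t) node->value[i];
                            int j;

                            for (j = 0; j < count; j++) {
                                    pte_t *pte;

                                    page_table_lock(area->as, false);
                                    pte = page_mapping_find(area->as, base + j*PAGE_SIZE);
                                    ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
                                    btree_insert(&area->sh_info->pagemap, (base + j*PAGE_SIZE) - area->base,
                                            (void *) PTE_GET_FRAME(pte), NULL);
                                    page_table_unlock(area->as, false);
                            }
                    }
            }
            mutex_unlock(&area->sh_info->lock);
    }

Note that the a->as assignment added in as_area_create() is exactly what lets the relocated loop lock the correct page table without being handed src_as.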
          */
         dst_area = as_area_create(AS, src_flags & dst_flags_mask, src_size, dst_base,
-                                  AS_AREA_ATTR_PARTIAL, &anon_backend, NULL);
+                                  AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
         if (!dst_area) {
                 /*
…
 }
 
-/** Initialize mapping for one page of address space.
- *
- * This function maps 'page' to 'frame' according
- * to attributes of the address space area to
- * which 'page' belongs.
- *
- * @param as Target address space.
- * @param page Virtual page within the area.
- * @param frame Physical frame to which page will be mapped.
- */
-void as_set_mapping(as_t *as, __address page, __address frame)
-{
-        as_area_t *area;
-        ipl_t ipl;
-
-        ipl = interrupts_disable();
-        page_table_lock(as, true);
-
-        area = find_area_and_lock(as, page);
-        if (!area) {
-                panic("Page not part of any as_area.\n");
-        }
-
-        ASSERT(!area->backend);
-
-        page_mapping_insert(as, page, frame, as_area_get_flags(area));
-        if (!used_space_insert(area, page, 1))
-                panic("Could not insert used space.\n");
-
-        mutex_unlock(&area->lock);
-        page_table_unlock(as, true);
-        interrupts_restore(ipl);
-}
-
 /** Check access mode for address space area.
  *
…
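as_set_mapping() was the eager path for wiring pages into areas that had no backend at all (note the removed ASSERT(!area->backend)). With contiguous physical regions now owned by phys_backend, the same effect can be had lazily from the backend's fault handler. The following is a sketch of what such a handler might look like, not code from this changeset; the backend_data member names repeat the hypothetical layout given earlier:

    /* Hypothetical lazy fault handler for a contiguous physical region:
     * translate the faulting address's offset within the area into an
     * offset from the region's physical base and insert the mapping. */
    int phys_page_fault(as_area_t *area, __address addr, pf_access_t access)
    {
            __address base = area->backend_data.base;  /* assumed member */

            if (!as_area_check_access(area, access))
                    return AS_PF_FAULT;

            ASSERT(addr - area->base < area->backend_data.frames * FRAME_SIZE);
            page_mapping_insert(AS, addr, base + (addr - area->base),
                    as_area_get_flags(area));
            if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
                    panic("Could not insert used space.\n");

            return AS_PF_OK;
    }

Because every fault recomputes the frame from the region's base, two areas carrying the same backend_data resolve to the same frames with no pagemap at all, which is presumably why the commit message can say sharing of these areas works automagically.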
         }
 
-        if (!area->backend || !area->backend->backend_page_fault) {
+        if (!area->backend || !area->backend->page_fault) {
                 /*
                  * The address space area is not backed by any backend
…
          * Resort to the backend page fault handler.
          */
-        if (area->backend->backend_page_fault(area, page, access) != AS_PF_OK) {
+        if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
                 page_table_unlock(AS, false);
                 mutex_unlock(&area->lock);
…
                 flags |= PAGE_EXEC;
 
-        if (!(aflags & AS_AREA_DEVICE))
+        if (aflags & AS_AREA_CACHEABLE)
                 flags |= PAGE_CACHEABLE;
 
…
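The caching decision flips polarity: instead of "everything that is not a device area is cacheable", cacheability becomes a property the creator must ask for. An illustrative pair of flag sets (AS_AREA_CACHEABLE is from this diff; AS_AREA_READ and AS_AREA_WRITE are the usual protection flags from the same header):

    /* RAM-backed area: request caching explicitly. */
    int anon_flags = AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE;

    /* Memory-mapped device registers: omit the flag and PAGE_CACHEABLE
     * is never set on the mappings. */
    int dev_flags = AS_AREA_READ | AS_AREA_WRITE;

This is also why the syscall at the end of the diff now ORs AS_AREA_CACHEABLE into the user-supplied flags.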
 }
 
-static int anon_page_fault(as_area_t *area, __address addr, pf_access_t access);
-static void anon_frame_free(as_area_t *area, __address page, __address frame);
-
-/*
- * Anonymous memory backend.
- */
-mem_backend_t anon_backend = {
-        .backend_page_fault = anon_page_fault,
-        .backend_frame_free = anon_frame_free
-};
-
-/** Service a page fault in the anonymous memory address space area.
- *
- * The address space area and page tables must be already locked.
- *
- * @param area Pointer to the address space area.
- * @param addr Faulting virtual address.
- * @param access Access mode that caused the fault (i.e. read/write/exec).
- *
- * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
- */
-int anon_page_fault(as_area_t *area, __address addr, pf_access_t access)
-{
-        __address frame;
-
-        if (!as_area_check_access(area, access))
-                return AS_PF_FAULT;
-
-        if (area->sh_info) {
-                btree_node_t *leaf;
-
-                /*
-                 * The area is shared, chances are that the mapping can be found
-                 * in the pagemap of the address space area share info structure.
-                 * In the case that the pagemap does not contain the respective
-                 * mapping, a new frame is allocated and the mapping is created.
-                 */
-                mutex_lock(&area->sh_info->lock);
-                frame = (__address) btree_search(&area->sh_info->pagemap,
-                        ALIGN_DOWN(addr, PAGE_SIZE) - area->base, &leaf);
-                if (!frame) {
-                        bool allocate = true;
-                        int i;
-
-                        /*
-                         * Zero can be returned as a valid frame address.
-                         * Just a small workaround.
-                         */
-                        for (i = 0; i < leaf->keys; i++) {
-                                if (leaf->key[i] == ALIGN_DOWN(addr, PAGE_SIZE)) {
-                                        allocate = false;
-                                        break;
-                                }
-                        }
-                        if (allocate) {
-                                frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
-                                memsetb(PA2KA(frame), FRAME_SIZE, 0);
-
-                                /*
-                                 * Insert the address of the newly allocated frame to the pagemap.
-                                 */
-                                btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base, (void *) frame, leaf);
-                        }
-                }
-                mutex_unlock(&area->sh_info->lock);
-        } else {
-
-                /*
-                 * In general, there can be several reasons that
-                 * can have caused this fault.
-                 *
-                 * - non-existent mapping: the area is an anonymous
-                 *   area (e.g. heap or stack) and so far has not been
-                 *   allocated a frame for the faulting page
-                 *
-                 * - non-present mapping: another possibility,
-                 *   currently not implemented, would be frame
-                 *   reuse; when this becomes a possibility,
-                 *   do not forget to distinguish between
-                 *   the different causes
-                 */
-                frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
-                memsetb(PA2KA(frame), FRAME_SIZE, 0);
-        }
-
-        /*
-         * Map 'page' to 'frame'.
-         * Note that TLB shootdown is not attempted as only new information is being
-         * inserted into page tables.
-         */
-        page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
-        if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
-                panic("Could not insert used space.\n");
-
-        return AS_PF_OK;
-}
-
-/** Free a frame that is backed by the anonymous memory backend.
- *
- * The address space area and page tables must be already locked.
- *
- * @param area Ignored.
- * @param page Ignored.
- * @param frame Frame to be released.
- */
-void anon_frame_free(as_area_t *area, __address page, __address frame)
-{
-        frame_free(ADDR2PFN(frame));
-}
-
 /*
  * Address space related syscalls.
…
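The anonymous backend leaves as.c entirely. Given the renamed structure members and the new hook, its registration presumably now reads roughly as below in its new home (the file location is an assumption; anon_backend itself is still referenced by sys_as_area_create() right after this):

    /* Presumed registration after the move, using the renamed hooks plus
     * the share implementation sketched earlier. */
    mem_backend_t anon_backend = {
            .page_fault = anon_page_fault,
            .frame_free = anon_frame_free,
            .share = anon_share
    };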
 __native sys_as_area_create(__address address, size_t size, int flags)
 {
-        if (as_area_create(AS, flags, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL))
+        if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL))
                 return (__native) address;
         else