Changeset 0ee077ee in mainline
Timestamp: 2006-05-27T17:50:30Z (19 years ago)
Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children: 127c957b
Parents: fb84455
Files: 3 added, 7 edited
Makefile (rfb84455 → r0ee077ee)

@@ -142,4 +142,7 @@
     generic/src/mm/tlb.c \
     generic/src/mm/as.c \
+    generic/src/mm/backend_anon.c \
+    generic/src/mm/backend_elf.c \
+    generic/src/mm/backend_phys.c \
     generic/src/mm/slab.c \
     generic/src/lib/func.c \
genarch/src/mm/page_ht.c (rfb84455 → r0ee077ee)

@@ -187,6 +187,6 @@
 
     t->as = as;
-    t->page = page;
-    t->frame = frame;
+    t->page = ALIGN_DOWN(page, PAGE_SIZE);
+    t->frame = ALIGN_DOWN(frame, FRAME_SIZE);
 
     hash_table_insert(&page_ht, key, &t->link);
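For reference, ALIGN_DOWN and ALIGN_UP are the kernel's power-of-two alignment macros; the change above makes the hash table tolerate unaligned inputs by always storing page- and frame-aligned addresses. A minimal sketch of the usual definitions (the authoritative ones live in the kernel's own headers and may differ in detail):

    /* Sketch of typical power-of-two alignment macros; the kernel's
     * own header definitions are authoritative. */
    #define ALIGN_DOWN(s, a)    ((s) & ~((a) - 1))
    #define ALIGN_UP(s, a)      (((s) + ((a) - 1)) & ~((a) - 1))

    /* Example: with PAGE_SIZE == 0x1000,
     * ALIGN_DOWN(0x12345, PAGE_SIZE) == 0x12000,
     * ALIGN_UP(0x12345, PAGE_SIZE)   == 0x13000. */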
generic/include/mm/as.h (rfb84455 → r0ee077ee)

@@ -31,9 +31,8 @@
 
 /** Address space area flags. */
-#define AS_AREA_READ       1
-#define AS_AREA_WRITE      2
-#define AS_AREA_EXEC       4
-#define AS_AREA_DEVICE     8
-#define AS_AREA_ANON       16
+#define AS_AREA_READ       1
+#define AS_AREA_WRITE      2
+#define AS_AREA_EXEC       4
+#define AS_AREA_CACHEABLE  8
 
 #ifdef KERNEL
@@ -105,6 +104,23 @@
     or memcpy_to_uspace(). */
 
-typedef struct share_info share_info_t;
-typedef struct mem_backend mem_backend_t;
+/** This structure contains information associated with the shared address space area. */
+typedef struct {
+    mutex_t lock;       /**< This lock must be acquired only when the as_area lock is held. */
+    count_t refcount;   /**< This structure can be deallocated if refcount drops to 0. */
+    btree_t pagemap;    /**< B+tree containing complete map of anonymous pages of the shared area. */
+} share_info_t;
+
+/** Address space area backend structure. */
+typedef struct {
+    int (* page_fault)(as_area_t *area, __address addr, pf_access_t access);
+    void (* frame_free)(as_area_t *area, __address page, __address frame);
+    void (* share)(as_area_t *area);
+} mem_backend_t;
+
+/** Backend data stored in address space area. */
+typedef struct backend_data {
+    __native d1;
+    __native d2;
+} mem_backend_data_t;
 
 /** Address space area structure.
@@ -115,4 +131,5 @@
 struct as_area {
     mutex_t lock;
+    as_t *as;               /**< Containing address space. */
     int flags;              /**< Flags related to the memory represented by the address space area. */
     int attributes;         /**< Attributes related to the address space area itself. */
@@ -121,13 +138,9 @@
     btree_t used_space;     /**< Map of used space. */
     share_info_t *sh_info;  /**< If the address space area has been shared, this pointer will
-                                 reference the share info structure. */
+                             reference the share info structure. */
     mem_backend_t *backend; /**< Memory backend backing this address space area. */
-    void *backend_data[2];  /**< Data to be used by the backend. */
-};
 
-/** Address space area backend structure. */
-struct mem_backend {
-    int (* backend_page_fault)(as_area_t *area, __address addr, pf_access_t access);
-    void (* backend_frame_free)(as_area_t *area, __address page, __address frame);
+    /** Data to be used by the backend. */
+    mem_backend_data_t backend_data;
 };
 
@@ -141,9 +154,8 @@
 extern as_t *as_create(int flags);
 extern as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs,
-    mem_backend_t *backend, void **backend_data);
+    mem_backend_t *backend, mem_backend_data_t *backend_data);
 extern int as_area_resize(as_t *as, __address address, size_t size, int flags);
 extern int as_area_destroy(as_t *as, __address address);
 extern int as_area_get_flags(as_area_t *area);
-extern void as_set_mapping(as_t *as, __address page, __address frame);
 extern bool as_area_check_access(as_area_t *area, pf_access_t access);
 extern int as_page_fault(__address page, pf_access_t access, istate_t *istate);
@@ -164,4 +176,5 @@
 extern mem_backend_t anon_backend;
 extern mem_backend_t elf_backend;
+extern mem_backend_t phys_backend;
 
 /* Address space area related syscalls. */
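To make the new contract concrete, here is a sketch of how a backend is declared against this interface. It is modeled on the anon_backend initializer that the as.c diff below removes, with the fields renamed per the new struct; the real definitions move into the newly added backend_anon.c, backend_elf.c and backend_phys.c, which this changeset view does not display, so the static function names are illustrative:

    /* Sketch: wiring a backend into the new mem_backend_t interface.
     * The actual implementations live in the added backend_*.c files
     * (not shown in this diff); these function names are illustrative
     * placeholders following the pattern of the removed as.c code. */
    static int anon_page_fault(as_area_t *area, __address addr, pf_access_t access);
    static void anon_frame_free(as_area_t *area, __address page, __address frame);
    static void anon_share(as_area_t *area);

    mem_backend_t anon_backend = {
        .page_fault = anon_page_fault,
        .frame_free = anon_frame_free,
        .share = anon_share
    };

as_area_create() then simply records the backend pointer and copies *backend_data by value into the area, as the as.c hunk further down shows.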
generic/src/ddi/ddi.c (rfb84455 → r0ee077ee)

@@ -66,5 +66,5 @@
     task_t *t;
     int flags;
-    count_t i;
+    mem_backend_data_t backend_data = { .d1 = (__native) pf, .d2 = (__native) pages };
 
     /*
@@ -99,8 +99,9 @@
     spinlock_unlock(&tasks_lock);
 
-    flags = AS_AREA_DEVICE | AS_AREA_READ;
+    flags = AS_AREA_READ;
     if (writable)
         flags |= AS_AREA_WRITE;
-    if (!as_area_create(t->as, flags, pages * PAGE_SIZE, vp, AS_AREA_ATTR_NONE, NULL, NULL)) {
+    if (!as_area_create(t->as, flags, pages * PAGE_SIZE, vp, AS_AREA_ATTR_NONE,
+        &phys_backend, &backend_data)) {
         /*
          * The address space area could not have been created.
@@ -112,8 +113,8 @@
     }
 
-    /* Initialize page tables. */
-    for (i = 0; i < pages; i++)
-        as_set_mapping(t->as, vp + i * PAGE_SIZE, pf + i * FRAME_SIZE);
-
+    /*
+     * Mapping is created on-demand during page fault.
+     */
+
     spinlock_unlock(&t->lock);
     interrupts_restore(ipl);
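With the eager as_set_mapping() loop gone, the physical base address and page count travel in backend_data (.d1 and .d2), and phys_backend materializes each mapping on first touch. A sketch of what such a fault handler could look like; the actual handler lives in the newly added backend_phys.c, which this view does not show, so treat its shape here as an assumption:

    /* Sketch of an on-demand fault handler for the physical memory
     * backend. Assumption: backend_phys.c (added by this changeset,
     * not shown) implements something along these lines. */
    int phys_page_fault(as_area_t *area, __address addr, pf_access_t access)
    {
        /* d1 carries the physical base (pf), d2 the page count. */
        __address base = (__address) area->backend_data.d1;

        if (!as_area_check_access(area, access))
            return AS_PF_FAULT;

        ASSERT(addr - area->base < (count_t) area->backend_data.d2 * FRAME_SIZE);
        page_mapping_insert(AS, addr, base + (addr - area->base),
            as_area_get_flags(area));
        if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
            panic("Could not insert used space.\n");

        return AS_PF_OK;
    }

Note how the page_ht.c hunk above cooperates with this scheme: because the hash table now aligns page and frame itself, handlers may pass the raw faulting address straight to page_mapping_insert().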
generic/src/lib/elf.c (rfb84455 → r0ee077ee)

@@ -1,4 +1,5 @@
 /*
  * Copyright (C) 2006 Sergey Bondari
+ * Copyright (C) 2006 Jakub Jermar
  * All rights reserved.
  *
@@ -57,12 +58,4 @@
 static int load_segment(elf_segment_header_t *entry, elf_header_t *elf, as_t *as);
 
-static int elf_page_fault(as_area_t *area, __address addr, pf_access_t access);
-static void elf_frame_free(as_area_t *area, __address page, __address frame);
-
-mem_backend_t elf_backend = {
-    .backend_page_fault = elf_page_fault,
-    .backend_frame_free = elf_frame_free
-};
-
 /** ELF loader
  *
@@ -170,5 +163,5 @@
     as_area_t *a;
     int flags = 0;
-    void *backend_data[2] = { elf, entry };
+    mem_backend_data_t backend_data = { .d1 = (__native) elf, .d2 = (__native) entry };
 
     if (entry->p_align > 1) {
@@ -184,4 +177,5 @@
     if (entry->p_flags & PF_R)
         flags |= AS_AREA_READ;
+    flags |= AS_AREA_CACHEABLE;
 
     /*
@@ -191,5 +185,6 @@
         return EE_UNSUPPORTED;
 
-    a = as_area_create(as, flags, entry->p_memsz, entry->p_vaddr, AS_AREA_ATTR_NONE, &elf_backend, backend_data);
+    a = as_area_create(as, flags, entry->p_memsz, entry->p_vaddr, AS_AREA_ATTR_NONE,
+        &elf_backend, &backend_data);
     if (!a)
         return EE_MEMORY;
@@ -219,109 +214,2 @@
     return EE_OK;
 }
-
-/** Service a page fault in the ELF backend address space area.
- *
- * The address space area and page tables must be already locked.
- *
- * @param area Pointer to the address space area.
- * @param addr Faulting virtual address.
- * @param access Access mode that caused the fault (i.e. read/write/exec).
- *
- * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
- */
-int elf_page_fault(as_area_t *area, __address addr, pf_access_t access)
-{
-    elf_header_t *elf = (elf_header_t *) area->backend_data[0];
-    elf_segment_header_t *entry = (elf_segment_header_t *) area->backend_data[1];
-    __address base, frame;
-    index_t i;
-
-    if (!as_area_check_access(area, access))
-        return AS_PF_FAULT;
-
-    ASSERT((addr >= entry->p_vaddr) && (addr < entry->p_vaddr + entry->p_memsz));
-    i = (addr - entry->p_vaddr) >> PAGE_WIDTH;
-    base = (__address) (((void *) elf) + entry->p_offset);
-    ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);
-
-    if (ALIGN_DOWN(addr, PAGE_SIZE) + PAGE_SIZE < entry->p_vaddr + entry->p_filesz) {
-        /*
-         * Initialized portion of the segment. The memory is backed
-         * directly by the content of the ELF image. Pages are
-         * only copied if the segment is writable so that there
-         * can be more instantions of the same memory ELF image
-         * used at a time. Note that this could be later done
-         * as COW.
-         */
-        if (entry->p_flags & PF_W) {
-            frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
-            memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), FRAME_SIZE);
-        } else {
-            frame = KA2PA(base + i*FRAME_SIZE);
-        }
-    } else if (ALIGN_DOWN(addr, PAGE_SIZE) >= ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
-        /*
-         * This is the uninitialized portion of the segment.
-         * It is not physically present in the ELF image.
-         * To resolve the situation, a frame must be allocated
-         * and cleared.
-         */
-        frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
-        memsetb(PA2KA(frame), FRAME_SIZE, 0);
-    } else {
-        size_t size;
-        /*
-         * The mixed case.
-         * The lower part is backed by the ELF image and
-         * the upper part is anonymous memory.
-         */
-        size = entry->p_filesz - (i<<PAGE_WIDTH);
-        frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
-        memsetb(PA2KA(frame) + size, FRAME_SIZE - size, 0);
-        memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), size);
-    }
-
-    page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
-    if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
-        panic("Could not insert used space.\n");
-
-    return AS_PF_OK;
-}
-
-/** Free a frame that is backed by the ELF backend.
- *
- * The address space area and page tables must be already locked.
- *
- * @param area Pointer to the address space area.
- * @param page Page that is mapped to frame. Must be aligned to PAGE_SIZE.
- * @param frame Frame to be released.
- *
- */
-void elf_frame_free(as_area_t *area, __address page, __address frame)
-{
-    elf_header_t *elf = (elf_header_t *) area->backend_data[0];
-    elf_segment_header_t *entry = (elf_segment_header_t *) area->backend_data[1];
-    __address base;
-    index_t i;
-
-    ASSERT((page >= entry->p_vaddr) && (page < entry->p_vaddr + entry->p_memsz));
-    i = (page - entry->p_vaddr) >> PAGE_WIDTH;
-    base = (__address) (((void *) elf) + entry->p_offset);
-    ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);
-
-    if (page + PAGE_SIZE < ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
-        if (entry->p_flags & PF_W) {
-            /*
-             * Free the frame with the copy of writable segment data.
-             */
-            frame_free(ADDR2PFN(frame));
-        }
-    } else {
-        /*
-         * The frame is either anonymous memory or the mixed case (i.e. lower
-         * part is backed by the ELF image and the upper is anonymous).
-         * In any case, a frame needs to be freed.
-         */
-        frame_free(ADDR2PFN(frame));
-    }
-}
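The removed elf_page_fault() (relocated to the new backend_elf.c) splits every fault into three cases by comparing the faulting page against p_filesz. A worked example with hypothetical segment values, restating the boundary tests above:

    /* Worked example of the three-way split in elf_page_fault(), using
     * hypothetical values: p_vaddr = 0x1000, p_filesz = 0x2800,
     * p_memsz = 0x5000, PAGE_SIZE = FRAME_SIZE = 0x1000.
     * File-backed bytes end at p_vaddr + p_filesz = 0x3800.
     *
     * Fault in page 0x1000 or 0x2000: page + PAGE_SIZE < 0x3800, so the
     *   page is entirely initialized; map the ELF image frame directly
     *   (read-only segment) or copy it into a fresh frame (writable).
     * Fault in page 0x3000: neither test holds (0x4000 is not < 0x3800,
     *   and 0x3000 < ALIGN_UP(0x3800, PAGE_SIZE) = 0x4000); mixed case:
     *   copy size = 0x2800 - 0x2000 = 0x800 bytes from the image and
     *   zero the remaining 0x800 bytes of the frame.
     * Fault in page 0x4000 or 0x5000: page >= 0x4000; fully anonymous,
     *   allocate a frame and clear it.
     */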
generic/src/mm/as.c (rfb84455 → r0ee077ee)

@@ -75,11 +75,4 @@
 #include <arch/interrupt.h>
 
-/** This structure contains information associated with the shared address space area. */
-struct share_info {
-    mutex_t lock;       /**< This lock must be acquired only when the as_area lock is held. */
-    count_t refcount;   /**< This structure can be deallocated if refcount drops to 0. */
-    btree_t pagemap;    /**< B+tree containing complete map of anonymous pages of the shared area. */
-};
-
 as_operations_t *as_operations = NULL;
 
@@ -160,4 +153,4 @@
  */
 as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs,
-    mem_backend_t *backend, void **backend_data)
+    mem_backend_t *backend, mem_backend_data_t *backend_data)
 {
@@ -188,4 +181,5 @@
     mutex_initialize(&a->lock);
 
+    a->as = as;
     a->flags = flags;
     a->attributes = attrs;
@@ -194,8 +188,9 @@
     a->sh_info = NULL;
     a->backend = backend;
-    if (backend_data) {
-        a->backend_data[0] = backend_data[0];
-        a->backend_data[1] = backend_data[1];
-    }
+    if (backend_data)
+        a->backend_data = *backend_data;
+    else
+        memsetb((__address) &a->backend_data, sizeof(a->backend_data), 0);
+
     btree_create(&a->used_space);
 
@@ -236,5 +231,5 @@
     }
 
-    if (area->flags & AS_AREA_DEVICE) {
+    if (area->backend == &phys_backend) {
         /*
          * Remapping of address space areas associated
@@ -327,6 +322,6 @@
                 pte = page_mapping_find(as, b + i*PAGE_SIZE);
                 ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
-                if (area->backend && area->backend->backend_frame_free) {
-                    area->backend->backend_frame_free(area,
+                if (area->backend && area->backend->frame_free) {
+                    area->backend->frame_free(area,
                         b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
                 }
@@ -412,6 +407,6 @@
             pte = page_mapping_find(as, b + i*PAGE_SIZE);
             ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
-            if (area->backend && area->backend->backend_frame_free) {
-                area->backend->backend_frame_free(area,
+            if (area->backend && area->backend->frame_free) {
+                area->backend->frame_free(area,
                     b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
             }
@@ -453,9 +448,9 @@
 /** Share address space area with another or the same address space.
  *
- * Address space area of anonymous memory is shared with a new address
- * space area. If the source address space area has not been shared so
- * far, a new sh_info is created and the original mapping is duplicated
- * in its pagemap B+tree. The new address space area simply gets the
- * sh_info of the source area.
+ * Address space area mapping is shared with a new address space area.
+ * If the source address space area has not been shared so far,
+ * a new sh_info is created. The new address space area simply gets the
+ * sh_info of the source area. The process of duplicating the
+ * mapping is done through the backend share function.
  *
  * @param src_as Pointer to source address space.
@@ -480,5 +475,6 @@
     as_area_t *src_area, *dst_area;
     share_info_t *sh_info;
-    link_t *cur;
+    mem_backend_t *src_backend;
+    mem_backend_data_t src_backend_data;
 
     ipl = interrupts_disable();
@@ -494,7 +490,8 @@
     }
 
-    if (!src_area->backend || src_area->backend != &anon_backend) {
-        /*
-         * As of now, only anonymous address space areas can be shared.
+    if (!src_area->backend || !src_area->backend->share) {
+        /*
+         * There is now backend or the backend does not
+         * know how to share the area.
         */
        mutex_unlock(&src_area->lock);
@@ -506,4 +503,6 @@
     src_size = src_area->pages * PAGE_SIZE;
     src_flags = src_area->flags;
+    src_backend = src_area->backend;
+    src_backend_data = src_area->backend_data;
 
     if (src_size != acc_size) {
@@ -532,32 +531,5 @@
     }
 
-    /*
-     * Copy used portions of the area to sh_info's page map.
-     */
-    mutex_lock(&sh_info->lock);
-    for (cur = src_area->used_space.leaf_head.next; cur != &src_area->used_space.leaf_head; cur = cur->next) {
-        btree_node_t *node;
-        int i;
-
-        node = list_get_instance(cur, btree_node_t, leaf_link);
-        for (i = 0; i < node->keys; i++) {
-            __address base = node->key[i];
-            count_t count = (count_t) node->value[i];
-            int j;
-
-            for (j = 0; j < count; j++) {
-                pte_t *pte;
-
-                page_table_lock(src_as, false);
-                pte = page_mapping_find(src_as, base + j*PAGE_SIZE);
-                ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
-                btree_insert(&sh_info->pagemap, (base + j*PAGE_SIZE) - src_area->base,
-                    (void *) PTE_GET_FRAME(pte), NULL);
-                page_table_unlock(src_as, false);
-            }
-
-        }
-    }
-    mutex_unlock(&sh_info->lock);
+    src_area->backend->share(src_area);
 
     mutex_unlock(&src_area->lock);
@@ -573,5 +545,5 @@
      */
     dst_area = as_area_create(AS, src_flags & dst_flags_mask, src_size, dst_base,
-        AS_AREA_ATTR_PARTIAL, &anon_backend, NULL);
+        AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
     if (!dst_area) {
         /*
@@ -599,37 +571,3 @@
 }
 
-/** Initialize mapping for one page of address space.
- *
- * This functions maps 'page' to 'frame' according
- * to attributes of the address space area to
- * wich 'page' belongs.
- *
- * @param as Target address space.
- * @param page Virtual page within the area.
- * @param frame Physical frame to which page will be mapped.
- */
-void as_set_mapping(as_t *as, __address page, __address frame)
-{
-    as_area_t *area;
-    ipl_t ipl;
-
-    ipl = interrupts_disable();
-    page_table_lock(as, true);
-
-    area = find_area_and_lock(as, page);
-    if (!area) {
-        panic("Page not part of any as_area.\n");
-    }
-
-    ASSERT(!area->backend);
-
-    page_mapping_insert(as, page, frame, as_area_get_flags(area));
-    if (!used_space_insert(area, page, 1))
-        panic("Could not insert used space.\n");
-
-    mutex_unlock(&area->lock);
-    page_table_unlock(as, true);
-    interrupts_restore(ipl);
-}
-
 /** Check access mode for address space area.
@@ -703,5 +641,5 @@
     }
 
-    if (!area->backend || !area->backend->backend_page_fault) {
+    if (!area->backend || !area->backend->page_fault) {
         /*
          * The address space area is not backed by any backend
@@ -736,5 +674,5 @@
      * Resort to the backend page fault handler.
      */
-    if (area->backend->backend_page_fault(area, page, access) != AS_PF_OK) {
+    if (area->backend->page_fault(area, page, access) != AS_PF_OK) {
         page_table_unlock(AS, false);
         mutex_unlock(&area->lock);
@@ -855,5 +793,5 @@
         flags |= PAGE_EXEC;
 
-    if (!(aflags & AS_AREA_DEVICE))
+    if (aflags & AS_AREA_CACHEABLE)
         flags |= PAGE_CACHEABLE;
 
@@ -1498,114 +1436,4 @@
 }
 
-static int anon_page_fault(as_area_t *area, __address addr, pf_access_t access);
-static void anon_frame_free(as_area_t *area, __address page, __address frame);
-
-/*
- * Anonymous memory backend.
- */
-mem_backend_t anon_backend = {
-    .backend_page_fault = anon_page_fault,
-    .backend_frame_free = anon_frame_free
-};
-
-/** Service a page fault in the anonymous memory address space area.
- *
- * The address space area and page tables must be already locked.
- *
- * @param area Pointer to the address space area.
- * @param addr Faulting virtual address.
- * @param access Access mode that caused the fault (i.e. read/write/exec).
- *
- * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
- */
-int anon_page_fault(as_area_t *area, __address addr, pf_access_t access)
-{
-    __address frame;
-
-    if (!as_area_check_access(area, access))
-        return AS_PF_FAULT;
-
-    if (area->sh_info) {
-        btree_node_t *leaf;
-
-        /*
-         * The area is shared, chances are that the mapping can be found
-         * in the pagemap of the address space area share info structure.
-         * In the case that the pagemap does not contain the respective
-         * mapping, a new frame is allocated and the mapping is created.
-         */
-        mutex_lock(&area->sh_info->lock);
-        frame = (__address) btree_search(&area->sh_info->pagemap,
-            ALIGN_DOWN(addr, PAGE_SIZE) - area->base, &leaf);
-        if (!frame) {
-            bool allocate = true;
-            int i;
-
-            /*
-             * Zero can be returned as a valid frame address.
-             * Just a small workaround.
-             */
-            for (i = 0; i < leaf->keys; i++) {
-                if (leaf->key[i] == ALIGN_DOWN(addr, PAGE_SIZE)) {
-                    allocate = false;
-                    break;
-                }
-            }
-            if (allocate) {
-                frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
-                memsetb(PA2KA(frame), FRAME_SIZE, 0);
-
-                /*
-                 * Insert the address of the newly allocated frame to the pagemap.
-                 */
-                btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base, (void *) frame, leaf);
-            }
-        }
-        mutex_unlock(&area->sh_info->lock);
-    } else {
-
-        /*
-         * In general, there can be several reasons that
-         * can have caused this fault.
-         *
-         * - non-existent mapping: the area is an anonymous
-         *   area (e.g. heap or stack) and so far has not been
-         *   allocated a frame for the faulting page
-         *
-         * - non-present mapping: another possibility,
-         *   currently not implemented, would be frame
-         *   reuse; when this becomes a possibility,
-         *   do not forget to distinguish between
-         *   the different causes
-         */
-        frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
-        memsetb(PA2KA(frame), FRAME_SIZE, 0);
-    }
-
-    /*
-     * Map 'page' to 'frame'.
-     * Note that TLB shootdown is not attempted as only new information is being
-     * inserted into page tables.
-     */
-    page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
-    if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
-        panic("Could not insert used space.\n");
-
-    return AS_PF_OK;
-}
-
-/** Free a frame that is backed by the anonymous memory backend.
- *
- * The address space area and page tables must be already locked.
- *
- * @param area Ignored.
- * @param page Ignored.
- * @param frame Frame to be released.
- */
-void anon_frame_free(as_area_t *area, __address page, __address frame)
-{
-    frame_free(ADDR2PFN(frame));
-}
-
 /*
  * Address space related syscalls.
@@ -1615,5 +1443,5 @@
 __native sys_as_area_create(__address address, size_t size, int flags)
 {
-    if (as_area_create(AS, flags, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL))
+    if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL))
         return (__native) address;
     else
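The pagemap-duplication loop deleted from as_area_share() above reappears as the anonymous backend's share() callback. A sketch of that callback, assuming backend_anon.c (added by this changeset but not shown here) hosts essentially the same loop, retargeted through the new area->as back-pointer:

    /* Sketch of anon_share(): duplicate the used portions of the area
     * into sh_info->pagemap. Assumption: backend_anon.c (added by this
     * changeset, not shown) hosts the loop removed from as_area_share(),
     * with src_as/src_area replaced by area->as/area. */
    void anon_share(as_area_t *area)
    {
        link_t *cur;

        mutex_lock(&area->sh_info->lock);
        for (cur = area->used_space.leaf_head.next;
            cur != &area->used_space.leaf_head; cur = cur->next) {
            btree_node_t *node = list_get_instance(cur, btree_node_t, leaf_link);
            int i;

            for (i = 0; i < node->keys; i++) {
                __address base = node->key[i];
                count_t count = (count_t) node->value[i];
                int j;

                for (j = 0; j < count; j++) {
                    pte_t *pte;

                    page_table_lock(area->as, false);
                    pte = page_mapping_find(area->as, base + j*PAGE_SIZE);
                    ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
                    btree_insert(&area->sh_info->pagemap,
                        (base + j*PAGE_SIZE) - area->base,
                        (void *) PTE_GET_FRAME(pte), NULL);
                    page_table_unlock(area->as, false);
                }
            }
        }
        mutex_unlock(&area->sh_info->lock);
    }

This is why the changeset adds the as member to struct as_area: once the loop lives in a backend callback that receives only the area, the containing address space must be reachable from the area itself.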
generic/src/proc/task.c (rfb84455 → r0ee077ee)

@@ -154,5 +154,6 @@
      * Create the data as_area.
      */
-    a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE, LOADED_PROG_STACK_PAGES_NO*PAGE_SIZE,
+    a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
+        LOADED_PROG_STACK_PAGES_NO*PAGE_SIZE,
         USTACK_ADDRESS, AS_AREA_ATTR_NONE, &anon_backend, NULL);
 