Changeset 8182031 in mainline
- Timestamp:
- 2006-05-23T23:09:13Z
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 82da5f5
- Parents:
- 56789125
- Location:
- generic
- Files:
- 5 edited
Legend:
- Unmodified (context lines, prefixed with a space)
- Added (prefixed with +)
- Removed (prefixed with -)
generic/include/mm/as.h
--- r56789125
+++ r8182031

 #define AS_AREA_EXEC 4
 #define AS_AREA_DEVICE 8
+#define AS_AREA_ANON 16
 
 #ifdef KERNEL
…
 
 #define FLAG_AS_KERNEL (1 << 0) /**< Kernel address space. */
-
-/** Address space area attributes. */
-#define AS_AREA_ATTR_NONE 0
-#define AS_AREA_ATTR_PARTIAL 1 /* Not fully initialized area. */
-
-#define AS_PF_FAULT 0 /**< The page fault was not resolved by as_page_fault(). */
-#define AS_PF_OK 1 /**< The page fault was resolved by as_page_fault(). */
-#define AS_PF_DEFER 2 /**< The page fault was caused by memcpy_from_uspace()
-                           or memcpy_to_uspace(). */
-
-/** Address space area structure.
- *
- * Each as_area_t structure describes one contiguous area of virtual memory.
- * In the future, it should not be difficult to support shared areas.
- */
-struct as_area {
-	mutex_t lock;
-	int flags;		/**< Flags related to the memory represented by the address space area. */
-	int attributes;		/**< Attributes related to the address space area itself. */
-	count_t pages;		/**< Size of this area in multiples of PAGE_SIZE. */
-	__address base;		/**< Base address of this area. */
-	btree_t used_space;	/**< Map of used space. */
-};
 
 /** Address space structure.
…
 typedef struct as_operations as_operations_t;
 
+/** Address space area attributes. */
+#define AS_AREA_ATTR_NONE 0
+#define AS_AREA_ATTR_PARTIAL 1 /**< Not fully initialized area. */
+
+#define AS_PF_FAULT 0 /**< The page fault was not resolved by as_page_fault(). */
+#define AS_PF_OK 1 /**< The page fault was resolved by as_page_fault(). */
+#define AS_PF_DEFER 2 /**< The page fault was caused by memcpy_from_uspace()
+                           or memcpy_to_uspace(). */
+
+typedef struct share_info share_info_t;
+typedef struct mem_backend mem_backend_t;
+
+/** Address space area structure.
+ *
+ * Each as_area_t structure describes one contiguous area of virtual memory.
+ * In the future, it should not be difficult to support shared areas.
+ */
+struct as_area {
+	mutex_t lock;
+	int flags;		/**< Flags related to the memory represented by the address space area. */
+	int attributes;		/**< Attributes related to the address space area itself. */
+	count_t pages;		/**< Size of this area in multiples of PAGE_SIZE. */
+	__address base;		/**< Base address of this area. */
+	btree_t used_space;	/**< Map of used space. */
+	share_info_t *sh_info;	/**< If the address space area has been shared, this pointer will
+				     reference the share info structure. */
+	mem_backend_t *backend;	/**< Memory backend backing this address space area. */
+	void *backend_data[2];	/**< Data to be used by the backend. */
+};
+
+/** Address space area backend structure. */
+struct mem_backend {
+	int (* backend_page_fault)(as_area_t *area, __address addr);
+	void (* backend_frame_free)(as_area_t *area, __address page, __address frame);
+};
+
 extern as_t *AS_KERNEL;
 extern as_operations_t *as_operations;
…
 extern void as_init(void);
 extern as_t *as_create(int flags);
-extern as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs);
+extern as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs,
+	mem_backend_t *backend, void **backend_data);
 extern int as_area_resize(as_t *as, __address address, size_t size, int flags);
 extern int as_area_destroy(as_t *as, __address address);
+extern int as_area_get_flags(as_area_t *area);
 extern void as_set_mapping(as_t *as, __address page, __address frame);
 extern int as_page_fault(__address page, istate_t *istate);
…
 extern int as_area_steal(task_t *src_task, __address src_base, size_t acc_size, __address dst_base);
 extern size_t as_get_size(__address base);
+extern int used_space_insert(as_area_t *a, __address page, count_t count);
+extern int used_space_remove(as_area_t *a, __address page, count_t count);
 
 /* Interface to be implemented by architectures. */
…
 extern void as_install_arch(as_t *as);
 #endif /* !def as_install_arch */
+
+/* Backend declarations. */
+extern mem_backend_t anon_backend;
+extern mem_backend_t elf_backend;
 
 /* Address space area related syscalls. */
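The mem_backend interface reduces as_area_t to a generic container and moves all policy into two callbacks: backend_page_fault() populates a faulting page and backend_frame_free() decides whether a frame may be returned to the allocator. A minimal sketch of a hypothetical additional backend, to show how the pieces compose (phys_backend and its callbacks are invented for illustration and are not part of this changeset):

	/* Hypothetical sketch only -- not part of this changeset. A backend
	 * that maps an area 1:1 onto a physical region whose base address
	 * the creator of the area stored in backend_data[0]. */
	static int phys_page_fault(as_area_t *area, __address addr)
	{
		__address base = (__address) area->backend_data[0];
		__address frame = base + ALIGN_DOWN(addr - area->base, PAGE_SIZE);
	
		/* No allocation needed; just wire the page to its frame. */
		page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
		if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
			panic("Could not insert used space.\n");
	
		return AS_PF_OK;
	}
	
	static void phys_frame_free(as_area_t *area, __address page, __address frame)
	{
		/* The frames are not owned by the area; nothing to free. */
	}
	
	mem_backend_t phys_backend = {
		.backend_page_fault = phys_page_fault,
		.backend_frame_free = phys_frame_free
	};

An area using it would be created with as_area_create(as, flags, size, base, AS_AREA_ATTR_NONE, &phys_backend, backend_data), exactly as the ELF loader does with &elf_backend below.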
generic/src/ddi/ddi.c
--- r56789125
+++ r8182031

 	if (writable)
 		flags |= AS_AREA_WRITE;
-	if (!as_area_create(t->as, flags, pages * PAGE_SIZE, vp, AS_AREA_ATTR_NONE)) {
+	if (!as_area_create(t->as, flags, pages * PAGE_SIZE, vp, AS_AREA_ATTR_NONE, NULL, NULL)) {
 		/*
 		 * The address space area could not have been created.
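Device-memory areas deliberately pass NULL for both new arguments: their mappings are created eagerly through the DDI interface, so there is nothing to populate lazily. As the generic/src/mm/as.c diff below shows, as_page_fault() now treats an area with no backend, or a backend without backend_page_fault, as an unresolvable fault; this replaces the former ASSERT(!(area->flags & AS_AREA_DEVICE)).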
generic/src/lib/elf.c
--- r56789125
+++ r8182031

 #include <memstr.h>
 #include <macros.h>
+#include <arch.h>
 
 static char *error_codes[] = {
…
 static int section_header(elf_section_header_t *entry, elf_header_t *elf, as_t *as);
 static int load_segment(elf_segment_header_t *entry, elf_header_t *elf, as_t *as);
+
+static int elf_page_fault(as_area_t *area, __address addr);
+static void elf_frame_free(as_area_t *area, __address page, __address frame);
+
+mem_backend_t elf_backend = {
+	.backend_page_fault = elf_page_fault,
+	.backend_frame_free = elf_frame_free
+};
 
 /** ELF loader
…
 {
 	as_area_t *a;
-	int i, flags = 0;
-	size_t segment_size;
-	__u8 *segment;
+	int flags = 0;
+	void *backend_data[2] = { elf, entry };
 
 	if (entry->p_align > 1) {
…
 		return EE_UNSUPPORTED;
 
-	segment_size = ALIGN_UP(max(entry->p_filesz, entry->p_memsz), PAGE_SIZE);
-	if ((entry->p_flags & PF_W)) {
-		/* If writable, copy data (should be COW in the future) */
-		segment = malloc(segment_size, 0);
-		memsetb((__address) (segment + entry->p_filesz), segment_size - entry->p_filesz, 0);
-		memcpy(segment, (void *) (((__address) elf) + entry->p_offset), entry->p_filesz);
-	} else /* Map identically original data */
-		segment = ((void *) elf) + entry->p_offset;
-
-	a = as_area_create(as, flags, entry->p_memsz, entry->p_vaddr, AS_AREA_ATTR_NONE);
+	a = as_area_create(as, flags, entry->p_memsz, entry->p_vaddr, AS_AREA_ATTR_NONE, &elf_backend, backend_data);
 	if (!a)
 		return EE_MEMORY;
 
-	for (i = 0; i < SIZE2FRAMES(entry->p_filesz); i++) {
-		as_set_mapping(as, entry->p_vaddr + i*PAGE_SIZE, KA2PA(((__address) segment) + i*PAGE_SIZE));
-	}
+	/*
+	 * The segment will be mapped on demand by elf_page_fault().
+	 */
 
 	return EE_OK;
 }
…
 	return EE_OK;
 }
+
+/** Service a page fault in the ELF backend address space area.
+ *
+ * The address space area and page tables must be already locked.
+ *
+ * @param area Pointer to the address space area.
+ * @param addr Faulting virtual address.
+ *
+ * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
+ */
+int elf_page_fault(as_area_t *area, __address addr)
+{
+	elf_header_t *elf = (elf_header_t *) area->backend_data[0];
+	elf_segment_header_t *entry = (elf_segment_header_t *) area->backend_data[1];
+	__address base, frame;
+	index_t i;
+
+	ASSERT((addr >= entry->p_vaddr) && (addr < entry->p_vaddr + entry->p_memsz));
+	i = (addr - entry->p_vaddr) >> PAGE_WIDTH;
+	base = (__address) (((void *) elf) + entry->p_offset);
+	ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);
+
+	if (ALIGN_DOWN(addr, PAGE_SIZE) + PAGE_SIZE < entry->p_vaddr + entry->p_filesz) {
+		/*
+		 * Initialized portion of the segment. The memory is backed
+		 * directly by the content of the ELF image. Pages are
+		 * only copied if the segment is writable so that there
+		 * can be more instantiations of the same memory ELF image
+		 * used at a time. Note that this could be later done
+		 * as COW.
+		 */
+		if (entry->p_flags & PF_W) {
+			frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
+			memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), FRAME_SIZE);
+		} else {
+			frame = KA2PA(base + i*FRAME_SIZE);
+		}
+	} else if (ALIGN_DOWN(addr, PAGE_SIZE) >= ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
+		/*
+		 * This is the uninitialized portion of the segment.
+		 * It is not physically present in the ELF image.
+		 * To resolve the situation, a frame must be allocated
+		 * and cleared.
+		 */
+		frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
+		memsetb(PA2KA(frame), FRAME_SIZE, 0);
+	} else {
+		size_t size;
+
+		/*
+		 * The mixed case.
+		 * The lower part is backed by the ELF image and
+		 * the upper part is anonymous memory.
+		 */
+		size = entry->p_filesz - (i << PAGE_WIDTH);
+		frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
+		memsetb(PA2KA(frame) + size, FRAME_SIZE - size, 0);
+		memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), size);
+	}
+
+	page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
+	if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
+		panic("Could not insert used space.\n");
+
+	return AS_PF_OK;
+}
+
+/** Free a frame that is backed by the ELF backend.
+ *
+ * The address space area and page tables must be already locked.
+ *
+ * @param area Pointer to the address space area.
+ * @param page Page that is mapped to frame. Must be aligned to PAGE_SIZE.
+ * @param frame Frame to be released.
+ */
+void elf_frame_free(as_area_t *area, __address page, __address frame)
+{
+	elf_header_t *elf = (elf_header_t *) area->backend_data[0];
+	elf_segment_header_t *entry = (elf_segment_header_t *) area->backend_data[1];
+	__address base;
+	index_t i;
+
+	ASSERT((page >= entry->p_vaddr) && (page < entry->p_vaddr + entry->p_memsz));
+	i = (page - entry->p_vaddr) >> PAGE_WIDTH;
+	base = (__address) (((void *) elf) + entry->p_offset);
+	ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);
+
+	if (page + PAGE_SIZE < ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
+		if (entry->p_flags & PF_W) {
+			/*
+			 * Free the frame with the copy of writable segment data.
+			 */
+			frame_free(ADDR2PFN(frame));
+		}
+	} else {
+		/*
+		 * The frame is either anonymous memory or the mixed case (i.e. lower
+		 * part is backed by the ELF image and the upper is anonymous).
+		 * In any case, a frame needs to be freed.
+		 */
+		frame_free(ADDR2PFN(frame));
+	}
+}
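To make the three-way split in elf_page_fault() concrete, consider a hypothetical writable segment (all numbers invented for illustration, assuming PAGE_SIZE == FRAME_SIZE == 0x1000 and PAGE_WIDTH == 12) with p_vaddr = 0x1000, p_filesz = 0x1800 and p_memsz = 0x3000:

- A fault at 0x1234 falls on page 0x1000; since 0x1000 + PAGE_SIZE = 0x2000 < p_vaddr + p_filesz = 0x2800, the page lies entirely within the initialized portion. With PF_W set, a fresh frame is allocated and the whole frame is copied from the image (i = 0).
- A fault at 0x3456 falls on page 0x3000; since 0x3000 >= ALIGN_UP(0x2800, PAGE_SIZE) = 0x3000, the page is entirely uninitialized and a zeroed frame is allocated.
- A fault at 0x2345 falls on page 0x2000; neither test holds, so this is the mixed page: with i = 1, size = p_filesz - (i << PAGE_WIDTH) = 0x800, the first 0x800 bytes are copied from the image and the remaining 0x800 bytes are zeroed.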
generic/src/mm/as.c
--- r56789125
+++ r8182031

 #include <arch/interrupt.h>
 
+/** This structure contains information associated with the shared address space area. */
+struct share_info {
+	mutex_t lock;		/**< This lock must be acquired only when the as_area lock is held. */
+	count_t refcount;	/**< This structure can be deallocated if refcount drops to 0. */
+	btree_t pagemap;	/**< B+tree containing complete map of anonymous pages of the shared area. */
+};
+
 as_operations_t *as_operations = NULL;
…
 
 static int area_flags_to_page_flags(int aflags);
-static int get_area_flags(as_area_t *a);
 static as_area_t *find_area_and_lock(as_t *as, __address va);
 static bool check_area_conflicts(as_t *as, __address va, size_t size, as_area_t *avoid_area);
-static int used_space_insert(as_area_t *a, __address page, count_t count);
-static int used_space_remove(as_area_t *a, __address page, count_t count);
+static void sh_info_remove_reference(share_info_t *sh_info);
 
 /** Initialize address space subsystem. */
…
  * @param base Base address of area.
  * @param attrs Attributes of the area.
+ * @param backend Address space area backend. NULL if no backend is used.
+ * @param backend_data NULL or a pointer to an array holding two void *.
  *
  * @return Address space area on success or NULL on failure.
  */
-as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs)
+as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base, int attrs,
+	mem_backend_t *backend, void **backend_data)
 {
 	ipl_t ipl;
…
 	a->pages = SIZE2FRAMES(size);
 	a->base = base;
+	a->sh_info = NULL;
+	a->backend = backend;
+	if (backend_data) {
+		a->backend_data[0] = backend_data[0];
+		a->backend_data[1] = backend_data[1];
+	}
 	btree_create(&a->used_space);
…
 		 * Remapping of address space areas associated
 		 * with memory mapped devices is not supported.
+		 */
+		mutex_unlock(&area->lock);
+		mutex_unlock(&as->lock);
+		interrupts_restore(ipl);
+		return ENOTSUP;
+	}
+	if (area->sh_info) {
+		/*
+		 * Remapping of shared address space areas
+		 * is not supported.
 		 */
 		mutex_unlock(&area->lock);
…
 			pte = page_mapping_find(as, b + i*PAGE_SIZE);
 			ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
-			frame_free(ADDR2PFN(PTE_GET_FRAME(pte)));
+			if (area->backend && area->backend->backend_frame_free) {
+				area->backend->backend_frame_free(area,
+					b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
+			}
 			page_mapping_remove(as, b + i*PAGE_SIZE);
 			page_table_unlock(as, false);
…
 			pte = page_mapping_find(as, b + i*PAGE_SIZE);
 			ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
-			frame_free(ADDR2PFN(PTE_GET_FRAME(pte)));
+			if (area->backend && area->backend->backend_frame_free) {
+				area->backend->backend_frame_free(area,
+					b + i*PAGE_SIZE, PTE_GET_FRAME(pte));
+			}
 			page_mapping_remove(as, b + i*PAGE_SIZE);
 			page_table_unlock(as, false);
…
 	area->attributes |= AS_AREA_ATTR_PARTIAL;
+
+	if (area->sh_info)
+		sh_info_remove_reference(area->sh_info);
+
 	mutex_unlock(&area->lock);
…
 	 * preliminary as_page_fault() calls.
 	 */
-	dst_area = as_area_create(AS, src_flags, src_size, dst_base, AS_AREA_ATTR_PARTIAL);
+	dst_area = as_area_create(AS, src_flags, src_size, dst_base, AS_AREA_ATTR_PARTIAL, &anon_backend, NULL);
 	if (!dst_area) {
 		/*
…
 	}
 
-	page_mapping_insert(as, page, frame, get_area_flags(area));
+	ASSERT(!area->backend);
+
+	page_mapping_insert(as, page, frame, as_area_get_flags(area));
 	if (!used_space_insert(area, page, 1))
 		panic("Could not insert used space.\n");
…
 /** Handle page fault within the current address space.
  *
- * This is the high-level page fault handler.
+ * This is the high-level page fault handler. It decides
+ * whether the page fault can be resolved by any backend
+ * and if so, it invokes the backend to resolve the page
+ * fault.
+ *
  * Interrupts are assumed disabled.
  *
…
  * @param istate Pointer to interrupted state.
  *
- * @return 0 on page fault, 1 on success or 2 if the fault was caused by copy_to_uspace() or copy_from_uspace().
+ * @return AS_PF_FAULT on page fault, AS_PF_OK on success or AS_PF_DEFER if the
+ *         fault was caused by copy_to_uspace() or copy_from_uspace().
  */
 int as_page_fault(__address page, istate_t *istate)
…
 	pte_t *pte;
 	as_area_t *area;
-	__address frame;
 
 	if (!THREAD)
-		return 0;
+		return AS_PF_FAULT;
 
 	ASSERT(AS);
…
 	}
 
-	ASSERT(!(area->flags & AS_AREA_DEVICE));
+	if (!area->backend || !area->backend->backend_page_fault) {
+		/*
+		 * The address space area is not backed by any backend
+		 * or the backend cannot handle page faults.
+		 */
+		mutex_unlock(&area->lock);
+		mutex_unlock(&AS->lock);
+		goto page_fault;
+	}
 
 	page_table_lock(AS, false);
…
 			mutex_unlock(&area->lock);
 			mutex_unlock(&AS->lock);
-			return 1;
-		}
-	}
-
-	/*
-	 * In general, there can be several reasons that
-	 * can have caused this fault.
-	 *
-	 * - non-existent mapping: the area is a scratch
-	 *   area (e.g. stack) and so far has not been
-	 *   allocated a frame for the faulting page
-	 *
-	 * - non-present mapping: another possibility,
-	 *   currently not implemented, would be frame
-	 *   reuse; when this becomes a possibility,
-	 *   do not forget to distinguish between
-	 *   the different causes
+			return AS_PF_OK;
+		}
+	}
+
+	/*
+	 * Resort to the backend page fault handler.
 	 */
-	frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
-	memsetb(PA2KA(frame), FRAME_SIZE, 0);
-
-	/*
-	 * Map 'page' to 'frame'.
-	 * Note that TLB shootdown is not attempted as only new information is being
-	 * inserted into page tables.
-	 */
-	page_mapping_insert(AS, page, frame, get_area_flags(area));
-	if (!used_space_insert(area, ALIGN_DOWN(page, PAGE_SIZE), 1))
-		panic("Could not insert used space.\n");
+	if (area->backend->backend_page_fault(area, page) != AS_PF_OK) {
+		page_table_unlock(AS, false);
+		mutex_unlock(&area->lock);
+		mutex_unlock(&AS->lock);
+		goto page_fault;
+	}
+
 	page_table_unlock(AS, false);
-
 	mutex_unlock(&area->lock);
 	mutex_unlock(&AS->lock);
…
 page_fault:
-	if (!THREAD)
-		return AS_PF_FAULT;
-
 	if (THREAD->in_copy_from_uspace) {
 		THREAD->in_copy_from_uspace = false;
…
  * @return Flags to be used in page_mapping_insert().
  */
-int get_area_flags(as_area_t *a)
+int as_area_get_flags(as_area_t *a)
 {
 	return area_flags_to_page_flags(a->flags);
…
 }
 
+/** Remove reference to address space area share info.
+ *
+ * If the reference count drops to 0, the sh_info is deallocated.
+ *
+ * @param sh_info Pointer to address space area share info.
+ */
+void sh_info_remove_reference(share_info_t *sh_info)
+{
+	bool dealloc = false;
+
+	mutex_lock(&sh_info->lock);
+	ASSERT(sh_info->refcount);
+	if (--sh_info->refcount == 0) {
+		bool cond;
+
+		dealloc = true;
+
+		/*
+		 * Now walk carefully the pagemap B+tree and free/remove
+		 * reference from all frames found there.
+		 */
+		for (cond = true; cond;) {
+			btree_node_t *node;
+
+			ASSERT(!list_empty(&sh_info->pagemap.leaf_head));
+			node = list_get_instance(sh_info->pagemap.leaf_head.next, btree_node_t, leaf_link);
+			if ((cond = node->keys)) {
+				frame_free(ADDR2PFN((__address) node->value[0]));
+				btree_remove(&sh_info->pagemap, node->key[0], node);
+			}
+		}
+	}
+	mutex_unlock(&sh_info->lock);
+
+	if (dealloc) {
+		btree_destroy(&sh_info->pagemap);
+		free(sh_info);
+	}
+}
+
+static int anon_page_fault(as_area_t *area, __address addr);
+static void anon_frame_free(as_area_t *area, __address page, __address frame);
+
+/*
+ * Anonymous memory backend.
+ */
+mem_backend_t anon_backend = {
+	.backend_page_fault = anon_page_fault,
+	.backend_frame_free = anon_frame_free
+};
+
+/** Service a page fault in the anonymous memory address space area.
+ *
+ * The address space area and page tables must be already locked.
+ *
+ * @param area Pointer to the address space area.
+ * @param addr Faulting virtual address.
+ *
+ * @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
+ */
+int anon_page_fault(as_area_t *area, __address addr)
+{
+	__address frame;
+
+	if (area->sh_info) {
+		btree_node_t *leaf;
+
+		/*
+		 * The area is shared, chances are that the mapping can be found
+		 * in the pagemap of the address space area share info structure.
+		 * In the case that the pagemap does not contain the respective
+		 * mapping, a new frame is allocated and the mapping is created.
+		 */
+		mutex_lock(&area->sh_info->lock);
+		frame = (__address) btree_search(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE), &leaf);
+		if (!frame) {
+			bool allocate = true;
+			int i;
+
+			/*
+			 * Zero can be returned as a valid frame address.
+			 * Just a small workaround.
+			 */
+			for (i = 0; i < leaf->keys; i++) {
+				if (leaf->key[i] == ALIGN_DOWN(addr, PAGE_SIZE)) {
+					allocate = false;
+					break;
+				}
+			}
+			if (allocate) {
+				frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
+				memsetb(PA2KA(frame), FRAME_SIZE, 0);
+
+				/*
+				 * Insert the address of the newly allocated frame to the pagemap.
+				 */
+				btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE), (void *) frame, leaf);
+			}
+		}
+		mutex_unlock(&area->sh_info->lock);
+	} else {
+		/*
+		 * In general, there can be several reasons that
+		 * can have caused this fault.
+		 *
+		 * - non-existent mapping: the area is an anonymous
+		 *   area (e.g. heap or stack) and so far has not been
+		 *   allocated a frame for the faulting page
+		 *
+		 * - non-present mapping: another possibility,
+		 *   currently not implemented, would be frame
+		 *   reuse; when this becomes a possibility,
+		 *   do not forget to distinguish between
+		 *   the different causes
+		 */
+		frame = PFN2ADDR(frame_alloc(ONE_FRAME, 0));
+		memsetb(PA2KA(frame), FRAME_SIZE, 0);
+	}
+
+	/*
+	 * Map 'page' to 'frame'.
+	 * Note that TLB shootdown is not attempted as only new information is being
+	 * inserted into page tables.
+	 */
+	page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
+	if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
+		panic("Could not insert used space.\n");
+
+	return AS_PF_OK;
+}
+
+/** Free a frame that is backed by the anonymous memory backend.
+ *
+ * The address space area and page tables must be already locked.
+ *
+ * @param area Ignored.
+ * @param page Ignored.
+ * @param frame Frame to be released.
+ */
+void anon_frame_free(as_area_t *area, __address page, __address frame)
+{
+	frame_free(ADDR2PFN(frame));
+}
+
 /*
  * Address space related syscalls.
…
 __native sys_as_area_create(__address address, size_t size, int flags)
 {
-	if (as_area_create(AS, flags, size, address, AS_AREA_ATTR_NONE))
+	if (as_area_create(AS, flags, size, address, AS_AREA_ATTR_NONE, &anon_backend, NULL))
 		return (__native) address;
 	else
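Taken together, the as.c changes turn as_page_fault() into a thin dispatcher. A sketch of the resulting control flow for an anonymous area (the scenario and function are illustrative, not code from this changeset; the called functions are those introduced above):

	/* Illustrative only; shows the lazy-population path through the
	 * new backend dispatch. */
	static void anon_area_example(as_t *as)
	{
		as_area_t *a;
	
		/* No frames are allocated here; the anon backend populates
		 * the area on demand, one faulting page at a time. */
		a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE, 4 * PAGE_SIZE,
			USTACK_ADDRESS, AS_AREA_ATTR_NONE, &anon_backend, NULL);
	
		/*
		 * On the first access to any page of the area, as_page_fault()
		 * finds the area, sees a non-NULL backend and calls
		 * anon_page_fault(), which:
		 *   - consults area->sh_info->pagemap first if the area is shared,
		 *   - otherwise allocates a zeroed frame,
		 *   - maps it with as_area_get_flags(area) and records the page
		 *     in used_space.
		 * The faulting instruction is then restarted.
		 */
	}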
generic/src/proc/task.c
--- r56789125
+++ r8182031

 	 */
 	a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE, LOADED_PROG_STACK_PAGES_NO*PAGE_SIZE,
-		USTACK_ADDRESS, AS_AREA_ATTR_NONE);
+		USTACK_ADDRESS, AS_AREA_ATTR_NONE, &anon_backend, NULL);
 
 	t = thread_create(uinit, kernel_uarg, task, 0, "uinit");