Changes in kernel/generic/src/mm/as.c [fbcdeb8:7250d2c] in mainline
File: kernel/generic/src/mm/as.c (1 edited)
Legend:
- Unmodified (context lines, prefixed with a space)
- Added (present only in r7250d2c, prefixed with +)
- Removed (present only in rfbcdeb8, prefixed with -)
kernel/generic/src/mm/as.c
--- kernel/generic/src/mm/as.c    (rfbcdeb8)
+++ kernel/generic/src/mm/as.c    (r7250d2c)
@@ -387,71 +387,4 @@
 }
 
-/** Return pointer to unmapped address space area
- *
- * The address space must be already locked when calling
- * this function.
- *
- * @param as Address space.
- * @param bound Lowest address bound.
- * @param size Requested size of the allocation.
- *
- * @return Address of the beginning of unmapped address space area.
- * @return -1 if no suitable address space area was found.
- *
- */
-NO_TRACE static uintptr_t as_get_unmapped_area(as_t *as, uintptr_t bound,
-    size_t size)
-{
-        ASSERT(mutex_locked(&as->lock));
-
-        if (size == 0)
-                return (uintptr_t) -1;
-
-        /*
-         * Make sure we allocate from page-aligned
-         * address. Check for possible overflow in
-         * each step.
-         */
-
-        size_t pages = SIZE2FRAMES(size);
-
-        /*
-         * Find the lowest unmapped address aligned on the size
-         * boundary, not smaller than bound and of the required size.
-         */
-
-        /* First check the bound address itself */
-        uintptr_t addr = ALIGN_UP(bound, PAGE_SIZE);
-        if ((addr >= bound) &&
-            (check_area_conflicts(as, addr, pages, NULL)))
-                return addr;
-
-        /* Eventually check the addresses behind each area */
-        list_foreach(as->as_area_btree.leaf_list, cur) {
-                btree_node_t *node =
-                    list_get_instance(cur, btree_node_t, leaf_link);
-
-                for (btree_key_t i = 0; i < node->keys; i++) {
-                        as_area_t *area = (as_area_t *) node->value[i];
-
-                        mutex_lock(&area->lock);
-
-                        addr =
-                            ALIGN_UP(area->base + P2SZ(area->pages), PAGE_SIZE);
-                        bool avail =
-                            ((addr >= bound) && (addr >= area->base) &&
-                            (check_area_conflicts(as, addr, pages, area)));
-
-                        mutex_unlock(&area->lock);
-
-                        if (avail)
-                                return addr;
-                }
-        }
-
-        /* No suitable address space area found */
-        return (uintptr_t) -1;
-}
-
 /** Create address space area of common attributes.
  *
@@ -461,11 +394,8 @@
  * @param flags Flags of the area memory.
  * @param size Size of area.
+ * @param base Base address of area.
  * @param attrs Attributes of the area.
  * @param backend Address space area backend. NULL if no backend is used.
  * @param backend_data NULL or a pointer to an array holding two void *.
- * @param base Starting virtual address of the area.
- *             If set to -1, a suitable mappable area is found.
- * @param bound Lowest address bound if base is set to -1.
- *              Otherwise ignored.
 *
 * @return Address space area on success or NULL on failure.
@@ -473,7 +403,7 @@
 */
 as_area_t *as_area_create(as_t *as, unsigned int flags, size_t size,
-    unsigned int attrs, mem_backend_t *backend,
-    mem_backend_data_t *backend_data, uintptr_t *base, uintptr_t bound)
-{
-        if ((*base != (uintptr_t) -1) && ((*base % PAGE_SIZE) != 0))
+    uintptr_t base, unsigned int attrs, mem_backend_t *backend,
+    mem_backend_data_t *backend_data)
+{
+        if ((base % PAGE_SIZE) != 0)
                 return NULL;
@@ -490,13 +420,5 @@
         mutex_lock(&as->lock);
 
-        if (*base == (uintptr_t) -1) {
-                *base = as_get_unmapped_area(as, bound, size);
-                if (*base == (uintptr_t) -1) {
-                        mutex_unlock(&as->lock);
-                        return NULL;
-                }
-        }
-
-        if (!check_area_conflicts(as, *base, pages, NULL)) {
+        if (!check_area_conflicts(as, base, pages, NULL)) {
                 mutex_unlock(&as->lock);
                 return NULL;
@@ -512,5 +434,5 @@
         area->pages = pages;
         area->resident = 0;
-        area->base = *base;
+        area->base = base;
         area->sh_info = NULL;
         area->backend = backend;
@@ -530,6 +452,5 @@
 
         btree_create(&area->used_space);
-        btree_insert(&as->as_area_btree, *base, (void *) area,
-            NULL);
+        btree_insert(&as->as_area_btree, base, (void *) area, NULL);
 
         mutex_unlock(&as->lock);
@@ -939,9 +860,6 @@
 * @param acc_size Expected size of the source area.
 * @param dst_as Pointer to destination address space.
+ * @param dst_base Target base address.
 * @param dst_flags_mask Destination address space area flags mask.
- * @param dst_base Target base address. If set to -1,
- *                 a suitable mappable area is found.
- * @param bound Lowest address bound if dst_base is set to -1.
- *              Otherwise ignored.
 *
 * @return Zero on success.
@@ -955,6 +873,5 @@
 */
 int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
-    as_t *dst_as, unsigned int dst_flags_mask, uintptr_t *dst_base,
-    uintptr_t bound)
+    as_t *dst_as, uintptr_t dst_base, unsigned int dst_flags_mask)
 {
         mutex_lock(&src_as->lock);
@@ -1028,7 +945,6 @@
         * to support sharing in less privileged mode.
         */
-        as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask,
-            src_size, AS_AREA_ATTR_PARTIAL, src_backend,
-            &src_backend_data, dst_base, bound);
+        as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask, src_size,
+            dst_base, AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
         if (!dst_area) {
                 /*
@@ -2039,16 +1955,15 @@
 */
 
-sysarg_t sys_as_area_create(uintptr_t base, size_t size, unsigned int flags,
-    uintptr_t bound)
-{
-        uintptr_t virt = base;
-        as_area_t *area = as_area_create(AS, flags | AS_AREA_CACHEABLE, size,
-            AS_AREA_ATTR_NONE, &anon_backend, NULL, &virt, bound);
-        if (area == NULL)
+/** Wrapper for as_area_create(). */
+sysarg_t sys_as_area_create(uintptr_t address, size_t size, unsigned int flags)
+{
+        if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address,
+            AS_AREA_ATTR_NONE, &anon_backend, NULL))
+                return (sysarg_t) address;
+        else
                 return (sysarg_t) -1;
-
-        return (sysarg_t) virt;
-}
-
+}
+
+/** Wrapper for as_area_resize(). */
 sysarg_t sys_as_area_resize(uintptr_t address, size_t size, unsigned int flags)
 {
@@ -2056,4 +1971,5 @@
 }
 
+/** Wrapper for as_area_change_flags(). */
 sysarg_t sys_as_area_change_flags(uintptr_t address, unsigned int flags)
 {
@@ -2061,7 +1977,75 @@
 }
 
+/** Wrapper for as_area_destroy(). */
 sysarg_t sys_as_area_destroy(uintptr_t address)
 {
         return (sysarg_t) as_area_destroy(AS, address);
+}
+
+/** Return pointer to unmapped address space area
+ *
+ * @param base Lowest address bound.
+ * @param size Requested size of the allocation.
+ *
+ * @return Pointer to the beginning of unmapped address space area.
+ *
+ */
+sysarg_t sys_as_get_unmapped_area(uintptr_t base, size_t size)
+{
+        if (size == 0)
+                return 0;
+
+        /*
+         * Make sure we allocate from page-aligned
+         * address. Check for possible overflow in
+         * each step.
+         */
+
+        size_t pages = SIZE2FRAMES(size);
+        uintptr_t ret = 0;
+
+        /*
+         * Find the lowest unmapped address aligned on the sz
+         * boundary, not smaller than base and of the required size.
+         */
+
+        mutex_lock(&AS->lock);
+
+        /* First check the base address itself */
+        uintptr_t addr = ALIGN_UP(base, PAGE_SIZE);
+        if ((addr >= base) &&
+            (check_area_conflicts(AS, addr, pages, NULL)))
+                ret = addr;
+
+        /* Eventually check the addresses behind each area */
+        list_foreach(AS->as_area_btree.leaf_list, cur) {
+                if (ret != 0)
+                        break;
+
+                btree_node_t *node =
+                    list_get_instance(cur, btree_node_t, leaf_link);
+
+                btree_key_t i;
+                for (i = 0; (ret == 0) && (i < node->keys); i++) {
+                        uintptr_t addr;
+
+                        as_area_t *area = (as_area_t *) node->value[i];
+
+                        mutex_lock(&area->lock);
+
+                        addr = ALIGN_UP(area->base + P2SZ(area->pages),
+                            PAGE_SIZE);
+
+                        if ((addr >= base) && (addr >= area->base) &&
+                            (check_area_conflicts(AS, addr, pages, area)))
+                                ret = addr;
+
+                        mutex_unlock(&area->lock);
+                }
+        }
+
+        mutex_unlock(&AS->lock);
+
+        return (sysarg_t) ret;
 }
 
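The heart of the changeset is a change of calling convention. On the rfbcdeb8 side, as_area_create() receives the base address by pointer together with a lowest bound, and a caller may pass *base == (uintptr_t) -1 to have the kernel pick a suitable mappable spot itself (via the internal as_get_unmapped_area()), reporting the chosen address back through the pointer. On the r7250d2c side, base is a plain value that must already be page-aligned, and userspace is expected to locate a free spot first through the sys_as_get_unmapped_area() syscall. A minimal sketch of the two kernel-side call styles, assuming HelenOS kernel context; the flag and backend names are taken from the diff, while size, bound, and the result variables are hypothetical:

/* rfbcdeb8 convention: let the kernel place the area anywhere at or
 * above `bound`; on success, `base` is updated to the chosen address. */
uintptr_t base = (uintptr_t) -1;
as_area_t *a_auto = as_area_create(AS, AS_AREA_READ | AS_AREA_WRITE, size,
    AS_AREA_ATTR_NONE, &anon_backend, NULL, &base, bound);

/* r7250d2c convention: the caller supplies a fixed, page-aligned base,
 * typically obtained beforehand via sys_as_get_unmapped_area(). */
as_area_t *a_fixed = as_area_create(AS, AS_AREA_READ | AS_AREA_WRITE, size,
    base, AS_AREA_ATTR_NONE, &anon_backend, NULL);

The same shift is visible in as_area_share(), which trades its uintptr_t *dst_base and bound parameters for a plain uintptr_t dst_base, and in sys_as_area_create(), which drops the bound argument.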
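Both the removed kernel-internal as_get_unmapped_area() and the added sys_as_get_unmapped_area() syscall implement the same first-fit policy: try the page-aligned bound address itself, then the first page-aligned address behind each existing area, and accept the first candidate that check_area_conflicts() clears. Below is a self-contained model of that search, assuming a plain sorted array in place of the kernel's B+tree and a simplified conflict check; area_t, fits(), and find_unmapped() are names invented for this sketch:

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096
#define ALIGN_UP(addr, align) \
    (((addr) + ((uintptr_t)(align) - 1)) & ~((uintptr_t)(align) - 1))

/* Toy stand-in for as_area_t: a [base, base + size) mapping. */
typedef struct {
        uintptr_t base;
        size_t size;    /* bytes, page-aligned */
} area_t;

/* Simplified check_area_conflicts(): true if [addr, addr + size)
 * overlaps none of the existing areas. */
static bool fits(const area_t *areas, size_t n, uintptr_t addr, size_t size)
{
        for (size_t i = 0; i < n; i++) {
                if (addr < areas[i].base + areas[i].size &&
                    areas[i].base < addr + size)
                        return false;
        }
        return true;
}

/* First-fit search: lowest page-aligned address >= bound with room
 * for size bytes, or (uintptr_t) -1 if none exists. */
static uintptr_t find_unmapped(const area_t *areas, size_t n,
    uintptr_t bound, size_t size)
{
        if (size == 0)
                return (uintptr_t) -1;

        /* First check the bound address itself. */
        uintptr_t addr = ALIGN_UP(bound, PAGE_SIZE);
        if (addr >= bound && fits(areas, n, addr, size))
                return addr;

        /* Then the first address behind each existing area. The extra
         * addr >= areas[i].base test mirrors the kernel's guard against
         * wraparound when base + size overflows. */
        for (size_t i = 0; i < n; i++) {
                addr = ALIGN_UP(areas[i].base + areas[i].size, PAGE_SIZE);
                if (addr >= bound && addr >= areas[i].base &&
                    fits(areas, n, addr, size))
                        return addr;
        }

        return (uintptr_t) -1;
}

int main(void)
{
        /* Two mapped areas with a one-page hole between them. */
        area_t areas[] = {
                { 0x10000, 2 * PAGE_SIZE },    /* [0x10000, 0x12000) */
                { 0x13000, 4 * PAGE_SIZE },    /* [0x13000, 0x17000) */
        };

        /* One page fits into the hole: prints 0x12000. */
        printf("%#" PRIxPTR "\n", find_unmapped(areas, 2, 0x10000, PAGE_SIZE));
        /* Two pages only fit behind the last area: prints 0x17000. */
        printf("%#" PRIxPTR "\n", find_unmapped(areas, 2, 0x10000, 2 * PAGE_SIZE));
        return 0;
}

The kernel versions additionally hold the address space and per-area mutexes and walk the B+tree leaf list, but the placement decision they make is the same.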