Changes in kernel/generic/src/mm/as.c [7250d2c:fbcdeb8] in mainline
File: 1 edited
Legend:
- Unmodified: no prefix
- Added: prefixed with +
- Removed: prefixed with -
kernel/generic/src/mm/as.c
--- r7250d2c
+++ rfbcdeb8

 }
 
+/** Return pointer to unmapped address space area
+ *
+ * The address space must be already locked when calling
+ * this function.
+ *
+ * @param as    Address space.
+ * @param bound Lowest address bound.
+ * @param size  Requested size of the allocation.
+ *
+ * @return Address of the beginning of unmapped address space area.
+ * @return -1 if no suitable address space area was found.
+ *
+ */
+NO_TRACE static uintptr_t as_get_unmapped_area(as_t *as, uintptr_t bound,
+    size_t size)
+{
+    ASSERT(mutex_locked(&as->lock));
+    
+    if (size == 0)
+        return (uintptr_t) -1;
+    
+    /*
+     * Make sure we allocate from page-aligned
+     * address. Check for possible overflow in
+     * each step.
+     */
+    
+    size_t pages = SIZE2FRAMES(size);
+    
+    /*
+     * Find the lowest unmapped address aligned on the size
+     * boundary, not smaller than bound and of the required size.
+     */
+    
+    /* First check the bound address itself */
+    uintptr_t addr = ALIGN_UP(bound, PAGE_SIZE);
+    if ((addr >= bound) &&
+        (check_area_conflicts(as, addr, pages, NULL)))
+        return addr;
+    
+    /* Eventually check the addresses behind each area */
+    list_foreach(as->as_area_btree.leaf_list, cur) {
+        btree_node_t *node =
+            list_get_instance(cur, btree_node_t, leaf_link);
+        
+        for (btree_key_t i = 0; i < node->keys; i++) {
+            as_area_t *area = (as_area_t *) node->value[i];
+            
+            mutex_lock(&area->lock);
+            
+            addr =
+                ALIGN_UP(area->base + P2SZ(area->pages), PAGE_SIZE);
+            bool avail =
+                ((addr >= bound) && (addr >= area->base) &&
+                (check_area_conflicts(as, addr, pages, area)));
+            
+            mutex_unlock(&area->lock);
+            
+            if (avail)
+                return addr;
+        }
+    }
+    
+    /* No suitable address space area found */
+    return (uintptr_t) -1;
+}
+
 /** Create address space area of common attributes.
  *
…
  * @param flags Flags of the area memory.
  * @param size Size of area.
- * @param base Base address of area.
  * @param attrs Attributes of the area.
  * @param backend Address space area backend. NULL if no backend is used.
  * @param backend_data NULL or a pointer to an array holding two void *.
+ * @param base Starting virtual address of the area.
+ *             If set to -1, a suitable mappable area is found.
+ * @param bound Lowest address bound if base is set to -1.
+ *              Otherwise ignored.
  *
  * @return Address space area on success or NULL on failure.
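The added as_get_unmapped_area() is a first-fit search: it first tries the page-aligned bound itself, then the first page-aligned address past the end of each existing area (the B+tree leaf list is ordered by base address), and returns the first candidate that passes check_area_conflicts(). The sketch below models the same walk in standalone C over a sorted array instead of the kernel's B+tree; area_t, range_is_free() and the PAGE_SIZE value are illustrative stand-ins, not HelenOS definitions.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096  /* illustrative; the real value is per-architecture */

    #define ALIGN_UP(addr, align) \
        (((addr) + ((uintptr_t)(align) - 1)) & ~((uintptr_t)(align) - 1))

    /* Simplified stand-in for as_area_t: occupies [base, base + size). */
    typedef struct {
        uintptr_t base;
        size_t size;
    } area_t;

    /* Areas sorted by base address, mirroring the B+tree leaf order. */
    static const area_t areas[] = {
        { 0x10000, 0x4000 },
        { 0x14000, 0x8000 },  /* directly adjacent to the previous area */
        { 0x40000, 0x1000 },
    };

    enum { AREA_COUNT = sizeof(areas) / sizeof(areas[0]) };

    /* Stand-in for check_area_conflicts(): true iff [addr, addr + size) is free. */
    static bool range_is_free(uintptr_t addr, size_t size)
    {
        for (size_t i = 0; i < AREA_COUNT; i++) {
            if ((addr < areas[i].base + areas[i].size) &&
                (areas[i].base < addr + size))
                return false;
        }
        return true;
    }

    /* First-fit walk modelled on the new as_get_unmapped_area(). */
    static uintptr_t get_unmapped_area(uintptr_t bound, size_t size)
    {
        if (size == 0)
            return (uintptr_t) -1;

        /* First check the bound address itself. */
        uintptr_t addr = ALIGN_UP(bound, PAGE_SIZE);
        if ((addr >= bound) && range_is_free(addr, size))
            return addr;

        /* Then the first aligned address behind each existing area. */
        for (size_t i = 0; i < AREA_COUNT; i++) {
            addr = ALIGN_UP(areas[i].base + areas[i].size, PAGE_SIZE);
            /* addr >= bound also guards against ALIGN_UP overflow. */
            if ((addr >= bound) && range_is_free(addr, size))
                return addr;
        }

        /* No suitable gap found. */
        return (uintptr_t) -1;
    }

    int main(void)
    {
        /* The two adjacent areas force the search past both: prints 0x1c000. */
        uintptr_t addr = get_unmapped_area(0x10000, 0x2000);
        printf("0x%lx\n", (unsigned long) addr);
        return 0;
    }

The kernel version additionally keeps the addr >= area->base test, which catches arithmetic overflow when an area ends at the very top of the address space.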
…
  */
 as_area_t *as_area_create(as_t *as, unsigned int flags, size_t size,
-    uintptr_t base, unsigned int attrs, mem_backend_t *backend,
-    mem_backend_data_t *backend_data)
+    unsigned int attrs, mem_backend_t *backend,
+    mem_backend_data_t *backend_data, uintptr_t *base, uintptr_t bound)
 {
-    if ((base % PAGE_SIZE) != 0)
+    if ((*base != (uintptr_t) -1) && ((*base % PAGE_SIZE) != 0))
         return NULL;
…
     mutex_lock(&as->lock);
 
-    if (!check_area_conflicts(as, base, pages, NULL)) {
+    if (*base == (uintptr_t) -1) {
+        *base = as_get_unmapped_area(as, bound, size);
+        if (*base == (uintptr_t) -1) {
+            mutex_unlock(&as->lock);
+            return NULL;
+        }
+    }
+
+    if (!check_area_conflicts(as, *base, pages, NULL)) {
         mutex_unlock(&as->lock);
         return NULL;
…
     area->pages = pages;
     area->resident = 0;
-    area->base = base;
+    area->base = *base;
     area->sh_info = NULL;
     area->backend = backend;
…
 
     btree_create(&area->used_space);
-    btree_insert(&as->as_area_btree, base, (void *) area, NULL);
+    btree_insert(&as->as_area_btree, *base, (void *) area,
+        NULL);
 
     mutex_unlock(&as->lock);
…
  * @param acc_size Expected size of the source area.
  * @param dst_as Pointer to destination address space.
- * @param dst_base Target base address.
  * @param dst_flags_mask Destination address space area flags mask.
+ * @param dst_base Target base address. If set to -1,
+ *                 a suitable mappable area is found.
+ * @param bound Lowest address bound if dst_base is set to -1.
+ *              Otherwise ignored.
  *
  * @return Zero on success.
…
  */
 int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
-    as_t *dst_as, uintptr_t dst_base, unsigned int dst_flags_mask)
+    as_t *dst_as, unsigned int dst_flags_mask, uintptr_t *dst_base,
+    uintptr_t bound)
 {
     mutex_lock(&src_as->lock);
…
      * to support sharing in less privileged mode.
      */
-    as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask, src_size,
-        dst_base, AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data);
+    as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask,
+        src_size, AS_AREA_ATTR_PARTIAL, src_backend,
+        &src_backend_data, dst_base, bound);
     if (!dst_area) {
         /*
…
  */
 
-/** Wrapper for as_area_create(). */
-sysarg_t sys_as_area_create(uintptr_t address, size_t size, unsigned int flags)
-{
-    if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address,
-        AS_AREA_ATTR_NONE, &anon_backend, NULL))
-        return (sysarg_t) address;
-    else
+sysarg_t sys_as_area_create(uintptr_t base, size_t size, unsigned int flags,
+    uintptr_t bound)
+{
+    uintptr_t virt = base;
+    as_area_t *area = as_area_create(AS, flags | AS_AREA_CACHEABLE, size,
+        AS_AREA_ATTR_NONE, &anon_backend, NULL, &virt, bound);
+    if (area == NULL)
         return (sysarg_t) -1;
-}
-
-/** Wrapper for as_area_resize(). */
+
+    return (sysarg_t) virt;
+}
+
 sysarg_t sys_as_area_resize(uintptr_t address, size_t size, unsigned int flags)
 {
…
 }
 
-/** Wrapper for as_area_change_flags(). */
 sysarg_t sys_as_area_change_flags(uintptr_t address, unsigned int flags)
 {
…
 }
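With this change, base is an in/out parameter: the caller either passes a fixed page-aligned address, or the sentinel (uintptr_t) -1 to ask the kernel to pick the lowest suitable address at or above bound; in both cases *base holds the address actually used on return. A minimal standalone sketch of that contract, where alloc_area() and pick_address() are hypothetical stand-ins for as_area_create() and as_get_unmapped_area():

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096  /* illustrative */

    /* Hypothetical stand-in for as_get_unmapped_area(): pretend the
     * lowest free page-aligned address at or above bound is bound itself. */
    static uintptr_t pick_address(uintptr_t bound, size_t size)
    {
        (void) size;
        return (bound + PAGE_SIZE - 1) & ~((uintptr_t) PAGE_SIZE - 1);
    }

    /* Models the new as_area_create() contract: *base is input (a fixed
     * address, or the -1 sentinel) and output (the address actually used). */
    static bool alloc_area(size_t size, uintptr_t *base, uintptr_t bound)
    {
        /* Fixed addresses must be page-aligned, as in the kernel check. */
        if ((*base != (uintptr_t) -1) && ((*base % PAGE_SIZE) != 0))
            return false;

        if (*base == (uintptr_t) -1) {
            *base = pick_address(bound, size);
            if (*base == (uintptr_t) -1)
                return false;  /* no suitable gap */
        }

        /* ... conflict checks and area construction would follow here ... */
        return true;
    }

    int main(void)
    {
        /* Mode 1: the caller chooses the address. */
        uintptr_t fixed = 0x40000000;
        if (alloc_area(0x2000, &fixed, 0))
            printf("fixed:  0x%lx\n", (unsigned long) fixed);

        /* Mode 2: the allocator chooses, anywhere at or above bound. */
        uintptr_t any = (uintptr_t) -1;
        if (alloc_area(0x2000, &any, 0x12345))
            printf("chosen: 0x%lx\n", (unsigned long) any);

        return 0;
    }

as_area_share() adopts the same convention for dst_base, which is why its signature changes in the same way in this changeset.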
-/** Wrapper for as_area_destroy(). */
 sysarg_t sys_as_area_destroy(uintptr_t address)
 {
     return (sysarg_t) as_area_destroy(AS, address);
 }
-
-/** Return pointer to unmapped address space area
- *
- * @param base Lowest address bound.
- * @param size Requested size of the allocation.
- *
- * @return Pointer to the beginning of unmapped address space area.
- *
- */
-sysarg_t sys_as_get_unmapped_area(uintptr_t base, size_t size)
-{
-    if (size == 0)
-        return 0;
-
-    /*
-     * Make sure we allocate from page-aligned
-     * address. Check for possible overflow in
-     * each step.
-     */
-
-    size_t pages = SIZE2FRAMES(size);
-    uintptr_t ret = 0;
-
-    /*
-     * Find the lowest unmapped address aligned on the sz
-     * boundary, not smaller than base and of the required size.
-     */
-
-    mutex_lock(&AS->lock);
-
-    /* First check the base address itself */
-    uintptr_t addr = ALIGN_UP(base, PAGE_SIZE);
-    if ((addr >= base) &&
-        (check_area_conflicts(AS, addr, pages, NULL)))
-        ret = addr;
-
-    /* Eventually check the addresses behind each area */
-    list_foreach(AS->as_area_btree.leaf_list, cur) {
-        if (ret != 0)
-            break;
-
-        btree_node_t *node =
-            list_get_instance(cur, btree_node_t, leaf_link);
-
-        btree_key_t i;
-        for (i = 0; (ret == 0) && (i < node->keys); i++) {
-            uintptr_t addr;
-
-            as_area_t *area = (as_area_t *) node->value[i];
-
-            mutex_lock(&area->lock);
-
-            addr = ALIGN_UP(area->base + P2SZ(area->pages),
-                PAGE_SIZE);
-
-            if ((addr >= base) && (addr >= area->base) &&
-                (check_area_conflicts(AS, addr, pages, area)))
-                ret = addr;
-
-            mutex_unlock(&area->lock);
-        }
-    }
-
-    mutex_unlock(&AS->lock);
-
-    return (sysarg_t) ret;
-}
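Two consequences of removing sys_as_get_unmapped_area() are worth noting. First, finding a free range and creating the area now happen under a single hold of as->lock inside as_area_create(), so another thread can no longer claim the returned gap between the two formerly separate syscalls. Second, the failure value changes: the old syscall returned 0, which conflated failure with address zero, while the new sys_as_area_create() returns (sysarg_t) -1. A small sketch of the caller-side check, where fake_sys_as_area_create() is a stand-in, not the real syscall path:

    #include <stdint.h>
    #include <stdio.h>

    typedef uintptr_t sysarg_t;

    /* Hypothetical stand-in for the new syscall, returning the mapped
     * address or (sysarg_t) -1; here it always pretends to fail. */
    static sysarg_t fake_sys_as_area_create(uintptr_t base, size_t size,
        unsigned int flags, uintptr_t bound)
    {
        (void) base; (void) size; (void) flags; (void) bound;
        return (sysarg_t) -1;
    }

    int main(void)
    {
        /* Ask for a kernel-chosen address anywhere in the address space. */
        sysarg_t virt = fake_sys_as_area_create((uintptr_t) -1, 0x1000, 0, 0);

        /*
         * Failure is -1, not 0: the removed sys_as_get_unmapped_area()
         * returned 0 on failure, indistinguishable from address zero.
         */
        if (virt == (sysarg_t) -1)
            fprintf(stderr, "area creation failed\n");
        else
            printf("mapped at 0x%lx\n", (unsigned long) virt);

        return 0;
    }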