Changes in kernel/generic/src/mm/as.c [c4c2406:57355a40] in mainline
- Files: 1 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/generic/src/mm/as.c
rc4c2406 r57355a40 285 285 /** Check area conflicts with other areas. 286 286 * 287 * @param as Address space. 288 * @param addr Starting virtual address of the area being tested. 289 * @param count Number of pages in the area being tested. 290 * @param avoid Do not touch this area. 287 * @param as Address space. 288 * @param addr Starting virtual address of the area being tested. 289 * @param count Number of pages in the area being tested. 290 * @param guarded True if the area being tested is protected by guard pages. 291 * @param avoid Do not touch this area. 291 292 * 292 293 * @return True if there is no conflict, false otherwise. … … 294 295 */ 295 296 NO_TRACE static bool check_area_conflicts(as_t *as, uintptr_t addr, 296 size_t count, as_area_t *avoid)297 size_t count, bool guarded, as_area_t *avoid) 297 298 { 298 299 ASSERT((addr % PAGE_SIZE) == 0); 299 300 ASSERT(mutex_locked(&as->lock)); 301 302 /* 303 * If the addition of the supposed area address and size overflows, 304 * report conflict. 305 */ 306 if (overflows_into_positive(addr, P2SZ(count))) 307 return false; 300 308 301 309 /* … … 304 312 if (overlaps(addr, P2SZ(count), (uintptr_t) NULL, PAGE_SIZE)) 305 313 return false; 306 314 307 315 /* 308 316 * The leaf node is found in O(log n), where n is proportional to … … 328 336 if (area != avoid) { 329 337 mutex_lock(&area->lock); 330 338 339 /* 340 * If at least one of the two areas are protected 341 * by the AS_AREA_GUARD flag then we must be sure 342 * that they are separated by at least one unmapped 343 * page. 344 */ 345 int const gp = (guarded || 346 (area->flags & AS_AREA_GUARD)) ? 1 : 0; 347 348 /* 349 * The area comes from the left neighbour node, which 350 * means that there already are some areas in the leaf 351 * node, which in turn means that adding gp is safe and 352 * will not cause an integer overflow. 
353 */ 331 354 if (overlaps(addr, P2SZ(count), area->base, 355 P2SZ(area->pages + gp))) { 356 mutex_unlock(&area->lock); 357 return false; 358 } 359 360 mutex_unlock(&area->lock); 361 } 362 } 363 364 node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf); 365 if (node) { 366 area = (as_area_t *) node->value[0]; 367 368 if (area != avoid) { 369 int gp; 370 371 mutex_lock(&area->lock); 372 373 gp = (guarded || (area->flags & AS_AREA_GUARD)) ? 1 : 0; 374 if (gp && overflows(addr, P2SZ(count))) { 375 /* 376 * Guard page not needed if the supposed area 377 * is adjacent to the end of the address space. 378 * We already know that the following test is 379 * going to fail... 380 */ 381 gp--; 382 } 383 384 if (overlaps(addr, P2SZ(count + gp), area->base, 332 385 P2SZ(area->pages))) { 333 386 mutex_unlock(&area->lock); … … 339 392 } 340 393 341 node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf);342 if (node) {343 area = (as_area_t *) node->value[0];344 345 if (area != avoid) {346 mutex_lock(&area->lock);347 348 if (overlaps(addr, P2SZ(count), area->base,349 P2SZ(area->pages))) {350 mutex_unlock(&area->lock);351 return false;352 }353 354 mutex_unlock(&area->lock);355 }356 }357 358 394 /* Second, check the leaf node. */ 359 395 btree_key_t i; 360 396 for (i = 0; i < leaf->keys; i++) { 361 397 area = (as_area_t *) leaf->value[i]; 398 int agp; 399 int gp; 362 400 363 401 if (area == avoid) … … 365 403 366 404 mutex_lock(&area->lock); 367 368 if (overlaps(addr, P2SZ(count), area->base, 369 P2SZ(area->pages))) { 405 406 gp = (guarded || (area->flags & AS_AREA_GUARD)) ? 1 : 0; 407 agp = gp; 408 409 /* 410 * Sanitize the two possible unsigned integer overflows. 
411 */ 412 if (gp && overflows(addr, P2SZ(count))) 413 gp--; 414 if (agp && overflows(area->base, P2SZ(area->pages))) 415 agp--; 416 417 if (overlaps(addr, P2SZ(count + gp), area->base, 418 P2SZ(area->pages + agp))) { 370 419 mutex_unlock(&area->lock); 371 420 return false; … … 377 426 /* 378 427 * So far, the area does not conflict with other areas. 379 * Check if it doesn't conflict with kerneladdress space.428 * Check if it is contained in the user address space. 380 429 */ 381 430 if (!KERNEL_ADDRESS_SPACE_SHADOWED) { 382 return !overlaps(addr, P2SZ(count), KERNEL_ADDRESS_SPACE_START, 383 KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START); 431 return iswithin(USER_ADDRESS_SPACE_START, 432 (USER_ADDRESS_SPACE_END - USER_ADDRESS_SPACE_START) + 1, 433 addr, P2SZ(count)); 384 434 } 385 435 … … 392 442 * this function. 393 443 * 394 * @param as Address space. 395 * @param bound Lowest address bound. 396 * @param size Requested size of the allocation. 444 * @param as Address space. 445 * @param bound Lowest address bound. 446 * @param size Requested size of the allocation. 447 * @param guarded True if the allocation must be protected by guard pages. 397 448 * 398 449 * @return Address of the beginning of unmapped address space area. … … 401 452 */ 402 453 NO_TRACE static uintptr_t as_get_unmapped_area(as_t *as, uintptr_t bound, 403 size_t size )454 size_t size, bool guarded) 404 455 { 405 456 ASSERT(mutex_locked(&as->lock)); … … 423 474 /* First check the bound address itself */ 424 475 uintptr_t addr = ALIGN_UP(bound, PAGE_SIZE); 425 if ((addr >= bound) && 426 (check_area_conflicts(as, addr, pages, NULL))) 427 return addr; 476 if (addr >= bound) { 477 if (guarded) { 478 /* Leave an unmapped page between the lower 479 * bound and the area's start address. 
480 */ 481 addr += P2SZ(1); 482 } 483 484 if (check_area_conflicts(as, addr, pages, guarded, NULL)) 485 return addr; 486 } 428 487 429 488 /* Eventually check the addresses behind each area */ … … 439 498 addr = 440 499 ALIGN_UP(area->base + P2SZ(area->pages), PAGE_SIZE); 500 501 if (guarded || area->flags & AS_AREA_GUARD) { 502 /* We must leave an unmapped page 503 * between the two areas. 504 */ 505 addr += P2SZ(1); 506 } 507 441 508 bool avail = 442 509 ((addr >= bound) && (addr >= area->base) && 443 (check_area_conflicts(as, addr, pages, area)));510 (check_area_conflicts(as, addr, pages, guarded, area))); 444 511 445 512 mutex_unlock(&area->lock); … … 481 548 if (size == 0) 482 549 return NULL; 483 550 484 551 size_t pages = SIZE2FRAMES(size); 485 552 … … 487 554 if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE)) 488 555 return NULL; 556 557 bool const guarded = flags & AS_AREA_GUARD; 489 558 490 559 mutex_lock(&as->lock); 491 560 492 561 if (*base == (uintptr_t) -1) { 493 *base = as_get_unmapped_area(as, bound, size );562 *base = as_get_unmapped_area(as, bound, size, guarded); 494 563 if (*base == (uintptr_t) -1) { 495 564 mutex_unlock(&as->lock); … … 497 566 } 498 567 } 499 500 if (!check_area_conflicts(as, *base, pages, NULL)) { 568 569 if (overflows_into_positive(*base, size)) 570 return NULL; 571 572 if (!check_area_conflicts(as, *base, pages, guarded, NULL)) { 501 573 mutex_unlock(&as->lock); 502 574 return NULL; … … 625 697 return ENOENT; 626 698 } 627 628 if (area->backend == &phys_backend) { 629 /* 630 * Remapping of address space areas associated 631 * with memory mapped devices is not supported. 699 700 if (!area->backend->is_resizable(area)) { 701 /* 702 * The backend does not support resizing for this area. 632 703 */ 633 704 mutex_unlock(&area->lock); … … 776 847 /* 777 848 * Growing the area. 
849 */ 850 851 if (overflows_into_positive(address, P2SZ(pages))) 852 return EINVAL; 853 854 /* 778 855 * Check for overlaps with other address space areas. 779 856 */ 780 if (!check_area_conflicts(as, address, pages, area)) { 857 bool const guarded = area->flags & AS_AREA_GUARD; 858 if (!check_area_conflicts(as, address, pages, guarded, area)) { 781 859 mutex_unlock(&area->lock); 782 860 mutex_unlock(&as->lock); … … 979 1057 } 980 1058 981 if ((!src_area->backend) || (!src_area->backend->share)) { 982 /* 983 * There is no backend or the backend does not 984 * know how to share the area. 1059 if (!src_area->backend->is_shareable(src_area)) { 1060 /* 1061 * The backend does not permit sharing of this area. 985 1062 */ 986 1063 mutex_unlock(&src_area->lock); … … 2054 2131 { 2055 2132 uintptr_t virt = base; 2056 as_area_t *area = as_area_create(AS, flags , size,2133 as_area_t *area = as_area_create(AS, flags | AS_AREA_CACHEABLE, size, 2057 2134 AS_AREA_ATTR_NONE, &anon_backend, NULL, &virt, bound); 2058 2135 if (area == NULL)
Note: See TracChangeset for help on using the changeset viewer.