Changes in kernel/generic/src/mm/as.c [8f80c77:1624aae] in mainline
- File:
- kernel/generic/src/mm/as.c (1 edited)
Legend:
- Unmodified
- Added
- Removed
kernel/generic/src/mm/as.c
r8f80c77 r1624aae 33 33 /** 34 34 * @file 35 * @brief 35 * @brief Address space related functions. 36 36 * 37 37 * This file contains address space manipulation functions. … … 86 86 * Each architecture decides what functions will be used to carry out 87 87 * address space operations such as creating or locking page tables. 88 *89 88 */ 90 89 as_operations_t *as_operations = NULL; … … 92 91 /** 93 92 * Slab for as_t objects. 94 *95 93 */ 96 94 static slab_cache_t *as_slab; … … 102 100 * - as->asid for each as of the as_t type 103 101 * - asids_allocated counter 104 *105 102 */ 106 103 SPINLOCK_INITIALIZE(asidlock); … … 109 106 * This list contains address spaces that are not active on any 110 107 * processor and that have valid ASID. 111 *112 108 */ 113 109 LIST_INITIALIZE(inactive_as_with_asid_head); … … 116 112 as_t *AS_KERNEL = NULL; 117 113 118 static unsigned int area_flags_to_page_flags(unsignedint);114 static int area_flags_to_page_flags(int); 119 115 static as_area_t *find_area_and_lock(as_t *, uintptr_t); 120 116 static bool check_area_conflicts(as_t *, uintptr_t, size_t, as_area_t *); 121 117 static void sh_info_remove_reference(share_info_t *); 122 118 123 static int as_constructor(void *obj, unsignedint flags)119 static int as_constructor(void *obj, int flags) 124 120 { 125 121 as_t *as = (as_t *) obj; 126 122 int rc; 123 127 124 link_initialize(&as->inactive_as_with_asid_link); 128 125 mutex_initialize(&as->lock, MUTEX_PASSIVE); 129 126 130 intrc = as_constructor_arch(as, flags);127 rc = as_constructor_arch(as, flags); 131 128 132 129 return rc; 133 130 } 134 131 135 static size_t as_destructor(void *obj)132 static int as_destructor(void *obj) 136 133 { 137 134 as_t *as = (as_t *) obj; 135 138 136 return as_destructor_arch(as); 139 137 } … … 143 141 { 144 142 as_arch_init(); 145 143 146 144 as_slab = slab_cache_create("as_slab", sizeof(as_t), 0, 147 145 as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED); … … 159 157 /** Create address space. 160 158 * 161 * @param flags Flags that influence the way in wich the address 162 * space is created. 163 * 164 */ 165 as_t *as_create(unsigned int flags) 166 { 167 as_t *as = (as_t *) slab_alloc(as_slab, 0); 159 * @param flags Flags that influence the way in wich the address space 160 * is created. 161 */ 162 as_t *as_create(int flags) 163 { 164 as_t *as; 165 166 as = (as_t *) slab_alloc(as_slab, 0); 168 167 (void) as_create_arch(as, 0); 169 168 … … 177 176 atomic_set(&as->refcount, 0); 178 177 as->cpu_refcount = 0; 179 180 178 #ifdef AS_PAGE_TABLE 181 179 as->genarch.page_table = page_table_create(flags); … … 194 192 * We know that we don't hold any spinlock. 195 193 * 196 * @param as Address space to be destroyed. 197 * 194 * @param as Address space to be destroyed. 198 195 */ 199 196 void as_destroy(as_t *as) 200 197 { 198 ipl_t ipl; 199 bool cond; 201 200 DEADLOCK_PROBE_INIT(p_asidlock); 202 201 … … 215 214 * disabled to prevent nested context switches. We also depend on the 216 215 * fact that so far no spinlocks are held. 
217 *218 216 */ 219 217 preemption_disable(); 220 ipl_t ipl = interrupts_read(); 221 218 ipl = interrupts_read(); 222 219 retry: 223 220 interrupts_disable(); … … 227 224 goto retry; 228 225 } 229 230 /* Interrupts disabled, enable preemption */ 231 preemption_enable(); 232 233 if ((as->asid != ASID_INVALID) && (as != AS_KERNEL)) { 226 preemption_enable(); /* Interrupts disabled, enable preemption */ 227 if (as->asid != ASID_INVALID && as != AS_KERNEL) { 234 228 if (as->cpu_refcount == 0) 235 229 list_remove(&as->inactive_as_with_asid_link); 236 237 230 asid_put(as->asid); 238 231 } 239 240 232 spinlock_unlock(&asidlock); 241 233 242 234 /* 243 235 * Destroy address space areas of the address space. 244 236 * The B+tree must be walked carefully because it is 245 237 * also being destroyed. 246 * 247 */248 bool cond = true;249 while (cond) { 238 */ 239 for (cond = true; cond; ) { 240 btree_node_t *node; 241 250 242 ASSERT(!list_empty(&as->as_area_btree.leaf_head)); 251 252 btree_node_t *node = 253 list_get_instance(as->as_area_btree.leaf_head.next, 243 node = list_get_instance(as->as_area_btree.leaf_head.next, 254 244 btree_node_t, leaf_link); 255 256 if ((cond = node->keys)) 245 246 if ((cond = node->keys)) { 257 247 as_area_destroy(as, node->key[0]); 258 } 259 248 } 249 } 250 260 251 btree_destroy(&as->as_area_btree); 261 262 252 #ifdef AS_PAGE_TABLE 263 253 page_table_destroy(as->genarch.page_table); … … 265 255 page_table_destroy(NULL); 266 256 #endif 267 257 268 258 interrupts_restore(ipl); 269 259 270 260 slab_free(as_slab, as); 271 261 } … … 276 266 * space. 277 267 * 278 * @param as Address space to be held. 279 * 268 * @param a Address space to be held. 280 269 */ 281 270 void as_hold(as_t *as) … … 289 278 * space. 290 279 * 291 * @param asAddress space to be released. 292 * 280 * @param a Address space to be released. 293 281 */ 294 282 void as_release(as_t *as) … … 302 290 * The created address space area is added to the target address space. 303 291 * 304 * @param as Target address space. 305 * @param flags Flags of the area memory. 306 * @param size Size of area. 307 * @param base Base address of area. 308 * @param attrs Attributes of the area. 309 * @param backend Address space area backend. NULL if no backend is used. 310 * @param backend_data NULL or a pointer to an array holding two void *. 311 * 312 * @return Address space area on success or NULL on failure. 313 * 314 */ 315 as_area_t *as_area_create(as_t *as, unsigned int flags, size_t size, 316 uintptr_t base, unsigned int attrs, mem_backend_t *backend, 317 mem_backend_data_t *backend_data) 318 { 292 * @param as Target address space. 293 * @param flags Flags of the area memory. 294 * @param size Size of area. 295 * @param base Base address of area. 296 * @param attrs Attributes of the area. 297 * @param backend Address space area backend. NULL if no backend is used. 298 * @param backend_data NULL or a pointer to an array holding two void *. 299 * 300 * @return Address space area on success or NULL on failure. 301 */ 302 as_area_t * 303 as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs, 304 mem_backend_t *backend, mem_backend_data_t *backend_data) 305 { 306 ipl_t ipl; 307 as_area_t *a; 308 319 309 if (base % PAGE_SIZE) 320 310 return NULL; 321 311 322 312 if (!size) 323 313 return NULL; 324 314 325 315 /* Writeable executable areas are not supported. 
*/ 326 316 if ((flags & AS_AREA_EXEC) && (flags & AS_AREA_WRITE)) 327 317 return NULL; 328 318 329 ipl _t ipl= interrupts_disable();319 ipl = interrupts_disable(); 330 320 mutex_lock(&as->lock); 331 321 … … 336 326 } 337 327 338 as_area_t *area = (as_area_t *) malloc(sizeof(as_area_t), 0); 339 340 mutex_initialize(&area->lock, MUTEX_PASSIVE); 341 342 area->as = as; 343 area->flags = flags; 344 area->attributes = attrs; 345 area->pages = SIZE2FRAMES(size); 346 area->base = base; 347 area->sh_info = NULL; 348 area->backend = backend; 349 328 a = (as_area_t *) malloc(sizeof(as_area_t), 0); 329 330 mutex_initialize(&a->lock, MUTEX_PASSIVE); 331 332 a->as = as; 333 a->flags = flags; 334 a->attributes = attrs; 335 a->pages = SIZE2FRAMES(size); 336 a->base = base; 337 a->sh_info = NULL; 338 a->backend = backend; 350 339 if (backend_data) 351 a rea->backend_data = *backend_data;340 a->backend_data = *backend_data; 352 341 else 353 memsetb(&area->backend_data, sizeof(area->backend_data), 0); 354 355 btree_create(&area->used_space); 356 btree_insert(&as->as_area_btree, base, (void *) area, NULL); 357 342 memsetb(&a->backend_data, sizeof(a->backend_data), 0); 343 344 btree_create(&a->used_space); 345 346 btree_insert(&as->as_area_btree, base, (void *) a, NULL); 347 358 348 mutex_unlock(&as->lock); 359 349 interrupts_restore(ipl); 360 361 return a rea;350 351 return a; 362 352 } 363 353 364 354 /** Find address space area and change it. 365 355 * 366 * @param as Address space. 367 * @param address Virtual address belonging to the area to be changed. 368 * Must be page-aligned. 369 * @param size New size of the virtual memory block starting at 370 * address. 371 * @param flags Flags influencing the remap operation. Currently unused. 372 * 373 * @return Zero on success or a value from @ref errno.h otherwise. 374 * 375 */ 376 int as_area_resize(as_t *as, uintptr_t address, size_t size, unsigned int flags) 377 { 378 ipl_t ipl = interrupts_disable(); 356 * @param as Address space. 357 * @param address Virtual address belonging to the area to be changed. 358 * Must be page-aligned. 359 * @param size New size of the virtual memory block starting at 360 * address. 361 * @param flags Flags influencing the remap operation. Currently unused. 362 * 363 * @return Zero on success or a value from @ref errno.h otherwise. 364 */ 365 int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags) 366 { 367 as_area_t *area; 368 ipl_t ipl; 369 size_t pages; 370 371 ipl = interrupts_disable(); 379 372 mutex_lock(&as->lock); 380 373 381 374 /* 382 375 * Locate the area. 383 * 384 */ 385 as_area_t *area = find_area_and_lock(as, address); 376 */ 377 area = find_area_and_lock(as, address); 386 378 if (!area) { 387 379 mutex_unlock(&as->lock); … … 389 381 return ENOENT; 390 382 } 391 383 392 384 if (area->backend == &phys_backend) { 393 385 /* 394 386 * Remapping of address space areas associated 395 387 * with memory mapped devices is not supported. 396 *397 388 */ 398 389 mutex_unlock(&area->lock); … … 401 392 return ENOTSUP; 402 393 } 403 404 394 if (area->sh_info) { 405 395 /* 406 * Remapping of shared address space areas 396 * Remapping of shared address space areas 407 397 * is not supported. 408 *409 398 */ 410 399 mutex_unlock(&area->lock); … … 413 402 return ENOTSUP; 414 403 } 415 416 size_tpages = SIZE2FRAMES((address - area->base) + size);404 405 pages = SIZE2FRAMES((address - area->base) + size); 417 406 if (!pages) { 418 407 /* 419 408 * Zero size address space areas are not allowed. 
420 *421 409 */ 422 410 mutex_unlock(&area->lock); … … 427 415 428 416 if (pages < area->pages) { 417 bool cond; 429 418 uintptr_t start_free = area->base + pages * PAGE_SIZE; 430 419 431 420 /* 432 421 * Shrinking the area. 433 422 * No need to check for overlaps. 434 * 435 */ 436 437 page_table_lock(as, false); 438 423 */ 424 439 425 /* 440 426 * Start TLB shootdown sequence. 441 *442 427 */ 443 428 tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base + 444 429 pages * PAGE_SIZE, area->pages - pages); 445 430 446 431 /* 447 432 * Remove frames belonging to used space starting from … … 450 435 * is also the right way to remove part of the used_space 451 436 * B+tree leaf list. 452 * 453 */454 bool cond = true;455 while (cond) {437 */ 438 for (cond = true; cond;) { 439 btree_node_t *node; 440 456 441 ASSERT(!list_empty(&area->used_space.leaf_head)); 457 458 btree_node_t *node = 442 node = 459 443 list_get_instance(area->used_space.leaf_head.prev, 460 444 btree_node_t, leaf_link); 445 if ((cond = (bool) node->keys)) { 446 uintptr_t b = node->key[node->keys - 1]; 447 size_t c = 448 (size_t) node->value[node->keys - 1]; 449 unsigned int i = 0; 461 450 462 if ((cond = (bool) node->keys)) { 463 uintptr_t ptr = node->key[node->keys - 1]; 464 size_t size = 465 (size_t) node->value[node->keys - 1]; 466 size_t i = 0; 467 468 if (overlaps(ptr, size * PAGE_SIZE, area->base, 451 if (overlaps(b, c * PAGE_SIZE, area->base, 469 452 pages * PAGE_SIZE)) { 470 453 471 if ( ptr + size* PAGE_SIZE <= start_free) {454 if (b + c * PAGE_SIZE <= start_free) { 472 455 /* 473 456 * The whole interval fits 474 457 * completely in the resized 475 458 * address space area. 476 *477 459 */ 478 460 break; 479 461 } 480 462 481 463 /* 482 464 * Part of the interval corresponding 483 465 * to b and c overlaps with the resized 484 466 * address space area. 485 *486 467 */ 487 488 /* We are almost done */ 489 cond = false; 490 i = (start_free - ptr) >> PAGE_WIDTH; 468 469 cond = false; /* we are almost done */ 470 i = (start_free - b) >> PAGE_WIDTH; 491 471 if (!used_space_remove(area, start_free, 492 size - i)) 493 panic("Cannot remove used space."); 472 c - i)) 473 panic("Cannot remove used " 474 "space."); 494 475 } else { 495 476 /* … … 497 478 * completely removed. 498 479 */ 499 if (!used_space_remove(area, ptr, size)) 500 panic("Cannot remove used space."); 480 if (!used_space_remove(area, b, c)) 481 panic("Cannot remove used " 482 "space."); 501 483 } 502 503 for (; i < size; i++) { 504 pte_t *pte = page_mapping_find(as, ptr + 484 485 for (; i < c; i++) { 486 pte_t *pte; 487 488 page_table_lock(as, false); 489 pte = page_mapping_find(as, b + 505 490 i * PAGE_SIZE); 506 507 ASSERT(pte); 508 ASSERT(PTE_VALID(pte)); 509 ASSERT(PTE_PRESENT(pte)); 510 511 if ((area->backend) && 512 (area->backend->frame_free)) { 491 ASSERT(pte && PTE_VALID(pte) && 492 PTE_PRESENT(pte)); 493 if (area->backend && 494 area->backend->frame_free) { 513 495 area->backend->frame_free(area, 514 ptr+ i * PAGE_SIZE,496 b + i * PAGE_SIZE, 515 497 PTE_GET_FRAME(pte)); 516 498 } 517 518 page_mapping_remove(as, ptr + 499 page_mapping_remove(as, b + 519 500 i * PAGE_SIZE); 501 page_table_unlock(as, false); 520 502 } 521 503 } 522 504 } 523 505 524 506 /* 525 507 * Finish TLB shootdown sequence. 526 * 527 */ 528 508 */ 509 529 510 tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE, 530 511 area->pages - pages); 531 532 512 /* 533 513 * Invalidate software translation caches (e.g. TSB on sparc64). 
534 *535 514 */ 536 515 as_invalidate_translation_cache(as, area->base + … … 538 517 tlb_shootdown_finalize(); 539 518 540 page_table_unlock(as, false);541 519 } else { 542 520 /* 543 521 * Growing the area. 544 522 * Check for overlaps with other address space areas. 545 *546 523 */ 547 524 if (!check_area_conflicts(as, address, pages * PAGE_SIZE, 548 525 area)) { 549 526 mutex_unlock(&area->lock); 550 mutex_unlock(&as->lock); 527 mutex_unlock(&as->lock); 551 528 interrupts_restore(ipl); 552 529 return EADDRNOTAVAIL; 553 530 } 554 } 555 531 } 532 556 533 area->pages = pages; 557 534 … … 559 536 mutex_unlock(&as->lock); 560 537 interrupts_restore(ipl); 561 538 562 539 return 0; 563 540 } … … 565 542 /** Destroy address space area. 566 543 * 567 * @param as Address space. 568 * @param address Address within the area to be deleted. 569 * 570 * @return Zero on success or a value from @ref errno.h on failure. 571 * 544 * @param as Address space. 545 * @param address Address within the area to be deleted. 546 * 547 * @return Zero on success or a value from @ref errno.h on failure. 572 548 */ 573 549 int as_area_destroy(as_t *as, uintptr_t address) 574 550 { 575 ipl_t ipl = interrupts_disable(); 551 as_area_t *area; 552 uintptr_t base; 553 link_t *cur; 554 ipl_t ipl; 555 556 ipl = interrupts_disable(); 576 557 mutex_lock(&as->lock); 577 578 a s_area_t *area = find_area_and_lock(as, address);558 559 area = find_area_and_lock(as, address); 579 560 if (!area) { 580 561 mutex_unlock(&as->lock); … … 582 563 return ENOENT; 583 564 } 584 585 uintptr_t base = area->base; 586 587 page_table_lock(as, false); 588 565 566 base = area->base; 567 589 568 /* 590 569 * Start TLB shootdown sequence. 591 570 */ 592 571 tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages); 593 572 594 573 /* 595 574 * Visit only the pages mapped by used_space B+tree. 596 575 */ 597 link_t *cur;598 576 for (cur = area->used_space.leaf_head.next; 599 577 cur != &area->used_space.leaf_head; cur = cur->next) { 600 578 btree_node_t *node; 601 btree_key_t i;579 unsigned int i; 602 580 603 581 node = list_get_instance(cur, btree_node_t, leaf_link); 604 582 for (i = 0; i < node->keys; i++) { 605 uintptr_t ptr = node->key[i]; 606 size_t size; 583 uintptr_t b = node->key[i]; 584 size_t j; 585 pte_t *pte; 607 586 608 for (size = 0; size < (size_t) node->value[i]; size++) { 609 pte_t *pte = page_mapping_find(as, ptr + size * PAGE_SIZE); 610 611 ASSERT(pte); 612 ASSERT(PTE_VALID(pte)); 613 ASSERT(PTE_PRESENT(pte)); 614 615 if ((area->backend) && 616 (area->backend->frame_free)) { 617 area->backend->frame_free(area, 618 ptr + size * PAGE_SIZE, PTE_GET_FRAME(pte)); 587 for (j = 0; j < (size_t) node->value[i]; j++) { 588 page_table_lock(as, false); 589 pte = page_mapping_find(as, b + j * PAGE_SIZE); 590 ASSERT(pte && PTE_VALID(pte) && 591 PTE_PRESENT(pte)); 592 if (area->backend && 593 area->backend->frame_free) { 594 area->backend->frame_free(area, b + 595 j * PAGE_SIZE, PTE_GET_FRAME(pte)); 619 596 } 620 621 page_ mapping_remove(as, ptr + size * PAGE_SIZE);597 page_mapping_remove(as, b + j * PAGE_SIZE); 598 page_table_unlock(as, false); 622 599 } 623 600 } 624 601 } 625 602 626 603 /* 627 604 * Finish TLB shootdown sequence. 628 * 629 */ 630 605 */ 606 631 607 tlb_invalidate_pages(as->asid, area->base, area->pages); 632 633 608 /* 634 609 * Invalidate potential software translation caches (e.g. TSB on 635 610 * sparc64). 
636 *637 611 */ 638 612 as_invalidate_translation_cache(as, area->base, area->pages); 639 613 tlb_shootdown_finalize(); 640 614 641 page_table_unlock(as, false);642 643 615 btree_destroy(&area->used_space); 644 616 645 617 area->attributes |= AS_AREA_ATTR_PARTIAL; 646 618 647 619 if (area->sh_info) 648 620 sh_info_remove_reference(area->sh_info); 649 621 650 622 mutex_unlock(&area->lock); 651 623 652 624 /* 653 625 * Remove the empty area from address space. 654 *655 626 */ 656 627 btree_remove(&as->as_area_btree, base, NULL); … … 670 641 * sh_info of the source area. The process of duplicating the 671 642 * mapping is done through the backend share function. 672 * 673 * @param src_as 674 * @param src_base 675 * @param acc_size 676 * @param dst_as 677 * @param dst_base 643 * 644 * @param src_as Pointer to source address space. 645 * @param src_base Base address of the source address space area. 646 * @param acc_size Expected size of the source area. 647 * @param dst_as Pointer to destination address space. 648 * @param dst_base Target base address. 678 649 * @param dst_flags_mask Destination address space area flags mask. 679 650 * 680 * @return Zero on success. 681 * @return ENOENT if there is no such task or such address space. 682 * @return EPERM if there was a problem in accepting the area. 683 * @return ENOMEM if there was a problem in allocating destination 684 * address space area. 685 * @return ENOTSUP if the address space area backend does not support 686 * sharing. 687 * 651 * @return Zero on success or ENOENT if there is no such task or if 652 * there is no such address space area, EPERM if there was 653 * a problem in accepting the area or ENOMEM if there was a 654 * problem in allocating destination address space area. 655 * ENOTSUP is returned if the address space area backend 656 * does not support sharing. 688 657 */ 689 658 int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size, 690 as_t *dst_as, uintptr_t dst_base, unsigned int dst_flags_mask) 691 { 692 ipl_t ipl = interrupts_disable(); 659 as_t *dst_as, uintptr_t dst_base, int dst_flags_mask) 660 { 661 ipl_t ipl; 662 int src_flags; 663 size_t src_size; 664 as_area_t *src_area, *dst_area; 665 share_info_t *sh_info; 666 mem_backend_t *src_backend; 667 mem_backend_data_t src_backend_data; 668 669 ipl = interrupts_disable(); 693 670 mutex_lock(&src_as->lock); 694 as_area_t *src_area = find_area_and_lock(src_as, src_base);671 src_area = find_area_and_lock(src_as, src_base); 695 672 if (!src_area) { 696 673 /* 697 674 * Could not find the source address space area. 698 *699 675 */ 700 676 mutex_unlock(&src_as->lock); … … 702 678 return ENOENT; 703 679 } 704 705 if ( (!src_area->backend) || (!src_area->backend->share)) {680 681 if (!src_area->backend || !src_area->backend->share) { 706 682 /* 707 683 * There is no backend or the backend does not 708 684 * know how to share the area. 
709 *710 685 */ 711 686 mutex_unlock(&src_area->lock); … … 715 690 } 716 691 717 s ize_t src_size = src_area->pages * PAGE_SIZE;718 unsigned intsrc_flags = src_area->flags;719 mem_backend_t *src_backend = src_area->backend;720 mem_backend_data_tsrc_backend_data = src_area->backend_data;721 692 src_size = src_area->pages * PAGE_SIZE; 693 src_flags = src_area->flags; 694 src_backend = src_area->backend; 695 src_backend_data = src_area->backend_data; 696 722 697 /* Share the cacheable flag from the original mapping */ 723 698 if (src_flags & AS_AREA_CACHEABLE) 724 699 dst_flags_mask |= AS_AREA_CACHEABLE; 725 726 if ( (src_size != acc_size)||727 ( (src_flags & dst_flags_mask) != dst_flags_mask)) {700 701 if (src_size != acc_size || 702 (src_flags & dst_flags_mask) != dst_flags_mask) { 728 703 mutex_unlock(&src_area->lock); 729 704 mutex_unlock(&src_as->lock); … … 731 706 return EPERM; 732 707 } 733 708 734 709 /* 735 710 * Now we are committed to sharing the area. 736 711 * First, prepare the area for sharing. 737 712 * Then it will be safe to unlock it. 738 * 739 */ 740 share_info_t *sh_info = src_area->sh_info; 713 */ 714 sh_info = src_area->sh_info; 741 715 if (!sh_info) { 742 716 sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0); … … 745 719 btree_create(&sh_info->pagemap); 746 720 src_area->sh_info = sh_info; 747 748 721 /* 749 722 * Call the backend to setup sharing. 750 *751 723 */ 752 724 src_area->backend->share(src_area); … … 756 728 mutex_unlock(&sh_info->lock); 757 729 } 758 730 759 731 mutex_unlock(&src_area->lock); 760 732 mutex_unlock(&src_as->lock); 761 733 762 734 /* 763 735 * Create copy of the source address space area. … … 767 739 * The flags of the source area are masked against dst_flags_mask 768 740 * to support sharing in less privileged mode. 769 * 770 */ 771 as_area_t *dst_area = as_area_create(dst_as, dst_flags_mask, src_size, 772 dst_base, AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data); 741 */ 742 dst_area = as_area_create(dst_as, dst_flags_mask, src_size, dst_base, 743 AS_AREA_ATTR_PARTIAL, src_backend, &src_backend_data); 773 744 if (!dst_area) { 774 745 /* … … 780 751 return ENOMEM; 781 752 } 782 753 783 754 /* 784 755 * Now the destination address space area has been 785 756 * fully initialized. Clear the AS_AREA_ATTR_PARTIAL 786 757 * attribute and set the sh_info. 787 * 788 */ 789 mutex_lock(&dst_as->lock); 758 */ 759 mutex_lock(&dst_as->lock); 790 760 mutex_lock(&dst_area->lock); 791 761 dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL; 792 762 dst_area->sh_info = sh_info; 793 763 mutex_unlock(&dst_area->lock); 794 mutex_unlock(&dst_as->lock); 795 764 mutex_unlock(&dst_as->lock); 765 796 766 interrupts_restore(ipl); 797 767 … … 801 771 /** Check access mode for address space area. 802 772 * 803 * @param area Address space area. 804 * @param access Access mode. 805 * 806 * @return False if access violates area's permissions, true 807 * otherwise. 808 * 773 * The address space area must be locked prior to this call. 774 * 775 * @param area Address space area. 776 * @param access Access mode. 777 * 778 * @return False if access violates area's permissions, true 779 * otherwise. 
809 780 */ 810 781 bool as_area_check_access(as_area_t *area, pf_access_t access) … … 816 787 }; 817 788 818 ASSERT(interrupts_disabled());819 ASSERT(mutex_locked(&area->lock));820 821 789 if (!(area->flags & flagmap[access])) 822 790 return false; … … 839 807 * 840 808 */ 841 int as_area_change_flags(as_t *as, unsigned int flags, uintptr_t address) 842 { 809 int as_area_change_flags(as_t *as, int flags, uintptr_t address) 810 { 811 as_area_t *area; 812 link_t *cur; 813 ipl_t ipl; 814 int page_flags; 815 uintptr_t *old_frame; 816 size_t frame_idx; 817 size_t used_pages; 818 843 819 /* Flags for the new memory mapping */ 844 unsigned intpage_flags = area_flags_to_page_flags(flags);845 846 ipl _t ipl= interrupts_disable();820 page_flags = area_flags_to_page_flags(flags); 821 822 ipl = interrupts_disable(); 847 823 mutex_lock(&as->lock); 848 849 a s_area_t *area = find_area_and_lock(as, address);824 825 area = find_area_and_lock(as, address); 850 826 if (!area) { 851 827 mutex_unlock(&as->lock); … … 853 829 return ENOENT; 854 830 } 855 831 856 832 if ((area->sh_info) || (area->backend != &anon_backend)) { 857 833 /* Copying shared areas not supported yet */ … … 862 838 return ENOTSUP; 863 839 } 864 840 865 841 /* 866 842 * Compute total number of used pages in the used_space B+tree 867 * 868 */ 869 size_t used_pages = 0; 870 link_t *cur; 871 843 */ 844 used_pages = 0; 845 872 846 for (cur = area->used_space.leaf_head.next; 873 847 cur != &area->used_space.leaf_head; cur = cur->next) { 874 btree_node_t *node 875 = list_get_instance(cur, btree_node_t, leaf_link); 876 btree_key_t i; 848 btree_node_t *node; 849 unsigned int i; 877 850 878 for (i = 0; i < node->keys; i++) 851 node = list_get_instance(cur, btree_node_t, leaf_link); 852 for (i = 0; i < node->keys; i++) { 879 853 used_pages += (size_t) node->value[i]; 880 } 881 854 } 855 } 856 882 857 /* An array for storing frame numbers */ 883 uintptr_t *old_frame = malloc(used_pages * sizeof(uintptr_t), 0); 884 885 page_table_lock(as, false); 886 858 old_frame = malloc(used_pages * sizeof(uintptr_t), 0); 859 887 860 /* 888 861 * Start TLB shootdown sequence. 889 *890 862 */ 891 863 tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages); 892 864 893 865 /* 894 866 * Remove used pages from page tables and remember their frame 895 867 * numbers. 
896 * 897 */ 898 size_t frame_idx = 0; 899 868 */ 869 frame_idx = 0; 870 900 871 for (cur = area->used_space.leaf_head.next; 901 872 cur != &area->used_space.leaf_head; cur = cur->next) { 902 btree_node_t *node 903 = list_get_instance(cur, btree_node_t, leaf_link); 904 btree_key_t i; 873 btree_node_t *node; 874 unsigned int i; 905 875 876 node = list_get_instance(cur, btree_node_t, leaf_link); 906 877 for (i = 0; i < node->keys; i++) { 907 uintptr_t ptr = node->key[i]; 908 size_t size; 878 uintptr_t b = node->key[i]; 879 size_t j; 880 pte_t *pte; 909 881 910 for (size = 0; size < (size_t) node->value[i]; size++) { 911 pte_t *pte = page_mapping_find(as, ptr + size * PAGE_SIZE); 912 913 ASSERT(pte); 914 ASSERT(PTE_VALID(pte)); 915 ASSERT(PTE_PRESENT(pte)); 916 882 for (j = 0; j < (size_t) node->value[i]; j++) { 883 page_table_lock(as, false); 884 pte = page_mapping_find(as, b + j * PAGE_SIZE); 885 ASSERT(pte && PTE_VALID(pte) && 886 PTE_PRESENT(pte)); 917 887 old_frame[frame_idx++] = PTE_GET_FRAME(pte); 918 888 919 889 /* Remove old mapping */ 920 page_mapping_remove(as, ptr + size * PAGE_SIZE); 890 page_mapping_remove(as, b + j * PAGE_SIZE); 891 page_table_unlock(as, false); 921 892 } 922 893 } 923 894 } 924 895 925 896 /* 926 897 * Finish TLB shootdown sequence. 927 * 928 */ 929 898 */ 899 930 900 tlb_invalidate_pages(as->asid, area->base, area->pages); 931 901 … … 933 903 * Invalidate potential software translation caches (e.g. TSB on 934 904 * sparc64). 935 *936 905 */ 937 906 as_invalidate_translation_cache(as, area->base, area->pages); 938 907 tlb_shootdown_finalize(); 939 940 page_table_unlock(as, false); 941 908 942 909 /* 943 910 * Set the new flags. 944 911 */ 945 912 area->flags = flags; 946 913 947 914 /* 948 915 * Map pages back in with new flags. This step is kept separate … … 951 918 */ 952 919 frame_idx = 0; 953 920 954 921 for (cur = area->used_space.leaf_head.next; 955 922 cur != &area->used_space.leaf_head; cur = cur->next) { 956 btree_node_t *node 957 = list_get_instance(cur, btree_node_t, leaf_link); 958 btree_key_t i; 923 btree_node_t *node; 924 unsigned int i; 959 925 926 node = list_get_instance(cur, btree_node_t, leaf_link); 960 927 for (i = 0; i < node->keys; i++) { 961 uintptr_t ptr= node->key[i];962 size_t size;928 uintptr_t b = node->key[i]; 929 size_t j; 963 930 964 for ( size = 0; size < (size_t) node->value[i]; size++) {931 for (j = 0; j < (size_t) node->value[i]; j++) { 965 932 page_table_lock(as, false); 966 933 967 934 /* Insert the new mapping */ 968 page_mapping_insert(as, ptr + size* PAGE_SIZE,935 page_mapping_insert(as, b + j * PAGE_SIZE, 969 936 old_frame[frame_idx++], page_flags); 970 937 971 938 page_table_unlock(as, false); 972 939 } 973 940 } 974 941 } 975 942 976 943 free(old_frame); 977 944 978 945 mutex_unlock(&area->lock); 979 946 mutex_unlock(&as->lock); 980 947 interrupts_restore(ipl); 981 948 982 949 return 0; 983 950 } 951 984 952 985 953 /** Handle page fault within the current address space. … … 991 959 * Interrupts are assumed disabled. 992 960 * 993 * @param page Faulting page. 994 * @param access Access mode that caused the page fault (i.e. 995 * read/write/exec). 996 * @param istate Pointer to the interrupted state. 997 * 998 * @return AS_PF_FAULT on page fault. 999 * @return AS_PF_OK on success. 1000 * @return AS_PF_DEFER if the fault was caused by copy_to_uspace() 1001 * or copy_from_uspace(). 1002 * 961 * @param page Faulting page. 962 * @param access Access mode that caused the page fault (i.e. 963 * read/write/exec). 
964 * @param istate Pointer to the interrupted state. 965 * 966 * @return AS_PF_FAULT on page fault, AS_PF_OK on success or 967 * AS_PF_DEFER if the fault was caused by copy_to_uspace() 968 * or copy_from_uspace(). 1003 969 */ 1004 970 int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate) 1005 971 { 972 pte_t *pte; 973 as_area_t *area; 974 1006 975 if (!THREAD) 1007 976 return AS_PF_FAULT; … … 1011 980 1012 981 mutex_lock(&AS->lock); 1013 a s_area_t *area = find_area_and_lock(AS, page);982 area = find_area_and_lock(AS, page); 1014 983 if (!area) { 1015 984 /* 1016 985 * No area contained mapping for 'page'. 1017 986 * Signal page fault to low-level handler. 1018 *1019 987 */ 1020 988 mutex_unlock(&AS->lock); 1021 989 goto page_fault; 1022 990 } 1023 991 1024 992 if (area->attributes & AS_AREA_ATTR_PARTIAL) { 1025 993 /* … … 1029 997 mutex_unlock(&area->lock); 1030 998 mutex_unlock(&AS->lock); 1031 goto page_fault; 1032 } 1033 1034 if ( (!area->backend) || (!area->backend->page_fault)) {999 goto page_fault; 1000 } 1001 1002 if (!area->backend || !area->backend->page_fault) { 1035 1003 /* 1036 1004 * The address space area is not backed by any backend 1037 1005 * or the backend cannot handle page faults. 1038 *1039 1006 */ 1040 1007 mutex_unlock(&area->lock); 1041 1008 mutex_unlock(&AS->lock); 1042 goto page_fault; 1043 } 1044 1009 goto page_fault; 1010 } 1011 1045 1012 page_table_lock(AS, false); 1046 1013 … … 1048 1015 * To avoid race condition between two page faults on the same address, 1049 1016 * we need to make sure the mapping has not been already inserted. 1050 * 1051 */ 1052 pte_t *pte; 1017 */ 1053 1018 if ((pte = page_mapping_find(AS, page))) { 1054 1019 if (PTE_PRESENT(pte)) { … … 1066 1031 /* 1067 1032 * Resort to the backend page fault handler. 1068 *1069 1033 */ 1070 1034 if (area->backend->page_fault(area, page, access) != AS_PF_OK) { … … 1079 1043 mutex_unlock(&AS->lock); 1080 1044 return AS_PF_OK; 1081 1045 1082 1046 page_fault: 1083 1047 if (THREAD->in_copy_from_uspace) { … … 1092 1056 return AS_PF_FAULT; 1093 1057 } 1094 1058 1095 1059 return AS_PF_DEFER; 1096 1060 } … … 1104 1068 * When this function is enetered, no spinlocks may be held. 1105 1069 * 1106 * @param old Old address space or NULL. 1107 * @param new New address space. 1108 * 1070 * @param old Old address space or NULL. 1071 * @param new New address space. 1109 1072 */ 1110 1073 void as_switch(as_t *old_as, as_t *new_as) … … 1112 1075 DEADLOCK_PROBE_INIT(p_asidlock); 1113 1076 preemption_disable(); 1114 1115 1077 retry: 1116 1078 (void) interrupts_disable(); 1117 1079 if (!spinlock_trylock(&asidlock)) { 1118 /* 1080 /* 1119 1081 * Avoid deadlock with TLB shootdown. 1120 1082 * We can enable interrupts here because 1121 1083 * preemption is disabled. We should not be 1122 1084 * holding any other lock. 1123 *1124 1085 */ 1125 1086 (void) interrupts_enable(); … … 1128 1089 } 1129 1090 preemption_enable(); 1130 1091 1131 1092 /* 1132 1093 * First, take care of the old address space. 1133 */ 1094 */ 1134 1095 if (old_as) { 1135 1096 ASSERT(old_as->cpu_refcount); 1136 1137 if ((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) { 1097 if((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) { 1138 1098 /* 1139 1099 * The old address space is no longer active on … … 1141 1101 * list of inactive address spaces with assigned 1142 1102 * ASID. 
1143 *1144 1103 */ 1145 1104 ASSERT(old_as->asid != ASID_INVALID); 1146 1147 1105 list_append(&old_as->inactive_as_with_asid_link, 1148 1106 &inactive_as_with_asid_head); 1149 1107 } 1150 1108 1151 1109 /* 1152 1110 * Perform architecture-specific tasks when the address space 1153 1111 * is being removed from the CPU. 1154 *1155 1112 */ 1156 1113 as_deinstall_arch(old_as); 1157 1114 } 1158 1115 1159 1116 /* 1160 1117 * Second, prepare the new address space. 1161 *1162 1118 */ 1163 1119 if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) { … … 1167 1123 new_as->asid = asid_get(); 1168 1124 } 1169 1170 1125 #ifdef AS_PAGE_TABLE 1171 1126 SET_PTL0_ADDRESS(new_as->genarch.page_table); … … 1175 1130 * Perform architecture-specific steps. 1176 1131 * (e.g. write ASID to hardware register etc.) 1177 *1178 1132 */ 1179 1133 as_install_arch(new_as); 1180 1134 1181 1135 spinlock_unlock(&asidlock); 1182 1136 … … 1186 1140 /** Convert address space area flags to page flags. 1187 1141 * 1188 * @param aflags Flags of some address space area. 1189 * 1190 * @return Flags to be passed to page_mapping_insert(). 1191 * 1192 */ 1193 unsigned int area_flags_to_page_flags(unsigned int aflags) 1194 { 1195 unsigned int flags = PAGE_USER | PAGE_PRESENT; 1142 * @param aflags Flags of some address space area. 1143 * 1144 * @return Flags to be passed to page_mapping_insert(). 1145 */ 1146 int area_flags_to_page_flags(int aflags) 1147 { 1148 int flags; 1149 1150 flags = PAGE_USER | PAGE_PRESENT; 1196 1151 1197 1152 if (aflags & AS_AREA_READ) … … 1206 1161 if (aflags & AS_AREA_CACHEABLE) 1207 1162 flags |= PAGE_CACHEABLE; 1208 1163 1209 1164 return flags; 1210 1165 } … … 1212 1167 /** Compute flags for virtual address translation subsytem. 1213 1168 * 1214 * @param area Address space area. 1215 * 1216 * @return Flags to be used in page_mapping_insert(). 1217 * 1218 */ 1219 unsigned int as_area_get_flags(as_area_t *area) 1220 { 1221 ASSERT(interrupts_disabled()); 1222 ASSERT(mutex_locked(&area->lock)); 1223 1224 return area_flags_to_page_flags(area->flags); 1169 * The address space area must be locked. 1170 * Interrupts must be disabled. 1171 * 1172 * @param a Address space area. 1173 * 1174 * @return Flags to be used in page_mapping_insert(). 1175 */ 1176 int as_area_get_flags(as_area_t *a) 1177 { 1178 return area_flags_to_page_flags(a->flags); 1225 1179 } 1226 1180 … … 1230 1184 * table. 1231 1185 * 1232 * @param flags Flags saying whether the page table is for the kernel 1233 * address space. 1234 * 1235 * @return First entry of the page table. 1236 * 1237 */ 1238 pte_t *page_table_create(unsigned int flags) 1186 * @param flags Flags saying whether the page table is for the kernel 1187 * address space. 1188 * 1189 * @return First entry of the page table. 1190 */ 1191 pte_t *page_table_create(int flags) 1239 1192 { 1240 1193 ASSERT(as_operations); … … 1248 1201 * Destroy page table in architecture specific way. 1249 1202 * 1250 * @param page_table Physical address of PTL0. 1251 * 1203 * @param page_table Physical address of PTL0. 1252 1204 */ 1253 1205 void page_table_destroy(pte_t *page_table) … … 1263 1215 * This function should be called before any page_mapping_insert(), 1264 1216 * page_mapping_remove() and page_mapping_find(). 1265 * 1217 * 1266 1218 * Locking order is such that address space areas must be locked 1267 1219 * prior to this call. Address space can be locked prior to this 1268 1220 * call in which case the lock argument is false. 1269 1221 * 1270 * @param as Address space. 
1271 * @param lock If false, do not attempt to lock as->lock. 1272 * 1222 * @param as Address space. 1223 * @param lock If false, do not attempt to lock as->lock. 1273 1224 */ 1274 1225 void page_table_lock(as_t *as, bool lock) … … 1282 1233 /** Unlock page table. 1283 1234 * 1284 * @param as Address space. 1285 * @param unlock If false, do not attempt to unlock as->lock. 1286 * 1235 * @param as Address space. 1236 * @param unlock If false, do not attempt to unlock as->lock. 1287 1237 */ 1288 1238 void page_table_unlock(as_t *as, bool unlock) … … 1294 1244 } 1295 1245 1296 /** Test whether page tables are locked.1297 *1298 * @param as Address space where the page tables belong.1299 *1300 * @return True if the page tables belonging to the address soace1301 * are locked, otherwise false.1302 */1303 bool page_table_locked(as_t *as)1304 {1305 ASSERT(as_operations);1306 ASSERT(as_operations->page_table_locked);1307 1308 return as_operations->page_table_locked(as);1309 }1310 1311 1246 1312 1247 /** Find address space area and lock it. 1313 1248 * 1314 * @param as Address space. 1315 * @param va Virtual address. 1316 * 1317 * @return Locked address space area containing va on success or 1318 * NULL on failure. 1319 * 1249 * The address space must be locked and interrupts must be disabled. 1250 * 1251 * @param as Address space. 1252 * @param va Virtual address. 1253 * 1254 * @return Locked address space area containing va on success or 1255 * NULL on failure. 1320 1256 */ 1321 1257 as_area_t *find_area_and_lock(as_t *as, uintptr_t va) 1322 1258 { 1323 ASSERT(interrupts_disabled());1324 ASSERT(mutex_locked(&as->lock));1325 1326 btree_node_t *leaf;1327 a s_area_t *area= (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);1328 if (a rea) {1259 as_area_t *a; 1260 btree_node_t *leaf, *lnode; 1261 unsigned int i; 1262 1263 a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf); 1264 if (a) { 1329 1265 /* va is the base address of an address space area */ 1330 mutex_lock(&a rea->lock);1331 return a rea;1266 mutex_lock(&a->lock); 1267 return a; 1332 1268 } 1333 1269 … … 1336 1272 * to find out whether this is a miss or va belongs to an address 1337 1273 * space area found there. 1338 *1339 1274 */ 1340 1275 1341 1276 /* First, search the leaf node itself. */ 1342 btree_key_t i;1343 1344 1277 for (i = 0; i < leaf->keys; i++) { 1345 area = (as_area_t *) leaf->value[i]; 1346 1347 mutex_lock(&area->lock); 1348 1349 if ((area->base <= va) && (va < area->base + area->pages * PAGE_SIZE)) 1350 return area; 1351 1352 mutex_unlock(&area->lock); 1353 } 1354 1278 a = (as_area_t *) leaf->value[i]; 1279 mutex_lock(&a->lock); 1280 if ((a->base <= va) && (va < a->base + a->pages * PAGE_SIZE)) { 1281 return a; 1282 } 1283 mutex_unlock(&a->lock); 1284 } 1285 1355 1286 /* 1356 1287 * Second, locate the left neighbour and test its last record. 1357 1288 * Because of its position in the B+tree, it must have base < va. 
1358 * 1359 */ 1360 btree_node_t *lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf); 1289 */ 1290 lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf); 1361 1291 if (lnode) { 1362 area = (as_area_t *) lnode->value[lnode->keys - 1]; 1363 1364 mutex_lock(&area->lock); 1365 1366 if (va < area->base + area->pages * PAGE_SIZE) 1367 return area; 1368 1369 mutex_unlock(&area->lock); 1370 } 1371 1292 a = (as_area_t *) lnode->value[lnode->keys - 1]; 1293 mutex_lock(&a->lock); 1294 if (va < a->base + a->pages * PAGE_SIZE) { 1295 return a; 1296 } 1297 mutex_unlock(&a->lock); 1298 } 1299 1372 1300 return NULL; 1373 1301 } … … 1375 1303 /** Check area conflicts with other areas. 1376 1304 * 1377 * @param as Address space. 1378 * @param va Starting virtual address of the area being tested. 1379 * @param size Size of the area being tested. 1380 * @param avoid_area Do not touch this area. 1381 * 1382 * @return True if there is no conflict, false otherwise. 1383 * 1384 */ 1385 bool check_area_conflicts(as_t *as, uintptr_t va, size_t size, 1386 as_area_t *avoid_area) 1387 { 1388 ASSERT(interrupts_disabled()); 1389 ASSERT(mutex_locked(&as->lock)); 1390 1305 * The address space must be locked and interrupts must be disabled. 1306 * 1307 * @param as Address space. 1308 * @param va Starting virtual address of the area being tested. 1309 * @param size Size of the area being tested. 1310 * @param avoid_area Do not touch this area. 1311 * 1312 * @return True if there is no conflict, false otherwise. 1313 */ 1314 bool 1315 check_area_conflicts(as_t *as, uintptr_t va, size_t size, as_area_t *avoid_area) 1316 { 1317 as_area_t *a; 1318 btree_node_t *leaf, *node; 1319 unsigned int i; 1320 1391 1321 /* 1392 1322 * We don't want any area to have conflicts with NULL page. 1393 *1394 1323 */ 1395 1324 if (overlaps(va, size, NULL, PAGE_SIZE)) … … 1402 1331 * record in the left neighbour, the leftmost record in the right 1403 1332 * neighbour and all records in the leaf node itself. 1404 * 1405 */ 1406 btree_node_t *leaf; 1407 as_area_t *area = 1408 (as_area_t *) btree_search(&as->as_area_btree, va, &leaf); 1409 if (area) { 1410 if (area != avoid_area) 1333 */ 1334 1335 if ((a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf))) { 1336 if (a != avoid_area) 1411 1337 return false; 1412 1338 } 1413 1339 1414 1340 /* First, check the two border cases. 
*/ 1415 btree_node_t *node = 1416 btree_leaf_node_left_neighbour(&as->as_area_btree, leaf); 1417 if (node) { 1418 area = (as_area_t *) node->value[node->keys - 1]; 1419 1420 mutex_lock(&area->lock); 1421 1422 if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) { 1423 mutex_unlock(&area->lock); 1341 if ((node = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf))) { 1342 a = (as_area_t *) node->value[node->keys - 1]; 1343 mutex_lock(&a->lock); 1344 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) { 1345 mutex_unlock(&a->lock); 1424 1346 return false; 1425 1347 } 1426 1427 mutex_unlock(&area->lock); 1428 } 1429 1348 mutex_unlock(&a->lock); 1349 } 1430 1350 node = btree_leaf_node_right_neighbour(&as->as_area_btree, leaf); 1431 1351 if (node) { 1432 area = (as_area_t *) node->value[0]; 1433 1434 mutex_lock(&area->lock); 1435 1436 if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) { 1437 mutex_unlock(&area->lock); 1352 a = (as_area_t *) node->value[0]; 1353 mutex_lock(&a->lock); 1354 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) { 1355 mutex_unlock(&a->lock); 1438 1356 return false; 1439 1357 } 1440 1441 mutex_unlock(&area->lock); 1358 mutex_unlock(&a->lock); 1442 1359 } 1443 1360 1444 1361 /* Second, check the leaf node. */ 1445 btree_key_t i;1446 1362 for (i = 0; i < leaf->keys; i++) { 1447 a rea= (as_area_t *) leaf->value[i];1448 1449 if (a rea== avoid_area)1363 a = (as_area_t *) leaf->value[i]; 1364 1365 if (a == avoid_area) 1450 1366 continue; 1451 1452 mutex_lock(&area->lock); 1453 1454 if (overlaps(va, size, area->base, area->pages * PAGE_SIZE)) { 1455 mutex_unlock(&area->lock); 1367 1368 mutex_lock(&a->lock); 1369 if (overlaps(va, size, a->base, a->pages * PAGE_SIZE)) { 1370 mutex_unlock(&a->lock); 1456 1371 return false; 1457 1372 } 1458 1459 mutex_unlock(&area->lock); 1460 } 1461 1373 mutex_unlock(&a->lock); 1374 } 1375 1462 1376 /* 1463 1377 * So far, the area does not conflict with other areas. 1464 1378 * Check if it doesn't conflict with kernel address space. 1465 * 1466 */ 1379 */ 1467 1380 if (!KERNEL_ADDRESS_SPACE_SHADOWED) { 1468 return !overlaps(va, size, 1381 return !overlaps(va, size, 1469 1382 KERNEL_ADDRESS_SPACE_START, 1470 1383 KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START); 1471 1384 } 1472 1385 1473 1386 return true; 1474 1387 } … … 1476 1389 /** Return size of the address space area with given base. 1477 1390 * 1478 * @param base Arbitrary address inside the address space area. 1479 * 1480 * @return Size of the address space area in bytes or zero if it 1481 * does not exist. 1482 * 1391 * @param base Arbitrary address insede the address space area. 1392 * 1393 * @return Size of the address space area in bytes or zero if it 1394 * does not exist. 1483 1395 */ 1484 1396 size_t as_area_get_size(uintptr_t base) 1485 1397 { 1398 ipl_t ipl; 1399 as_area_t *src_area; 1486 1400 size_t size; 1487 1488 ipl_t ipl = interrupts_disable(); 1489 page_table_lock(AS, true); 1490 as_area_t *src_area = find_area_and_lock(AS, base); 1491 1401 1402 ipl = interrupts_disable(); 1403 src_area = find_area_and_lock(AS, base); 1492 1404 if (src_area) { 1493 1405 size = src_area->pages * PAGE_SIZE; 1494 1406 mutex_unlock(&src_area->lock); 1495 } else 1407 } else { 1496 1408 size = 0; 1497 1498 page_table_unlock(AS, true); 1409 } 1499 1410 interrupts_restore(ipl); 1500 1411 return size; … … 1505 1416 * The address space area must be already locked. 1506 1417 * 1507 * @param area Address space area. 1508 * @param page First page to be marked. 
1509 * @param count Number of page to be marked. 1510 * 1511 * @return Zero on failure and non-zero on success. 1512 * 1513 */ 1514 int used_space_insert(as_area_t *area, uintptr_t page, size_t count) 1515 { 1516 ASSERT(mutex_locked(&area->lock)); 1418 * @param a Address space area. 1419 * @param page First page to be marked. 1420 * @param count Number of page to be marked. 1421 * 1422 * @return Zero on failure and non-zero on success. 1423 */ 1424 int used_space_insert(as_area_t *a, uintptr_t page, size_t count) 1425 { 1426 btree_node_t *leaf, *node; 1427 size_t pages; 1428 unsigned int i; 1429 1517 1430 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE)); 1518 1431 ASSERT(count); 1519 1520 btree_node_t *leaf; 1521 size_t pages = (size_t) btree_search(&area->used_space, page, &leaf); 1432 1433 pages = (size_t) btree_search(&a->used_space, page, &leaf); 1522 1434 if (pages) { 1523 1435 /* 1524 1436 * We hit the beginning of some used space. 1525 *1526 1437 */ 1527 1438 return 0; 1528 1439 } 1529 1440 1530 1441 if (!leaf->keys) { 1531 btree_insert(&a rea->used_space, page, (void *) count, leaf);1442 btree_insert(&a->used_space, page, (void *) count, leaf); 1532 1443 return 1; 1533 1444 } 1534 1535 btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, leaf);1445 1446 node = btree_leaf_node_left_neighbour(&a->used_space, leaf); 1536 1447 if (node) { 1537 1448 uintptr_t left_pg = node->key[node->keys - 1]; … … 1544 1455 * somewhere between the rightmost interval of 1545 1456 * the left neigbour and the first interval of the leaf. 1546 * 1547 */ 1548 1457 */ 1458 1549 1459 if (page >= right_pg) { 1550 1460 /* Do nothing. */ … … 1556 1466 right_cnt * PAGE_SIZE)) { 1557 1467 /* The interval intersects with the right interval. */ 1558 return 0; 1468 return 0; 1559 1469 } else if ((page == left_pg + left_cnt * PAGE_SIZE) && 1560 1470 (page + count * PAGE_SIZE == right_pg)) { … … 1562 1472 * The interval can be added by merging the two already 1563 1473 * present intervals. 1564 *1565 1474 */ 1566 1475 node->value[node->keys - 1] += count + right_cnt; 1567 btree_remove(&a rea->used_space, right_pg, leaf);1568 return 1; 1476 btree_remove(&a->used_space, right_pg, leaf); 1477 return 1; 1569 1478 } else if (page == left_pg + left_cnt * PAGE_SIZE) { 1570 /* 1479 /* 1571 1480 * The interval can be added by simply growing the left 1572 1481 * interval. 1573 *1574 1482 */ 1575 1483 node->value[node->keys - 1] += count; … … 1580 1488 * the right interval down and increasing its size 1581 1489 * accordingly. 1582 *1583 1490 */ 1584 1491 leaf->value[0] += count; … … 1589 1496 * The interval is between both neigbouring intervals, 1590 1497 * but cannot be merged with any of them. 1591 *1592 1498 */ 1593 btree_insert(&a rea->used_space, page, (void *) count,1499 btree_insert(&a->used_space, page, (void *) count, 1594 1500 leaf); 1595 1501 return 1; … … 1598 1504 uintptr_t right_pg = leaf->key[0]; 1599 1505 size_t right_cnt = (size_t) leaf->value[0]; 1600 1506 1601 1507 /* 1602 1508 * Investigate the border case in which the left neighbour does 1603 1509 * not exist but the interval fits from the left. 1604 * 1605 */ 1606 1510 */ 1511 1607 1512 if (overlaps(page, count * PAGE_SIZE, right_pg, 1608 1513 right_cnt * PAGE_SIZE)) { … … 1614 1519 * right interval down and increasing its size 1615 1520 * accordingly. 1616 *1617 1521 */ 1618 1522 leaf->key[0] = page; … … 1623 1527 * The interval doesn't adjoin with the right interval. 1624 1528 * It must be added individually. 
1625 *1626 1529 */ 1627 btree_insert(&a rea->used_space, page, (void *) count,1530 btree_insert(&a->used_space, page, (void *) count, 1628 1531 leaf); 1629 1532 return 1; 1630 1533 } 1631 1534 } 1632 1633 node = btree_leaf_node_right_neighbour(&a rea->used_space, leaf);1535 1536 node = btree_leaf_node_right_neighbour(&a->used_space, leaf); 1634 1537 if (node) { 1635 1538 uintptr_t left_pg = leaf->key[leaf->keys - 1]; … … 1642 1545 * somewhere between the leftmost interval of 1643 1546 * the right neigbour and the last interval of the leaf. 1644 * 1645 */ 1646 1547 */ 1548 1647 1549 if (page < left_pg) { 1648 1550 /* Do nothing. */ … … 1654 1556 right_cnt * PAGE_SIZE)) { 1655 1557 /* The interval intersects with the right interval. */ 1656 return 0; 1558 return 0; 1657 1559 } else if ((page == left_pg + left_cnt * PAGE_SIZE) && 1658 1560 (page + count * PAGE_SIZE == right_pg)) { … … 1660 1562 * The interval can be added by merging the two already 1661 1563 * present intervals. 1662 * 1663 */ 1564 * */ 1664 1565 leaf->value[leaf->keys - 1] += count + right_cnt; 1665 btree_remove(&a rea->used_space, right_pg, node);1666 return 1; 1566 btree_remove(&a->used_space, right_pg, node); 1567 return 1; 1667 1568 } else if (page == left_pg + left_cnt * PAGE_SIZE) { 1668 1569 /* 1669 1570 * The interval can be added by simply growing the left 1670 1571 * interval. 1671 * 1672 */ 1572 * */ 1673 1573 leaf->value[leaf->keys - 1] += count; 1674 1574 return 1; … … 1678 1578 * the right interval down and increasing its size 1679 1579 * accordingly. 1680 *1681 1580 */ 1682 1581 node->value[0] += count; … … 1687 1586 * The interval is between both neigbouring intervals, 1688 1587 * but cannot be merged with any of them. 1689 *1690 1588 */ 1691 btree_insert(&a rea->used_space, page, (void *) count,1589 btree_insert(&a->used_space, page, (void *) count, 1692 1590 leaf); 1693 1591 return 1; … … 1696 1594 uintptr_t left_pg = leaf->key[leaf->keys - 1]; 1697 1595 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1]; 1698 1596 1699 1597 /* 1700 1598 * Investigate the border case in which the right neighbour 1701 1599 * does not exist but the interval fits from the right. 1702 * 1703 */ 1704 1600 */ 1601 1705 1602 if (overlaps(page, count * PAGE_SIZE, left_pg, 1706 1603 left_cnt * PAGE_SIZE)) { … … 1711 1608 * The interval can be added by growing the left 1712 1609 * interval. 1713 *1714 1610 */ 1715 1611 leaf->value[leaf->keys - 1] += count; … … 1719 1615 * The interval doesn't adjoin with the left interval. 1720 1616 * It must be added individually. 1721 *1722 1617 */ 1723 btree_insert(&a rea->used_space, page, (void *) count,1618 btree_insert(&a->used_space, page, (void *) count, 1724 1619 leaf); 1725 1620 return 1; … … 1731 1626 * only between two other intervals of the leaf. The two border cases 1732 1627 * were already resolved. 1733 * 1734 */ 1735 btree_key_t i; 1628 */ 1736 1629 for (i = 1; i < leaf->keys; i++) { 1737 1630 if (page < leaf->key[i]) { … … 1740 1633 size_t left_cnt = (size_t) leaf->value[i - 1]; 1741 1634 size_t right_cnt = (size_t) leaf->value[i]; 1742 1635 1743 1636 /* 1744 1637 * The interval fits between left_pg and right_pg. 1745 *1746 1638 */ 1747 1639 1748 1640 if (overlaps(page, count * PAGE_SIZE, left_pg, 1749 1641 left_cnt * PAGE_SIZE)) { … … 1751 1643 * The interval intersects with the left 1752 1644 * interval. 1753 *1754 1645 */ 1755 1646 return 0; … … 1759 1650 * The interval intersects with the right 1760 1651 * interval. 
1761 *1762 1652 */ 1763 return 0; 1653 return 0; 1764 1654 } else if ((page == left_pg + left_cnt * PAGE_SIZE) && 1765 1655 (page + count * PAGE_SIZE == right_pg)) { … … 1767 1657 * The interval can be added by merging the two 1768 1658 * already present intervals. 1769 *1770 1659 */ 1771 1660 leaf->value[i - 1] += count + right_cnt; 1772 btree_remove(&a rea->used_space, right_pg, leaf);1773 return 1; 1661 btree_remove(&a->used_space, right_pg, leaf); 1662 return 1; 1774 1663 } else if (page == left_pg + left_cnt * PAGE_SIZE) { 1775 1664 /* 1776 1665 * The interval can be added by simply growing 1777 1666 * the left interval. 1778 *1779 1667 */ 1780 1668 leaf->value[i - 1] += count; … … 1782 1670 } else if (page + count * PAGE_SIZE == right_pg) { 1783 1671 /* 1784 1672 * The interval can be addded by simply moving 1785 1673 * base of the right interval down and 1786 1674 * increasing its size accordingly. 1787 * 1788 */ 1675 */ 1789 1676 leaf->value[i] += count; 1790 1677 leaf->key[i] = page; … … 1795 1682 * intervals, but cannot be merged with any of 1796 1683 * them. 1797 *1798 1684 */ 1799 btree_insert(&a rea->used_space, page,1685 btree_insert(&a->used_space, page, 1800 1686 (void *) count, leaf); 1801 1687 return 1; … … 1803 1689 } 1804 1690 } 1805 1691 1806 1692 panic("Inconsistency detected while adding %" PRIs " pages of used " 1807 1693 "space at %p.", count, page); … … 1812 1698 * The address space area must be already locked. 1813 1699 * 1814 * @param area Address space area. 1815 * @param page First page to be marked. 1816 * @param count Number of page to be marked. 1817 * 1818 * @return Zero on failure and non-zero on success. 1819 * 1820 */ 1821 int used_space_remove(as_area_t *area, uintptr_t page, size_t count) 1822 { 1823 ASSERT(mutex_locked(&area->lock)); 1700 * @param a Address space area. 1701 * @param page First page to be marked. 1702 * @param count Number of page to be marked. 1703 * 1704 * @return Zero on failure and non-zero on success. 1705 */ 1706 int used_space_remove(as_area_t *a, uintptr_t page, size_t count) 1707 { 1708 btree_node_t *leaf, *node; 1709 size_t pages; 1710 unsigned int i; 1711 1824 1712 ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE)); 1825 1713 ASSERT(count); 1826 1827 btree_node_t *leaf; 1828 size_t pages = (size_t) btree_search(&area->used_space, page, &leaf); 1714 1715 pages = (size_t) btree_search(&a->used_space, page, &leaf); 1829 1716 if (pages) { 1830 1717 /* 1831 1718 * We are lucky, page is the beginning of some interval. 1832 *1833 1719 */ 1834 1720 if (count > pages) { 1835 1721 return 0; 1836 1722 } else if (count == pages) { 1837 btree_remove(&a rea->used_space, page, leaf);1723 btree_remove(&a->used_space, page, leaf); 1838 1724 return 1; 1839 1725 } else { … … 1841 1727 * Find the respective interval. 1842 1728 * Decrease its size and relocate its start address. 
1843 *1844 1729 */ 1845 btree_key_t i;1846 1730 for (i = 0; i < leaf->keys; i++) { 1847 1731 if (leaf->key[i] == page) { … … 1854 1738 } 1855 1739 } 1856 1857 btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, leaf);1858 if ( (node) && (page < leaf->key[0])) {1740 1741 node = btree_leaf_node_left_neighbour(&a->used_space, leaf); 1742 if (node && page < leaf->key[0]) { 1859 1743 uintptr_t left_pg = node->key[node->keys - 1]; 1860 1744 size_t left_cnt = (size_t) node->value[node->keys - 1]; 1861 1745 1862 1746 if (overlaps(left_pg, left_cnt * PAGE_SIZE, page, 1863 1747 count * PAGE_SIZE)) { … … 1869 1753 * removed by updating the size of the bigger 1870 1754 * interval. 1871 *1872 1755 */ 1873 1756 node->value[node->keys - 1] -= count; … … 1875 1758 } else if (page + count * PAGE_SIZE < 1876 1759 left_pg + left_cnt*PAGE_SIZE) { 1760 size_t new_cnt; 1761 1877 1762 /* 1878 1763 * The interval is contained in the rightmost … … 1881 1766 * the original interval and also inserting a 1882 1767 * new interval. 1883 *1884 1768 */ 1885 size_tnew_cnt = ((left_pg + left_cnt * PAGE_SIZE) -1769 new_cnt = ((left_pg + left_cnt * PAGE_SIZE) - 1886 1770 (page + count*PAGE_SIZE)) >> PAGE_WIDTH; 1887 1771 node->value[node->keys - 1] -= count + new_cnt; 1888 btree_insert(&a rea->used_space, page +1772 btree_insert(&a->used_space, page + 1889 1773 count * PAGE_SIZE, (void *) new_cnt, leaf); 1890 1774 return 1; … … 1892 1776 } 1893 1777 return 0; 1894 } else if (page < leaf->key[0]) 1778 } else if (page < leaf->key[0]) { 1895 1779 return 0; 1780 } 1896 1781 1897 1782 if (page > leaf->key[leaf->keys - 1]) { 1898 1783 uintptr_t left_pg = leaf->key[leaf->keys - 1]; 1899 1784 size_t left_cnt = (size_t) leaf->value[leaf->keys - 1]; 1900 1785 1901 1786 if (overlaps(left_pg, left_cnt * PAGE_SIZE, page, 1902 1787 count * PAGE_SIZE)) { 1903 if (page + count * PAGE_SIZE == 1788 if (page + count * PAGE_SIZE == 1904 1789 left_pg + left_cnt * PAGE_SIZE) { 1905 1790 /* … … 1907 1792 * interval of the leaf and can be removed by 1908 1793 * updating the size of the bigger interval. 1909 *1910 1794 */ 1911 1795 leaf->value[leaf->keys - 1] -= count; … … 1913 1797 } else if (page + count * PAGE_SIZE < left_pg + 1914 1798 left_cnt * PAGE_SIZE) { 1799 size_t new_cnt; 1800 1915 1801 /* 1916 1802 * The interval is contained in the rightmost … … 1919 1805 * original interval and also inserting a new 1920 1806 * interval. 1921 *1922 1807 */ 1923 size_tnew_cnt = ((left_pg + left_cnt * PAGE_SIZE) -1808 new_cnt = ((left_pg + left_cnt * PAGE_SIZE) - 1924 1809 (page + count * PAGE_SIZE)) >> PAGE_WIDTH; 1925 1810 leaf->value[leaf->keys - 1] -= count + new_cnt; 1926 btree_insert(&a rea->used_space, page +1811 btree_insert(&a->used_space, page + 1927 1812 count * PAGE_SIZE, (void *) new_cnt, leaf); 1928 1813 return 1; … … 1930 1815 } 1931 1816 return 0; 1932 } 1817 } 1933 1818 1934 1819 /* … … 1936 1821 * Now the interval can be only between intervals of the leaf. 1937 1822 */ 1938 btree_key_t i;1939 1823 for (i = 1; i < leaf->keys - 1; i++) { 1940 1824 if (page < leaf->key[i]) { 1941 1825 uintptr_t left_pg = leaf->key[i - 1]; 1942 1826 size_t left_cnt = (size_t) leaf->value[i - 1]; 1943 1827 1944 1828 /* 1945 1829 * Now the interval is between intervals corresponding … … 1955 1839 * be removed by updating the size of 1956 1840 * the bigger interval. 
1957 *1958 1841 */ 1959 1842 leaf->value[i - 1] -= count; … … 1961 1844 } else if (page + count * PAGE_SIZE < 1962 1845 left_pg + left_cnt * PAGE_SIZE) { 1846 size_t new_cnt; 1847 1963 1848 /* 1964 1849 * The interval is contained in the … … 1968 1853 * also inserting a new interval. 1969 1854 */ 1970 size_tnew_cnt = ((left_pg +1855 new_cnt = ((left_pg + 1971 1856 left_cnt * PAGE_SIZE) - 1972 1857 (page + count * PAGE_SIZE)) >> 1973 1858 PAGE_WIDTH; 1974 1859 leaf->value[i - 1] -= count + new_cnt; 1975 btree_insert(&a rea->used_space, page +1860 btree_insert(&a->used_space, page + 1976 1861 count * PAGE_SIZE, (void *) new_cnt, 1977 1862 leaf); … … 1982 1867 } 1983 1868 } 1984 1869 1985 1870 error: 1986 1871 panic("Inconsistency detected while removing %" PRIs " pages of used " … … 1992 1877 * If the reference count drops to 0, the sh_info is deallocated. 1993 1878 * 1994 * @param sh_info Pointer to address space area share info. 1995 * 1879 * @param sh_info Pointer to address space area share info. 1996 1880 */ 1997 1881 void sh_info_remove_reference(share_info_t *sh_info) 1998 1882 { 1999 1883 bool dealloc = false; 2000 1884 2001 1885 mutex_lock(&sh_info->lock); 2002 1886 ASSERT(sh_info->refcount); 2003 2004 1887 if (--sh_info->refcount == 0) { 2005 1888 dealloc = true; … … 2012 1895 for (cur = sh_info->pagemap.leaf_head.next; 2013 1896 cur != &sh_info->pagemap.leaf_head; cur = cur->next) { 2014 btree_node_t *node 2015 = list_get_instance(cur, btree_node_t, leaf_link); 2016 btree_key_t i; 1897 btree_node_t *node; 1898 unsigned int i; 2017 1899 2018 for (i = 0; i < node->keys; i++) 1900 node = list_get_instance(cur, btree_node_t, leaf_link); 1901 for (i = 0; i < node->keys; i++) 2019 1902 frame_free((uintptr_t) node->value[i]); 2020 1903 } … … 2034 1917 2035 1918 /** Wrapper for as_area_create(). */ 2036 unative_t sys_as_area_create(uintptr_t address, size_t size, unsignedint flags)1919 unative_t sys_as_area_create(uintptr_t address, size_t size, int flags) 2037 1920 { 2038 1921 if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address, … … 2044 1927 2045 1928 /** Wrapper for as_area_resize(). */ 2046 unative_t sys_as_area_resize(uintptr_t address, size_t size, unsignedint flags)1929 unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags) 2047 1930 { 2048 1931 return (unative_t) as_area_resize(AS, address, size, 0); … … 2050 1933 2051 1934 /** Wrapper for as_area_change_flags(). */ 2052 unative_t sys_as_area_change_flags(uintptr_t address, unsignedint flags)1935 unative_t sys_as_area_change_flags(uintptr_t address, int flags) 2053 1936 { 2054 1937 return (unative_t) as_area_change_flags(AS, flags, address); … … 2063 1946 /** Get list of adress space areas. 2064 1947 * 2065 * @param as Address space. 2066 * @param obuf Place to save pointer to returned buffer. 2067 * @param osize Place to save size of returned buffer. 2068 * 1948 * @param as Address space. 1949 * @param obuf Place to save pointer to returned buffer. 1950 * @param osize Place to save size of returned buffer. 2069 1951 */ 2070 1952 void as_get_area_info(as_t *as, as_area_info_t **obuf, size_t *osize) 2071 1953 { 2072 ipl_t ipl = interrupts_disable(); 1954 ipl_t ipl; 1955 size_t area_cnt, area_idx, i; 1956 link_t *cur; 1957 1958 as_area_info_t *info; 1959 size_t isize; 1960 1961 ipl = interrupts_disable(); 2073 1962 mutex_lock(&as->lock); 2074 1963 2075 1964 /* First pass, count number of areas. 
*/ 2076 2077 size_t area_cnt = 0; 2078 link_t *cur; 2079 1965 1966 area_cnt = 0; 1967 2080 1968 for (cur = as->as_area_btree.leaf_head.next; 2081 1969 cur != &as->as_area_btree.leaf_head; cur = cur->next) { 2082 btree_node_t *node = 2083 list_get_instance(cur, btree_node_t, leaf_link); 1970 btree_node_t *node; 1971 1972 node = list_get_instance(cur, btree_node_t, leaf_link); 2084 1973 area_cnt += node->keys; 2085 1974 } 2086 2087 size_tisize = area_cnt * sizeof(as_area_info_t);2088 as_area_info_t *info = malloc(isize, 0);2089 1975 1976 isize = area_cnt * sizeof(as_area_info_t); 1977 info = malloc(isize, 0); 1978 2090 1979 /* Second pass, record data. */ 2091 2092 size_tarea_idx = 0;2093 1980 1981 area_idx = 0; 1982 2094 1983 for (cur = as->as_area_btree.leaf_head.next; 2095 1984 cur != &as->as_area_btree.leaf_head; cur = cur->next) { 2096 btree_node_t *node =2097 list_get_instance(cur, btree_node_t, leaf_link); 2098 btree_key_t i;2099 1985 btree_node_t *node; 1986 1987 node = list_get_instance(cur, btree_node_t, leaf_link); 1988 2100 1989 for (i = 0; i < node->keys; i++) { 2101 1990 as_area_t *area = node->value[i]; 2102 1991 2103 1992 ASSERT(area_idx < area_cnt); 2104 1993 mutex_lock(&area->lock); 2105 1994 2106 1995 info[area_idx].start_addr = area->base; 2107 1996 info[area_idx].size = FRAMES2SIZE(area->pages); 2108 1997 info[area_idx].flags = area->flags; 2109 1998 ++area_idx; 2110 1999 2111 2000 mutex_unlock(&area->lock); 2112 2001 } 2113 2002 } 2114 2003 2115 2004 mutex_unlock(&as->lock); 2116 2005 interrupts_restore(ipl); 2117 2006 2118 2007 *obuf = info; 2119 2008 *osize = isize; 2120 2009 } 2121 2010 2011 2122 2012 /** Print out information about address space. 2123 2013 * 2124 * @param as Address space. 2125 * 2014 * @param as Address space. 2126 2015 */ 2127 2016 void as_print(as_t *as) 2128 2017 { 2129 ipl_t ipl = interrupts_disable(); 2018 ipl_t ipl; 2019 2020 ipl = interrupts_disable(); 2130 2021 mutex_lock(&as->lock); 2131 2022 … … 2134 2025 for (cur = as->as_area_btree.leaf_head.next; 2135 2026 cur != &as->as_area_btree.leaf_head; cur = cur->next) { 2136 btree_node_t *node 2137 = list_get_instance(cur, btree_node_t, leaf_link); 2138 btree_key_t i; 2027 btree_node_t *node; 2139 2028 2029 node = list_get_instance(cur, btree_node_t, leaf_link); 2030 2031 unsigned int i; 2140 2032 for (i = 0; i < node->keys; i++) { 2141 2033 as_area_t *area = node->value[i]; 2142 2034 2143 2035 mutex_lock(&area->lock); 2144 2036 printf("as_area: %p, base=%p, pages=%" PRIs
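
The bulk of the hunks above touch the used_space_insert()/used_space_remove() bookkeeping: each address space area records its resident pages as (page, count) intervals keyed by base address in a B+tree, and an insert either grows the neighbouring interval on the left, pulls the base of the right one down, bridges both, or creates a standalone interval. The user-space sketch below models only that coalescing logic, with a sorted array standing in for the B+tree; apart from PAGE_SIZE, every identifier (interval_t, used_insert, ...) is hypothetical, and the overlap checks that make the kernel panic on inconsistency are omitted.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE      4096
#define MAX_INTERVALS  64

/* One used-space run: count pages starting at page. */
typedef struct {
	uintptr_t page;
	size_t count;
} interval_t;

static interval_t map[MAX_INTERVALS];
static size_t nintervals;

/*
 * Mark count pages starting at page as used.  Mirrors the four cases of
 * used_space_insert(): merge with the left neighbour, merge with the
 * right neighbour, bridge both, or insert a standalone interval.
 * Returns non-zero on success.
 */
static int used_insert(uintptr_t page, size_t count)
{
	size_t i = 0;

	/* Find the first interval starting above page. */
	while ((i < nintervals) && (map[i].page < page))
		i++;

	interval_t *left = (i > 0) ? &map[i - 1] : NULL;
	interval_t *right = (i < nintervals) ? &map[i] : NULL;

	int touches_left = left &&
	    (left->page + left->count * PAGE_SIZE == page);
	int touches_right = right &&
	    (page + count * PAGE_SIZE == right->page);

	if (touches_left && touches_right) {
		/* The new run bridges the gap: grow the left interval
		 * and drop the right one. */
		left->count += count + right->count;
		memmove(right, right + 1,
		    (nintervals - i - 1) * sizeof(interval_t));
		nintervals--;
	} else if (touches_left) {
		/* Simply grow the left interval. */
		left->count += count;
	} else if (touches_right) {
		/* Move the base of the right interval down. */
		right->page = page;
		right->count += count;
	} else {
		/* Standalone interval between its neighbours. */
		if (nintervals == MAX_INTERVALS)
			return 0;
		memmove(&map[i + 1], &map[i],
		    (nintervals - i) * sizeof(interval_t));
		map[i].page = page;
		map[i].count = count;
		nintervals++;
	}

	return 1;
}

int main(void)
{
	used_insert(0x10000, 2);
	used_insert(0x12000, 2);	/* coalesces with the run above */
	used_insert(0x20000, 1);	/* stays separate */

	for (size_t i = 0; i < nintervals; i++)
		printf("%#lx: %zu pages\n",
		    (unsigned long) map[i].page, map[i].count);

	return 0;
}
```

Removal is the mirror image, as used_space_remove() shows: a run taken off the end of an interval just shrinks it, while a run carved out of the middle shrinks the original interval and inserts a new one for the remainder.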
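
sh_info_remove_reference() in the same hunk follows a common release pattern: decrement the reference count while holding the lock, remember whether this was the last reference, and perform the actual teardown only after unlocking, when no one else can reach the object. A minimal user-space analogue, assuming POSIX mutexes and using made-up names (shared_t, shared_get, shared_put), might look like this:

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* A shared object with an embedded reference count. */
typedef struct {
	pthread_mutex_t lock;
	unsigned int refcount;
	char *payload;
} shared_t;

static shared_t *shared_get(shared_t *sh)
{
	pthread_mutex_lock(&sh->lock);
	sh->refcount++;
	pthread_mutex_unlock(&sh->lock);
	return sh;
}

/*
 * Drop one reference.  The decision to deallocate is made under the
 * lock, but the teardown happens outside the critical section: once
 * the count hits zero nobody else holds the object, and freeing after
 * unlocking keeps the lock hold time short.
 */
static void shared_put(shared_t *sh)
{
	bool dealloc = false;

	pthread_mutex_lock(&sh->lock);
	if (--sh->refcount == 0)
		dealloc = true;
	pthread_mutex_unlock(&sh->lock);

	if (dealloc) {
		free(sh->payload);
		pthread_mutex_destroy(&sh->lock);
		free(sh);
	}
}

int main(void)
{
	shared_t *sh = malloc(sizeof(shared_t));
	pthread_mutex_init(&sh->lock, NULL);
	sh->refcount = 1;
	sh->payload = strdup("pagemap");

	shared_get(sh);   /* a second holder appears */
	shared_put(sh);   /* ...and goes away again */
	shared_put(sh);   /* last reference: payload and sh are freed */

	puts("done");
	return 0;
}
```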
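
as_get_area_info() uses a count-then-fill scheme: a first walk over the B+tree leaves only counts the areas, a buffer of exactly that size is allocated, and a second walk records each area's base, size and flags, with both passes running under the address space mutex so the count cannot change in between. The sketch below shows the same pattern on a plain linked list; item_t, info_t and snapshot() are hypothetical names, not kernel API.

```c
#include <stdio.h>
#include <stdlib.h>

/* A minimal stand-in for the leaf list walked by as_get_area_info(). */
typedef struct item {
	struct item *next;
	int value;
} item_t;

typedef struct {
	int value;
} info_t;

/*
 * Count-then-fill: the first pass only counts entries, the buffer is
 * sized exactly, and the second pass records the data.
 */
static info_t *snapshot(const item_t *head, size_t *osize)
{
	size_t cnt = 0;
	const item_t *it;

	for (it = head; it != NULL; it = it->next)
		cnt++;

	info_t *buf = malloc(cnt * sizeof(info_t));
	if (buf == NULL)
		return NULL;

	size_t idx = 0;
	for (it = head; it != NULL; it = it->next)
		buf[idx++].value = it->value;

	*osize = cnt * sizeof(info_t);
	return buf;
}

int main(void)
{
	item_t c = { NULL, 3 }, b = { &c, 2 }, a = { &b, 1 };
	size_t size = 0;

	info_t *info = snapshot(&a, &size);
	if (info == NULL)
		return 1;

	for (size_t i = 0; i < size / sizeof(info_t); i++)
		printf("item %zu: %d\n", i, info[i].value);

	free(info);
	return 0;
}
```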