Changes in kernel/generic/src/mm/as.c [1624aae:8f80c77] in mainline
- File: kernel/generic/src/mm/as.c (1 edited)
kernel/generic/src/mm/as.c
The diff from r1624aae to r8f80c77 is largely a cleanup pass over the whole file: flag and attribute parameters change from int to unsigned int, single-letter locals get descriptive names, locals are declared at first use, doxygen blocks are realigned, and the page table lock is taken around whole TLB shootdown sequences instead of per page. Function by function:

- File header: the empty @brief is filled in with "Address space related functions."; the documentation blocks for as_operations, as_slab, asidlock and inactive_as_with_asid_head only gain trailing comment lines.
- Forward declaration of area_flags_to_page_flags(): parameter and return type change from int to unsigned int.
- as_constructor(): the flags parameter becomes unsigned int and the local rc is declared where it receives the result of as_constructor_arch().
- as_destructor(): the return type changes from int to size_t; the body stays the same.
- as_init(): whitespace only.
- as_create(): the flags parameter becomes unsigned int, the documentation is re-wrapped, and the as_t pointer is declared directly at the slab_alloc() call.
- as_destroy(): the up-front declarations of ipl and cond are removed (both are now declared at first use below); the comment on why preemption must be disabled is only re-wrapped.
- as_destroy(), continued: ipl is declared inline as ipl_t ipl = interrupts_read(), the remark "Interrupts disabled, enable preemption" becomes a comment on its own line, the ASID condition gains explicit parentheses, and the loop that destroys the remaining address space areas is rewritten from for (cond = true; cond;) with an up-front btree_node_t declaration into bool cond = true; while (cond) with the node declared inside the loop. The logic is unchanged.
- as_hold() / as_release(): the parameter name in the documentation is corrected from a to as.
- as_area_create(): the documented parameter list is realigned, flags and attrs become unsigned int, the function header is re-wrapped, and the up-front declarations of ipl and the area pointer are dropped in favour of declarations at first use.
- as_area_create(), continued: the base-alignment, zero-size and write+execute checks are unchanged; ipl is declared at the interrupts_disable() call; the local a is renamed to area throughout the initialization of lock, as, flags, attributes, pages, base, sh_info, backend and backend_data; the function returns area.
- as_area_resize(): flags becomes unsigned int and the documented parameters are realigned; the up-front declarations of area, ipl and pages are removed and each is declared where it is first assigned (interrupts_disable(), find_area_and_lock(), SIZE2FRAMES()); the error paths for physically backed areas, shared areas and a zero resulting size are unchanged apart from comment spacing.
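For orientation, a hedged caller-side sketch of the new as_area_create() signature follows. It is not part of the changeset: the wrapper function, the base address and the 0 passed for the attributes ("no special attributes") are assumptions; the flag constants and the anonymous backend are the ones referenced elsewhere in this file.

/* Hypothetical caller: map one anonymous, cacheable, read/write page into
 * the address space 'as'. */
static as_area_t *create_anon_rw_page(as_t *as)
{
	as_area_t *area = as_area_create(as,
	    AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
	    PAGE_SIZE,          /* size in bytes */
	    0x40000000,         /* page-aligned base (example value) */
	    0,                  /* attributes ("none" assumed to be 0) */
	    &anon_backend,      /* anonymous memory backend */
	    NULL);              /* no backend-specific data */

	/* NULL means: unaligned base, zero size, write+execute requested,
	 * or the range conflicts with an existing area. */
	return area;
}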
- as_area_resize(), shrinking path: this is one of the substantive changes. page_table_lock(as, false) is now taken once before tlb_shootdown_start(), and the page_table_lock()/page_table_unlock() pair inside the innermost per-page loop is removed. The walk over the used_space B+tree is rewritten as bool cond = true; while (cond), the locals b and c are renamed to ptr and size, the index becomes size_t, and the combined ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte)) is split into three separate assertions. The frame_free() and page_mapping_remove() calls and the "Cannot remove used space." panics behave as before.
- as_area_resize(), finishing the shrink: after tlb_invalidate_pages(), as_invalidate_translation_cache() and tlb_shootdown_finalize(), the newly added page_table_unlock(as, false) releases the lock taken above. The growing path (conflict check against other areas) and the final update of area->pages are untouched.
- as_area_destroy(): the same treatment. ipl, area and base are declared at first use, page_table_lock(as, false) brackets the whole shootdown sequence, the walk over used_space renames b and j to ptr and size and uses btree_key_t for the key index, the combined pointer assertion is split into three, and the per-page lock/unlock calls disappear from the inner loop.
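The recurring structural change in as_area_resize(), as_area_destroy() and, further below, as_area_change_flags() is that the page table lock now brackets the whole TLB shootdown sequence instead of being taken and dropped around each individual page. Condensed from the hunks above (the unmapping loop is elided, and the destroy-style arguments are used), the sequence now reads roughly:

page_table_lock(as, false);

/* Start TLB shootdown sequence. */
tlb_shootdown_start(TLB_INVL_PAGES, as->asid, area->base, area->pages);

/*
 * ... walk the used_space B+tree, let the backend free each frame and
 * remove each mapping with page_mapping_remove() ...
 */

/* Finish TLB shootdown sequence. */
tlb_invalidate_pages(as->asid, area->base, area->pages);

/* Invalidate software translation caches (e.g. TSB on sparc64). */
as_invalidate_translation_cache(as, area->base, area->pages);
tlb_shootdown_finalize();

page_table_unlock(as, false);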
- as_area_destroy(), continued: page_table_unlock(as, false) is added after tlb_shootdown_finalize(); destroying the used_space B+tree, setting AS_AREA_ATTR_PARTIAL, dropping the sh_info reference and removing the area from the address space B+tree are unchanged.
- as_area_share(): the documentation is completed, with every parameter described and the possible results (0, ENOENT, EPERM, ENOMEM, ENOTSUP) listed as separate @return lines; dst_flags_mask becomes unsigned int; the block of up-front locals is removed, ipl and src_area are declared at first use, and the "no backend or backend cannot share" condition gains explicit parentheses.
- as_area_share(), continued: src_size, src_flags (now unsigned int), src_backend and src_backend_data are declared where they are read from the locked source area; the size/flags compatibility check gains parentheses; sh_info is declared at its first assignment; the destination area is created by the same, merely re-wrapped, as_area_create() call and stored in as_area_t *dst_area; clearing AS_AREA_ATTR_PARTIAL and setting dst_area->sh_info are unchanged apart from trailing-whitespace cleanup.
- as_area_check_access(): the documentation drops the prose requirement that the area be locked (it is replaced by run-time assertions in the next hunk) and the parameter and return lines are realigned.
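The rewritten documentation above enumerates the error codes of as_area_share(). A hedged caller-side sketch, not taken from the changeset (the wrapper function is hypothetical; note that the cacheable flag is inherited from the source automatically):

/* Hypothetical caller: give dst_as a read-only view of the area that
 * starts at src_base in src_as. acc_size must match the source area. */
static int share_read_only(as_t *src_as, uintptr_t src_base, size_t acc_size,
    as_t *dst_as, uintptr_t dst_base)
{
	int rc = as_area_share(src_as, src_base, acc_size, dst_as, dst_base,
	    AS_AREA_READ);

	/*
	 * 0       - success, dst_as now holds an area backed by the same
	 *           sh_info pagemap
	 * ENOENT  - no area at src_base in src_as
	 * EPERM   - acc_size does not match, or the requested flags are
	 *           not a subset of the source flags
	 * ENOMEM  - the destination area could not be created
	 * ENOTSUP - the source area's backend cannot share
	 */
	return rc;
}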
- as_area_check_access(), body: ASSERT(interrupts_disabled()) and ASSERT(mutex_locked(&area->lock)) are added before the flag test.
- as_area_change_flags(): the flags parameter becomes unsigned int and the block of up-front locals disappears; page_flags, ipl, area, used_pages, old_frame and frame_idx are declared where first used and the B+tree key index becomes btree_key_t. As in the resize and destroy paths, page_table_lock(as, false) is taken once before tlb_shootdown_start(); the loop that removes the old mappings renames b and j to ptr and size, splits the pointer assertion into three, records the old frame numbers, and no longer locks and unlocks the page table for every page.
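Several functions in this changeset stop stating their locking requirements only in prose and assert them at run time instead. A minimal sketch of the pattern, using the same primitives the hunks introduce; the helper function itself is hypothetical:

/* Hypothetical helper following the same convention as
 * as_area_check_access() above: callers must have interrupts disabled
 * and the area mutex held, and the function asserts exactly that. */
static bool area_is_writable(as_area_t *area)
{
	ASSERT(interrupts_disabled());
	ASSERT(mutex_locked(&area->lock));

	return (area->flags & AS_AREA_WRITE) != 0;
}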
- as_area_change_flags(), continued: after the shootdown is finalized, the new page_table_unlock(as, false) is added, area->flags is set, and the loop that maps the pages back in with the new flags is restructured the same way (ptr and size, btree_key_t index) while keeping its per-page page_table_lock()/page_table_unlock() around page_mapping_insert(). Freeing old_frame, the unlocks and return 0 are unchanged.
- as_page_fault(): the documentation lists AS_PF_FAULT, AS_PF_OK and AS_PF_DEFER as separate @return lines.
- as_page_fault(), body: the up-front declarations of pte and area are removed; area is declared at the find_area_and_lock(AS, page) call, the backend checks gain parentheses, and pte is declared just before the check that guards against a racing fault on the same address. The actual handling (AS_AREA_ATTR_PARTIAL, the backend's page_fault(), the page_fault: label for copy_from_uspace()/copy_to_uspace()) is unchanged.
- as_switch(): the documented parameters are realigned; in the branch for the old address space a blank line separates ASSERT(old_as->cpu_refcount) from the following condition, which also gets conventional if spacing; moving the old address space to the inactive-with-ASID list is unchanged.
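The documentation above now lists the three possible results separately. A hedged sketch of how an architecture-level fault handler might react to them; only as_page_fault() and the AS_PF_* results come from this file, while the handler itself, its variables and the PF_ACCESS_READ constant are assumptions:

/* Hypothetical fragment of a low-level data fault handler. */
static void handle_data_fault(uintptr_t fault_addr, istate_t *istate)
{
	int rc = as_page_fault(ALIGN_DOWN(fault_addr, PAGE_SIZE),
	    PF_ACCESS_READ, istate);

	if (rc == AS_PF_OK) {
		/* A mapping was installed; the faulting access can be
		 * restarted. */
	} else if (rc == AS_PF_DEFER) {
		/* The fault hit copy_from_uspace()/copy_to_uspace(); the
		 * copy routine's error path takes over. */
	} else {
		/* AS_PF_FAULT: unhandled fault, terminate the task, or
		 * panic if it happened in kernel context. */
	}
}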
- as_switch(), continued: deinstalling the old address space, obtaining an ASID for the new one, SET_PTL0_ADDRESS() and as_install_arch() are untouched apart from comment spacing.
- area_flags_to_page_flags(): parameter and return type become unsigned int and the local flags is declared together with its initializer PAGE_USER | PAGE_PRESENT; the flag translation itself is unchanged.
- as_area_get_flags(): the prose requirement that the area be locked and interrupts disabled is replaced by ASSERT(interrupts_disabled()) and ASSERT(mutex_locked(&area->lock)); the parameter is renamed from a to area and the return type becomes unsigned int.
- page_table_create(): the flags parameter becomes unsigned int. page_table_destroy() and page_table_lock() get documentation realignment only.
- page_table_unlock(): documentation realignment only.
- New function page_table_locked(as_t *as): it mirrors page_table_lock() and page_table_unlock(), asserting that as_operations and as_operations->page_table_locked are set and dispatching to the architecture-specific test; it returns true if the page tables of the given address space are locked.
- find_area_and_lock(): the prose locking requirement is replaced by ASSERT(interrupts_disabled()) and ASSERT(mutex_locked(&as->lock)); locals are declared at first use, a is renamed to area, the key index becomes btree_key_t, and the leaf scan and left-neighbour check return directly instead of through redundant braces.
- check_area_conflicts(): the documented parameters are realigned (the body follows in the next hunks).
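page_table_locked() is the only new function in this changeset; everything it does is delegate to as_operations. A sketch only, under the assumption that a given back-end protects its page tables with the address space mutex; the function name below is illustrative and the changeset itself adds only the generic dispatcher. Code that requires the lock can then simply ASSERT(page_table_locked(as)).

/* Hypothetical back-end implementation of the new operation, for page
 * tables guarded by the address space mutex. */
bool ptl_page_table_locked(as_t *as)
{
	return mutex_locked(&as->lock);
}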
- check_area_conflicts(), body: ASSERT(interrupts_disabled()) and ASSERT(mutex_locked(&as->lock)) replace the prose requirement; locals are declared at first use, a is renamed to area and the key index becomes btree_key_t. The NULL-page check, the exact-hit case, the left and right neighbour border cases and the scan of the leaf itself keep their logic; only spacing and layout change.
- check_area_conflicts(), end: the final test against the kernel address space (KERNEL_ADDRESS_SPACE_SHADOWED) is unchanged apart from trailing whitespace.
- as_area_get_size(): the documentation typo "insede" is fixed to "inside"; ipl and src_area are declared at first use and the area lookup is now bracketed by page_table_lock(AS, true) and page_table_unlock(AS, true).
- used_space_insert(): the parameter a is renamed to area, ASSERT(mutex_locked(&area->lock)) joins the existing alignment and count assertions, and leaf and pages are declared at first use. The early cases (page already starts a recorded interval, empty leaf) are unchanged.
- used_space_insert(), continued: the cases that consider the left neighbour of the leaf (intersection, merging two adjacent intervals, growing the left interval, moving the base of the right interval down, inserting a standalone interval) and the border case without a left neighbour keep their logic; the only changes are the rename to area in the btree_insert() and btree_remove() calls and comment spacing. The same applies to the right-neighbour cases that follow.
- used_space_insert(), end: the border case without a right neighbour and the general case, in which the new interval falls between two intervals of the same leaf, are likewise unchanged; btree_key_t i is declared just before the final loop and the closing panic("Inconsistency detected while adding ...") stays as it was.
- used_space_remove(): the documented parameters are realigned (the body follows).
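used_space_insert() distinguishes the same handful of cases throughout: the new run of pages may intersect an existing interval (failure), extend one on either side, bridge two intervals, or stand alone. A small worked sketch of the arithmetic, using the overlaps() helper the code relies on; the numbers and the wrapper function are illustrative only:

/* Worked example: an interval of 4 pages is already recorded at left_pg
 * and a new run of 2 pages starts exactly where it ends. */
static void used_space_merge_example(void)
{
	uintptr_t left_pg = 0x10000000;
	size_t left_cnt = 4;

	uintptr_t page = left_pg + left_cnt * PAGE_SIZE;
	size_t count = 2;

	/* The new run does not intersect the recorded interval ... */
	ASSERT(!overlaps(page, count * PAGE_SIZE, left_pg,
	    left_cnt * PAGE_SIZE));

	/*
	 * ... and it begins exactly at left_pg + left_cnt * PAGE_SIZE, so
	 * used_space_insert() takes the "grow the left interval" branch:
	 * the recorded count becomes 6 and no new B+tree key is created.
	 */
}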
- used_space_remove(), body: the parameter a is renamed to area, ASSERT(mutex_locked(&area->lock)) is added, and leaf, pages and the key index (now btree_key_t) are declared at first use. In the case where the removed run lies inside the rightmost interval of the left neighbour, new_cnt is declared where it is computed, and the else if (page < leaf->key[0]) branch loses its redundant braces. The interval-splitting logic itself is untouched.
1909 * 1794 1910 */ 1795 1911 leaf->value[leaf->keys - 1] -= count; … … 1797 1913 } else if (page + count * PAGE_SIZE < left_pg + 1798 1914 left_cnt * PAGE_SIZE) { 1799 size_t new_cnt;1800 1801 1915 /* 1802 1916 * The interval is contained in the rightmost … … 1805 1919 * original interval and also inserting a new 1806 1920 * interval. 1921 * 1807 1922 */ 1808 new_cnt = ((left_pg + left_cnt * PAGE_SIZE) -1923 size_t new_cnt = ((left_pg + left_cnt * PAGE_SIZE) - 1809 1924 (page + count * PAGE_SIZE)) >> PAGE_WIDTH; 1810 1925 leaf->value[leaf->keys - 1] -= count + new_cnt; 1811 btree_insert(&a ->used_space, page +1926 btree_insert(&area->used_space, page + 1812 1927 count * PAGE_SIZE, (void *) new_cnt, leaf); 1813 1928 return 1; … … 1815 1930 } 1816 1931 return 0; 1817 } 1932 } 1818 1933 1819 1934 /* … … 1821 1936 * Now the interval can be only between intervals of the leaf. 1822 1937 */ 1938 btree_key_t i; 1823 1939 for (i = 1; i < leaf->keys - 1; i++) { 1824 1940 if (page < leaf->key[i]) { 1825 1941 uintptr_t left_pg = leaf->key[i - 1]; 1826 1942 size_t left_cnt = (size_t) leaf->value[i - 1]; 1827 1943 1828 1944 /* 1829 1945 * Now the interval is between intervals corresponding … … 1839 1955 * be removed by updating the size of 1840 1956 * the bigger interval. 1957 * 1841 1958 */ 1842 1959 leaf->value[i - 1] -= count; … … 1844 1961 } else if (page + count * PAGE_SIZE < 1845 1962 left_pg + left_cnt * PAGE_SIZE) { 1846 size_t new_cnt;1847 1848 1963 /* 1849 1964 * The interval is contained in the … … 1853 1968 * also inserting a new interval. 1854 1969 */ 1855 new_cnt = ((left_pg +1970 size_t new_cnt = ((left_pg + 1856 1971 left_cnt * PAGE_SIZE) - 1857 1972 (page + count * PAGE_SIZE)) >> 1858 1973 PAGE_WIDTH; 1859 1974 leaf->value[i - 1] -= count + new_cnt; 1860 btree_insert(&a ->used_space, page +1975 btree_insert(&area->used_space, page + 1861 1976 count * PAGE_SIZE, (void *) new_cnt, 1862 1977 leaf); … … 1867 1982 } 1868 1983 } 1869 1984 1870 1985 error: 1871 1986 panic("Inconsistency detected while removing %" PRIs " pages of used " … … 1877 1992 * If the reference count drops to 0, the sh_info is deallocated. 1878 1993 * 1879 * @param sh_info Pointer to address space area share info. 1994 * @param sh_info Pointer to address space area share info. 1995 * 1880 1996 */ 1881 1997 void sh_info_remove_reference(share_info_t *sh_info) 1882 1998 { 1883 1999 bool dealloc = false; 1884 2000 1885 2001 mutex_lock(&sh_info->lock); 1886 2002 ASSERT(sh_info->refcount); 2003 1887 2004 if (--sh_info->refcount == 0) { 1888 2005 dealloc = true; … … 1895 2012 for (cur = sh_info->pagemap.leaf_head.next; 1896 2013 cur != &sh_info->pagemap.leaf_head; cur = cur->next) { 1897 btree_node_t *node; 1898 unsigned int i; 2014 btree_node_t *node 2015 = list_get_instance(cur, btree_node_t, leaf_link); 2016 btree_key_t i; 1899 2017 1900 node = list_get_instance(cur, btree_node_t, leaf_link); 1901 for (i = 0; i < node->keys; i++) 2018 for (i = 0; i < node->keys; i++) 1902 2019 frame_free((uintptr_t) node->value[i]); 1903 2020 } … … 1917 2034 1918 2035 /** Wrapper for as_area_create(). */ 1919 unative_t sys_as_area_create(uintptr_t address, size_t size, int flags)2036 unative_t sys_as_area_create(uintptr_t address, size_t size, unsigned int flags) 1920 2037 { 1921 2038 if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address, … … 1927 2044 1928 2045 /** Wrapper for as_area_resize(). 
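sh_info_remove_reference() follows a common teardown pattern: the last-reference decision is made under the mutex, but the actual deallocation runs only after the lock has been dropped. A simplified user-space analogue is sketched below; it uses a pthread mutex instead of the kernel mutex, and a stand-in share_info_t without the pagemap B+tree, so it shows the shape of the pattern rather than the kernel implementation.

#include <stdlib.h>
#include <stdbool.h>
#include <pthread.h>
#include <assert.h>

typedef struct {
	pthread_mutex_t lock;
	unsigned int refcount;
	/* ... the pagemap and other shared state would live here ... */
} share_info_t;

void drop_reference(share_info_t *sh_info)
{
	bool dealloc = false;

	pthread_mutex_lock(&sh_info->lock);
	assert(sh_info->refcount);
	if (--sh_info->refcount == 0)
		dealloc = true;    /* last reference: we own the teardown */
	pthread_mutex_unlock(&sh_info->lock);

	if (dealloc) {
		/* In the kernel this is where the pagemap B+tree is walked,
		 * every frame is freed, and the tree itself is destroyed. */
		pthread_mutex_destroy(&sh_info->lock);
		free(sh_info);
	}
}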
*/ 1929 unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags)2046 unative_t sys_as_area_resize(uintptr_t address, size_t size, unsigned int flags) 1930 2047 { 1931 2048 return (unative_t) as_area_resize(AS, address, size, 0); … … 1933 2050 1934 2051 /** Wrapper for as_area_change_flags(). */ 1935 unative_t sys_as_area_change_flags(uintptr_t address, int flags)2052 unative_t sys_as_area_change_flags(uintptr_t address, unsigned int flags) 1936 2053 { 1937 2054 return (unative_t) as_area_change_flags(AS, flags, address); … … 1946 2063 /** Get list of adress space areas. 1947 2064 * 1948 * @param as Address space. 1949 * @param obuf Place to save pointer to returned buffer. 1950 * @param osize Place to save size of returned buffer. 2065 * @param as Address space. 2066 * @param obuf Place to save pointer to returned buffer. 2067 * @param osize Place to save size of returned buffer. 2068 * 1951 2069 */ 1952 2070 void as_get_area_info(as_t *as, as_area_info_t **obuf, size_t *osize) 1953 2071 { 1954 ipl_t ipl; 1955 size_t area_cnt, area_idx, i; 2072 ipl_t ipl = interrupts_disable(); 2073 mutex_lock(&as->lock); 2074 2075 /* First pass, count number of areas. */ 2076 2077 size_t area_cnt = 0; 1956 2078 link_t *cur; 1957 1958 as_area_info_t *info; 1959 size_t isize; 1960 1961 ipl = interrupts_disable(); 1962 mutex_lock(&as->lock); 1963 1964 /* First pass, count number of areas. */ 1965 1966 area_cnt = 0; 1967 2079 1968 2080 for (cur = as->as_area_btree.leaf_head.next; 1969 2081 cur != &as->as_area_btree.leaf_head; cur = cur->next) { 1970 btree_node_t *node; 1971 1972 node = list_get_instance(cur, btree_node_t, leaf_link); 2082 btree_node_t *node = 2083 list_get_instance(cur, btree_node_t, leaf_link); 1973 2084 area_cnt += node->keys; 1974 2085 } 1975 1976 1977 info = malloc(isize, 0);1978 2086 2087 size_t isize = area_cnt * sizeof(as_area_info_t); 2088 as_area_info_t *info = malloc(isize, 0); 2089 1979 2090 /* Second pass, record data. */ 1980 1981 area_idx = 0;1982 2091 2092 size_t area_idx = 0; 2093 1983 2094 for (cur = as->as_area_btree.leaf_head.next; 1984 2095 cur != &as->as_area_btree.leaf_head; cur = cur->next) { 1985 btree_node_t *node ;1986 1987 node = list_get_instance(cur, btree_node_t, leaf_link);1988 2096 btree_node_t *node = 2097 list_get_instance(cur, btree_node_t, leaf_link); 2098 btree_key_t i; 2099 1989 2100 for (i = 0; i < node->keys; i++) { 1990 2101 as_area_t *area = node->value[i]; 1991 2102 1992 2103 ASSERT(area_idx < area_cnt); 1993 2104 mutex_lock(&area->lock); 1994 2105 1995 2106 info[area_idx].start_addr = area->base; 1996 2107 info[area_idx].size = FRAMES2SIZE(area->pages); 1997 2108 info[area_idx].flags = area->flags; 1998 2109 ++area_idx; 1999 2110 2000 2111 mutex_unlock(&area->lock); 2001 2112 } 2002 2113 } 2003 2114 2004 2115 mutex_unlock(&as->lock); 2005 2116 interrupts_restore(ipl); 2006 2117 2007 2118 *obuf = info; 2008 2119 *osize = isize; 2009 2120 } 2010 2121 2011 2012 2122 /** Print out information about address space. 2013 2123 * 2014 * @param as Address space. 2124 * @param as Address space. 
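as_get_area_info() snapshots the address space areas in two passes over the B+tree leaves: the first pass only counts areas so a buffer of exactly the right size can be allocated, and the second pass copies the data out while each area is locked. The sketch below shows the same count-then-fill pattern on a plain linked list; the item and info types are illustrative stand-ins, not the kernel's structures.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>

typedef struct item {
	uintptr_t start_addr;
	size_t size;
	struct item *next;
} item_t;

typedef struct {
	uintptr_t start_addr;
	size_t size;
} info_t;

static void get_info(item_t *head, info_t **obuf, size_t *osize)
{
	/* First pass: count entries. */
	size_t cnt = 0;
	for (item_t *cur = head; cur != NULL; cur = cur->next)
		cnt++;

	info_t *info = malloc(cnt * sizeof(info_t));
	if (!info) {
		*obuf = NULL;
		*osize = 0;
		return;
	}

	/* Second pass: record data into the flat buffer. */
	size_t idx = 0;
	for (item_t *cur = head; cur != NULL; cur = cur->next) {
		info[idx].start_addr = cur->start_addr;
		info[idx].size = cur->size;
		idx++;
	}

	*obuf = info;
	*osize = cnt * sizeof(info_t);
}

int main(void)
{
	item_t b = { 0x20000, 0x4000, NULL };
	item_t a = { 0x10000, 0x1000, &b };

	info_t *buf;
	size_t size;
	get_info(&a, &buf, &size);

	printf("%zu areas, %zu bytes\n", size / sizeof(info_t), size);
	free(buf);
	return 0;
}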
2125 * 2015 2126 */ 2016 2127 void as_print(as_t *as) 2017 2128 { 2018 ipl_t ipl; 2019 2020 ipl = interrupts_disable(); 2129 ipl_t ipl = interrupts_disable(); 2021 2130 mutex_lock(&as->lock); 2022 2131 … … 2025 2134 for (cur = as->as_area_btree.leaf_head.next; 2026 2135 cur != &as->as_area_btree.leaf_head; cur = cur->next) { 2027 btree_node_t *node; 2028 2029 node = list_get_instance(cur, btree_node_t, leaf_link); 2030 2031 unsigned int i; 2136 btree_node_t *node 2137 = list_get_instance(cur, btree_node_t, leaf_link); 2138 btree_key_t i; 2139 2032 2140 for (i = 0; i < node->keys; i++) { 2033 2141 as_area_t *area = node->value[i]; 2034 2142 2035 2143 mutex_lock(&area->lock); 2036 2144 printf("as_area: %p, base=%p, pages=%" PRIs