Changeset df0103f7 in mainline
- Timestamp: 2006-04-26T11:43:47Z (19 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: f3ac636
- Parents: dbbeb26
- Location: generic
- Files: 1 added, 5 edited
Legend:
- Unmodified lines are shown without a prefix
- Added lines are prefixed with +
- Removed lines are prefixed with -
generic/include/mm/as.h
rdbbeb26 → rdf0103f7

 #define __AS_H__
 
+#include <mm/as_arg.h>
 #include <arch/mm/page.h>
 #include <arch/mm/as.h>
…
 extern as_area_t *as_area_create(as_t *as, int flags, size_t size, __address base);
 extern __address as_area_resize(as_t *as, __address address, size_t size, int flags);
+int as_area_send(task_id_t id, __address base, size_t size, int flags);
 extern void as_set_mapping(as_t *as, __address page, __address frame);
 extern int as_page_fault(__address page);
…
 #endif /* !def as_install_arch */
 
+/* Address space area related syscalls. */
+extern __native sys_as_area_create(__address address, size_t size, int flags);
+extern __native sys_as_area_resize(__address address, size_t size, int flags);
+extern __native sys_as_area_accept(as_area_acptsnd_arg_t *uspace_accept_arg);
+extern __native sys_as_area_send(as_area_acptsnd_arg_t *uspace_send_arg);
+
 #endif
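The changeset's one added file is not shown in this diff. Judging by the new #include <mm/as_arg.h> lines it is presumably generic/include/mm/as_arg.h, and based on the fields the new code in as.c reads from the structure (task_id, base, size, flags), its contents are probably close to the following sketch. Field types and ordering are guesses, not taken from the changeset.

/* Hypothetical sketch of the argument structure declared in the newly
 * added <mm/as_arg.h>. Field names come from their uses in as.c below;
 * types and ordering are assumptions. */
typedef struct {
    task_id_t task_id;  /* ID of the peer task (sender or accepter). */
    void *base;         /* Base address of the area on the relevant side. */
    size_t size;        /* Size of the area being shared. */
    int flags;          /* AS_AREA_* flags the area must match. */
} as_area_acptsnd_arg_t;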
generic/include/proc/task.h
rdbbeb26 → rdf0103f7

 #include <ipc/ipc.h>
 #include <security/cap.h>
+#include <mm/as_arg.h>
 #include <arch/proc/task.h>
 
…
     phone_t phones[IPC_MAX_PHONES];
     atomic_t active_calls;      /**< Active asynchronous messages */
+
+    /** Accept argument of SYS_AS_AREA_ACCEPT. */
+    as_area_acptsnd_arg_t accept_arg;
 
     task_arch_t arch;           /**< Architecture specific task data. */
generic/src/mm/as.c
rdbbeb26 → rdf0103f7

 #include <mm/asid.h>
 #include <arch/mm/asid.h>
-#include <arch/types.h>
-#include <typedefs.h>
 #include <synch/spinlock.h>
-#include <config.h>
 #include <adt/list.h>
 #include <adt/btree.h>
+#include <proc/task.h>
+#include <arch/asm.h>
 #include <panic.h>
-#include <arch/asm.h>
 #include <debug.h>
+#include <print.h>
 #include <memstr.h>
 #include <macros.h>
 #include <arch.h>
-#include <print.h>
+#include <errno.h>
+#include <config.h>
+#include <arch/types.h>
+#include <typedefs.h>
 
 as_operations_t *as_operations = NULL;
…
 as_t *AS_KERNEL = NULL;
 
+static int area_flags_to_page_flags(int aflags);
 static int get_area_flags(as_area_t *a);
 static as_area_t *find_area_and_lock(as_t *as, __address va);
…
 
     return a;
+}
+
+/** Find address space area and change it.
+ *
+ * @param as Address space.
+ * @param address Virtual address belonging to the area to be changed. Must be page-aligned.
+ * @param size New size of the virtual memory block starting at address.
+ * @param flags Flags influencing the remap operation. Currently unused.
+ *
+ * @return address on success, (__address) -1 otherwise.
+ */
+__address as_area_resize(as_t *as, __address address, size_t size, int flags)
+{
+    as_area_t *area = NULL;
+    ipl_t ipl;
+    size_t pages;
+
+    ipl = interrupts_disable();
+    spinlock_lock(&as->lock);
+
+    /*
+     * Locate the area.
+     */
+    area = find_area_and_lock(as, address);
+    if (!area) {
+        spinlock_unlock(&as->lock);
+        interrupts_restore(ipl);
+        return (__address) -1;
+    }
+
+    if (area->flags & AS_AREA_DEVICE) {
+        /*
+         * Remapping of address space areas associated
+         * with memory mapped devices is not supported.
+         */
+        spinlock_unlock(&area->lock);
+        spinlock_unlock(&as->lock);
+        interrupts_restore(ipl);
+        return (__address) -1;
+    }
+
+    pages = SIZE2FRAMES((address - area->base) + size);
+    if (!pages) {
+        /*
+         * Zero size address space areas are not allowed.
+         */
+        spinlock_unlock(&area->lock);
+        spinlock_unlock(&as->lock);
+        interrupts_restore(ipl);
+        return (__address) -1;
+    }
+
+    if (pages < area->pages) {
+        int i;
+
+        /*
+         * Shrinking the area.
+         * No need to check for overlaps.
+         */
+        for (i = pages; i < area->pages; i++) {
+            pte_t *pte;
+
+            /*
+             * Releasing physical memory.
+             * This depends on the fact that the memory was allocated using frame_alloc().
+             */
+            page_table_lock(as, false);
+            pte = page_mapping_find(as, area->base + i*PAGE_SIZE);
+            if (pte && PTE_VALID(pte)) {
+                __address frame;
+
+                ASSERT(PTE_PRESENT(pte));
+                frame = PTE_GET_FRAME(pte);
+                page_mapping_remove(as, area->base + i*PAGE_SIZE);
+                page_table_unlock(as, false);
+
+                frame_free(ADDR2PFN(frame));
+            } else {
+                page_table_unlock(as, false);
+            }
+        }
+        /*
+         * Invalidate TLB's.
+         */
+        tlb_shootdown_start(TLB_INVL_PAGES, AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
+        tlb_invalidate_pages(AS->asid, area->base + pages*PAGE_SIZE, area->pages - pages);
+        tlb_shootdown_finalize();
+    } else {
+        /*
+         * Growing the area.
+         * Check for overlaps with other address space areas.
+         */
+        if (!check_area_conflicts(as, address, pages * PAGE_SIZE, area)) {
+            spinlock_unlock(&area->lock);
+            spinlock_unlock(&as->lock);
+            interrupts_restore(ipl);
+            return (__address) -1;
+        }
+    }
+
+    area->pages = pages;
+
+    spinlock_unlock(&area->lock);
+    spinlock_unlock(&as->lock);
+    interrupts_restore(ipl);
+
+    return address;
+}
+
+/** Send address space area to another task.
+ *
+ * Address space area is sent to the specified task.
+ * If the destination task is willing to accept the
+ * area, a new area is created according to the
+ * source area. Moreover, any existing mapping
+ * is copied as well, thus providing a mechanism
+ * for sharing a group of pages. The source address
+ * space area and any associated mapping is preserved.
+ *
+ * @param id Task ID of the accepting task.
+ * @param base Base address of the source address space area.
+ * @param size Size of the source address space area.
+ * @param flags Flags of the source address space area.
+ *
+ * @return 0 on success or ENOENT if there is no such task or
+ *         if there is no such address space area,
+ *         EPERM if there was a problem in accepting the area or
+ *         ENOMEM if there was a problem in allocating destination
+ *         address space area.
+ */
+int as_area_send(task_id_t id, __address base, size_t size, int flags)
+{
+    ipl_t ipl;
+    task_t *t;
+    count_t i;
+    as_t *as;
+    __address dst_base;
+
+    ipl = interrupts_disable();
+    spinlock_lock(&tasks_lock);
+
+    t = task_find_by_id(id);
+    if (!t) {
+        spinlock_unlock(&tasks_lock);
+        interrupts_restore(ipl);
+        return ENOENT;
+    }
+
+    spinlock_lock(&t->lock);
+    spinlock_unlock(&tasks_lock);
+
+    as = t->as;
+    dst_base = (__address) t->accept_arg.base;
+
+    if (as == AS) {
+        /*
+         * The two tasks share the entire address space.
+         * Return error since there is no point in continuing.
+         */
+        spinlock_unlock(&t->lock);
+        interrupts_restore(ipl);
+        return EPERM;
+    }
+
+    if ((t->accept_arg.task_id != TASK->taskid) || (t->accept_arg.size != size) ||
+        (t->accept_arg.flags != flags)) {
+        /*
+         * Discrepancy in either task ID, size or flags.
+         */
+        spinlock_unlock(&t->lock);
+        interrupts_restore(ipl);
+        return EPERM;
+    }
+
+    /*
+     * Create copy of the address space area.
+     */
+    if (!as_area_create(as, flags, size, dst_base)) {
+        /*
+         * Destination address space area could not be created.
+         */
+        spinlock_unlock(&t->lock);
+        interrupts_restore(ipl);
+        return ENOMEM;
+    }
+
+    /*
+     * NOTE: we have just introduced a race condition.
+     * The destination task can try to access the newly
+     * created area before its mapping is copied from
+     * the source address space area. As a result, frames
+     * can get lost.
+     *
+     * Currently, this race is not solved, but one of the
+     * possible solutions would be to sleep in as_page_fault()
+     * when this situation is detected.
+     */
+
+    memsetb((__address) &t->accept_arg, sizeof(as_area_acptsnd_arg_t), 0);
+    spinlock_unlock(&t->lock);
+
+    /*
+     * Avoid deadlock by first locking the address space with lower address.
+     */
+    if (as < AS) {
+        spinlock_lock(&as->lock);
+        spinlock_lock(&AS->lock);
+    } else {
+        spinlock_lock(&AS->lock);
+        spinlock_lock(&as->lock);
+    }
+
+    for (i = 0; i < SIZE2FRAMES(size); i++) {
+        pte_t *pte;
+        __address frame;
+
+        page_table_lock(AS, false);
+        pte = page_mapping_find(AS, base + i*PAGE_SIZE);
+        if (pte && PTE_VALID(pte)) {
+            ASSERT(PTE_PRESENT(pte));
+            frame = PTE_GET_FRAME(pte);
+            if (!(flags & AS_AREA_DEVICE)) {
+                /* TODO: increment frame reference count */
+            }
+            page_table_unlock(AS, false);
+        } else {
+            page_table_unlock(AS, false);
+            continue;
+        }
+
+        page_table_lock(as, false);
+        page_mapping_insert(as, dst_base + i*PAGE_SIZE, frame, area_flags_to_page_flags(flags));
+        page_table_unlock(as, false);
+    }
+
+    spinlock_unlock(&AS->lock);
+    spinlock_unlock(&as->lock);
+    interrupts_restore(ipl);
+
+    return 0;
 }
…
 }
 
-/** Compute flags for virtual address translation subsystem.
- *
- * The address space area must be locked.
- * Interrupts must be disabled.
- *
- * @param a Address space area.
- *
- * @return Flags to be used in page_mapping_insert().
- */
-int get_area_flags(as_area_t *a)
+/** Convert address space area flags to page flags.
+ *
+ * @param aflags Flags of some address space area.
+ *
+ * @return Flags to be passed to page_mapping_insert().
+ */
+int area_flags_to_page_flags(int aflags)
 {
     int flags;
 
     flags = PAGE_USER | PAGE_PRESENT;
 
-    if (a->flags & AS_AREA_READ)
+    if (aflags & AS_AREA_READ)
         flags |= PAGE_READ;
 
-    if (a->flags & AS_AREA_WRITE)
+    if (aflags & AS_AREA_WRITE)
         flags |= PAGE_WRITE;
 
-    if (a->flags & AS_AREA_EXEC)
+    if (aflags & AS_AREA_EXEC)
         flags |= PAGE_EXEC;
 
-    if (!(a->flags & AS_AREA_DEVICE))
+    if (!(aflags & AS_AREA_DEVICE))
         flags |= PAGE_CACHEABLE;
 
     return flags;
+}
+
+/** Compute flags for virtual address translation subsystem.
+ *
+ * The address space area must be locked.
+ * Interrupts must be disabled.
+ *
+ * @param a Address space area.
+ *
+ * @return Flags to be used in page_mapping_insert().
+ */
+int get_area_flags(as_area_t *a)
+{
+    return area_flags_to_page_flags(a->flags);
 }
…
 }
 
-/** Find address space area and change it.
-    [... the original copy of as_area_resize(), textually identical to the
-     block added earlier in this diff, is removed from this location ...]
-}
 
 /** Find address space area and lock it.
…
     return true;
 }
+
+/*
+ * Address space related syscalls.
+ */
+
+/** Wrapper for as_area_create(). */
+__native sys_as_area_create(__address address, size_t size, int flags)
+{
+    if (as_area_create(AS, flags, size, address))
+        return (__native) address;
+    else
+        return (__native) -1;
+}
+
+/** Wrapper for as_area_resize(). */
+__native sys_as_area_resize(__address address, size_t size, int flags)
+{
+    return as_area_resize(AS, address, size, 0);
+}
+
+/** Prepare task for accepting address space area from another task.
+ *
+ * @param uspace_accept_arg Accept structure passed from userspace.
+ *
+ * @return EPERM if the task ID encapsulated in @uspace_accept_arg references
+ *         TASK. Otherwise zero is returned.
+ */
+__native sys_as_area_accept(as_area_acptsnd_arg_t *uspace_accept_arg)
+{
+    as_area_acptsnd_arg_t arg;
+
+    copy_from_uspace(&arg, uspace_accept_arg, sizeof(as_area_acptsnd_arg_t));
+
+    if (!arg.size)
+        return (__native) EPERM;
+
+    if (arg.task_id == TASK->taskid) {
+        /*
+         * Accepting from itself is not allowed.
+         */
+        return (__native) EPERM;
+    }
+
+    memcpy(&TASK->accept_arg, &arg, sizeof(as_area_acptsnd_arg_t));
+
+    return 0;
+}
+
+/** Wrapper for as_area_send(). */
+__native sys_as_area_send(as_area_acptsnd_arg_t *uspace_send_arg)
+{
+    as_area_acptsnd_arg_t arg;
+
+    copy_from_uspace(&arg, uspace_send_arg, sizeof(as_area_acptsnd_arg_t));
+
+    if (!arg.size)
+        return (__native) EPERM;
+
+    if (arg.task_id == TASK->taskid) {
+        /*
+         * Sending to itself is not allowed.
+         */
+        return (__native) EPERM;
+    }
+
+    return (__native) as_area_send(arg.task_id, (__address) arg.base, arg.size, arg.flags);
+}
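The new sys_as_area_accept()/as_area_send() pair implements a two-step handshake: the accepting task first records what it is willing to receive (the sender's task ID, size, flags, and the base address at which the area should appear in its own address space), and the sending task then asks the kernel to replicate its area and copy the existing mappings into the accepter's address space. A minimal userspace illustration of that handshake might look like the sketch below; the __SYSCALL1 wrapper, the SYS_AS_AREA_ACCEPT and SYS_AS_AREA_SEND numbers, and the helper functions are assumptions made for this sketch, not part of the changeset — only the structure fields and the ordering of the two calls follow the kernel code above.

/* Hypothetical userspace illustration of the accept/send handshake.
 * Syscall wrapper and numbers are assumed; fields follow as.c above. */

/* Accepting task: record what it is willing to receive and where. */
static int accept_area_from(task_id_t sender, void *where, size_t size)
{
    as_area_acptsnd_arg_t arg;

    arg.task_id = sender;                       /* task expected to send */
    arg.base = where;                           /* destination base in this task */
    arg.size = size;                            /* must match the sender's size */
    arg.flags = AS_AREA_READ | AS_AREA_WRITE;   /* must match the sender's flags */
    return (int) __SYSCALL1(SYS_AS_AREA_ACCEPT, (__native) &arg);
}

/* Sending task: describe its own area and the accepting task. The kernel
 * compares this against the accepter's recorded accept_arg and, on a match,
 * creates the area in the accepter and copies the existing page mappings. */
static int send_area_to(task_id_t accepter, void *src_base, size_t size)
{
    as_area_acptsnd_arg_t arg;

    arg.task_id = accepter;                     /* task that called accept */
    arg.base = src_base;                        /* source base in this task */
    arg.size = size;
    arg.flags = AS_AREA_READ | AS_AREA_WRITE;
    return (int) __SYSCALL1(SYS_AS_AREA_SEND, (__native) &arg);
}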
generic/src/proc/task.c
rdbbeb26 → rdf0103f7

 #include <proc/uarg.h>
 #include <mm/as.h>
+#include <mm/as_arg.h>
 #include <mm/slab.h>
 #include <synch/spinlock.h>
…
     ipc_phone_connect(&ta->phones[0], ipc_phone_0);
     atomic_set(&ta->active_calls, 0);
+
+    memsetb((__address) &ta->accept_arg, sizeof(as_area_acptsnd_arg_t), 0);
 
     ipl = interrupts_disable();
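Zero-filling accept_arg at task creation matters because a zeroed structure can never match a real send: sys_as_area_send() rejects a zero size, so as_area_send()'s comparison against a zeroed accept_arg always fails with EPERM, and as_area_send() clears the accepter's accept_arg again once a transfer succeeds. A hypothetical helper naming this invariant (it does not exist in the changeset) might read:

/* Hypothetical predicate: a task has an accept pending exactly when its
 * accept_arg has a nonzero size; both task_create() and a completed
 * as_area_send() leave the structure zero-filled. */
static inline bool task_accept_pending(task_t *t)
{
    return t->accept_arg.size != 0;
}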
generic/src/syscall/syscall.c
rdbbeb26 → rdf0103f7

 }
 
-static __native sys_as_area_create(void *address, size_t size, int flags)
-{
-    if (as_area_create(AS, flags, size, (__address) address))
-        return (__native) address;
-    else
-        return (__native) -1;
-}
-
-static __native sys_as_area_resize(void *address, size_t size, int flags)
-{
-    return as_area_resize(AS, (__address) address, size, 0);
-}
-
-static __native sys_as_area_share_approve()
-{
-    return 0;
-}
-
-static __native sys_as_area_share_perform()
-{
-    return 0;
-}
-
 static __native sys_int_control(int enable)
 {
…
     sys_tls_set,
     sys_int_control,
+
+    /* Thread and task related syscalls. */
     sys_thread_create,
     sys_thread_exit,
     sys_task_get_id,
+
+    /* Synchronization related syscalls. */
     sys_futex_sleep_timeout,
     sys_futex_wakeup,
+
+    /* Address space related syscalls. */
     sys_as_area_create,
     sys_as_area_resize,
-    sys_as_area_share_approve,
-    sys_as_area_share_perform,
+    sys_as_area_accept,
+    sys_as_area_send,
+
+    /* IPC related syscalls. */
     sys_ipc_call_sync_fast,
     sys_ipc_call_sync,
…
     sys_ipc_wait_for_call,
     sys_ipc_hangup,
+
+    /* DDI related syscalls. */
     sys_physmem_map,
     sys_iospace_enable
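The old sys_as_area_share_approve() and sys_as_area_share_perform() were stubs that returned 0; their slots in the handler table are taken over in place by sys_as_area_accept() and sys_as_area_send(), so the numbering of the surrounding syscalls does not shift. The number passed by userspace selects the handler purely by table index, roughly as in the sketch below; the dispatcher itself is not part of this diff, and its name, signature, and the table name used here are assumptions.

/* Assumed shape of the dispatch path: the syscall id indexes the handler
 * table shown above, which is why kernel and userspace must agree on the
 * order of entries. Names and signature are illustrative only. */
typedef __native (*syshandler_t)(__native, __native, __native);

__native syscall_dispatch(__native a1, __native a2, __native a3, __native id)
{
    if (id < sizeof(syscall_table) / sizeof(syscall_table[0]))
        return syscall_table[id](a1, a2, a3);
    return (__native) -1;    /* unknown syscall number */
}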