Changeset 59fb782 in mainline
Timestamp: 2013-03-24T19:38:18Z
Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
Children: d965dc3
Parents: 119b46e
Location: kernel
Files: 9 edited
kernel/arch/amd64/src/mm/page.c
--- r119b46e
+++ r59fb782
@@ -78 +78 @@
 void page_fault(unsigned int n, istate_t *istate)
 {
-    uintptr_t page = read_cr2();
+    uintptr_t badvaddr = read_cr2();
 
     if (istate->error_word & PFERR_CODE_RSVD)
@@ -92 +92 @@
         access = PF_ACCESS_READ;
 
-    as_page_fault(page, access, istate);
+    (void) as_page_fault(badvaddr, access, istate);
 }
kernel/arch/ia32/src/mm/page.c
--- r119b46e
+++ r59fb782
@@ -84 +84 @@
 void page_fault(unsigned int n __attribute__((unused)), istate_t *istate)
 {
-    uintptr_t page;
+    uintptr_t badvaddr;
     pf_access_t access;
 
-    page = read_cr2();
+    badvaddr = read_cr2();
 
     if (istate->error_word & PFERR_CODE_RSVD)
@@ -97 +97 @@
         access = PF_ACCESS_READ;
 
-    if (as_page_fault(page, access, istate) == AS_PF_FAULT) {
-        fault_if_from_uspace(istate, "Page fault: %#x.", page);
-        panic_memtrap(istate, access, page, NULL);
-    }
+    (void) as_page_fault(badvaddr, access, istate);
 }
kernel/arch/mips32/src/mm/tlb.c
--- r119b46e
+++ r59fb782
@@ -94 +94 @@
     entry_lo_t lo;
     uintptr_t badvaddr;
-    uintptr_t page;
     pte_t *pte;
 
     badvaddr = cp0_badvaddr_read();
-    page = ALIGN_DOWN(badvaddr, PAGE_SIZE);
 
-    pte = page_mapping_find(AS, page, true);
+    pte = page_mapping_find(AS, badvaddr, true);
     if (pte && pte->p) {
         /*
@@ -125 +123 @@
     }
 
-    (void) as_page_fault(page, PF_ACCESS_READ, istate);
+    (void) as_page_fault(badvaddr, PF_ACCESS_READ, istate);
 }
@@ -137 +135 @@
     tlb_index_t index;
     uintptr_t badvaddr;
-    uintptr_t page;
     pte_t *pte;
 
@@ -161 +158 @@
 
     badvaddr = cp0_badvaddr_read();
-    page = ALIGN_DOWN(badvaddr, PAGE_SIZE);
 
-    pte = page_mapping_find(AS, page, true);
+    pte = page_mapping_find(AS, badvaddr, true);
     if (pte && pte->p) {
         /*
@@ -189 +185 @@
     }
 
-    (void) as_page_fault(page, PF_ACCESS_READ, istate);
+    (void) as_page_fault(badvaddr, PF_ACCESS_READ, istate);
 }
@@ -201 +197 @@
     tlb_index_t index;
     uintptr_t badvaddr;
-    uintptr_t page;
     pte_t *pte;
 
     badvaddr = cp0_badvaddr_read();
-    page = ALIGN_DOWN(badvaddr, PAGE_SIZE);
 
     /*
@@ -227 +221 @@
     }
 
-    pte = page_mapping_find(AS, page, true);
+    pte = page_mapping_find(AS, badvaddr, true);
     if (pte && pte->p && pte->w) {
         /*
@@ -254 +248 @@
     }
 
-    (void) as_page_fault(page, PF_ACCESS_WRITE, istate);
+    (void) as_page_fault(badvaddr, PF_ACCESS_WRITE, istate);
 }
kernel/arch/sparc64/src/mm/sun4u/tlb.c
--- r119b46e
+++ r59fb782
@@ -196 +196 @@
 void fast_instruction_access_mmu_miss(sysarg_t unused, istate_t *istate)
 {
-    uintptr_t page_16k = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
     size_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
     pte_t *t;
 
-    t = page_mapping_find(AS, page_16k, true);
+    t = page_mapping_find(AS, istate->tpc, true);
     if (t && PTE_EXECUTABLE(t)) {
         /*
@@ -216 +215 @@
          * handler.
          */
-        as_page_fault(page_16k, PF_ACCESS_EXEC, istate);
+        as_page_fault(istate->tpc, PF_ACCESS_EXEC, istate);
     }
 }
kernel/generic/src/mm/as.c
--- r119b46e
+++ r59fb782
@@ -544 +544 @@
     mem_backend_data_t *backend_data, uintptr_t *base, uintptr_t bound)
 {
-    if ((*base != (uintptr_t) -1) && ((*base % PAGE_SIZE) != 0))
+    if ((*base != (uintptr_t) -1) && !IS_ALIGNED(*base, PAGE_SIZE))
         return NULL;
 
@@ -688 +688 @@
 int as_area_resize(as_t *as, uintptr_t address, size_t size, unsigned int flags)
 {
+    if (!IS_ALIGNED(address, PAGE_SIZE))
+        return EINVAL;
+
     mutex_lock(&as->lock);
 
@@ -1350 +1353 @@
  * Interrupts are assumed disabled.
  *
- * @param page    Faulting page.
- * @param access  Access mode that caused the page fault (i.e.
- *                read/write/exec).
- * @param istate  Pointer to the interrupted state.
+ * @param address Faulting address.
+ * @param access  Access mode that caused the page fault (i.e.
+ *                read/write/exec).
+ * @param istate  Pointer to the interrupted state.
  *
  * @return AS_PF_FAULT on page fault.
@@ -1361 +1364 @@
  *
  */
-int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate)
-{
+int as_page_fault(uintptr_t address, pf_access_t access, istate_t *istate)
+{
+    uintptr_t page = ALIGN_DOWN(address, PAGE_SIZE);
     int rc = AS_PF_FAULT;
 
@@ -1452 +1456 @@
         task_kill_self(true);
     } else {
-        fault_if_from_uspace(istate, "Page fault: %p.", (void *) page);
-        panic_memtrap(istate, access, page, NULL);
+        fault_if_from_uspace(istate, "Page fault: %p.", (void *) address);
+        panic_memtrap(istate, access, address, NULL);
     }
 
@@ -1679 +1683 @@
 {
     ASSERT(mutex_locked(&area->lock));
-    ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
+    ASSERT(IS_ALIGNED(page, PAGE_SIZE));
     ASSERT(count);
 
@@ -1963 +1967 @@
 {
     ASSERT(mutex_locked(&area->lock));
-    ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
+    ASSERT(IS_ALIGNED(page, PAGE_SIZE));
     ASSERT(count);
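The as.c change above is the core of the changeset: as_page_fault() now accepts the raw faulting address and derives the page base itself with ALIGN_DOWN(), and as_area_resize() rejects unaligned addresses with EINVAL, which is why the architecture handlers earlier in this changeset can forward badvaddr (or istate->tpc) unmodified. The standalone sketch below only illustrates the ALIGN_DOWN/IS_ALIGNED arithmetic these hunks rely on; the macro definitions, the PAGE_SIZE value, and the sample address are local stand-ins for illustration, not the kernel's own definitions.

/* Illustration only -- not kernel code. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096                            /* stand-in page size */
#define ALIGN_DOWN(a, s) ((a) & ~((uintptr_t) (s) - 1))
#define IS_ALIGNED(a, s) (ALIGN_DOWN((a), (s)) == (a))

int main(void)
{
    /* Hypothetical unaligned faulting address, as a handler would now pass it. */
    uintptr_t address = 0x7f123;

    /* What as_page_fault() derives internally after this changeset. */
    uintptr_t page = ALIGN_DOWN(address, PAGE_SIZE);

    printf("address=0x%" PRIxPTR " page=0x%" PRIxPTR " page aligned: %d\n",
        address, page, IS_ALIGNED(page, PAGE_SIZE));
    return 0;
}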
kernel/generic/src/mm/backend_anon.c
--- r119b46e
+++ r59fb782
@@ -173 +173 @@
  *
  * @param area   Pointer to the address space area.
- * @param addr   Faulting virtual address.
+ * @param upage  Faulting virtual page.
  * @param access Access mode that caused the fault (i.e. read/write/exec).
  *
@@ -179 +179 @@
  *               serviced).
  */
-int anon_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
-{
-    uintptr_t upage = ALIGN_DOWN(addr, PAGE_SIZE);
+int anon_page_fault(as_area_t *area, uintptr_t upage, pf_access_t access)
+{
     uintptr_t kpage;
     uintptr_t frame;
@@ -187 +186 @@
     ASSERT(page_table_locked(AS));
     ASSERT(mutex_locked(&area->lock));
+    ASSERT(IS_ALIGNED(upage, PAGE_SIZE));
 
     if (!as_area_check_access(area, access))
kernel/generic/src/mm/backend_elf.c
--- r119b46e
+++ r59fb782
@@ -235 +235 @@
  *
  * @param area   Pointer to the address space area.
- * @param addr   Faulting virtual address.
+ * @param upage  Faulting virtual page.
  * @param access Access mode that caused the fault (i.e.
  *               read/write/exec).
@@ -242 +242 @@
  *         on success (i.e. serviced).
  */
-int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
+int elf_page_fault(as_area_t *area, uintptr_t upage, pf_access_t access)
 {
     elf_header_t *elf = area->backend_data.elf;
@@ -250 +250 @@
     uintptr_t frame;
     uintptr_t kpage;
-    uintptr_t upage;
     uintptr_t start_anon;
     size_t i;
@@ -257 +256 @@
     ASSERT(page_table_locked(AS));
     ASSERT(mutex_locked(&area->lock));
+    ASSERT(IS_ALIGNED(upage, PAGE_SIZE));
 
     if (!as_area_check_access(area, access))
         return AS_PF_FAULT;
 
-    if (addr < ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE))
+    if (upage < ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE))
         return AS_PF_FAULT;
 
-    if (addr >= entry->p_vaddr + entry->p_memsz)
+    if (upage >= entry->p_vaddr + entry->p_memsz)
         return AS_PF_FAULT;
 
-    i = (addr - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH;
+    i = (upage - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >> PAGE_WIDTH;
     base = (uintptr_t)
         (((void *) elf) + ALIGN_DOWN(entry->p_offset, PAGE_SIZE));
-
-    /* Virtual address of faulting page */
-    upage = ALIGN_DOWN(addr, PAGE_SIZE);
 
     /* Virtual address of the end of initialized part of segment */
kernel/generic/src/mm/backend_phys.c
--- r119b46e
+++ r59fb782
@@ -111 +111 @@
  *
  * @param area   Pointer to the address space area.
- * @param addr   Faulting virtual address.
+ * @param upage  Faulting virtual page.
  * @param access Access mode that caused the fault (i.e. read/write/exec).
  *
@@ -117 +117 @@
  *         serviced).
  */
-int phys_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
+int phys_page_fault(as_area_t *area, uintptr_t upage, pf_access_t access)
 {
     uintptr_t base = area->backend_data.base;
@@ -123 +123 @@
     ASSERT(page_table_locked(AS));
     ASSERT(mutex_locked(&area->lock));
+    ASSERT(IS_ALIGNED(upage, PAGE_SIZE));
 
     if (!as_area_check_access(area, access))
         return AS_PF_FAULT;
 
-    ASSERT(addr - area->base < area->backend_data.frames * FRAME_SIZE);
-    page_mapping_insert(AS, addr, base + (addr - area->base),
+    ASSERT(upage - area->base < area->backend_data.frames * FRAME_SIZE);
+    page_mapping_insert(AS, upage, base + (upage - area->base),
         as_area_get_flags(area));
 
-    if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
+    if (!used_space_insert(area, upage, 1))
         panic("Cannot insert used space.");
kernel/generic/src/mm/page.c
--- r119b46e
+++ r59fb782
@@ -104 +104 @@
     ASSERT(page_mapping_operations->mapping_insert);
 
-    page_mapping_operations->mapping_insert(as, page, frame, flags);
+    page_mapping_operations->mapping_insert(as, ALIGN_DOWN(page, PAGE_SIZE),
+        ALIGN_DOWN(frame, FRAME_SIZE), flags);
 
     /* Repel prefetched accesses to the old mapping. */
@@ -127 +128 @@
     ASSERT(page_mapping_operations->mapping_remove);
 
-    page_mapping_operations->mapping_remove(as, page);
+    page_mapping_operations->mapping_remove(as,
+        ALIGN_DOWN(page, PAGE_SIZE));
 
     /* Repel prefetched accesses to the old mapping. */
@@ -150 +152 @@
     ASSERT(page_mapping_operations->mapping_find);
 
-    return page_mapping_operations->mapping_find(as, page, nolock);
+    return page_mapping_operations->mapping_find(as,
+        ALIGN_DOWN(page, PAGE_SIZE), nolock);
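The wrappers in kernel/generic/src/mm/page.c now align both the page and frame arguments before delegating to the architecture-specific operations, which is what lets callers such as phys_page_fault() and the TLB handlers pass addresses straight through. Below is a minimal standalone sketch of that wrapper pattern; the stub operations function, the omitted as_t parameter, and the printed output are illustrative assumptions, not the kernel's actual interface.

/* Illustration only -- not kernel code. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE  4096
#define FRAME_SIZE 4096
#define ALIGN_DOWN(a, s) ((a) & ~((uintptr_t) (s) - 1))

/* Stub standing in for the architecture-specific mapping_insert operation. */
static void arch_mapping_insert(uintptr_t page, uintptr_t frame, unsigned int flags)
{
    printf("map page=0x%" PRIxPTR " -> frame=0x%" PRIxPTR " (flags=%u)\n",
        page, frame, flags);
}

/* Generic wrapper in the spirit of page_mapping_insert(): align once here,
 * so the arch-specific operation always sees page/frame-aligned values. */
static void mapping_insert_wrapper(uintptr_t page, uintptr_t frame, unsigned int flags)
{
    arch_mapping_insert(ALIGN_DOWN(page, PAGE_SIZE),
        ALIGN_DOWN(frame, FRAME_SIZE), flags);
}

int main(void)
{
    /* Callers may now pass unaligned addresses. */
    mapping_insert_wrapper(0x7f123, 0x200045, 0);
    return 0;
}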