Changes in / [864a081:5c460cc] in mainline

Files:
  1 added
  2 deleted
  43 edited
Legend (changes shown from r864a081 to r5c460cc):
  ' ' Unmodified
  '+' Added
  '-' Removed
boot/arch/ppc32/src/asm.S
 	tlbsync
 	sync
-.endm
-
-.macro BAT_COMPUTE base size mask lower upper
-	# less than 128 KB -> no BAT
-
-	lis \upper, 0x0002
-	cmpw \size, \upper
-	blt no_bat
-
-	# mask = total >> 18
-
-	li \upper, 18
-	srw \mask, \size, \upper
-
-	# create Block Length mask by replicating
-	# the leading logical one 14 times
-
-	li \upper, 14
-	mtctr \mask
-	li \upper, 1
-
-	0:
-	# mask = (mask >> 1) | mask
-
-	srw \lower, \mask, \upper
-	or \mask, \mask, \lower
-
-	bdnz 0b
-
-	# mask = mask & 0x07ff
-	# (BAT can map up to 256 MB)
-
-	andi. \mask, \mask, 0x07ff
-
-	# mask = (mask << 2) | 0x0002
-	# (priviledged access only)
-
-	li \upper, 2
-	slw \mask, \mask, \upper
-	ori \mask, \mask, 0x0002
-
-	lis \upper, (0x8000 + \base)
-	or \upper, \upper, \mask
-
-	lis \lower, \base
-	ori \lower, \lower, 0x0002
 .endm
…
 	lwz r31, 4(r3)  # r31 = memory size
 
-	lis r30, 268435456@h
-	ori r30, r30, 268435456@l  # r30 = 256 MB
-
-	# BAT0
-
-	# r29 = min(r31, r30)
-
-	cmpw r31, r30
-	blt bat0_r31
-
-	mr r29, r30
-	b bat0_r30
-
-bat0_r31:
-
-	mr r29, r31
-
-bat0_r30:
-
-	BAT_COMPUTE 0x0000 r29 r28 r27 r26
-	mtspr ibat0u, r26
-	mtspr ibat0l, r27
-
-	mtspr dbat0u, r26
-	mtspr dbat0l, r27
-
-	# BAT1
-
-	sub r31, r31, r29  # r31 = r31 - r29
-
-	# r29 = min(r31, r30)
-
-	cmpw r31, r30
-	blt bat1_r31
-
-	mr r29, r30
-	b bat1_r30
-
-bat1_r31:
-
-	mr r29, r31
-
-bat1_r30:
-
-	BAT_COMPUTE 0x1000 r29 r28 r27 r26
-	mtspr ibat1u, r26
-	mtspr ibat1l, r27
-
-	mtspr dbat1u, r26
-	mtspr dbat1l, r27
-
-	# BAT2
-
-	sub r31, r31, r29  # r31 = r31 - r29
-
-	# r29 = min(r31, r30)
-
-	cmpw r31, r30
-	blt bat2_r31
-
-	mr r29, r30
-	b bat2_r30
-
-bat2_r31:
-
-	mr r29, r31
-
-bat2_r30:
-
-	BAT_COMPUTE 0x2000 r29 r28 r27 r26
-	mtspr ibat2u, r26
-	mtspr ibat2l, r27
-
-	mtspr dbat2u, r26
-	mtspr dbat2l, r27
-
-	# BAT3
-
-	sub r31, r31, r29  # r31 = r31 - r29
-
-	# r29 = min(r31, r30)
-
-	cmpw r31, r30
-	blt bat3_r31
-
-	mr r29, r30
-	b bat3_r30
-
-bat3_r31:
-
-	mr r29, r31
-
-bat3_r30:
-
-	BAT_COMPUTE 0x3000 r29 r28 r27 r26
-	mtspr ibat3u, r26
-	mtspr ibat3l, r27
-
-	mtspr dbat3u, r26
-	mtspr dbat3l, r27
+	lis r29, 0x0002
+	cmpw r31, r29
+	blt no_bat  # less than 128 KB -> no BAT
+
+	li r29, 18
+	srw r31, r31, r29  # r31 = total >> 18
+
+	# create Block Length mask by replicating
+	# the leading logical one 14 times
+
+	li r29, 14
+	mtctr r31
+	li r29, 1
+
+bat_mask:
+	srw r30, r31, r29  # r30 = mask >> 1
+	or r31, r31, r30   # mask = mask | r30
+
+	bdnz bat_mask
+
+	andi. r31, r31, 0x07ff  # mask = mask & 0x07ff (BAT can map up to 256 MB)
+
+	li r29, 2
+	slw r31, r31, r29     # mask = mask << 2
+	ori r31, r31, 0x0002  # mask = mask | 0x0002 (priviledged access only)
+
+	lis r29, 0x8000
+	or r29, r29, r31
+
+	lis r30, 0x0000
+	ori r30, r30, 0x0002
+
+	mtspr ibat0u, r29
+	mtspr ibat0l, r30
+
+	mtspr dbat0u, r29
+	mtspr dbat0l, r30
 
 no_bat:
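The BATU block-length arithmetic is the same in both revisions (the removed BAT_COMPUTE macro and the new inline code above). A standalone C sketch of just that computation, assuming a size between 128 KB and 256 MB as the code's own bounds checks require (an illustration, not part of the changeset):

    #include <stdint.h>
    #include <stdio.h>

    /* Derive the BATU lower bits from a region size in bytes:
     * size >> 18 counts 128 KB units; replicating the leading one
     * builds the block-length mask; 0x07ff caps it at 256 MB; the
     * final 0x0002 marks the mapping valid for privileged access,
     * per the comments in asm.S above. */
    static uint32_t bat_length_bits(uint32_t size)
    {
        uint32_t mask = size >> 18;

        for (int i = 0; i < 14; i++)
            mask |= mask >> 1;

        mask &= 0x07ff;
        return (mask << 2) | 0x0002;
    }

    int main(void)
    {
        printf("%#x\n", bat_length_bits(256u << 20));  /* prints 0x1ffe */
        return 0;
    }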
contrib/conf/arm32-qe.sh
 #!/bin/sh
 
-qemu-system-arm $@ -M integratorcp --kernel image.boot
+qemu-system-arm -M integratorcp --kernel image.boot
contrib/conf/ia32-qe.sh
 fi
 
-qemu $@ -m 32 -hda "$DISK_IMG" -cdrom image.iso -boot d
+qemu -m 32 -hda "$DISK_IMG" -cdrom image.iso -boot d
contrib/conf/ppc32-qe.sh
 #!/bin/sh
 
-qemu-system-ppc $@ -M mac99 -boot d -cdrom image.iso
+qemu-system-ppc -M mac99 -boot d -cdrom image.iso
kernel/arch/ia64/src/mm/tlb.c
 	page_table_lock(AS, true);
-	t = page_mapping_find(AS, va, true);
+	t = page_mapping_find(AS, va);
 	if (t) {
 		/*
…
 	page_table_lock(AS, true);
-	pte_t *entry = page_mapping_find(AS, va, true);
+	pte_t *entry = page_mapping_find(AS, va);
 	if (entry) {
 		/*
…
 	page_table_lock(AS, true);
-	t = page_mapping_find(AS, va, true);
+	t = page_mapping_find(AS, va);
 	ASSERT((t) && (t->p));
 	if ((t) && (t->p) && (t->w)) {
…
 	page_table_lock(AS, true);
-	t = page_mapping_find(AS, va, true);
+	t = page_mapping_find(AS, va);
 	ASSERT((t) && (t->p));
 	if ((t) && (t->p) && (t->x)) {
…
 	page_table_lock(AS, true);
-	t = page_mapping_find(AS, va, true);
+	t = page_mapping_find(AS, va);
 	ASSERT((t) && (t->p));
 	if ((t) && (t->p)) {
…
 	 */
 	page_table_lock(AS, true);
-	t = page_mapping_find(AS, va, true);
+	t = page_mapping_find(AS, va);
 	ASSERT((t) && (t->p));
 	ASSERT(!t->w);
…
 	page_table_lock(AS, true);
-	t = page_mapping_find(AS, va, true);
+	t = page_mapping_find(AS, va);
 	ASSERT(t);
 
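Throughout this changeset, page_mapping_find() loses its nolock parameter: callers are again expected to hold the page table lock around the lookup, as the hunks above and below show. A standalone sketch of that convention (an assumed model using a pthread mutex in place of the kernel's page table lock; not HelenOS code):

    #include <assert.h>
    #include <pthread.h>
    #include <stdint.h>

    static pthread_mutex_t page_table_lock = PTHREAD_MUTEX_INITIALIZER;
    static int locked;                       /* models page_table_locked() */

    typedef struct { int present; } pte_t;
    static pte_t fake_pte = { 1 };

    static pte_t *page_mapping_find(uintptr_t page)
    {
        assert(locked);   /* the caller must hold the page table lock */
        (void) page;
        return &fake_pte;
    }

    int main(void)
    {
        pthread_mutex_lock(&page_table_lock);
        locked = 1;
        pte_t *t = page_mapping_find(0x1000);
        assert(t && t->present);
        locked = 0;
        pthread_mutex_unlock(&page_table_lock);
        return 0;
    }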
kernel/arch/mips32/src/mm/tlb.c
 	mutex_unlock(&AS->lock);
 
+	page_table_lock(AS, true);
+
 	pte = find_mapping_and_check(badvaddr, PF_ACCESS_READ, istate, &pfrc);
 	if (!pte) {
…
 	 * or copy_to_uspace().
 	 */
+	page_table_unlock(AS, true);
 	return;
 default:
…
 	tlbwr();
 
+	page_table_unlock(AS, true);
 	return;
 
 fail:
+	page_table_unlock(AS, true);
 	tlb_refill_fail(istate);
 }
…
 	index.value = cp0_index_read();
 
+	page_table_lock(AS, true);
+
 	/*
 	 * Fail if the entry is not in TLB.
…
 	 * or copy_to_uspace().
 	 */
+	page_table_unlock(AS, true);
 	return;
 default:
…
 	tlbwi();
 
+	page_table_unlock(AS, true);
 	return;
 
 fail:
+	page_table_unlock(AS, true);
 	tlb_invalid_fail(istate);
 }
…
 	index.value = cp0_index_read();
 
+	page_table_lock(AS, true);
+
 	/*
 	 * Fail if the entry is not in TLB.
…
 	 * or copy_to_uspace().
 	 */
+	page_table_unlock(AS, true);
 	return;
 default:
…
 	tlbwi();
 
+	page_table_unlock(AS, true);
 	return;
 
 fail:
+	page_table_unlock(AS, true);
 	tlb_modified_fail(istate);
 }
…
 	pte_t *pte;
 
+	ASSERT(mutex_locked(&AS->lock));
+
 	hi.value = cp0_entry_hi_read();
…
 	 * Check if the mapping exists in page tables.
 	 */
-	pte = page_mapping_find(AS, badvaddr, true);
+	pte = page_mapping_find(AS, badvaddr);
 	if (pte && pte->p && (pte->w || access != PF_ACCESS_WRITE)) {
 		/*
…
 	 * Resort to higher-level page fault handler.
 	 */
+	page_table_unlock(AS, true);
 	switch (rc = as_page_fault(badvaddr, access, istate)) {
 	case AS_PF_OK:
…
 		 * The mapping ought to be in place.
 		 */
-		pte = page_mapping_find(AS, badvaddr, true);
+		page_table_lock(AS, true);
+		pte = page_mapping_find(AS, badvaddr);
 		ASSERT(pte && pte->p);
 		ASSERT(pte->w || access != PF_ACCESS_WRITE);
…
 		break;
 	case AS_PF_DEFER:
+		page_table_lock(AS, true);
 		*pfrc = AS_PF_DEFER;
 		return NULL;
 		break;
 	case AS_PF_FAULT:
+		page_table_lock(AS, true);
 		*pfrc = AS_PF_FAULT;
 		return NULL;
kernel/arch/ppc32/Makefile.inc
 	arch/$(KARCH)/src/mm/frame.c \
 	arch/$(KARCH)/src/mm/page.c \
-	arch/$(KARCH)/src/mm/pht.c \
 	arch/$(KARCH)/src/mm/tlb.c \
 	arch/$(KARCH)/src/drivers/pic.c
kernel/arch/ppc32/include/mm/as.h
 #define KERN_ppc32_AS_H_
 
-#include <arch/mm/pht.h>
-
 #define KERNEL_ADDRESS_SPACE_SHADOWED_ARCH 0
…
 #define as_create_arch(as, flags) (as != as)
 #define as_deinstall_arch(as)
-
-#define as_invalidate_translation_cache(as, page, cnt) \
-	pht_invalidate((as), (page), (cnt))
+#define as_invalidate_translation_cache(as, page, cnt)
 
 extern void as_arch_init(void);
kernel/arch/ppc32/include/mm/tlb.h
 #include <arch/interrupt.h>
+#include <typedefs.h>
 
 #define WIMG_GUARDED 0x01
…
 } ptelo_t;
 
+extern void pht_init(void);
+extern void pht_refill(unsigned int, istate_t *);
 extern void tlb_refill(unsigned int, istate_t *);
 
kernel/arch/ppc32/src/interrupt.c
 #include <arch/drivers/pic.h>
 #include <arch/mm/tlb.h>
-#include <arch/mm/pht.h>
 #include <print.h>
 
kernel/arch/ppc32/src/mm/page.c
 	if (config.cpu_active == 1)
 		page_mapping_operations = &pt_mapping_operations;
-	as_switch(NULL, AS_KERNEL);
 }
 
kernel/arch/ppc32/src/mm/tlb.c
  */
 
+#include <mm/tlb.h>
 #include <arch/mm/tlb.h>
+#include <arch/interrupt.h>
 #include <interrupt.h>
-#include <typedefs.h>
+#include <mm/as.h>
+#include <mm/page.h>
+#include <arch.h>
+#include <print.h>
+#include <macros.h>
+#include <symtab.h>
+
+static unsigned int seed = 42;
+
+/** Try to find PTE for faulting address
+ *
+ * @param as Address space.
+ * @param lock Lock/unlock the address space.
+ * @param badvaddr Faulting virtual address.
+ * @param access Access mode that caused the fault.
+ * @param istate Pointer to interrupted state.
+ * @param pfrc Pointer to variable where as_page_fault() return code
+ *             will be stored.
+ *
+ * @return PTE on success, NULL otherwise.
+ *
+ */
+static pte_t *find_mapping_and_check(as_t *as, uintptr_t badvaddr, int access,
+    istate_t *istate, int *pfrc)
+{
+	ASSERT(mutex_locked(&as->lock));
+
+	/*
+	 * Check if the mapping exists in page tables.
+	 */
+	pte_t *pte = page_mapping_find(as, badvaddr);
+	if ((pte) && (pte->present)) {
+		/*
+		 * Mapping found in page tables.
+		 * Immediately succeed.
+		 */
+		return pte;
+	} else {
+		/*
+		 * Mapping not found in page tables.
+		 * Resort to higher-level page fault handler.
+		 */
+		page_table_unlock(as, true);
+
+		int rc = as_page_fault(badvaddr, access, istate);
+		switch (rc) {
+		case AS_PF_OK:
+			/*
+			 * The higher-level page fault handler succeeded,
+			 * The mapping ought to be in place.
+			 */
+			page_table_lock(as, true);
+			pte = page_mapping_find(as, badvaddr);
+			ASSERT((pte) && (pte->present));
+			*pfrc = 0;
+			return pte;
+		case AS_PF_DEFER:
+			page_table_lock(as, true);
+			*pfrc = rc;
+			return NULL;
+		case AS_PF_FAULT:
+			page_table_lock(as, true);
+			*pfrc = rc;
+			return NULL;
+		default:
+			panic("Unexpected rc (%d).", rc);
+		}
+	}
+}
+
+static void pht_refill_fail(uintptr_t badvaddr, istate_t *istate)
+{
+	fault_if_from_uspace(istate, "PHT Refill Exception on %p.",
+	    (void *) badvaddr);
+	panic_memtrap(istate, PF_ACCESS_UNKNOWN, badvaddr,
+	    "PHT Refill Exception.");
+}
+
+static void pht_insert(const uintptr_t vaddr, const pte_t *pte)
+{
+	uint32_t page = (vaddr >> 12) & 0xffff;
+	uint32_t api = (vaddr >> 22) & 0x3f;
+
+	uint32_t vsid = sr_get(vaddr);
+	uint32_t sdr1 = sdr1_get();
+
+	// FIXME: compute size of PHT exactly
+	phte_t *phte = (phte_t *) PA2KA(sdr1 & 0xffff0000);
+
+	/* Primary hash (xor) */
+	uint32_t h = 0;
+	uint32_t hash = vsid ^ page;
+	uint32_t base = (hash & 0x3ff) << 3;
+	uint32_t i;
+	bool found = false;
+
+	/* Find colliding PTE in PTEG */
+	for (i = 0; i < 8; i++) {
+		if ((phte[base + i].v)
+		    && (phte[base + i].vsid == vsid)
+		    && (phte[base + i].api == api)
+		    && (phte[base + i].h == 0)) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found) {
+		/* Find unused PTE in PTEG */
+		for (i = 0; i < 8; i++) {
+			if (!phte[base + i].v) {
+				found = true;
+				break;
+			}
+		}
+	}
+
+	if (!found) {
+		/* Secondary hash (not) */
+		uint32_t base2 = (~hash & 0x3ff) << 3;
+
+		/* Find colliding PTE in PTEG */
+		for (i = 0; i < 8; i++) {
+			if ((phte[base2 + i].v)
+			    && (phte[base2 + i].vsid == vsid)
+			    && (phte[base2 + i].api == api)
+			    && (phte[base2 + i].h == 1)) {
+				found = true;
+				base = base2;
+				h = 1;
+				break;
+			}
+		}
+
+		if (!found) {
+			/* Find unused PTE in PTEG */
+			for (i = 0; i < 8; i++) {
+				if (!phte[base2 + i].v) {
+					found = true;
+					base = base2;
+					h = 1;
+					break;
+				}
+			}
+		}
+
+		if (!found)
+			i = RANDI(seed) % 8;
+	}
+
+	phte[base + i].v = 1;
+	phte[base + i].vsid = vsid;
+	phte[base + i].h = h;
+	phte[base + i].api = api;
+	phte[base + i].rpn = pte->pfn;
+	phte[base + i].r = 0;
+	phte[base + i].c = 0;
+	phte[base + i].wimg = (pte->page_cache_disable ? WIMG_NO_CACHE : 0);
+	phte[base + i].pp = 2;  // FIXME
+}
+
+/** Process Instruction/Data Storage Exception
+ *
+ * @param n Exception vector number.
+ * @param istate Interrupted register context.
+ *
+ */
+void pht_refill(unsigned int n, istate_t *istate)
+{
+	as_t *as = (AS == NULL) ? AS_KERNEL : AS;
+	uintptr_t badvaddr;
+
+	if (n == VECTOR_DATA_STORAGE)
+		badvaddr = istate->dar;
+	else
+		badvaddr = istate->pc;
+
+	page_table_lock(as, true);
+
+	int pfrc;
+	pte_t *pte = find_mapping_and_check(as, badvaddr,
+	    PF_ACCESS_READ /* FIXME */, istate, &pfrc);
+
+	if (!pte) {
+		switch (pfrc) {
+		case AS_PF_FAULT:
+			page_table_unlock(as, true);
+			pht_refill_fail(badvaddr, istate);
+			return;
+		case AS_PF_DEFER:
+			/*
+			 * The page fault came during copy_from_uspace()
+			 * or copy_to_uspace().
+			 */
+			page_table_unlock(as, true);
+			return;
+		default:
+			panic("Unexpected pfrc (%d).", pfrc);
+		}
+	}
+
+	/* Record access to PTE */
+	pte->accessed = 1;
+	pht_insert(badvaddr, pte);
+
+	page_table_unlock(as, true);
+}
 
 void tlb_refill(unsigned int n, istate_t *istate)
…
 void tlb_invalidate_all(void)
 {
+	uint32_t index;
+
 	asm volatile (
+		"li %[index], 0\n"
 		"sync\n"
-	);
-
-	for (unsigned int i = 0; i < 0x00040000; i += 0x00001000) {
-		asm volatile (
-			"tlbie %[i]\n"
-			:: [i] "r" (i)
-		);
-	}
-
-	asm volatile (
+
+		".rept 64\n"
+		"	tlbie %[index]\n"
+		"	addi %[index], %[index], 0x1000\n"
+		".endr\n"
+
 		"eieio\n"
 		"tlbsync\n"
 		"sync\n"
+		: [index] "=r" (index)
 	);
 }
…
 void tlb_invalidate_asid(asid_t asid)
 {
+	uint32_t sdr1 = sdr1_get();
+
+	// FIXME: compute size of PHT exactly
+	phte_t *phte = (phte_t *) PA2KA(sdr1 & 0xffff0000);
+
+	size_t i;
+	for (i = 0; i < 8192; i++) {
+		if ((phte[i].v) && (phte[i].vsid >= (asid << 4)) &&
+		    (phte[i].vsid < ((asid << 4) + 16)))
+			phte[i].v = 0;
+	}
+
 	tlb_invalidate_all();
 }
…
 void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
 {
+	// TODO
 	tlb_invalidate_all();
 }
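The pht_insert() added above selects a page-table-entry group (PTEG) by the classic 32-bit PowerPC hash: the primary hash is VSID xor page index, the secondary hash is its complement, and each selects a group of eight entries. A standalone model of just the index arithmetic (the input values and the 8-entry group width are taken from the code above; everything else is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t vaddr = 0x12345678;            /* example faulting address */
        uint32_t vsid  = 0x00000042;            /* would come from sr_get() */

        uint32_t page = (vaddr >> 12) & 0xffff; /* page index within segment */
        uint32_t api  = (vaddr >> 22) & 0x3f;   /* abbreviated page index */
        uint32_t hash = vsid ^ page;

        uint32_t primary   = (hash  & 0x3ff) << 3;  /* first PTE of primary PTEG */
        uint32_t secondary = (~hash & 0x3ff) << 3;  /* first PTE of secondary PTEG */

        printf("page=%#x api=%#x primary=%u secondary=%u\n",
            page, api, primary, secondary);
        return 0;
    }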
kernel/arch/sparc64/src/mm/sun4u/as.c
 #include <bitops.h>
 #include <macros.h>
-#include <memstr.h>
 
 #endif /* CONFIG_TSB */
kernel/arch/sparc64/src/mm/sun4u/tlb.c
 	page_table_lock(AS, true);
-	t = page_mapping_find(AS, page_16k, true);
+	t = page_mapping_find(AS, page_16k);
 	if (t && PTE_EXECUTABLE(t)) {
 		/*
…
 	page_table_lock(AS, true);
-	t = page_mapping_find(AS, page_16k, true);
+	t = page_mapping_find(AS, page_16k);
 	if (t) {
 		/*
…
 	page_table_lock(AS, true);
-	t = page_mapping_find(AS, page_16k, true);
+	t = page_mapping_find(AS, page_16k);
 	if (t && PTE_WRITABLE(t)) {
 		/*
kernel/arch/sparc64/src/mm/sun4v/as.c
 #include <bitops.h>
 #include <macros.h>
-#include <memstr.h>
 
 #endif /* CONFIG_TSB */
kernel/arch/sparc64/src/mm/sun4v/tlb.c
 	page_table_lock(AS, true);
-	t = page_mapping_find(AS, va, true);
+	t = page_mapping_find(AS, va);
 
 	if (t && PTE_EXECUTABLE(t)) {
…
 	page_table_lock(AS, true);
-	t = page_mapping_find(AS, va, true);
+	t = page_mapping_find(AS, va);
 	if (t) {
 		/*
…
 	page_table_lock(AS, true);
-	t = page_mapping_find(AS, va, true);
+	t = page_mapping_find(AS, va);
 	if (t && PTE_WRITABLE(t)) {
 		/*
kernel/genarch/include/mm/page_pt.h
 extern void page_mapping_insert_pt(as_t *, uintptr_t, uintptr_t, unsigned int);
-extern pte_t *page_mapping_find_pt(as_t *, uintptr_t, bool);
+extern pte_t *page_mapping_find_pt(as_t *, uintptr_t);
 
 #endif
kernel/genarch/src/drivers/ega/ega.c
 #include <mm/slab.h>
 #include <arch/mm/page.h>
+#include <synch/spinlock.h>
 #include <typedefs.h>
 #include <arch/asm.h>
kernel/genarch/src/mm/page_ht.c
 static void ht_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int);
 static void ht_mapping_remove(as_t *, uintptr_t);
-static pte_t *ht_mapping_find(as_t *, uintptr_t, bool);
+static pte_t *ht_mapping_find(as_t *, uintptr_t);
 
 /**
…
  * this call visible.
  *
- * @param as Address space to which page belongs.
+ * @param as Address space to wich page belongs.
  * @param page Virtual address of the page to be demapped.
  *
…
 /** Find mapping for virtual page in page hash table.
  *
- * @param as Address space to which page belongs.
- * @param page Virtual page.
- * @param nolock True if the page tables need not be locked.
+ * Find mapping for virtual page.
+ *
+ * @param as Address space to wich page belongs.
+ * @param page Virtual page.
  *
  * @return NULL if there is no such mapping; requested mapping otherwise.
  *
  */
-pte_t *ht_mapping_find(as_t *as, uintptr_t page, bool nolock)
+pte_t *ht_mapping_find(as_t *as, uintptr_t page)
 {
 	sysarg_t key[2] = {
…
 	};
 
-	ASSERT(nolock || page_table_locked(as));
+	ASSERT(page_table_locked(as));
 
 	link_t *cur = hash_table_find(&page_ht, key);
kernel/genarch/src/mm/page_pt.c
 static void pt_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int);
 static void pt_mapping_remove(as_t *, uintptr_t);
-static pte_t *pt_mapping_find(as_t *, uintptr_t, bool);
+static pte_t *pt_mapping_find(as_t *, uintptr_t);
 
 page_mapping_operations_t pt_mapping_operations = {
…
 /** Find mapping for virtual page in hierarchical page tables.
  *
- * @param as Address space to which page belongs.
- * @param page Virtual page.
- * @param nolock True if the page tables need not be locked.
+ * Find mapping for virtual page.
+ *
+ * @param as Address space to which page belongs.
+ * @param page Virtual page.
  *
  * @return NULL if there is no such mapping; entry from PTL3 describing
…
  *
  */
-pte_t *pt_mapping_find(as_t *as, uintptr_t page, bool nolock)
+pte_t *pt_mapping_find(as_t *as, uintptr_t page)
 {
-	ASSERT(nolock || page_table_locked(as));
+	ASSERT(page_table_locked(as));
 
 	pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);
kernel/generic/include/mm/page.h
 #include <typedefs.h>
 #include <mm/as.h>
-#include <arch/mm/page.h>
-
-#define P2SZ(pages) \
-	((pages) << PAGE_WIDTH)
+#include <memstr.h>
 
 /** Operations to manipulate page mappings. */
…
 	void (* mapping_insert)(as_t *, uintptr_t, uintptr_t, unsigned int);
 	void (* mapping_remove)(as_t *, uintptr_t);
-	pte_t *(* mapping_find)(as_t *, uintptr_t, bool);
+	pte_t *(* mapping_find)(as_t *, uintptr_t);
 } page_mapping_operations_t;
…
 extern void page_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int);
 extern void page_mapping_remove(as_t *, uintptr_t);
-extern pte_t *page_mapping_find(as_t *, uintptr_t, bool);
+extern pte_t *page_mapping_find(as_t *, uintptr_t);
 extern pte_t *page_table_create(unsigned int);
 extern void page_table_destroy(pte_t *);
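The removed P2SZ() macro is purely notational: P2SZ(pages) expands to (pages) << PAGE_WIDTH, so the many substitutions in mm/as.c below are value-preserving. A minimal check (PAGE_WIDTH = 12, i.e. 4 KiB pages, is an assumption of this sketch):

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_WIDTH 12                        /* assumed page size: 4 KiB */
    #define P2SZ(pages) ((pages) << PAGE_WIDTH)  /* the macro removed above */

    int main(void)
    {
        uintptr_t pages = 3;
        assert(P2SZ(pages) == (pages << PAGE_WIDTH));  /* both are 12288 bytes */
        return 0;
    }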
kernel/generic/include/mm/tlb.h
 extern void tlb_invalidate_asid(asid_t);
 extern void tlb_invalidate_pages(asid_t, uintptr_t, size_t);
-
 #endif
 
kernel/generic/src/console/console.c
 /** Kernel log initialized */
-static atomic_t klog_inited = {false};
+static bool klog_inited = false;
 
 /** First kernel log characters */
…
 /** Kernel log spinlock */
-SPINLOCK_STATIC_INITIALIZE_NAME(klog_lock, "klog_lock");
+SPINLOCK_STATIC_INITIALIZE_NAME(klog_lock, "*klog_lock");
 
 /** Physical memory area used for klog buffer */
…
 	event_set_unmask_callback(EVENT_KLOG, klog_update);
-	atomic_set(&klog_inited, true);
+
+	spinlock_lock(&klog_lock);
+	klog_inited = true;
+	spinlock_unlock(&klog_lock);
 }
…
 void klog_update(void)
 {
-	if (!atomic_get(&klog_inited))
-		return;
-
 	spinlock_lock(&klog_lock);
 
-	if (klog_uspace > 0) {
+	if ((klog_inited) && (klog_uspace > 0)) {
 		if (event_notify_3(EVENT_KLOG, true, klog_start, klog_len,
 		    klog_uspace) == EOK)
…
 void putchar(const wchar_t ch)
 {
-	bool ordy = ((stdout) && (stdout->op->write));
-
 	spinlock_lock(&klog_lock);
 
-	/* Print charaters stored in kernel log */
-	if (ordy) {
-		while (klog_stored > 0) {
-			wchar_t tmp = klog[(klog_start + klog_len - klog_stored) % KLOG_LENGTH];
-			klog_stored--;
-
-			/*
-			 * We need to give up the spinlock for
-			 * the physical operation of writting out
-			 * the character.
-			 */
-			spinlock_unlock(&klog_lock);
-			stdout->op->write(stdout, tmp, silent);
-			spinlock_lock(&klog_lock);
-		}
+	if ((klog_stored > 0) && (stdout) && (stdout->op->write)) {
+		/* Print charaters stored in kernel log */
+		size_t i;
+		for (i = klog_len - klog_stored; i < klog_len; i++)
+			stdout->op->write(stdout, klog[(klog_start + i) % KLOG_LENGTH], silent);
+		klog_stored = 0;
 	}
…
 		klog_start = (klog_start + 1) % KLOG_LENGTH;
 
-	if (!ordy) {
-		if (klog_stored < klog_len)
-			klog_stored++;
-	}
-
-	/* The character is stored for uspace */
-	if (klog_uspace < klog_len)
-		klog_uspace++;
-
-	spinlock_unlock(&klog_lock);
-
-	if (ordy) {
-		/*
-		 * Output the character. In this case
-		 * it should be no longer buffered.
-		 */
+	if ((stdout) && (stdout->op->write))
 		stdout->op->write(stdout, ch, silent);
-	} else {
+	else {
 		/*
 		 * No standard output routine defined yet.
…
 		 * Note that the early_putc() function might be
 		 * a no-op on certain hardware configurations.
+		 *
 		 */
 		early_putchar(ch);
-	}
+
+		if (klog_stored < klog_len)
+			klog_stored++;
+	}
+
+	/* The character is stored for uspace */
+	if (klog_uspace < klog_len)
+		klog_uspace++;
+
+	spinlock_unlock(&klog_lock);
 
 	/* Force notification on newline */
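The putchar()/klog bookkeeping above revolves around three indices: klog_start (oldest stored character), klog_len (number of valid characters) and klog_stored (characters not yet written to stdout); replay starts at klog_len - klog_stored. A user-space model of that arithmetic (a sketch, not the kernel code; KLOG_LENGTH shrunk to 8 so the wrap-around is visible):

    #include <stddef.h>
    #include <stdio.h>

    #define KLOG_LENGTH 8

    static char klog[KLOG_LENGTH];
    static size_t klog_start, klog_len, klog_stored;

    /* Store one character, overwriting the oldest when full. */
    static void klog_put(char ch)
    {
        klog[(klog_start + klog_len) % KLOG_LENGTH] = ch;
        if (klog_len < KLOG_LENGTH)
            klog_len++;
        else
            klog_start = (klog_start + 1) % KLOG_LENGTH;
        if (klog_stored < klog_len)
            klog_stored++;
    }

    /* Write out everything not yet printed, as the new revision does. */
    static void klog_replay(void)
    {
        for (size_t i = klog_len - klog_stored; i < klog_len; i++)
            putchar(klog[(klog_start + i) % KLOG_LENGTH]);
        klog_stored = 0;
    }

    int main(void)
    {
        for (const char *s = "buffered\n"; *s; s++)
            klog_put(*s);
        klog_replay();   /* prints "uffered" + newline: 'b' was overwritten */
        return 0;
    }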
kernel/generic/src/mm/as.c
 	 * We don't want any area to have conflicts with NULL page.
 	 */
-	if (overlaps(addr, P2SZ(count), (uintptr_t) NULL, PAGE_SIZE))
+	if (overlaps(addr, count << PAGE_WIDTH, (uintptr_t) NULL, PAGE_SIZE))
 		return false;
 
…
 		mutex_lock(&area->lock);
 
-		if (overlaps(addr, P2SZ(count), area->base,
-		    P2SZ(area->pages))) {
+		if (overlaps(addr, count << PAGE_WIDTH,
+		    area->base, area->pages << PAGE_WIDTH)) {
 			mutex_unlock(&area->lock);
 			return false;
…
 		mutex_lock(&area->lock);
 
-		if (overlaps(addr, P2SZ(count), area->base,
-		    P2SZ(area->pages))) {
+		if (overlaps(addr, count << PAGE_WIDTH,
+		    area->base, area->pages << PAGE_WIDTH)) {
 			mutex_unlock(&area->lock);
 			return false;
…
 		mutex_lock(&area->lock);
 
-		if (overlaps(addr, P2SZ(count), area->base,
-		    P2SZ(area->pages))) {
+		if (overlaps(addr, count << PAGE_WIDTH,
+		    area->base, area->pages << PAGE_WIDTH)) {
 			mutex_unlock(&area->lock);
 			return false;
…
 	 */
 	if (!KERNEL_ADDRESS_SPACE_SHADOWED) {
-		return !overlaps(addr, P2SZ(count), KERNEL_ADDRESS_SPACE_START,
+		return !overlaps(addr, count << PAGE_WIDTH,
+		    KERNEL_ADDRESS_SPACE_START,
 		    KERNEL_ADDRESS_SPACE_END - KERNEL_ADDRESS_SPACE_START);
 	}
…
 
 	btree_node_t *leaf;
-	as_area_t *area = (as_area_t *) btree_search(&as->as_area_btree, va,
-	    &leaf);
+	as_area_t *area = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
 	if (area) {
 		/* va is the base address of an address space area */
…
 		mutex_lock(&area->lock);
 
-		if ((area->base <= va) &&
-		    (va <= area->base + (P2SZ(area->pages) - 1)))
+		size_t size = area->pages << PAGE_WIDTH;
+		if ((area->base <= va) && (va <= area->base + (size - 1)))
 			return area;
 
…
 	 * Because of its position in the B+tree, it must have base < va.
 	 */
-	btree_node_t *lnode = btree_leaf_node_left_neighbour(&as->as_area_btree,
-	    leaf);
+	btree_node_t *lnode = btree_leaf_node_left_neighbour(&as->as_area_btree, leaf);
 	if (lnode) {
 		area = (as_area_t *) lnode->value[lnode->keys - 1];
…
 		mutex_lock(&area->lock);
 
-		if (va <= area->base + (P2SZ(area->pages) - 1))
+		if (va < area->base + (area->pages << PAGE_WIDTH))
 			return area;
 
…
 
 	if (pages < area->pages) {
-		uintptr_t start_free = area->base + P2SZ(pages);
+		uintptr_t start_free = area->base + (pages << PAGE_WIDTH);
 
 		/*
…
 		 */
 		ipl_t ipl = tlb_shootdown_start(TLB_INVL_PAGES, as->asid,
-		    area->base + P2SZ(pages), area->pages - pages);
+		    area->base + (pages << PAGE_WIDTH), area->pages - pages);
 
 		/*
…
 				size_t i = 0;
 
-				if (overlaps(ptr, P2SZ(size), area->base,
-				    P2SZ(pages))) {
+				if (overlaps(ptr, size << PAGE_WIDTH, area->base,
+				    pages << PAGE_WIDTH)) {
 
-					if (ptr + P2SZ(size) <= start_free) {
+					if (ptr + (size << PAGE_WIDTH) <= start_free) {
 						/*
 						 * The whole interval fits
…
 
 				for (; i < size; i++) {
-					pte_t *pte = page_mapping_find(as,
-					    ptr + P2SZ(i), false);
+					pte_t *pte = page_mapping_find(as, ptr +
+					    (i << PAGE_WIDTH));
 
 					ASSERT(pte);
…
 					    (area->backend->frame_free)) {
 						area->backend->frame_free(area,
-						    ptr + P2SZ(i),
+						    ptr + (i << PAGE_WIDTH),
 						    PTE_GET_FRAME(pte));
 					}
 
-					page_mapping_remove(as, ptr + P2SZ(i));
+					page_mapping_remove(as, ptr +
+					    (i << PAGE_WIDTH));
 				}
 			}
…
 		 */
 
-		tlb_invalidate_pages(as->asid, area->base + P2SZ(pages),
+		tlb_invalidate_pages(as->asid, area->base + (pages << PAGE_WIDTH),
 		    area->pages - pages);
 
 		/*
-		 * Invalidate software translation caches
-		 * (e.g. TSB on sparc64, PHT on ppc32).
-		 */
-		as_invalidate_translation_cache(as, area->base + P2SZ(pages),
-		    area->pages - pages);
+		 * Invalidate software translation caches (e.g. TSB on sparc64).
+		 */
+		as_invalidate_translation_cache(as, area->base +
+		    (pages << PAGE_WIDTH), area->pages - pages);
 		tlb_shootdown_finalize(ipl);
 
…
 
 			for (size = 0; size < (size_t) node->value[i]; size++) {
-				pte_t *pte = page_mapping_find(as,
-				    ptr + P2SZ(size), false);
+				pte_t *pte =
+				    page_mapping_find(as, ptr + (size << PAGE_WIDTH));
 
 				ASSERT(pte);
…
 				    (area->backend->frame_free)) {
 					area->backend->frame_free(area,
-					    ptr + P2SZ(size),
-					    PTE_GET_FRAME(pte));
+					    ptr + (size << PAGE_WIDTH), PTE_GET_FRAME(pte));
 				}
 
-				page_mapping_remove(as, ptr + P2SZ(size));
+				page_mapping_remove(as, ptr + (size << PAGE_WIDTH));
 			}
…
 
 	/*
-	 * Invalidate potential software translation caches
-	 * (e.g. TSB on sparc64, PHT on ppc32).
+	 * Invalidate potential software translation caches (e.g. TSB on
+	 * sparc64).
 	 */
 	as_invalidate_translation_cache(as, area->base, area->pages);
…
 	}
 
-	size_t src_size = P2SZ(src_area->pages);
+	size_t src_size = src_area->pages << PAGE_WIDTH;
 	unsigned int src_flags = src_area->flags;
 	mem_backend_t *src_backend = src_area->backend;
…
 	for (cur = area->used_space.leaf_head.next;
 	    cur != &area->used_space.leaf_head; cur = cur->next) {
-		btree_node_t *node = list_get_instance(cur, btree_node_t,
-		    leaf_link);
+		btree_node_t *node
+		    = list_get_instance(cur, btree_node_t, leaf_link);
 		btree_key_t i;
 
…
 
 			for (size = 0; size < (size_t) node->value[i]; size++) {
-				pte_t *pte = page_mapping_find(as,
-				    ptr + P2SZ(size), false);
+				pte_t *pte =
+				    page_mapping_find(as, ptr + (size << PAGE_WIDTH));
 
 				ASSERT(pte);
…
 
 				/* Remove old mapping */
-				page_mapping_remove(as, ptr + P2SZ(size));
+				page_mapping_remove(as, ptr + (size << PAGE_WIDTH));
 			}
…
 
 	/*
-	 * Invalidate potential software translation caches
-	 * (e.g. TSB on sparc64, PHT on ppc32).
+	 * Invalidate potential software translation caches (e.g. TSB on
+	 * sparc64).
 	 */
 	as_invalidate_translation_cache(as, area->base, area->pages);
…
 
 			/* Insert the new mapping */
-			page_mapping_insert(as, ptr + P2SZ(size),
+			page_mapping_insert(as, ptr + (size << PAGE_WIDTH),
 			    old_frame[frame_idx++], page_flags);
 
…
 	 */
 	pte_t *pte;
-	if ((pte = page_mapping_find(AS, page, false))) {
+	if ((pte = page_mapping_find(AS, page))) {
 		if (PTE_PRESENT(pte)) {
 			if (((access == PF_ACCESS_READ) && PTE_READABLE(pte)) ||
…
 
 	if (src_area) {
-		size = P2SZ(src_area->pages);
+		size = src_area->pages << PAGE_WIDTH;
 		mutex_unlock(&src_area->lock);
 	} else
…
 		if (page >= right_pg) {
 			/* Do nothing. */
-		} else if (overlaps(page, P2SZ(count), left_pg,
-		    P2SZ(left_cnt))) {
+		} else if (overlaps(page, count << PAGE_WIDTH, left_pg,
+		    left_cnt << PAGE_WIDTH)) {
 			/* The interval intersects with the left interval. */
 			return false;
-		} else if (overlaps(page, P2SZ(count), right_pg,
-		    P2SZ(right_cnt))) {
+		} else if (overlaps(page, count << PAGE_WIDTH, right_pg,
+		    right_cnt << PAGE_WIDTH)) {
 			/* The interval intersects with the right interval. */
 			return false;
-		} else if ((page == left_pg + P2SZ(left_cnt)) &&
-		    (page + P2SZ(count) == right_pg)) {
+		} else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) &&
+		    (page + (count << PAGE_WIDTH) == right_pg)) {
 			/*
 			 * The interval can be added by merging the two already
…
 			btree_remove(&area->used_space, right_pg, leaf);
 			goto success;
-		} else if (page == left_pg + P2SZ(left_cnt)) {
+		} else if (page == left_pg + (left_cnt << PAGE_WIDTH)) {
 			/*
 			 * The interval can be added by simply growing the left
…
 			node->value[node->keys - 1] += count;
 			goto success;
-		} else if (page + P2SZ(count) == right_pg) {
+		} else if (page + (count << PAGE_WIDTH) == right_pg) {
 			/*
 			 * The interval can be addded by simply moving base of
…
 		 */
 
-		if (overlaps(page, P2SZ(count), right_pg, P2SZ(right_cnt))) {
+		if (overlaps(page, count << PAGE_WIDTH, right_pg,
+		    right_cnt << PAGE_WIDTH)) {
 			/* The interval intersects with the right interval. */
 			return false;
-		} else if (page + P2SZ(count) == right_pg) {
+		} else if (page + (count << PAGE_WIDTH) == right_pg) {
 			/*
 			 * The interval can be added by moving the base of the
…
 		if (page < left_pg) {
 			/* Do nothing. */
-		} else if (overlaps(page, P2SZ(count), left_pg,
-		    P2SZ(left_cnt))) {
+		} else if (overlaps(page, count << PAGE_WIDTH, left_pg,
+		    left_cnt << PAGE_WIDTH)) {
 			/* The interval intersects with the left interval. */
 			return false;
-		} else if (overlaps(page, P2SZ(count), right_pg,
-		    P2SZ(right_cnt))) {
+		} else if (overlaps(page, count << PAGE_WIDTH, right_pg,
+		    right_cnt << PAGE_WIDTH)) {
 			/* The interval intersects with the right interval. */
 			return false;
-		} else if ((page == left_pg + P2SZ(left_cnt)) &&
-		    (page + P2SZ(count) == right_pg)) {
+		} else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) &&
+		    (page + (count << PAGE_WIDTH) == right_pg)) {
 			/*
 			 * The interval can be added by merging the two already
…
 			btree_remove(&area->used_space, right_pg, node);
 			goto success;
-		} else if (page == left_pg + P2SZ(left_cnt)) {
+		} else if (page == left_pg + (left_cnt << PAGE_WIDTH)) {
 			/*
 			 * The interval can be added by simply growing the left
…
 			leaf->value[leaf->keys - 1] += count;
 			goto success;
-		} else if (page + P2SZ(count) == right_pg) {
+		} else if (page + (count << PAGE_WIDTH) == right_pg) {
 			/*
 			 * The interval can be addded by simply moving base of
…
 		 */
 
-		if (overlaps(page, P2SZ(count), left_pg, P2SZ(left_cnt))) {
+		if (overlaps(page, count << PAGE_WIDTH, left_pg,
+		    left_cnt << PAGE_WIDTH)) {
 			/* The interval intersects with the left interval. */
 			return false;
-		} else if (left_pg + P2SZ(left_cnt) == page) {
+		} else if (left_pg + (left_cnt << PAGE_WIDTH) == page) {
 			/*
 			 * The interval can be added by growing the left
…
 			 */
 
-			if (overlaps(page, P2SZ(count), left_pg,
-			    P2SZ(left_cnt))) {
+			if (overlaps(page, count << PAGE_WIDTH, left_pg,
+			    left_cnt << PAGE_WIDTH)) {
 				/*
 				 * The interval intersects with the left
…
 				 */
 				return false;
-			} else if (overlaps(page, P2SZ(count), right_pg,
-			    P2SZ(right_cnt))) {
+			} else if (overlaps(page, count << PAGE_WIDTH, right_pg,
+			    right_cnt << PAGE_WIDTH)) {
 				/*
 				 * The interval intersects with the right
…
 				 */
 				return false;
-			} else if ((page == left_pg + P2SZ(left_cnt)) &&
-			    (page + P2SZ(count) == right_pg)) {
+			} else if ((page == left_pg + (left_cnt << PAGE_WIDTH)) &&
+			    (page + (count << PAGE_WIDTH) == right_pg)) {
 				/*
 				 * The interval can be added by merging the two
…
 				btree_remove(&area->used_space, right_pg, leaf);
 				goto success;
-			} else if (page == left_pg + P2SZ(left_cnt)) {
+			} else if (page == left_pg + (left_cnt << PAGE_WIDTH)) {
 				/*
 				 * The interval can be added by simply growing
…
 				leaf->value[i - 1] += count;
 				goto success;
-			} else if (page + P2SZ(count) == right_pg) {
+			} else if (page + (count << PAGE_WIDTH) == right_pg) {
 				/*
 				 * The interval can be addded by simply moving
…
 	for (i = 0; i < leaf->keys; i++) {
 		if (leaf->key[i] == page) {
-			leaf->key[i] += P2SZ(count);
+			leaf->key[i] += count << PAGE_WIDTH;
 			leaf->value[i] -= count;
 			goto success;
…
 	}
 
-	btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space,
-	    leaf);
+	btree_node_t *node = btree_leaf_node_left_neighbour(&area->used_space, leaf);
 	if ((node) && (page < leaf->key[0])) {
 		uintptr_t left_pg = node->key[node->keys - 1];
 		size_t left_cnt = (size_t) node->value[node->keys - 1];
 
-		if (overlaps(left_pg, P2SZ(left_cnt), page, P2SZ(count))) {
-			if (page + P2SZ(count) == left_pg + P2SZ(left_cnt)) {
+		if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page,
+		    count << PAGE_WIDTH)) {
+			if (page + (count << PAGE_WIDTH) ==
+			    left_pg + (left_cnt << PAGE_WIDTH)) {
 				/*
 				 * The interval is contained in the rightmost
…
 				node->value[node->keys - 1] -= count;
 				goto success;
-			} else if (page + P2SZ(count) <
-			    left_pg + P2SZ(left_cnt)) {
-				size_t new_cnt;
-
+			} else if (page + (count << PAGE_WIDTH) <
+			    left_pg + (left_cnt << PAGE_WIDTH)) {
 				/*
 				 * The interval is contained in the rightmost
…
 				 * new interval.
 				 */
-				new_cnt = ((left_pg + P2SZ(left_cnt)) -
-				    (page + P2SZ(count))) >> PAGE_WIDTH;
+				size_t new_cnt = ((left_pg + (left_cnt << PAGE_WIDTH)) -
+				    (page + (count << PAGE_WIDTH))) >> PAGE_WIDTH;
 				node->value[node->keys - 1] -= count + new_cnt;
 				btree_insert(&area->used_space, page +
-				    P2SZ(count), (void *) new_cnt, leaf);
+				    (count << PAGE_WIDTH), (void *) new_cnt, leaf);
 				goto success;
 			}
…
 		size_t left_cnt = (size_t) leaf->value[leaf->keys - 1];
 
-		if (overlaps(left_pg, P2SZ(left_cnt), page, P2SZ(count))) {
-			if (page + P2SZ(count) == left_pg + P2SZ(left_cnt)) {
+		if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page,
+		    count << PAGE_WIDTH)) {
+			if (page + (count << PAGE_WIDTH) ==
+			    left_pg + (left_cnt << PAGE_WIDTH)) {
 				/*
 				 * The interval is contained in the rightmost
…
 				leaf->value[leaf->keys - 1] -= count;
 				goto success;
-			} else if (page + P2SZ(count) < left_pg +
-			    P2SZ(left_cnt)) {
-				size_t new_cnt;
-
+			} else if (page + (count << PAGE_WIDTH) < left_pg +
+			    (left_cnt << PAGE_WIDTH)) {
 				/*
 				 * The interval is contained in the rightmost
…
 				 * interval.
 				 */
-				new_cnt = ((left_pg + P2SZ(left_cnt)) -
-				    (page + P2SZ(count))) >> PAGE_WIDTH;
+				size_t new_cnt = ((left_pg + (left_cnt << PAGE_WIDTH)) -
+				    (page + (count << PAGE_WIDTH))) >> PAGE_WIDTH;
 				leaf->value[leaf->keys - 1] -= count + new_cnt;
 				btree_insert(&area->used_space, page +
-				    P2SZ(count), (void *) new_cnt, leaf);
+				    (count << PAGE_WIDTH), (void *) new_cnt, leaf);
 				goto success;
 			}
…
 			 * to (i - 1) and i.
 			 */
-			if (overlaps(left_pg, P2SZ(left_cnt), page,
-			    P2SZ(count))) {
-				if (page + P2SZ(count) ==
-				    left_pg + P2SZ(left_cnt)) {
+			if (overlaps(left_pg, left_cnt << PAGE_WIDTH, page,
+			    count << PAGE_WIDTH)) {
+				if (page + (count << PAGE_WIDTH) ==
+				    left_pg + (left_cnt << PAGE_WIDTH)) {
 					/*
 					 * The interval is contained in the
…
 					leaf->value[i - 1] -= count;
 					goto success;
-				} else if (page + P2SZ(count) <
-				    left_pg + P2SZ(left_cnt)) {
-					size_t new_cnt;
-
+				} else if (page + (count << PAGE_WIDTH) <
+				    left_pg + (left_cnt << PAGE_WIDTH)) {
 					/*
 					 * The interval is contained in the
…
 					 * also inserting a new interval.
 					 */
-					new_cnt = ((left_pg + P2SZ(left_cnt)) -
-					    (page + P2SZ(count))) >>
+					size_t new_cnt = ((left_pg +
+					    (left_cnt << PAGE_WIDTH)) -
+					    (page + (count << PAGE_WIDTH))) >>
 					    PAGE_WIDTH;
 					leaf->value[i - 1] -= count + new_cnt;
 					btree_insert(&area->used_space, page +
-					    P2SZ(count), (void *) new_cnt,
+					    (count << PAGE_WIDTH), (void *) new_cnt,
 					    leaf);
 					goto success;
…
 		btree_key_t i;
 		for (i = 0; (ret == 0) && (i < node->keys); i++) {
-			uintptr_t addr;
-
 			as_area_t *area = (as_area_t *) node->value[i];
 
 			mutex_lock(&area->lock);
 
-			addr = ALIGN_UP(area->base + P2SZ(area->pages),
+			uintptr_t addr =
+			    ALIGN_UP(area->base + (area->pages << PAGE_WIDTH),
 			    PAGE_SIZE);
 
…
 
 			info[area_idx].start_addr = area->base;
-			info[area_idx].size = P2SZ(area->pages);
+			info[area_idx].size = FRAMES2SIZE(area->pages);
 			info[area_idx].flags = area->flags;
 			++area_idx;
…
 			    " (%p - %p)\n", area, (void *) area->base,
 			    area->pages, (void *) area->base,
-			    (void *) (area->base + P2SZ(area->pages)));
+			    (void *) (area->base + FRAMES2SIZE(area->pages)));
 			mutex_unlock(&area->lock);
 		}
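overlaps(), used throughout used_space_insert() and used_space_remove() above, tests whether two byte ranges given as base address plus size intersect. A minimal model with the semantics assumed from its use here (not the HelenOS macro itself):

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* True if half-open ranges [a, a + sa) and [b, b + sb) intersect. */
    static bool overlaps(uintptr_t a, size_t sa, uintptr_t b, size_t sb)
    {
        return (a < b + sb) && (b < a + sa);
    }

    int main(void)
    {
        assert(overlaps(0x1000, 0x2000, 0x2000, 0x1000));   /* shared bytes */
        assert(!overlaps(0x1000, 0x1000, 0x2000, 0x1000));  /* merely adjacent */
        return 0;
    }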
kernel/generic/src/mm/backend_anon.c
 #include <typedefs.h>
 #include <align.h>
-#include <memstr.h>
 #include <arch.h>
 
…
 				page_table_lock(area->as, false);
 				pte = page_mapping_find(area->as,
-				    base + P2SZ(j), false);
+				    base + j * PAGE_SIZE);
 				ASSERT(pte && PTE_VALID(pte) &&
 				    PTE_PRESENT(pte));
 				btree_insert(&area->sh_info->pagemap,
-				    (base + P2SZ(j)) - area->base,
+				    (base + j * PAGE_SIZE) - area->base,
 				    (void *) PTE_GET_FRAME(pte), NULL);
 				page_table_unlock(area->as, false);
kernel/generic/src/mm/backend_elf.c
 		if (!(area->flags & AS_AREA_WRITE))
 			if (base >= entry->p_vaddr &&
-			    base + P2SZ(count) <= start_anon)
+			    base + count * PAGE_SIZE <= start_anon)
 				continue;
 
…
 			if (!(area->flags & AS_AREA_WRITE))
 				if (base >= entry->p_vaddr &&
-				    base + P2SZ(j + 1) <= start_anon)
+				    base + (j + 1) * PAGE_SIZE <=
+				    start_anon)
 					continue;
 
 			page_table_lock(area->as, false);
 			pte = page_mapping_find(area->as,
-			    base + P2SZ(j), false);
+			    base + j * PAGE_SIZE);
 			ASSERT(pte && PTE_VALID(pte) &&
 			    PTE_PRESENT(pte));
 			btree_insert(&area->sh_info->pagemap,
-			    (base + P2SZ(j)) - area->base,
+			    (base + j * PAGE_SIZE) - area->base,
 			    (void *) PTE_GET_FRAME(pte), NULL);
 			page_table_unlock(area->as, false);
kernel/generic/src/mm/page.c
  * using flags. Allocate and setup any missing page tables.
  *
- * @param as Address space to which page belongs.
+ * @param as Address space to wich page belongs.
  * @param page Virtual address of the page to be mapped.
  * @param frame Physical address of memory frame to which the mapping is
…
  * this call visible.
  *
- * @param as Address space to which page belongs.
+ * @param as Address space to wich page belongs.
  * @param page Virtual address of the page to be demapped.
  *
…
 }
 
-/** Find mapping for virtual page.
+/** Find mapping for virtual page
  *
- * @param as Address space to which page belongs.
- * @param page Virtual page.
- * @param nolock True if the page tables need not be locked.
+ * Find mapping for virtual page.
+ *
+ * @param as Address space to wich page belongs.
+ * @param page Virtual page.
  *
  * @return NULL if there is no such mapping; requested mapping
…
  *
  */
-NO_TRACE pte_t *page_mapping_find(as_t *as, uintptr_t page, bool nolock)
+NO_TRACE pte_t *page_mapping_find(as_t *as, uintptr_t page)
 {
-	ASSERT(nolock || page_table_locked(as));
+	ASSERT(page_table_locked(as));
 
 	ASSERT(page_mapping_operations);
 	ASSERT(page_mapping_operations->mapping_find);
 
-	return page_mapping_operations->mapping_find(as, page, nolock);
+	return page_mapping_operations->mapping_find(as, page);
 }
kernel/generic/src/printf/vprintf.c
 #include <typedefs.h>
 #include <str.h>
+
+IRQ_SPINLOCK_STATIC_INITIALIZE_NAME(printf_lock, "*printf_lock");
 
 static int vprintf_str_write(const char *str, size_t size, void *data)
…
 	};
 
-	return printf_core(fmt, &ps, ap);
+	irq_spinlock_lock(&printf_lock, true);
+	int ret = printf_core(fmt, &ps, ap);
+	irq_spinlock_unlock(&printf_lock, true);
+
+	return ret;
 }
 
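A user-space model of the serialization this hunk introduces: a single lock brackets the whole formatting pass so concurrent printf() calls cannot interleave their output (sketch only; vprintf() stands in for the kernel's printf_core()):

    #include <pthread.h>
    #include <stdarg.h>
    #include <stdio.h>

    static pthread_mutex_t printf_lock = PTHREAD_MUTEX_INITIALIZER;

    static int locked_printf(const char *fmt, ...)
    {
        va_list ap;
        va_start(ap, fmt);

        /* Hold the lock across the entire formatting and output pass. */
        pthread_mutex_lock(&printf_lock);
        int ret = vprintf(fmt, ap);
        pthread_mutex_unlock(&printf_lock);

        va_end(ap);
        return ret;
    }

    int main(void)
    {
        locked_printf("hello %d\n", 42);
        return 0;
    }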
kernel/generic/src/synch/futex.c
 	 */
 	page_table_lock(AS, true);
-	t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE), false);
+	t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE));
 	if (!t || !PTE_VALID(t) || !PTE_PRESENT(t)) {
 		page_table_unlock(AS, true);
…
 	 */
 	page_table_lock(AS, true);
-	t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE), false);
+	t = page_mapping_find(AS, ALIGN_DOWN(uaddr, PAGE_SIZE));
 	if (!t || !PTE_VALID(t) || !PTE_PRESENT(t)) {
 		page_table_unlock(AS, true);
kernel/generic/src/synch/spinlock.c
 	 * run in a simulator) that caused problems with both
 	 * printf_lock and the framebuffer lock.
+	 *
 	 */
 	if (lock->name[0] == '*')
kernel/test/mm/mapping1.c
 #include <typedefs.h>
 #include <debug.h>
-#include <arch.h>
 
 #define PAGE0 0x10000000
…
 	*((uint32_t *) frame1) = VALUE1;
 
-	page_table_lock(AS, true);
-
 	TPRINTF("Mapping virtual address %p to physical address %p.\n",
 	    (void *) PAGE0, (void *) KA2PA(frame0));
…
 	    (void *) PAGE1, (void *) KA2PA(frame1));
 	page_mapping_insert(AS_KERNEL, PAGE1, KA2PA(frame1), PAGE_PRESENT | PAGE_WRITE);
-
-	page_table_unlock(AS, true);
 
 	v0 = *((uint32_t *) PAGE0);
uspace/app/tester/mm/common.c
 }
 
-static void check_consistency(const char *loc)
-{
-	/* Check heap consistency */
-	void *prob = heap_check();
-	if (prob != NULL) {
-		TPRINTF("\nError: Heap inconsistency at %p in %s.\n",
-		    prob, loc);
-		TSTACKTRACE();
-		error_flag = true;
-	}
-}
-
 /** Checked malloc
  *
…
 	/* Allocate the chunk of memory */
 	data = malloc(size);
-	check_consistency("checked_malloc");
 	if (data == NULL)
 		return NULL;
…
 		TPRINTF("\nError: Allocated block overlaps with another "
 		    "previously allocated block.\n");
-		TSTACKTRACE();
 		error_flag = true;
 	}
…
 	if (block->addr == NULL) {
 		free(block);
-		check_consistency("alloc_block");
 		return NULL;
 	}
…
 	/* Free the memory */
 	free(block->addr);
-	check_consistency("free_block (a)");
 	free(block);
-	check_consistency("free_block (b)");
 }
…
 	    pos < end; pos++)
 		*pos = block_expected_value(block, pos);
-
-	check_consistency("fill_block");
 }
…
 		if (*pos != block_expected_value(block, pos)) {
 			TPRINTF("\nError: Corrupted content of a data block.\n");
-			TSTACKTRACE();
 			error_flag = true;
 			return;
…
 	if (entry == NULL) {
 		TPRINTF("\nError: Corrupted list of allocated memory blocks.\n");
-		TSTACKTRACE();
 		error_flag = true;
 	}
…
 	if (addr == NULL) {
 		free(area);
-		check_consistency("map_area (a)");
 		return NULL;
 	}
…
 	if (area->addr == (void *) -1) {
 		free(area);
-		check_consistency("map_area (b)");
 		return NULL;
 	}
…
 
 	free(area);
-	check_consistency("unmap_area");
 }
…
 	    pos < end; pos++)
 		*pos = area_expected_value(area, pos);
-
-	check_consistency("fill_area");
 }
uspace/app/tester/mm/malloc1.c
 			TPRINTF("A");
 			fill_block(blk);
-			RETURN_IF_ERROR;
 		}
 
uspace/app/tester/mm/malloc3.c
 			TPRINTF("A");
 			fill_block(blk);
-			RETURN_IF_ERROR;
 
 			if ((mem_blocks_count % AREA_GRANULARITY) == 0) {
 				mem_area_t *area = map_area(AREA_SIZE);
 				RETURN_IF_ERROR;
-
 				if (area != NULL) {
 					TPRINTF("*");
 					fill_area(area);
-					RETURN_IF_ERROR;
 				} else
 					TPRINTF("F(*)");
uspace/app/tester/tester.h
 #include <sys/types.h>
 #include <bool.h>
-#include <stacktrace.h>
 
 #define IPC_TEST_SERVICE 10240
…
 		if (!test_quiet) { \
 			fprintf(stderr, (format), ##__VA_ARGS__); \
-		} \
-	} while (0)
-
-#define TSTACKTRACE() \
-	do { \
-		if (!test_quiet) { \
-			stacktrace_print(); \
 		} \
 	} while (0)
uspace/lib/c/arch/ppc32/_link.ld.in
 #endif
 	data PT_LOAD FLAGS(6);
-	debug PT_NOTE;
 }
 
…
 	} :data
 
-#ifdef CONFIG_LINE_DEBUG
-	.comment 0 : { *(.comment); } :debug
-	.debug_abbrev 0 : { *(.debug_abbrev); } :debug
-	.debug_aranges 0 : { *(.debug_aranges); } :debug
-	.debug_info 0 : { *(.debug_info); } :debug
-	.debug_line 0 : { *(.debug_line); } :debug
-	.debug_loc 0 : { *(.debug_loc); } :debug
-	.debug_pubnames 0 : { *(.debug_pubnames); } :debug
-	.debug_pubtypes 0 : { *(.debug_pubtypes); } :debug
-	.debug_ranges 0 : { *(.debug_ranges); } :debug
-	.debug_str 0 : { *(.debug_str); } :debug
-#endif
-
 	/DISCARD/ : {
 		*(*);
uspace/lib/c/generic/assert.c
 #include <assert.h>
 #include <stdio.h>
-#include <io/klog.h>
 #include <stdlib.h>
-#include <atomic.h>
 #include <stacktrace.h>
-#include <stdint.h>
-
-static atomic_t failed_asserts = {0};
 
 void assert_abort(const char *cond, const char *file, unsigned int line)
 {
-	/*
-	 * Send the message safely to klog. Nested asserts should not occur.
-	 */
-	klog_printf("Assertion failed (%s) in file \"%s\", line %u.\n",
-	    cond, file, line);
-
-	/*
-	 * Check if this is a nested or parallel assert.
-	 */
-	if (atomic_postinc(&failed_asserts))
-		abort();
-
-	/*
-	 * Attempt to print the message to standard output and display
-	 * the stack trace. These operations can theoretically trigger nested
-	 * assertions.
-	 */
 	printf("Assertion failed (%s) in file \"%s\", line %u.\n",
 	    cond, file, line);
 	stacktrace_print();
-
 	abort();
 }
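The code removed here guarded against recursive assertion failures: the first failure takes the printing path, and any failure raised while printing aborts immediately. A user-space sketch of the same guard (C11 atomics stand in for the kernel's atomic_t; an assumption, not the library code):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    static atomic_int failed_asserts;

    static void assert_abort(const char *cond, const char *file, unsigned line)
    {
        /* A nested or parallel assert skips the printing path entirely. */
        if (atomic_fetch_add(&failed_asserts, 1) > 0)
            abort();

        fprintf(stderr, "Assertion failed (%s) in file \"%s\", line %u.\n",
            cond, file, line);
        abort();
    }

    int main(void)
    {
        if (!(1 + 1 == 2))   /* never fires; shows the calling convention */
            assert_abort("1 + 1 == 2", __FILE__, __LINE__);
        return 0;
    }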
uspace/lib/c/generic/io/klog.c
 #include <sys/types.h>
 #include <unistd.h>
-#include <errno.h>
 #include <io/klog.h>
-#include <io/printf_core.h>
 
 size_t klog_write(const void *buf, size_t size)
…
 }
 
-/** Print formatted text to klog.
- *
- * @param fmt Format string
- *
- * \see For more details about format string see printf_core.
- *
- */
-int klog_printf(const char *fmt, ...)
-{
-	va_list args;
-	va_start(args, fmt);
-
-	int ret = klog_vprintf(fmt, args);
-
-	va_end(args);
-
-	return ret;
-}
-
-static int klog_vprintf_str_write(const char *str, size_t size, void *data)
-{
-	size_t wr = klog_write(str, size);
-	return str_nlength(str, wr);
-}
-
-static int klog_vprintf_wstr_write(const wchar_t *str, size_t size, void *data)
-{
-	size_t offset = 0;
-	size_t chars = 0;
-
-	while (offset < size) {
-		char buf[STR_BOUNDS(1)];
-		size_t sz = 0;
-
-		if (chr_encode(str[chars], buf, &sz, STR_BOUNDS(1)) == EOK)
-			klog_write(buf, sz);
-
-		chars++;
-		offset += sizeof(wchar_t);
-	}
-
-	return chars;
-}
-
-/** Print formatted text to klog.
- *
- * @param fmt Format string
- * @param ap Format parameters
- *
- * \see For more details about format string see printf_core.
- *
- */
-int klog_vprintf(const char *fmt, va_list ap)
-{
-	printf_spec_t ps = {
-		klog_vprintf_str_write,
-		klog_vprintf_wstr_write,
-		NULL
-	};
-
-	return printf_core(fmt, &ps, ap);
-}
-
 /** @}
  */
uspace/lib/c/generic/io/vprintf.c
 /** Print formatted text to stdout.
  *
- * @param fmt Format string
- * @param ap Format parameters
+ * @param file Output stream
+ * @param fmt Format string
+ * @param ap Format parameters
  *
  * \see For more details about format string see printf_core.
uspace/lib/c/generic/malloc.c
 	(sizeof(heap_block_head_t) + sizeof(heap_block_foot_t))
 
-/** Overhead of each area. */
-#define AREA_OVERHEAD(size) \
-	(ALIGN_UP(size + sizeof(heap_area_t), BASE_ALIGN))
-
 /** Calculate real size of a heap block.
  *
…
 
 /** Next heap block to examine (next fit algorithm) */
-static heap_block_head_t *next_fit = NULL;
+static heap_block_head_t *next = NULL;
 
 /** Futex for thread-safe heap manipulation */
 static futex_t malloc_futex = FUTEX_INITIALIZER;
-
-#ifndef NDEBUG
-
-#define malloc_assert(expr) \
-	do { \
-		if (!(expr)) {\
-			futex_up(&malloc_futex); \
-			assert_abort(#expr, __FILE__, __LINE__); \
-		} \
-	} while (0)
-
-#else /* NDEBUG */
-
-#define malloc_assert(expr)
-
-#endif /* NDEBUG */
 
 /** Initialize a heap block
…
 	heap_block_head_t *head = (heap_block_head_t *) addr;
 
-	malloc_assert(head->magic == HEAP_BLOCK_HEAD_MAGIC);
+	assert(head->magic == HEAP_BLOCK_HEAD_MAGIC);
 
 	heap_block_foot_t *foot = BLOCK_FOOT(head);
 
-	malloc_assert(foot->magic == HEAP_BLOCK_FOOT_MAGIC);
-	malloc_assert(head->size == foot->size);
+	assert(foot->magic == HEAP_BLOCK_FOOT_MAGIC);
+	assert(head->size == foot->size);
 }
…
 	heap_area_t *area = (heap_area_t *) addr;
 
-	malloc_assert(area->magic == HEAP_AREA_MAGIC);
-	malloc_assert(addr == area->start);
-	malloc_assert(area->start < area->end);
-	malloc_assert(((uintptr_t) area->start % PAGE_SIZE) == 0);
-	malloc_assert(((uintptr_t) area->end % PAGE_SIZE) == 0);
+	assert(area->magic == HEAP_AREA_MAGIC);
+	assert(addr == area->start);
+	assert(area->start < area->end);
+	assert(((uintptr_t) area->start % PAGE_SIZE) == 0);
+	assert(((uintptr_t) area->end % PAGE_SIZE) == 0);
 }
…
 
 	/* Eventually try to create a new area */
-	return area_create(AREA_OVERHEAD(size));
+	return area_create(AREA_FIRST_BLOCK_HEAD(size));
 }
…
 
 	block_check((void *) last_head);
-	malloc_assert(last_head->area == area);
+	assert(last_head->area == area);
 
 	if (last_head->free) {
…
 
 	block_check((void *) first_head);
-	malloc_assert(first_head->area == area);
+	assert(first_head->area == area);
 
 	size_t shrink_size = ALIGN_DOWN(last_head->size, PAGE_SIZE);
…
 	/* Update heap area parameters */
 	area->end = end;
-	size_t excess = ((size_t) area->end) - ((size_t) last_head);
+
+	/* Update block layout */
+	void *last = (void *) last_head;
+	size_t excess = (size_t) (area->end - last);
 
 	if (excess > 0) {
…
 		 * create a new free block.
 		 */
-		block_init((void *) last_head, excess, true, area);
+		block_init(last, excess, true, area);
 	} else {
 		/*
…
 	}
 
-	next_fit = NULL;
+	next = NULL;
 }
…
 static void split_mark(heap_block_head_t *cur, const size_t size)
 {
-	malloc_assert(cur->size >= size);
+	assert(cur->size >= size);
 
 	/* See if we should split the block.
…
 {
 	area_check((void *) area);
-	malloc_assert((void *) first_block >= (void *) AREA_FIRST_BLOCK_HEAD(area));
-	malloc_assert((void *) first_block < area->end);
+	assert((void *) first_block >= (void *) AREA_FIRST_BLOCK_HEAD(area));
+	assert((void *) first_block < area->end);
 
 	for (heap_block_head_t *cur = first_block; (void *) cur < area->end;
…
 			split_mark(cur, real_size);
 
-			next_fit = cur;
+			next = cur;
 			return addr;
 		} else {
…
 				split_mark(next_head, real_size);
 
-				next_fit = next_head;
+				next = next_head;
 				return aligned;
 			} else {
…
 					split_mark(cur, real_size);
 
-					next_fit = cur;
+					next = cur;
 					return aligned;
 				}
…
 static void *malloc_internal(const size_t size, const size_t align)
 {
-	malloc_assert(first_heap_area != NULL);
+	assert(first_heap_area != NULL);
 
 	if (align == 0)
…
 
 	/* Try the next fit approach */
-	split = next_fit;
+	split = next;
 
 	if (split != NULL) {
…
 
 	block_check(head);
-	malloc_assert(!head->free);
+	assert(!head->free);
 
 	heap_area_t *area = head->area;
 
 	area_check(area);
-	malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
-	malloc_assert((void *) head < area->end);
+	assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
+	assert((void *) head < area->end);
 
 	void *ptr = NULL;
…
 
 			ptr = ((void *) head) + sizeof(heap_block_head_t);
-			next_fit = NULL;
+			next = NULL;
 		} else
 			reloc = true;
…
 
 	block_check(head);
-	malloc_assert(!head->free);
+	assert(!head->free);
 
 	heap_area_t *area = head->area;
 
 	area_check(area);
-	malloc_assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
-	malloc_assert((void *) head < area->end);
+	assert((void *) head >= (void *) AREA_FIRST_BLOCK_HEAD(area));
+	assert((void *) head < area->end);
 
 	/* Mark the block itself as free. */
…
 }
 
-void *heap_check(void)
-{
-	futex_down(&malloc_futex);
-
-	if (first_heap_area == NULL) {
-		futex_up(&malloc_futex);
-		return (void *) -1;
-	}
-
-	/* Walk all heap areas */
-	for (heap_area_t *area = first_heap_area; area != NULL;
-	    area = area->next) {
-
-		/* Check heap area consistency */
-		if ((area->magic != HEAP_AREA_MAGIC) ||
-		    ((void *) area != area->start) ||
-		    (area->start >= area->end) ||
-		    (((uintptr_t) area->start % PAGE_SIZE) != 0) ||
-		    (((uintptr_t) area->end % PAGE_SIZE) != 0)) {
-			futex_up(&malloc_futex);
-			return (void *) area;
-		}
-
-		/* Walk all heap blocks */
-		for (heap_block_head_t *head = (heap_block_head_t *)
-		    AREA_FIRST_BLOCK_HEAD(area); (void *) head < area->end;
-		    head = (heap_block_head_t *) (((void *) head) + head->size)) {
-
-			/* Check heap block consistency */
-			if (head->magic != HEAP_BLOCK_HEAD_MAGIC) {
-				futex_up(&malloc_futex);
-				return (void *) head;
-			}
-
-			heap_block_foot_t *foot = BLOCK_FOOT(head);
-
-			if ((foot->magic != HEAP_BLOCK_FOOT_MAGIC) ||
-			    (head->size != foot->size)) {
-				futex_up(&malloc_futex);
-				return (void *) foot;
-			}
-		}
-	}
-
-	futex_up(&malloc_futex);
-
-	return NULL;
-}
-
 /** @}
  */
uspace/lib/c/generic/thread.c
r864a081 r5c460cc 44 44 45 45 #ifndef THREAD_INITIAL_STACK_PAGES_NO 46 #define THREAD_INITIAL_STACK_PAGES_NO 246 #define THREAD_INITIAL_STACK_PAGES_NO 1 47 47 #endif 48 48 -
uspace/lib/c/include/io/klog.h
r864a081 r5c460cc 37 37 38 38 #include <sys/types.h> 39 #include <stdarg.h>40 39 41 40 extern size_t klog_write(const void *, size_t); 42 41 extern void klog_update(void); 43 extern int klog_printf(const char *, ...);44 extern int klog_vprintf(const char *, va_list);45 42 46 43 #endif -
uspace/lib/c/include/malloc.h
r864a081 r5c460cc 46 46 extern void *realloc(const void *addr, const size_t size); 47 47 extern void free(const void *addr); 48 extern void *heap_check(void);49 48 50 49 #endif