Changes in / [ad4b32c:7e266ff] in mainline

Files:
- 2 deleted
- 20 edited
kernel/arch/arm32/include/elf.h
--- ad4b32c
+++ 7e266ff

 */

 /** @addtogroup arm32
  * @{
  */
…
 #define KERN_arm32_ELF_H_

 #define ELF_MACHINE  EM_ARM

-#ifdef __BE__
+#ifdef BIG_ENDIAN
 #define ELF_DATA_ENCODING  ELFDATA2MSB
 #else
 #define ELF_DATA_ENCODING  ELFDATA2LSB
 #endif

 #define ELF_CLASS  ELFCLASS32

 #endif
kernel/arch/ia64/include/asm.h
--- ad4b32c
+++ 7e266ff

 */

 /** @addtogroup ia64
  * @{
  */
…
 #include <arch/register.h>

 #define IA64_IOSPACE_ADDRESS  0xE001000000000000ULL

 static inline void pio_write_8(ioport8_t *port, uint8_t v)
 {
     uintptr_t prt = (uintptr_t) port;

-    *((ioport8_t *) (IA64_IOSPACE_ADDRESS +
+    *((ioport8_t *)(IA64_IOSPACE_ADDRESS +
         ((prt & 0xfff) | ((prt >> 2) << 12)))) = v;

-    asm volatile (
-        "mf\n"
-        ::: "memory"
-    );
+    asm volatile ("mf\n" ::: "memory");
 }
…
 {
     uintptr_t prt = (uintptr_t) port;

-    *((ioport16_t *) (IA64_IOSPACE_ADDRESS +
+    *((ioport16_t *)(IA64_IOSPACE_ADDRESS +
         ((prt & 0xfff) | ((prt >> 2) << 12)))) = v;

-    asm volatile (
-        "mf\n"
-        ::: "memory"
-    );
+    asm volatile ("mf\n" ::: "memory");
 }
…
 {
     uintptr_t prt = (uintptr_t) port;

-    *((ioport32_t *) (IA64_IOSPACE_ADDRESS +
+    *((ioport32_t *)(IA64_IOSPACE_ADDRESS +
         ((prt & 0xfff) | ((prt >> 2) << 12)))) = v;

-    asm volatile (
-        "mf\n"
-        ::: "memory"
-    );
+    asm volatile ("mf\n" ::: "memory");
 }
…
 {
     uintptr_t prt = (uintptr_t) port;

-    asm volatile (
-        "mf\n"
-        ::: "memory"
-    );
-
-    return *((ioport8_t *) (IA64_IOSPACE_ADDRESS +
+    asm volatile ("mf\n" ::: "memory");
+
+    return *((ioport8_t *)(IA64_IOSPACE_ADDRESS +
         ((prt & 0xfff) | ((prt >> 2) << 12))));
 }
…
 {
     uintptr_t prt = (uintptr_t) port;

-    asm volatile (
-        "mf\n"
-        ::: "memory"
-    );
-
-    return *((ioport16_t *) (IA64_IOSPACE_ADDRESS +
+    asm volatile ("mf\n" ::: "memory");
+
+    return *((ioport16_t *)(IA64_IOSPACE_ADDRESS +
         ((prt & 0xfff) | ((prt >> 2) << 12))));
 }
…
 {
     uintptr_t prt = (uintptr_t) port;

-    asm volatile (
-        "mf\n"
-        ::: "memory"
-    );
-
-    return *((ioport32_t *) (IA64_IOSPACE_ADDRESS +
+    asm volatile ("mf\n" ::: "memory");
+
+    return *((ioport32_t *)(IA64_IOSPACE_ADDRESS +
         ((prt & 0xfff) | ((prt >> 2) << 12))));
 }
…
 {
     uint64_t v;

-    /* I'm not sure why but this code bad inlines in scheduler,
-       so THE shifts about 16B and causes kernel panic
-
-    asm volatile (
-        "and %[value] = %[mask], r12"
-        : [value] "=r" (v)
-        : [mask] "r" (~(STACK_SIZE - 1))
-    );
-    return v;
-
-    This code have the same meaning but inlines well.
-    */
-
-    asm volatile (
-        "mov %[value] = r12"
-        : [value] "=r" (v)
-    );
-
-    return (v & (~(STACK_SIZE - 1)));
+    //I'm not sure why but this code bad inlines in scheduler,
+    //so THE shifts about 16B and causes kernel panic
+    //asm volatile ("and %0 = %1, r12" : "=r" (v) : "r" (~(STACK_SIZE-1)));
+    //return v;
+
+    //this code have the same meaning but inlines well
+    asm volatile ("mov %0 = r12" : "=r" (v) );
+    return v & (~(STACK_SIZE-1));
 }
…
 {
     uint64_t v;

-    asm volatile (
-        "mov %[value] = psr\n"
-        : [value] "=r" (v)
-    );
+    asm volatile ("mov %0 = psr\n" : "=r" (v));

     return v;
…
 {
     uint64_t v;

-    asm volatile (
-        "mov %[value] = cr.iva\n"
-        : [value] "=r" (v)
-    );
+    asm volatile ("mov %0 = cr.iva\n" : "=r" (v));

     return v;
…
 static inline void iva_write(uint64_t v)
 {
-    asm volatile (
-        "mov cr.iva = %[value]\n"
-        :: [value] "r" (v)
-    );
+    asm volatile ("mov cr.iva = %0\n" : : "r" (v));
 }
…
 {
     uint64_t v;

-    asm volatile (
-        "mov %[value] = cr.ivr\n"
-        : [value] "=r" (v)
-    );
+    asm volatile ("mov %0 = cr.ivr\n" : "=r" (v));

     return v;
…
 {
     uint64_t v;

-    asm volatile (
-        "mov %[value] = cr64\n"
-        : [value] "=r" (v)
-    );
+    asm volatile ("mov %0 = cr64\n" : "=r" (v));

     return v;
…
 static inline void itc_write(uint64_t v)
 {
-    asm volatile (
-        "mov ar.itc = %[value]\n"
-        :: [value] "r" (v)
-    );
+    asm volatile ("mov ar.itc = %0\n" : : "r" (v));
 }
…
 {
     uint64_t v;

-    asm volatile (
-        "mov %[value] = ar.itc\n"
-        : [value] "=r" (v)
-    );
+    asm volatile ("mov %0 = ar.itc\n" : "=r" (v));

     return v;
…
 static inline void itm_write(uint64_t v)
 {
-    asm volatile (
-        "mov cr.itm = %[value]\n"
-        :: [value] "r" (v)
-    );
+    asm volatile ("mov cr.itm = %0\n" : : "r" (v));
 }
…
 {
     uint64_t v;

-    asm volatile (
-        "mov %[value] = cr.itm\n"
-        : [value] "=r" (v)
-    );
+    asm volatile ("mov %0 = cr.itm\n" : "=r" (v));

     return v;
…
 {
     uint64_t v;

-    asm volatile (
-        "mov %[value] = cr.itv\n"
-        : [value] "=r" (v)
-    );
+    asm volatile ("mov %0 = cr.itv\n" : "=r" (v));

     return v;
…
 static inline void itv_write(uint64_t v)
 {
-    asm volatile (
-        "mov cr.itv = %[value]\n"
-        :: [value] "r" (v)
-    );
+    asm volatile ("mov cr.itv = %0\n" : : "r" (v));
 }
…
 static inline void eoi_write(uint64_t v)
 {
-    asm volatile (
-        "mov cr.eoi = %[value]\n"
-        :: [value] "r" (v)
-    );
+    asm volatile ("mov cr.eoi = %0\n" : : "r" (v));
 }
…
 {
     uint64_t v;

-    asm volatile (
-        "mov %[value] = cr.tpr\n"
-        : [value] "=r" (v)
-    );
+    asm volatile ("mov %0 = cr.tpr\n" : "=r" (v));

     return v;
…
 static inline void tpr_write(uint64_t v)
 {
-    asm volatile (
-        "mov cr.tpr = %[value]\n"
-        :: [value] "r" (v)
-    );
+    asm volatile ("mov cr.tpr = %0\n" : : "r" (v));
 }
…
     asm volatile (
-        "mov %[value] = psr\n"
-        "rsm %[mask]\n"
-        : [value] "=r" (v)
-        : [mask] "i" (PSR_I_MASK)
+        "mov %0 = psr\n"
+        "rsm %1\n"
+        : "=r" (v)
+        : "i" (PSR_I_MASK)
     );
…
     asm volatile (
-        "mov %[value] = psr\n"
-        "ssm %[mask]\n"
+        "mov %0 = psr\n"
+        "ssm %1\n"
         ";;\n"
         "srlz.d\n"
-        : [value] "=r" (v)
-        : [mask] "i" (PSR_I_MASK)
+        : "=r" (v)
+        : "i" (PSR_I_MASK)
     );
…
 static inline void pk_disable(void)
 {
-    asm volatile (
-        "rsm %[mask]\n"
-        :: [mask] "i" (PSR_PK_MASK)
-    );
+    asm volatile ("rsm %0\n" : : "i" (PSR_PK_MASK));
 }
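The six pio accessors in this file differ between the two revisions only in asm layout; the port-to-address swizzle itself is identical on both sides. A host-compilable sketch of just that arithmetic (pio_addr is a hypothetical helper name, IA64_IOSPACE_ADDRESS is copied from the header):

#include <stdio.h>
#include <stdint.h>

#define IA64_IOSPACE_ADDRESS 0xE001000000000000ULL

/* Port-to-address swizzle used by the pio accessors: the low 12 bits
 * of the port number pass through, the remaining bits select a 4K
 * page within the IO space. */
static uint64_t pio_addr(uint64_t prt)
{
    return IA64_IOSPACE_ADDRESS + ((prt & 0xfff) | ((prt >> 2) << 12));
}

int main(void)
{
    printf("port 0x3f8  -> 0x%llx\n", (unsigned long long) pio_addr(0x3f8));
    printf("port 0x1000 -> 0x%llx\n", (unsigned long long) pio_addr(0x1000));
    return 0;
}

Each group of four consecutive port numbers thus lands in its own 4K page while the full port number is preserved in the low 12 bits, which is what allows per-port access control at page granularity.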
kernel/arch/ia64/include/atomic.h
--- ad4b32c
+++ 7e266ff

 */

 /** @addtogroup ia64
  * @{
  */
…
 #define KERN_ia64_ATOMIC_H_

+/** Atomic addition.
+ *
+ * @param val Atomic value.
+ * @param imm Value to add.
+ *
+ * @return Value before addition.
+ */
+static inline long atomic_add(atomic_t *val, int imm)
+{
+    long v;
+
+    asm volatile ("fetchadd8.rel %0 = %1, %2\n" : "=r" (v),
+        "+m" (val->count) : "i" (imm));
+
+    return v;
+}
+
 static inline uint64_t test_and_set(atomic_t *val)
…
     asm volatile (
-        "movl %[v] = 0x1;;\n"
-        "xchg8 %[v] = %[count], %[v];;\n"
-        : [v] "=r" (v),
-          [count] "+m" (val->count)
+        "movl %0 = 0x1;;\n"
+        "xchg8 %0 = %1, %0;;\n"
+        : "=r" (v), "+m" (val->count)
     );
…
 static inline void atomic_inc(atomic_t *val)
 {
-    long v;
-
-    asm volatile (
-        "fetchadd8.rel %[v] = %[count], 1\n"
-        : [v] "=r" (v),
-          [count] "+m" (val->count)
-    );
+    atomic_add(val, 1);
 }

 static inline void atomic_dec(atomic_t *val)
 {
-    long v;
-
-    asm volatile (
-        "fetchadd8.rel %[v] = %[count], -1\n"
-        : [v] "=r" (v),
-          [count] "+m" (val->count)
-    );
+    atomic_add(val, -1);
 }

 static inline long atomic_preinc(atomic_t *val)
 {
-    long v;
-
-    asm volatile (
-        "fetchadd8.rel %[v] = %[count], 1\n"
-        : [v] "=r" (v),
-          [count] "+m" (val->count)
-    );
-
-    return (v + 1);
+    return atomic_add(val, 1) + 1;
 }

 static inline long atomic_predec(atomic_t *val)
 {
-    long v;
-
-    asm volatile (
-        "fetchadd8.rel %[v] = %[count], -1\n"
-        : [v] "=r" (v),
-          [count] "+m" (val->count)
-    );
-
-    return (v - 1);
+    return atomic_add(val, -1) - 1;
 }

 static inline long atomic_postinc(atomic_t *val)
 {
-    long v;
-
-    asm volatile (
-        "fetchadd8.rel %[v] = %[count], 1\n"
-        : [v] "=r" (v),
-          [count] "+m" (val->count)
-    );
-
-    return v;
+    return atomic_add(val, 1);
 }

 static inline long atomic_postdec(atomic_t *val)
 {
-    long v;
-
-    asm volatile (
-        "fetchadd8.rel %[v] = %[count], -1\n"
-        : [v] "=r" (v),
-          [count] "+m" (val->count)
-    );
-
-    return v;
+    return atomic_add(val, -1);
 }
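The right-hand side of this hunk funnels every helper through one fetch-and-add primitive that returns the value before the addition. A user-space sketch of the same derivation, with GCC's __atomic_fetch_add standing in for the fetchadd8.rel instruction (this atomic_t is a local stand-in, not the kernel's type):

#include <stdio.h>

/* Stand-in for the kernel's atomic_t; assumes a 64-bit counter. */
typedef struct { volatile long count; } atomic_t;

/* Fetch-and-add returning the value *before* the addition,
 * mirroring what fetchadd8.rel yields. */
static inline long atomic_add(atomic_t *val, int imm)
{
    return __atomic_fetch_add(&val->count, imm, __ATOMIC_RELEASE);
}

/* Derived helpers follow the pattern in the diff exactly. */
static inline long atomic_preinc(atomic_t *val)  { return atomic_add(val, 1) + 1; }
static inline long atomic_postinc(atomic_t *val) { return atomic_add(val, 1); }

int main(void)
{
    atomic_t a = { 5 };
    printf("preinc  -> %ld\n", atomic_preinc(&a));   /* 6, count is now 6 */
    printf("postinc -> %ld\n", atomic_postinc(&a));  /* 6, count is now 7 */
    printf("count   -> %ld\n", (long) a.count);      /* 7 */
    return 0;
}

The pre/post variants differ only in whether the returned old value is adjusted by the immediate.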
kernel/arch/ia64/include/interrupt.h
--- ad4b32c
+++ 7e266ff

 /** ia64 has 256 INRs. */
 #define INR_COUNT  256

 /*
…
  * to genarch.
  */
 #define IVT_ITEMS  0
 #define IVT_FIRST  0

 /** External Interrupt vectors. */

 #define VECTOR_TLB_SHOOTDOWN_IPI  0xf0
-
-#define INTERRUPT_SPURIOUS  15
-#define INTERRUPT_TIMER     255
-
-#define LEGACY_INTERRUPT_BASE  0x20
-
-#define IRQ_KBD    (0x01 + LEGACY_INTERRUPT_BASE)
-#define IRQ_MOUSE  (0x0c + LEGACY_INTERRUPT_BASE)
+#define INTERRUPT_TIMER  255
+#define IRQ_KBD  (0x01 + LEGACY_INTERRUPT_BASE)
+#define IRQ_MOUSE  (0x0c + LEGACY_INTERRUPT_BASE)
+#define INTERRUPT_SPURIOUS  15
+#define LEGACY_INTERRUPT_BASE  0x20

 /** General Exception codes. */
 #define GE_ILLEGALOP     0
 #define GE_PRIVOP        1
 #define GE_PRIVREG       2
 #define GE_RESREGFLD     3
 #define GE_DISBLDISTRAN  4
 #define GE_ILLEGALDEP    8

 #define EOI  0  /**< The actual value doesn't matter. */

 typedef struct {
…
     uint128_t f30;
     uint128_t f31;

     uintptr_t ar_bsp;
     uintptr_t ar_bspstore;
…
 {
     istate->cr_iip = retaddr;
     istate->cr_ipsr.ri = 0;  /* return to instruction slot #0 */
 }
kernel/arch/ia64/include/mm/as.h
--- ad4b32c
+++ 7e266ff

 */

 /** @addtogroup ia64mm
  * @{
  */
…
 #define KERN_ia64_AS_H_

 #define KERNEL_ADDRESS_SPACE_SHADOWED_ARCH  0

-#define KERNEL_ADDRESS_SPACE_START_ARCH  ((unsigned long) 0xe000000000000000ULL)
-#define KERNEL_ADDRESS_SPACE_END_ARCH    ((unsigned long) 0xffffffffffffffffULL)
-#define USER_ADDRESS_SPACE_START_ARCH    ((unsigned long) 0x0000000000000000ULL)
-#define USER_ADDRESS_SPACE_END_ARCH      ((unsigned long) 0xdfffffffffffffffULL)
+#define KERNEL_ADDRESS_SPACE_START_ARCH  (unsigned long) 0xe000000000000000ULL
+#define KERNEL_ADDRESS_SPACE_END_ARCH    (unsigned long) 0xffffffffffffffffULL
+#define USER_ADDRESS_SPACE_START_ARCH    (unsigned long) 0x0000000000000000ULL
+#define USER_ADDRESS_SPACE_END_ARCH      (unsigned long) 0xdfffffffffffffffULL

 #define USTACK_ADDRESS_ARCH  0x0000000ff0000000ULL

 typedef struct {
…
 #include <genarch/mm/as_ht.h>

 #define as_constructor_arch(as, flags)  (as != as)
 #define as_destructor_arch(as)          (as != as)
 #define as_create_arch(as, flags)       (as != as)
 #define as_deinstall_arch(as)
 #define as_invalidate_translation_cache(as, page, cnt)
kernel/arch/ia64/include/mm/page.h
--- ad4b32c
+++ 7e266ff

 */

 /** @addtogroup ia64mm
  * @{
  */
…
 #include <arch/mm/frame.h>

 #define PAGE_SIZE   FRAME_SIZE
 #define PAGE_WIDTH  FRAME_WIDTH

 #ifdef KERNEL

 /** Bit width of the TLB-locked portion of kernel address space. */
 #define KERNEL_PAGE_WIDTH  28  /* 256M */
 #define IO_PAGE_WIDTH      26  /* 64M */
 #define FW_PAGE_WIDTH      28  /* 256M */

 #define USPACE_IO_PAGE_WIDTH  12  /* 4K */

…

 /* Firmware area (bellow 4GB in phys mem) */
 #define FW_OFFSET   0x00000000F0000000
 /* Legacy IO space */
 #define IO_OFFSET   0x0001000000000000
 /* Videoram - now mapped to 0 as VGA text mode vram on 0xb8000 */
 #define VIO_OFFSET  0x0002000000000000


 #define PPN_SHIFT  12

 #define VRN_SHIFT  61
-#define VRN_MASK   (7ULL << VRN_SHIFT)
-#define VA2VRN(va) ((va) >> VRN_SHIFT)
+#define VRN_MASK   (7LL << VRN_SHIFT)
+#define VA2VRN(va) ((va)>>VRN_SHIFT)

 #ifdef __ASM__
 #define VRN_KERNEL  7
 #else
-#define VRN_KERNEL  7ULL
+#define VRN_KERNEL  7LL
 #endif

 #define REGION_REGISTERS  8

-#define KA2PA(x)  ((uintptr_t) ((x) - (VRN_KERNEL << VRN_SHIFT)))
-#define PA2KA(x)  ((uintptr_t) ((x) + (VRN_KERNEL << VRN_SHIFT)))
+#define KA2PA(x)  ((uintptr_t) (x - (VRN_KERNEL << VRN_SHIFT)))
+#define PA2KA(x)  ((uintptr_t) (x + (VRN_KERNEL << VRN_SHIFT)))

 #define VHPT_WIDTH  20  /* 1M */
 #define VHPT_SIZE   (1 << VHPT_WIDTH)

 #define PTA_BASE_SHIFT  15

 /** Memory Attributes. */
-#define MA_WRITEBACK    0x00
-#define MA_UNCACHEABLE  0x04
+#define MA_WRITEBACK    0x0
+#define MA_UNCACHEABLE  0x4

 /** Privilege Levels. Only the most and the least privileged ones are ever used. */
-#define PL_KERNEL  0x00
-#define PL_USER    0x03
+#define PL_KERNEL  0x0
+#define PL_USER    0x3

 /* Access Rigths. Only certain combinations are used by the kernel. */
-#define AR_READ     0x00
-#define AR_EXECUTE  0x01
-#define AR_WRITE    0x02
+#define AR_READ     0x0
+#define AR_EXECUTE  0x1
+#define AR_WRITE    0x2

 #ifndef __ASM__
…
 struct vhpt_tag_info {
     unsigned long long tag : 63;
-    unsigned int ti : 1;
+    unsigned ti : 1;
 } __attribute__ ((packed));
…
 struct vhpt_entry_present {
     /* Word 0 */
-    unsigned int p : 1;
-    unsigned int : 1;
-    unsigned int ma : 3;
-    unsigned int a : 1;
-    unsigned int d : 1;
-    unsigned int pl : 2;
-    unsigned int ar : 3;
+    unsigned p : 1;
+    unsigned : 1;
+    unsigned ma : 3;
+    unsigned a : 1;
+    unsigned d : 1;
+    unsigned pl : 2;
+    unsigned ar : 3;
     unsigned long long ppn : 38;
-    unsigned int : 2;
-    unsigned int ed : 1;
-    unsigned int ig1 : 11;
+    unsigned : 2;
+    unsigned ed : 1;
+    unsigned ig1 : 11;

     /* Word 1 */
-    unsigned int : 2;
-    unsigned int ps : 6;
-    unsigned int key : 24;
-    unsigned int : 32;
+    unsigned : 2;
+    unsigned ps : 6;
+    unsigned key : 24;
+    unsigned : 32;

     /* Word 2 */
     union vhpt_tag tag;

     /* Word 3 */
     uint64_t ig3 : 64;
 } __attribute__ ((packed));

 struct vhpt_entry_not_present {
     /* Word 0 */
-    unsigned int p : 1;
+    unsigned p : 1;
     unsigned long long ig0 : 52;
-    unsigned int ig1 : 11;
+    unsigned ig1 : 11;

     /* Word 1 */
-    unsigned int : 2;
-    unsigned int ps : 6;
+    unsigned : 2;
+    unsigned ps : 6;
     unsigned long long ig2 : 56;

     /* Word 2 */
     union vhpt_tag tag;

     /* Word 3 */
     uint64_t ig3 : 64;
 } __attribute__ ((packed));

-typedef union {
+typedef union vhpt_entry {
     struct vhpt_entry_present present;
     struct vhpt_entry_not_present not_present;
 } vhpt_entry_t;

 struct region_register_map {
-    unsigned int ve : 1;
-    unsigned int : 1;
-    unsigned int ps : 6;
-    unsigned int rid : 24;
-    unsigned int : 32;
+    unsigned ve : 1;
+    unsigned : 1;
+    unsigned ps : 6;
+    unsigned rid : 24;
+    unsigned : 32;
 } __attribute__ ((packed));

-typedef union {
+typedef union region_register {
     struct region_register_map map;
     unsigned long long word;
-} region_register_t;
+} region_register;

 struct pta_register_map {
-    unsigned int ve : 1;
-    unsigned int : 1;
-    unsigned int size : 6;
-    unsigned int vf : 1;
-    unsigned int : 6;
+    unsigned ve : 1;
+    unsigned : 1;
+    unsigned size : 6;
+    unsigned vf : 1;
+    unsigned : 6;
     unsigned long long base : 49;
 } __attribute__ ((packed));
…
     struct pta_register_map map;
     uint64_t word;
-} pta_register_t;
+} pta_register;

 /** Return Translation Hashed Entry Address.
…
 {
     uint64_t ret;

-    asm volatile (
-        "thash %[ret] = %[va]\n"
-        : [ret] "=r" (ret)
-        : [va] "r" (va)
-    );
+    asm volatile ("thash %0 = %1\n" : "=r" (ret) : "r" (va));

     return ret;
 }
…
 {
     uint64_t ret;

-    asm volatile (
-        "ttag %[ret] = %[va]\n"
-        : [ret] "=r" (ret)
-        : [va] "r" (va)
-    );
+    asm volatile ("ttag %0 = %1\n" : "=r" (ret) : "r" (va));

     return ret;
 }
…
 {
     uint64_t ret;
-
     ASSERT(i < REGION_REGISTERS);
-
-    asm volatile (
-        "mov %[ret] = rr[%[index]]\n"
-        : [ret] "=r" (ret)
-        : [index] "r" (i << VRN_SHIFT)
-    );
-
+    asm volatile ("mov %0 = rr[%1]\n" : "=r" (ret) : "r" (i << VRN_SHIFT));
     return ret;
 }
…
 {
     ASSERT(i < REGION_REGISTERS);
-
     asm volatile (
-        "mov rr[%[index]] = %[value]\n"
-        :: [index] "r" (i << VRN_SHIFT),
-           [value] "r" (v)
+        "mov rr[%0] = %1\n"
+        :
+        : "r" (i << VRN_SHIFT), "r" (v)
     );
 }

 /** Read Page Table Register.
…
     uint64_t ret;

-    asm volatile (
-        "mov %[ret] = cr.pta\n"
-        : [ret] "=r" (ret)
-    );
+    asm volatile ("mov %0 = cr.pta\n" : "=r" (ret));

     return ret;
…
 static inline void pta_write(uint64_t v)
 {
-    asm volatile (
-        "mov cr.pta = %[value]\n"
-        :: [value] "r" (v)
-    );
+    asm volatile ("mov cr.pta = %0\n" : : "r" (v));
 }
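For reference, the region macros above encode ia64's address-space split: bits 63:61 of a virtual address select one of the eight region registers, and KA2PA/PA2KA merely strip or add the kernel region bits. A small host-side sketch using the ad4b32c spellings of the constants (uintptr_t from the header is replaced by uint64_t so the sketch also behaves on 32-bit hosts):

#include <stdio.h>
#include <stdint.h>

#define VRN_SHIFT   61
#define VRN_KERNEL  7ULL
#define VA2VRN(va)  ((va) >> VRN_SHIFT)
#define KA2PA(x)    ((uint64_t) ((x) - (VRN_KERNEL << VRN_SHIFT)))
#define PA2KA(x)    ((uint64_t) ((x) + (VRN_KERNEL << VRN_SHIFT)))

int main(void)
{
    uint64_t ka = 0xe000000000123456ULL;  /* an address in kernel region 7 */

    printf("vrn = %llu\n", (unsigned long long) VA2VRN(ka));          /* 7 */
    printf("pa  = 0x%llx\n", (unsigned long long) KA2PA(ka));         /* 0x123456 */
    printf("ka  = 0x%llx\n", (unsigned long long) PA2KA(KA2PA(ka)));  /* round trip */
    return 0;
}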
kernel/arch/ia64/include/mm/tlb.h
--- ad4b32c
+++ 7e266ff

 */

 /** @addtogroup ia64mm
  * @{
  */
…
 /** Data and instruction Translation Register indices. */
 #define DTR_KERNEL   0
 #define ITR_KERNEL   0
 #define DTR_KSTACK1  4
 #define DTR_KSTACK2  5

 /** Portion of TLB insertion format data structure. */
-typedef union {
+union tlb_entry {
     uint64_t word[2];
     struct {
         /* Word 0 */
-        unsigned int p : 1;       /**< Present. */
-        unsigned int : 1;
-        unsigned int ma : 3;      /**< Memory attribute. */
-        unsigned int a : 1;       /**< Accessed. */
-        unsigned int d : 1;       /**< Dirty. */
-        unsigned int pl : 2;      /**< Privilege level. */
-        unsigned int ar : 3;      /**< Access rights. */
-        unsigned long long ppn : 38;
-        unsigned int : 2;
-        unsigned int ed : 1;
-        unsigned int ig1 : 11;
+        unsigned p : 1;           /**< Present. */
+        unsigned : 1;
+        unsigned ma : 3;          /**< Memory attribute. */
+        unsigned a : 1;           /**< Accessed. */
+        unsigned d : 1;           /**< Dirty. */
+        unsigned pl : 2;          /**< Privilege level. */
+        unsigned ar : 3;          /**< Access rights. */
+        unsigned long long ppn : 38;  /**< Physical Page Number, a.k.a. PFN. */
+        unsigned : 2;
+        unsigned ed : 1;
+        unsigned ig1 : 11;

         /* Word 1 */
-        unsigned int : 2;
-        unsigned int ps : 6;      /**< Page size will be 2^ps. */
-        unsigned int key : 24;    /**< Protection key, unused. */
-        unsigned int : 32;
+        unsigned : 2;
+        unsigned ps : 6;          /**< Page size will be 2^ps. */
+        unsigned key : 24;        /**< Protection key, unused. */
+        unsigned : 32;
     } __attribute__ ((packed));
-} __attribute__ ((packed)) tlb_entry_t;
+} __attribute__ ((packed));
+typedef union tlb_entry tlb_entry_t;

 extern void tc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtc);
kernel/arch/ia64/include/register.h
--- ad4b32c
+++ 7e266ff

 */

 /** @addtogroup ia64
  * @{
  */
…
 #define KERN_ia64_REGISTER_H_

-#define DCR_PP_MASK  (1 << 0)
-#define DCR_BE_MASK  (1 << 1)
-#define DCR_LC_MASK  (1 << 2)
-#define DCR_DM_MASK  (1 << 8)
-#define DCR_DP_MASK  (1 << 9)
-#define DCR_DK_MASK  (1 << 10)
-#define DCR_DX_MASK  (1 << 11)
-#define DCR_DR_MASK  (1 << 12)
-#define DCR_DA_MASK  (1 << 13)
-#define DCR_DD_MASK  (1 << 14)
-
-#define CR_IVR_MASK  0x0f
-
-#define PSR_IC_MASK   (1 << 13)
-#define PSR_I_MASK    (1 << 14)
-#define PSR_PK_MASK   (1 << 15)
-#define PSR_DT_MASK   (1 << 17)
-#define PSR_DFL_MASK  (1 << 18)
-#define PSR_DFH_MASK  (1 << 19)
-#define PSR_RT_MASK   (1 << 27)
-#define PSR_IT_MASK   (1 << 36)
+#define CR_IVR_MASK  0xf
+#define PSR_IC_MASK  0x2000
+#define PSR_I_MASK   0x4000
+#define PSR_PK_MASK  0x8000
+
+#define PSR_DT_MASK  (1 << 17)
+#define PSR_RT_MASK  (1 << 27)
+
+#define PSR_DFL_MASK  (1 << 18)
+#define PSR_DFH_MASK  (1 << 19)
+
+#define PSR_IT_MASK  0x0000001000000000

 #define PSR_CPL_SHIFT         32
 #define PSR_CPL_MASK_SHIFTED  3

 #define PFM_MASK  (~0x3fffffffff)

 #define RSC_MODE_MASK  3
 #define RSC_PL_MASK    12

 /** Application registers. */
 #define AR_KR0  0
 #define AR_KR1  1
 #define AR_KR2  2
 #define AR_KR3  3
 #define AR_KR4  4
 #define AR_KR5  5
 #define AR_KR6  6
 #define AR_KR7  7
-/* ARs 8-15 are reserved */
+/* AR 8-15 reserved */
 #define AR_RSC       16
 #define AR_BSP       17
 #define AR_BSPSTORE  18
 #define AR_RNAT      19
-/* AR 20 is reserved */
+/* AR 20 reserved */
 #define AR_FCR  21
-/* ARs 22-23 are reserved */
+/* AR 22-23 reserved */
 #define AR_EFLAG  24
 #define AR_CSD    25
 #define AR_SSD    26
 #define AR_CFLG   27
 #define AR_FSR    28
 #define AR_FIR    29
 #define AR_FDR    30
-/* AR 31 is reserved */
+/* AR 31 reserved */
 #define AR_CCV  32
-/* ARs 33-35 are reserved */
+/* AR 33-35 reserved */
 #define AR_UNAT  36
-/* ARs 37-39 are reserved */
+/* AR 37-39 reserved */
 #define AR_FPSR  40
-/* ARs 41-43 are reserved */
+/* AR 41-43 reserved */
 #define AR_ITC  44
-/* ARs 45-47 are reserved */
-/* ARs 48-63 are ignored */
+/* AR 45-47 reserved */
+/* AR 48-63 ignored */
 #define AR_PFS  64
 #define AR_LC   65
 #define AR_EC   66
-/* ARs 67-111 are reserved */
-/* ARs 112-127 are ignored */
+/* AR 67-111 reserved */
+/* AR 112-127 ignored */

 /** Control registers. */
 #define CR_DCR  0
 #define CR_ITM  1
 #define CR_IVA  2
-/* CR3-CR7 are reserved */
+/* CR3-CR7 reserved */
 #define CR_PTA  8
-/* CR9-CR15 are reserved */
+/* CR9-CR15 reserved */
 #define CR_IPSR  16
 #define CR_ISR   17
-/* CR18 is reserved */
+/* CR18 reserved */
 #define CR_IIP   19
 #define CR_IFA   20
 #define CR_ITIR  21
 #define CR_IIPA  22
 #define CR_IFS   23
 #define CR_IIM   24
 #define CR_IHA   25
-/* CR26-CR63 are reserved */
+/* CR26-CR63 reserved */
 #define CR_LID   64
 #define CR_IVR   65
 #define CR_TPR   66
 #define CR_EOI   67
 #define CR_IRR0  68
 #define CR_IRR1  69
 #define CR_IRR2  70
 #define CR_IRR3  71
 #define CR_ITV   72
 #define CR_PMV   73
 #define CR_CMCV  74
-/* CR75-CR79 are reserved */
+/* CR75-CR79 reserved */
 #define CR_LRR0  80
 #define CR_LRR1  81
-/* CR82-CR127 are reserved */
+/* CR82-CR127 reserved */

 #ifndef __ASM__
…
 /** Processor Status Register. */
-typedef union {
-    uint64_t value;
-    struct {
-        unsigned int : 1;
-        unsigned int be : 1;   /**< Big-Endian data accesses. */
-        unsigned int up : 1;   /**< User Performance monitor enable. */
-        unsigned int ac : 1;   /**< Alignment Check. */
-        unsigned int mfl : 1;  /**< Lower floating-point register written. */
-        unsigned int mfh : 1;  /**< Upper floating-point register written. */
-        unsigned int : 7;
-        unsigned int ic : 1;   /**< Interruption Collection. */
-        unsigned int i : 1;    /**< Interrupt Bit. */
-        unsigned int pk : 1;   /**< Protection Key enable. */
-        unsigned int : 1;
-        unsigned int dt : 1;   /**< Data address Translation. */
-        unsigned int dfl : 1;  /**< Disabled Floating-point Low register set. */
-        unsigned int dfh : 1;  /**< Disabled Floating-point High register set. */
-        unsigned int sp : 1;   /**< Secure Performance monitors. */
-        unsigned int pp : 1;   /**< Privileged Performance monitor enable. */
-        unsigned int di : 1;   /**< Disable Instruction set transition. */
-        unsigned int si : 1;   /**< Secure Interval timer. */
-        unsigned int db : 1;   /**< Debug Breakpoint fault. */
-        unsigned int lp : 1;   /**< Lower Privilege transfer trap. */
-        unsigned int tb : 1;   /**< Taken Branch trap. */
-        unsigned int rt : 1;   /**< Register Stack Translation. */
-        unsigned int : 4;
-        unsigned int cpl : 2;  /**< Current Privilege Level. */
-        unsigned int is : 1;   /**< Instruction Set. */
-        unsigned int mc : 1;   /**< Machine Check abort mask. */
-        unsigned int it : 1;   /**< Instruction address Translation. */
-        unsigned int id : 1;   /**< Instruction Debug fault disable. */
-        unsigned int da : 1;   /**< Disable Data Access and Dirty-bit faults. */
-        unsigned int dd : 1;   /**< Data Debug fault disable. */
-        unsigned int ss : 1;   /**< Single Step enable. */
-        unsigned int ri : 2;   /**< Restart Instruction. */
-        unsigned int ed : 1;   /**< Exception Deferral. */
-        unsigned int bn : 1;   /**< Register Bank. */
-        unsigned int ia : 1;   /**< Disable Instruction Access-bit faults. */
-    } __attribute__ ((packed));
-} psr_t;
+union psr {
+    uint64_t value;
+    struct {
+        unsigned : 1;
+        unsigned be : 1;   /**< Big-Endian data accesses. */
+        unsigned up : 1;   /**< User Performance monitor enable. */
+        unsigned ac : 1;   /**< Alignment Check. */
+        unsigned mfl : 1;  /**< Lower floating-point register written. */
+        unsigned mfh : 1;  /**< Upper floating-point register written. */
+        unsigned : 7;
+        unsigned ic : 1;   /**< Interruption Collection. */
+        unsigned i : 1;    /**< Interrupt Bit. */
+        unsigned pk : 1;   /**< Protection Key enable. */
+        unsigned : 1;
+        unsigned dt : 1;   /**< Data address Translation. */
+        unsigned dfl : 1;  /**< Disabled Floating-point Low register set. */
+        unsigned dfh : 1;  /**< Disabled Floating-point High register set. */
+        unsigned sp : 1;   /**< Secure Performance monitors. */
+        unsigned pp : 1;   /**< Privileged Performance monitor enable. */
+        unsigned di : 1;   /**< Disable Instruction set transition. */
+        unsigned si : 1;   /**< Secure Interval timer. */
+        unsigned db : 1;   /**< Debug Breakpoint fault. */
+        unsigned lp : 1;   /**< Lower Privilege transfer trap. */
+        unsigned tb : 1;   /**< Taken Branch trap. */
+        unsigned rt : 1;   /**< Register Stack Translation. */
+        unsigned : 4;
+        unsigned cpl : 2;  /**< Current Privilege Level. */
+        unsigned is : 1;   /**< Instruction Set. */
+        unsigned mc : 1;   /**< Machine Check abort mask. */
+        unsigned it : 1;   /**< Instruction address Translation. */
+        unsigned id : 1;   /**< Instruction Debug fault disable. */
+        unsigned da : 1;   /**< Disable Data Access and Dirty-bit faults. */
+        unsigned dd : 1;   /**< Data Debug fault disable. */
+        unsigned ss : 1;   /**< Single Step enable. */
+        unsigned ri : 2;   /**< Restart Instruction. */
+        unsigned ed : 1;   /**< Exception Deferral. */
+        unsigned bn : 1;   /**< Register Bank. */
+        unsigned ia : 1;   /**< Disable Instruction Access-bit faults. */
+    } __attribute__ ((packed));
+};
+typedef union psr psr_t;

 /** Register Stack Configuration Register */
-typedef union {
-    uint64_t value;
-    struct {
-        unsigned int mode : 2;
-        unsigned int pl : 2;  /**< Privilege Level. */
-        unsigned int be : 1;  /**< Big-endian. */
-        unsigned int : 11;
-        unsigned int loadrs : 14;
-    } __attribute__ ((packed));
-} rsc_t;
+union rsc {
+    uint64_t value;
+    struct {
+        unsigned mode : 2;
+        unsigned pl : 2;  /**< Privilege Level. */
+        unsigned be : 1;  /**< Big-endian. */
+        unsigned : 11;
+        unsigned loadrs : 14;
+    } __attribute__ ((packed));
+};
+typedef union rsc rsc_t;

 /** External Interrupt Vector Register */
-typedef union {
-    uint8_t vector;
-    uint64_t value;
-} cr_ivr_t;
+union cr_ivr {
+    uint8_t vector;
+    uint64_t value;
+};
+
+typedef union cr_ivr cr_ivr_t;

 /** Task Priority Register */
-typedef union {
-    uint64_t value;
-    struct {
-        unsigned int : 4;
-        unsigned int mic: 4;  /**< Mask Interrupt Class. */
-        unsigned int : 8;
-        unsigned int mmi: 1;  /**< Mask Maskable Interrupts. */
-    } __attribute__ ((packed));
-} cr_tpr_t;
+union cr_tpr {
+    struct {
+        unsigned : 4;
+        unsigned mic: 4;  /**< Mask Interrupt Class. */
+        unsigned : 8;
+        unsigned mmi: 1;  /**< Mask Maskable Interrupts. */
+    } __attribute__ ((packed));
+    uint64_t value;
+};
+
+typedef union cr_tpr cr_tpr_t;

 /** Interval Timer Vector */
-typedef union {
-    uint64_t value;
-    struct {
-        unsigned int vector : 8;
-        unsigned int : 4;
-        unsigned int : 1;
-        unsigned int : 3;
-        unsigned int m : 1;  /**< Mask. */
-    } __attribute__ ((packed));
-} cr_itv_t;
+union cr_itv {
+    struct {
+        unsigned vector : 8;
+        unsigned : 4;
+        unsigned : 1;
+        unsigned : 3;
+        unsigned m : 1;  /**< Mask. */
+    } __attribute__ ((packed));
+    uint64_t value;
+};
+
+typedef union cr_itv cr_itv_t;

 /** Interruption Status Register */
-typedef union {
-    uint64_t value;
+union cr_isr {
     struct {
         union {
             /** General Exception code field structuring. */
+            struct {
+                unsigned ge_na : 4;
+                unsigned ge_code : 4;
+            } __attribute__ ((packed));
             uint16_t code;
-            struct {
-                unsigned int ge_na : 4;
-                unsigned int ge_code : 4;
-            } __attribute__ ((packed));
         };
         uint8_t vector;
-        unsigned int : 8;
-        unsigned int x : 1;   /**< Execute exception. */
-        unsigned int w : 1;   /**< Write exception. */
-        unsigned int r : 1;   /**< Read exception. */
-        unsigned int na : 1;  /**< Non-access exception. */
-        unsigned int sp : 1;  /**< Speculative load exception. */
-        unsigned int rs : 1;  /**< Register stack. */
-        unsigned int ir : 1;  /**< Incomplete Register frame. */
-        unsigned int ni : 1;  /**< Nested Interruption. */
-        unsigned int so : 1;  /**< IA-32 Supervisor Override. */
-        unsigned int ei : 2;  /**< Excepting Instruction. */
-        unsigned int ed : 1;  /**< Exception Deferral. */
-        unsigned int : 20;
+        unsigned : 8;
+        unsigned x : 1;   /**< Execute exception. */
+        unsigned w : 1;   /**< Write exception. */
+        unsigned r : 1;   /**< Read exception. */
+        unsigned na : 1;  /**< Non-access exception. */
+        unsigned sp : 1;  /**< Speculative load exception. */
+        unsigned rs : 1;  /**< Register stack. */
+        unsigned ir : 1;  /**< Incomplete Register frame. */
+        unsigned ni : 1;  /**< Nested Interruption. */
+        unsigned so : 1;  /**< IA-32 Supervisor Override. */
+        unsigned ei : 2;  /**< Excepting Instruction. */
+        unsigned ed : 1;  /**< Exception Deferral. */
+        unsigned : 20;
     } __attribute__ ((packed));
-} cr_isr_t;
+    uint64_t value;
+};
+
+typedef union cr_isr cr_isr_t;

 /** CPUID Register 3 */
-typedef union {
-    uint64_t value;
+union cpuid3 {
     struct {
         uint8_t number;
…
         uint8_t archrev;
     } __attribute__ ((packed));
-} cpuid3_t;
+    uint64_t value;
+};
+
+typedef union cpuid3 cpuid3_t;

 #endif /* !__ASM__ */
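Both revisions describe the PSR twice, as a bit-field union and as shift/mask constants. A minimal sketch of decoding a raw PSR value with the mask constants above (the sample value is made up):

#include <stdio.h>
#include <stdint.h>

#define PSR_I_MASK            0x4000  /* (1 << 14) in the ad4b32c spelling */
#define PSR_CPL_SHIFT         32
#define PSR_CPL_MASK_SHIFTED  3

int main(void)
{
    uint64_t psr = (3ULL << PSR_CPL_SHIFT) | PSR_I_MASK;  /* made-up sample */
    unsigned cpl = (psr >> PSR_CPL_SHIFT) & PSR_CPL_MASK_SHIFTED;

    printf("cpl = %u\n", cpl);  /* 3 = least privileged (user) level */
    printf("interrupts %s\n", (psr & PSR_I_MASK) ? "enabled" : "disabled");
    return 0;
}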
kernel/arch/ia64/src/mm/as.c
--- ad4b32c
+++ 7e266ff

 void as_install_arch(as_t *as)
 {
-    region_register_t rr;
+    region_register rr;
     int i;
kernel/arch/ia64/src/mm/page.c
--- ad4b32c
+++ 7e266ff

 void set_environment(void)
 {
-    region_register_t rr;
-    pta_register_t pta;
+    region_register rr;
+    pta_register pta;
     int i;
 #ifdef CONFIG_VHPT
…
 vhpt_entry_t *vhpt_hash(uintptr_t page, asid_t asid)
 {
-    region_register_t rr_save, rr;
+    region_register rr_save, rr;
     size_t vrn;
     rid_t rid;
…
 bool vhpt_compare(uintptr_t page, asid_t asid, vhpt_entry_t *v)
 {
-    region_register_t rr_save, rr;
+    region_register rr_save, rr;
     size_t vrn;
     rid_t rid;
…
     int flags)
 {
-    region_register_t rr_save, rr;
+    region_register rr_save, rr;
     size_t vrn;
     rid_t rid;
…
     v->present.ma = (flags & PAGE_CACHEABLE) ?
         MA_WRITEBACK : MA_UNCACHEABLE;
-    v->present.a = false;
-    v->present.d = false;
+    v->present.a = false;  /* not accessed */
+    v->present.d = false;  /* not dirty */
     v->present.pl = (flags & PAGE_USER) ? PL_USER : PL_KERNEL;
     v->present.ar = (flags & PAGE_WRITE) ? AR_WRITE : AR_READ;
     v->present.ar |= (flags & PAGE_EXEC) ? AR_EXECUTE : 0;
     v->present.ppn = frame >> PPN_SHIFT;
-    v->present.ed = false;
+    v->present.ed = false;  /* exception not deffered */
     v->present.ps = PAGE_WIDTH;
     v->present.key = 0;
kernel/arch/ia64/src/mm/tlb.c
--- ad4b32c
+++ 7e266ff

 */

 /** @addtogroup ia64mm
  * @{
  */
…
 #include <interrupt.h>

-#define IO_FRAME_BASE  0xFFFFC000000
-
 /** Invalidate all TLB entries. */
 void tlb_invalidate_all(void)
 {
     ipl_t ipl;
     uintptr_t adr;
     uint32_t count1, count2, stride1, stride2;

     unsigned int i, j;

     adr = PAL_PTCE_INFO_BASE();
     count1 = PAL_PTCE_INFO_COUNT1();
     count2 = PAL_PTCE_INFO_COUNT2();
     stride1 = PAL_PTCE_INFO_STRIDE1();
     stride2 = PAL_PTCE_INFO_STRIDE2();

     ipl = interrupts_disable();

     for (i = 0; i < count1; i++) {
         for (j = 0; j < count2; j++) {
             asm volatile (
-                "ptc.e %[adr] ;;"
-                :: [adr] "r" (adr)
+                "ptc.e %0 ;;"
+                :
+                : "r" (adr)
             );
             adr += stride2;
…
     interrupts_restore(ipl);

     srlz_d();
     srlz_i();
-
 #ifdef CONFIG_VHPT
     vhpt_invalidate_all();
 #endif
 }

 /** Invalidate entries belonging to an address space.
  *
  * @param asid Address space identifier.
- *
  */
 void tlb_invalidate_asid(asid_t asid)
…
 void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
 {
-    region_register_t rr;
+    region_register rr;
     bool restore_rr = false;
     int b = 0;
     int c = cnt;

     uintptr_t va;
     va = page;

     rr.word = rr_read(VA2VRN(va));
     if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
…
          * Save the old content of the register and replace the RID.
          */
-        region_register_t rr0;
+        region_register rr0;

         rr0 = rr;
         rr0.map.rid = ASID2RID(asid, VA2VRN(va));
…
     }

-    while (c >>= 1)
+    while(c >>= 1)
         b++;
     b >>= 1;
…
         break;
     }
-
-    for (; va < (page + cnt * PAGE_SIZE); va += (1 << ps))
-        asm volatile (
-            "ptc.l %[va], %[ps] ;;"
-            :: [va] "r" (va),
-               [ps] "r" (ps << 2)
-        );
-
+    for(; va < (page + cnt * PAGE_SIZE); va += (1 << ps))
+        asm volatile ("ptc.l %0, %1;;" :: "r" (va), "r" (ps << 2));
     srlz_d();
     srlz_i();
…
 /** Insert data into data translation cache.
  *
  * @param va Virtual page address.
  * @param asid Address space identifier.
  * @param entry The rest of TLB entry as required by TLB insertion
  *     format.
- *
  */
 void dtc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
…
 /** Insert data into instruction translation cache.
  *
  * @param va Virtual page address.
  * @param asid Address space identifier.
  * @param entry The rest of TLB entry as required by TLB insertion
  *     format.
- *
  */
 void itc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
…
 /** Insert data into instruction or data translation cache.
  *
  * @param va Virtual page address.
  * @param asid Address space identifier.
  * @param entry The rest of TLB entry as required by TLB insertion
  *     format.
  * @param dtc If true, insert into data translation cache, use
  *     instruction translation cache otherwise.
- *
  */
 void tc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtc)
 {
-    region_register_t rr;
+    region_register rr;
     bool restore_rr = false;

     rr.word = rr_read(VA2VRN(va));
     if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
…
          * Save the old content of the register and replace the RID.
          */
-        region_register_t rr0;
+        region_register rr0;

         rr0 = rr;
         rr0.map.rid = ASID2RID(asid, VA2VRN(va));
…
     asm volatile (
-        "mov r8 = psr ;;\n"
-        "rsm %[mask] ;;\n"                 /* PSR_IC_MASK */
-        "srlz.d ;;\n"
-        "srlz.i ;;\n"
-        "mov cr.ifa = %[va]\n"             /* va */
-        "mov cr.itir = %[word1] ;;\n"      /* entry.word[1] */
-        "cmp.eq p6, p7 = %[dtc], r0 ;;\n"  /* decide between itc and dtc */
-        "(p6) itc.i %[word0] ;;\n"
-        "(p7) itc.d %[word0] ;;\n"
-        "mov psr.l = r8 ;;\n"
-        "srlz.d ;;\n"
-        :: [mask] "i" (PSR_IC_MASK),
-           [va] "r" (va),
-           [word0] "r" (entry.word[0]),
-           [word1] "r" (entry.word[1]),
-           [dtc] "r" (dtc)
+        "mov r8 = psr;;\n"
+        "rsm %0;;\n"                  /* PSR_IC_MASK */
+        "srlz.d;;\n"
+        "srlz.i;;\n"
+        "mov cr.ifa = %1\n"           /* va */
+        "mov cr.itir = %2;;\n"        /* entry.word[1] */
+        "cmp.eq p6,p7 = %4,r0;;\n"    /* decide between itc and dtc */
+        "(p6) itc.i %3;;\n"
+        "(p7) itc.d %3;;\n"
+        "mov psr.l = r8;;\n"
+        "srlz.d;;\n"
+        :
+        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]),
+          "r" (entry.word[0]), "r" (dtc)
         : "p6", "p7", "r8"
     );
…
 /** Insert data into instruction translation register.
  *
  * @param va Virtual page address.
  * @param asid Address space identifier.
  * @param entry The rest of TLB entry as required by TLB insertion
  *     format.
- *
  * @param tr Translation register.
- *
  */
-void itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr)
+void
+itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr)
 {
     tr_mapping_insert(va, asid, entry, false, tr);
…
 /** Insert data into data translation register.
  *
  * @param va Virtual page address.
  * @param asid Address space identifier.
  * @param entry The rest of TLB entry as required by TLB insertion
  *     format.
- *
  * @param tr Translation register.
- *
  */
-void dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr)
+void
+dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr)
 {
     tr_mapping_insert(va, asid, entry, true, tr);
…
 /** Insert data into instruction or data translation register.
  *
  * @param va Virtual page address.
  * @param asid Address space identifier.
  * @param entry The rest of TLB entry as required by TLB insertion
  *     format.
- *
  * @param dtr If true, insert into data translation register, use
  *     instruction translation register otherwise.
- *
  * @param tr Translation register.
- *
  */
-void tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr,
+void
+tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr,
     size_t tr)
 {
-    region_register_t rr;
+    region_register rr;
     bool restore_rr = false;

     rr.word = rr_read(VA2VRN(va));
     if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
…
          * Save the old content of the register and replace the RID.
          */
-        region_register_t rr0;
+        region_register rr0;

         rr0 = rr;
         rr0.map.rid = ASID2RID(asid, VA2VRN(va));
         rr_write(VA2VRN(va), rr0.word);
         srlz_d();
         srlz_i();
     }

     asm volatile (
-        "mov r8 = psr ;;\n"
-        "rsm %[mask] ;;\n"                 /* PSR_IC_MASK */
-        "srlz.d ;;\n"
-        "srlz.i ;;\n"
-        "mov cr.ifa = %[va]\n"             /* va */
-        "mov cr.itir = %[word1] ;;\n"      /* entry.word[1] */
-        "cmp.eq p6, p7 = %[dtr], r0 ;;\n"  /* decide between itr and dtr */
-        "(p6) itr.i itr[%[tr]] = %[word0] ;;\n"
-        "(p7) itr.d dtr[%[tr]] = %[word0] ;;\n"
-        "mov psr.l = r8 ;;\n"
-        "srlz.d ;;\n"
-        :: [mask] "i" (PSR_IC_MASK),
-           [va] "r" (va),
-           [word1] "r" (entry.word[1]),
-           [word0] "r" (entry.word[0]),
-           [tr] "r" (tr),
-           [dtr] "r" (dtr)
+        "mov r8 = psr;;\n"
+        "rsm %0;;\n"                  /* PSR_IC_MASK */
+        "srlz.d;;\n"
+        "srlz.i;;\n"
+        "mov cr.ifa = %1\n"           /* va */
+        "mov cr.itir = %2;;\n"        /* entry.word[1] */
+        "cmp.eq p6,p7 = %5,r0;;\n"    /* decide between itr and dtr */
+        "(p6) itr.i itr[%4] = %3;;\n"
+        "(p7) itr.d dtr[%4] = %3;;\n"
+        "mov psr.l = r8;;\n"
+        "srlz.d;;\n"
+        :
+        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]),
+          "r" (entry.word[0]), "r" (tr), "r" (dtr)
         : "p6", "p7", "r8"
     );
…
 /** Insert data into DTLB.
  *
  * @param page Virtual page address including VRN bits.
  * @param frame Physical frame address.
  * @param dtr If true, insert into data translation register, use data
  *     translation cache otherwise.
- *
  * @param tr Translation register if dtr is true, ignored otherwise.
- *
  */
-void dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr,
+void
+dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr,
     size_t tr)
 {
…
     entry.word[1] = 0;

-    entry.p = true;
+    entry.p = true;            /* present */
     entry.ma = MA_WRITEBACK;
-    entry.a = true;
-    entry.d = true;
+    entry.a = true;            /* already accessed */
+    entry.d = true;            /* already dirty */
     entry.pl = PL_KERNEL;
     entry.ar = AR_READ | AR_WRITE;
…
  * Purge DTR entries used by the kernel.
  *
  * @param page Virtual page address including VRN bits.
  * @param width Width of the purge in bits.
- *
  */
 void dtr_purge(uintptr_t page, size_t width)
 {
-    asm volatile (
-        "ptr.d %[page], %[width]\n"
-        :: [page] "r" (page),
-           [width] "r" (width << 2)
-    );
+    asm volatile ("ptr.d %0, %1\n" : : "r" (page), "r" (width << 2));
 }
…
 /** Copy content of PTE into data translation cache.
  *
  * @param t PTE.
- *
  */
 void dtc_pte_copy(pte_t *t)
 {
     tlb_entry_t entry;

     entry.word[0] = 0;
     entry.word[1] = 0;
…
     dtc_mapping_insert(t->page, t->as->asid, entry);
-
 #ifdef CONFIG_VHPT
     vhpt_mapping_insert(t->page, t->as->asid, entry);
 #endif
 }

 /** Copy content of PTE into instruction translation cache.
  *
  * @param t PTE.
- *
  */
 void itc_pte_copy(pte_t *t)
 {
     tlb_entry_t entry;

     entry.word[0] = 0;
     entry.word[1] = 0;
…
     itc_mapping_insert(t->page, t->as->asid, entry);
-
 #ifdef CONFIG_VHPT
     vhpt_mapping_insert(t->page, t->as->asid, entry);
 #endif
 }

 /** Instruction TLB fault handler for faults with VHPT turned off.
  *
  * @param vector Interruption vector.
  * @param istate Structure with saved interruption state.
- *
  */
 void alternate_instruction_tlb_fault(uint64_t vector, istate_t *istate)
 {
-    region_register_t rr;
+    region_register rr;
     rid_t rid;
     uintptr_t va;
     pte_t *t;

     va = istate->cr_ifa;  /* faulting address */
     rr.word = rr_read(VA2VRN(va));
     rid = rr.map.rid;

     page_table_lock(AS, true);
     t = page_mapping_find(AS, va);
…
     page_table_unlock(AS, true);
     if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
-        fault_if_from_uspace(istate, "Page fault at %p.", va);
+        fault_if_from_uspace(istate,"Page fault at %p.",va);
         panic("%s: va=%p, rid=%d, iip=%p.", __func__, va, rid,
             istate->cr_iip);
…
 }

+#define IO_FRAME_BASE 0xFFFFC000000
+
 /**
  * There is special handling of memory mapped legacy io, because of 4KB sized
  * access for userspace.
  *
  * @param va Virtual address of page fault.
  * @param istate Structure with saved interruption state.
  *
  * @return One on success, zero on failure.
- *
  */
 static int try_memmap_io_insertion(uintptr_t va, istate_t *istate)
…
     uint64_t io_page = (va & ((1 << IO_PAGE_WIDTH) - 1)) >>
         USPACE_IO_PAGE_WIDTH;

     if (is_io_page_accessible(io_page)) {
         uint64_t page, frame;

         page = IO_OFFSET +
             (1 << USPACE_IO_PAGE_WIDTH) * io_page;
         frame = IO_FRAME_BASE +
             (1 << USPACE_IO_PAGE_WIDTH) * io_page;

         tlb_entry_t entry;

         entry.word[0] = 0;
         entry.word[1] = 0;

-        entry.p = true;
+        entry.p = true;            /* present */
         entry.ma = MA_UNCACHEABLE;
-        entry.a = true;
-        entry.d = true;
+        entry.a = true;            /* already accessed */
+        entry.d = true;            /* already dirty */
         entry.pl = PL_USER;
         entry.ar = AR_READ | AR_WRITE;
         entry.ppn = frame >> PPN_SHIFT;
         entry.ps = USPACE_IO_PAGE_WIDTH;

         dtc_mapping_insert(page, TASK->as->asid, entry);
         return 1;
…
     }

     return 0;
 }

 /** Data TLB fault handler for faults with VHPT turned off.
  *
  * @param vector Interruption vector.
  * @param istate Structure with saved interruption state.
- *
  */
 void alternate_data_tlb_fault(uint64_t vector, istate_t *istate)
 {
-    if (istate->cr_isr.sp) {
-        /* Speculative load. Deffer the exception
-           until a more clever approach can be used.
-
-           Currently if we try to find the mapping
-           for the speculative load while in the kernel,
-           we might introduce a livelock because of
-           the possibly invalid values of the address. */
-        istate->cr_ipsr.ed = true;
-        return;
-    }
-
-    uintptr_t va = istate->cr_ifa;  /* faulting address */
-
-    region_register_t rr;
-    rr.word = rr_read(VA2VRN(va));
-    rid_t rid = rr.map.rid;
+    region_register rr;
+    rid_t rid;
+    uintptr_t va;
+    pte_t *t;
+
+    va = istate->cr_ifa;  /* faulting address */
+    rr.word = rr_read(VA2VRN(va));
+    rid = rr.map.rid;
     if (RID2ASID(rid) == ASID_KERNEL) {
         if (VA2VRN(va) == VRN_KERNEL) {
…
     }

     page_table_lock(AS, true);
-    pte_t *entry = page_mapping_find(AS, va);
-    if (entry) {
+    t = page_mapping_find(AS, va);
+    if (t) {
         /*
          * The mapping was found in the software page hash table.
          * Insert it into data translation cache.
          */
-        dtc_pte_copy(entry);
+        dtc_pte_copy(t);
         page_table_unlock(AS, true);
     } else {
…
         if (try_memmap_io_insertion(va, istate))
             return;
-
         /*
          * Forward the page fault to the address space page fault
          * handler.
          */
         if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
-            fault_if_from_uspace(istate, "Page fault at %p.", va);
+            fault_if_from_uspace(istate,"Page fault at %p.",va);
             panic("%s: va=%p, rid=%d, iip=%p.", __func__, va, rid,
                 istate->cr_iip);
…
  * This fault should not occur.
  *
  * @param vector Interruption vector.
  * @param istate Structure with saved interruption state.
- *
  */
 void data_nested_tlb_fault(uint64_t vector, istate_t *istate)
 {
-    ASSERT(false);
+    panic("%s.", __func__);
 }

 /** Data Dirty bit fault handler.
  *
  * @param vector Interruption vector.
  * @param istate Structure with saved interruption state.
- *
  */
 void data_dirty_bit_fault(uint64_t vector, istate_t *istate)
 {
-    region_register_t rr;
+    region_register rr;
     rid_t rid;
     uintptr_t va;
     pte_t *t;

     va = istate->cr_ifa;  /* faulting address */
     rr.word = rr_read(VA2VRN(va));
     rid = rr.map.rid;

     page_table_lock(AS, true);
     t = page_mapping_find(AS, va);
-    ASSERT((t) && (t->p));
-    if ((t) && (t->p) && (t->w)) {
+    ASSERT(t && t->p);
+    if (t && t->p && t->w) {
         /*
          * Update the Dirty bit in page tables and reinsert
…
     } else {
         if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
-            fault_if_from_uspace(istate, "Page fault at %p.", va);
+            fault_if_from_uspace(istate,"Page fault at %p.",va);
             panic("%s: va=%p, rid=%d, iip=%p.", __func__, va, rid,
                 istate->cr_iip);
…
 /** Instruction access bit fault handler.
  *
  * @param vector Interruption vector.
  * @param istate Structure with saved interruption state.
- *
  */
 void instruction_access_bit_fault(uint64_t vector, istate_t *istate)
 {
-    region_register_t rr;
+    region_register rr;
     rid_t rid;
     uintptr_t va;
     pte_t *t;

     va = istate->cr_ifa;  /* faulting address */
     rr.word = rr_read(VA2VRN(va));
     rid = rr.map.rid;

     page_table_lock(AS, true);
     t = page_mapping_find(AS, va);
-    ASSERT((t) && (t->p));
-    if ((t) && (t->p) && (t->x)) {
+    ASSERT(t && t->p);
+    if (t && t->p && t->x) {
         /*
          * Update the Accessed bit in page tables and reinsert
…
  * @param vector Interruption vector.
  * @param istate Structure with saved interruption state.
- *
  */
 void data_access_bit_fault(uint64_t vector, istate_t *istate)
 {
-    region_register_t rr;
+    region_register rr;
     rid_t rid;
     uintptr_t va;
     pte_t *t;

     va = istate->cr_ifa;  /* faulting address */
     rr.word = rr_read(VA2VRN(va));
     rid = rr.map.rid;

     page_table_lock(AS, true);
     t = page_mapping_find(AS, va);
-    ASSERT((t) && (t->p));
-    if ((t) && (t->p)) {
+    ASSERT(t && t->p);
+    if (t && t->p) {
         /*
          * Update the Accessed bit in page tables and reinsert
…
  * @param vector Interruption vector.
  * @param istate Structure with saved interruption state.
- *
  */
 void data_access_rights_fault(uint64_t vector, istate_t *istate)
 {
-    region_register_t rr;
+    region_register rr;
     rid_t rid;
     uintptr_t va;
     pte_t *t;

     va = istate->cr_ifa;  /* faulting address */
     rr.word = rr_read(VA2VRN(va));
     rid = rr.map.rid;

     /*
      * Assume a write to a read-only page.
…
     page_table_lock(AS, true);
     t = page_mapping_find(AS, va);
-    ASSERT((t) && (t->p));
+    ASSERT(t && t->p);
     ASSERT(!t->w);
     if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
…
  * @param vector Interruption vector.
  * @param istate Structure with saved interruption state.
- *
  */
 void page_not_present(uint64_t vector, istate_t *istate)
 {
-    region_register_t rr;
+    region_register rr;
     rid_t rid;
     uintptr_t va;
     pte_t *t;

     va = istate->cr_ifa;  /* faulting address */
     rr.word = rr_read(VA2VRN(va));
     rid = rr.map.rid;

     page_table_lock(AS, true);
     t = page_mapping_find(AS, va);
kernel/arch/ia64/src/mm/vhpt.c
--- ad4b32c
+++ 7e266ff

 */

 /** @addtogroup ia64mm
  * @{
  */
…
     vhpt_base = frame_alloc(VHPT_WIDTH - FRAME_WIDTH,
         FRAME_KA | FRAME_ATOMIC);
     if (!vhpt_base)
         panic("Kernel configured with VHPT but no memory for table.");
     vhpt_invalidate_all();
…
 void vhpt_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
 {
-    region_register_t rr_save, rr;
+    region_register rr_save, rr;
     size_t vrn;
     rid_t rid;
     uint64_t tag;

     vhpt_entry_t *ventry;

+
     vrn = va >> VRN_SHIFT;
     rid = ASID2RID(asid, vrn);

     rr_save.word = rr_read(vrn);
     rr.word = rr_save.word;
…
     srlz_i();
     srlz_d();

     ventry->word[0] = entry.word[0];
     ventry->word[1] = entry.word[1];
kernel/arch/ia64/src/start.S
rad4b32c r7e266ff 32 32 #include <mm/asid.h> 33 33 34 #define RR_MASK 35 #define RID_SHIFT 36 #define PS_SHIFT 37 38 #define KERNEL_TRANSLATION_I 39 #define KERNEL_TRANSLATION_D 40 #define KERNEL_TRANSLATION_VIO 41 #define KERNEL_TRANSLATION_IO 0x00100FFFFC00067142 #define KERNEL_TRANSLATION_FW 0x00100000F000067134 #define RR_MASK (0xFFFFFFFF00000002) 35 #define RID_SHIFT 8 36 #define PS_SHIFT 2 37 38 #define KERNEL_TRANSLATION_I 0x0010000000000661 39 #define KERNEL_TRANSLATION_D 0x0010000000000661 40 #define KERNEL_TRANSLATION_VIO 0x0010000000000671 41 #define KERNEL_TRANSLATION_IO 0x00100FFFFC000671 42 #define KERNEL_TRANSLATION_FW 0x00100000F0000671 43 43 44 44 .section K_TEXT_START, "ax" … … 49 49 kernel_image_start: 50 50 .auto 51 51 52 52 #ifdef CONFIG_SMP 53 53 # Identify self(CPU) in OS structures by ID / EID 54 54 55 55 mov r9 = cr64 56 56 mov r10 = 1 … … 62 62 st1 [r8] = r10 63 63 #endif 64 64 65 65 mov psr.l = r0 66 66 srlz.i 67 67 srlz.d 68 68 69 69 # Fill TR.i and TR.d using Region Register #VRN_KERNEL 70 70 71 71 movl r8 = (VRN_KERNEL << VRN_SHIFT) 72 72 mov r9 = rr[r8] 73 73 74 74 movl r10 = (RR_MASK) 75 75 and r9 = r10, r9 76 76 movl r10 = ((RID_KERNEL << RID_SHIFT) | (KERNEL_PAGE_WIDTH << PS_SHIFT)) 77 or r9 = r10, r978 77 or r9 = r10, r9 78 79 79 mov rr[r8] = r9 80 80 81 81 movl r8 = (VRN_KERNEL << VRN_SHIFT) 82 82 mov cr.ifa = r8 83 83 84 84 mov r11 = cr.itir 85 85 movl r10 = (KERNEL_PAGE_WIDTH << PS_SHIFT) 86 86 or r10 = r10, r11 87 87 mov cr.itir = r10 88 88 89 89 movl r10 = (KERNEL_TRANSLATION_I) 90 90 itr.i itr[r0] = r10 91 91 movl r10 = (KERNEL_TRANSLATION_D) 92 92 itr.d dtr[r0] = r10 93 93 94 94 movl r7 = 1 95 95 movl r8 = (VRN_KERNEL << VRN_SHIFT) | VIO_OFFSET … … 97 97 movl r10 = (KERNEL_TRANSLATION_VIO) 98 98 itr.d dtr[r7] = r10 99 99 100 100 mov r11 = cr.itir 101 101 movl r10 = ~0xfc … … 104 104 or r10 = r10, r11 105 105 mov cr.itir = r10 106 106 107 107 movl r7 = 2 108 108 movl r8 = (VRN_KERNEL << VRN_SHIFT) | IO_OFFSET … … 110 110 movl r10 = (KERNEL_TRANSLATION_IO) 111 111 itr.d dtr[r7] = r10 112 113 # Setup mapping for fi rmware area (also SAPIC)114 112 113 # Setup mapping for fimware arrea (also SAPIC) 114 115 115 mov r11 = cr.itir 116 116 movl r10 = ~0xfc … … 119 119 or r10 = r10, r11 120 120 mov cr.itir = r10 121 121 122 122 movl r7 = 3 123 123 movl r8 = (VRN_KERNEL << VRN_SHIFT) | FW_OFFSET … … 125 125 movl r10 = (KERNEL_TRANSLATION_FW) 126 126 itr.d dtr[r7] = r10 127 128 # Initialize DSR 129 130 movl r10 = (DCR_DP_MASK | DCR_DK_MASK | DCR_DX_MASK | DCR_DR_MASK | DCR_DA_MASK | DCR_DD_MASK | DCR_LC_MASK) 131 mov r9 = cr.dcr 132 or r10 = r10, r9 133 mov cr.dcr = r10 134 127 135 128 # Initialize PSR 136 129 137 130 movl r10 = (PSR_DT_MASK | PSR_RT_MASK | PSR_IT_MASK | PSR_IC_MASK) /* Enable paging */ 138 131 mov r9 = psr 139 132 140 133 or r10 = r10, r9 141 134 mov cr.ipsr = r10 … … 145 138 srlz.d 146 139 srlz.i 147 140 148 141 .explicit 149 142 150 143 /* 151 144 * Return From Interrupt is the only way to … … 154 147 rfi ;; 155 148 149 156 150 .global paging_start 157 151 paging_start: 158 152 159 153 /* 160 154 * Now we are paging. 161 155 */ 162 156 163 157 # Switch to register bank 1 164 158 bsw.1 165 159 166 160 #ifdef CONFIG_SMP 167 161 # Am I BSP or AP? 
… … 170 164 cmp.eq p3, p2 = r20, r0 ;; 171 165 #else 172 cmp.eq p3, p2 = r0, r0 ;; 173 #endif 166 cmp.eq p3, p2 = r0, r0 ;; /* you are BSP */ 167 #endif /* CONFIG_SMP */ 174 168 175 169 # Initialize register stack … … 178 172 mov ar.bspstore = r8 179 173 loadrs 180 174 181 175 # Initialize memory stack to some sane value 182 176 movl r12 = stack0 ;; 183 add r12 = -16, r12 184 177 add r12 = -16, r12 /* allocate a scratch area on the stack */ 178 185 179 # Initialize gp (Global Pointer) register 186 movl r20 = (VRN_KERNEL << VRN_SHIFT) 187 or r20 = r20, r1;;180 movl r20 = (VRN_KERNEL << VRN_SHIFT);; 181 or r20 = r20,r1;; 188 182 movl r1 = _hardcoded_load_address 189 183 … … 198 192 (p3) addl r19 = @gprel(hardcoded_load_address), gp 199 193 (p3) addl r21 = @gprel(bootinfo), gp 200 194 ;; 201 195 (p3) st8 [r17] = r14 202 196 (p3) st8 [r18] = r15 203 197 (p3) st8 [r19] = r16 204 198 (p3) st8 [r21] = r20 205 199 206 200 ssm (1 << 19) ;; /* Disable f32 - f127 */ 207 201 srlz.i 208 202 srlz.d ;; 209 203 210 204 #ifdef CONFIG_SMP 211 205 (p2) movl r18 = main_ap ;; 212 (p2) mov b1 = r18 ;;206 (p2) mov b1 = r18 ;; 213 207 (p2) br.call.sptk.many b0 = b1 214 208 215 209 # Mark that BSP is on 216 217 210 mov r20 = 1 ;; 218 211 movl r21 = bsp_started ;; 219 212 st8 [r21] = r20 ;; 220 213 #endif 221 214 222 215 br.call.sptk.many b0 = arch_pre_main 223 216 224 217 movl r18 = main_bsp ;; 225 218 mov b1 = r18 ;; … … 234 227 kernel_image_ap_start: 235 228 .auto 236 229 237 230 # Identify self(CPU) in OS structures by ID / EID 238 231 239 232 mov r9 = cr64 240 233 mov r10 = 1 … … 247 240 248 241 # Wait for wakeup synchro signal (#3 in cpu_by_id_eid_list) 249 242 250 243 kernel_image_ap_start_loop: 251 244 movl r11 = kernel_image_ap_start_loop 252 245 and r11 = r11, r12 253 mov b1 = r11 254 255 ld1 r20 = [r8] 256 movl r21 = 3 257 cmp.eq p2, p3 = r20, r21 246 mov b1 = r11 247 248 ld1 r20 = [r8] ;; 249 movl r21 = 3 ;; 250 cmp.eq p2, p3 = r20, r21 ;; 258 251 (p3) br.call.sptk.many b0 = b1 259 252 260 253 movl r11 = kernel_image_start 261 254 and r11 = r11, r12 262 mov b1 = r11 255 mov b1 = r11 263 256 br.call.sptk.many b0 = b1 264 257 … … 266 259 .global bsp_started 267 260 bsp_started: 268 261 .space 8 269 262 270 263 .align 4096 271 264 .global cpu_by_id_eid_list 272 265 cpu_by_id_eid_list: 273 274 275 #endif 266 .space 65536 267 268 #endif /* CONFIG_SMP */ -
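
The start.S hunk rebuilds rr[VRN_KERNEL] by masking the old value with RR_MASK and or-ing in the kernel RID and page width. The same computation as a stand-alone C program; RR_MASK, RID_SHIFT and PS_SHIFT are copied from the listing, while the RID_KERNEL and KERNEL_PAGE_WIDTH values are made-up samples:

/* Region-register value computation from kernel_image_start above. */
#include <stdint.h>
#include <stdio.h>

#define RR_MASK   0xFFFFFFFF00000002ULL   /* from the listing */
#define RID_SHIFT 8
#define PS_SHIFT  2

int main(void)
{
	uint64_t rr = 0x123456789abcdef0ULL;  /* sample old rr[VRN_KERNEL] */
	uint64_t rid_kernel = 7;              /* hypothetical RID_KERNEL */
	uint64_t page_width = 28;             /* hypothetical KERNEL_PAGE_WIDTH */

	rr &= RR_MASK;                        /* keep only the masked bits */
	rr |= (rid_kernel << RID_SHIFT) | (page_width << PS_SHIFT);
	printf("rr = 0x%016llx\n", (unsigned long long) rr);
	return 0;
}
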
kernel/arch/mips32/include/mm/tlb.h
rad4b32c r7e266ff 59 59 typedef union { 60 60 struct { 61 #ifdef __BE__61 #ifdef BIG_ENDIAN 62 62 unsigned : 2; /* zero */ 63 63 unsigned pfn : 24; /* frame number */ … … 80 80 typedef union { 81 81 struct { 82 #ifdef __BE__82 #ifdef BIG_ENDIAN 83 83 unsigned vpn2 : 19; 84 84 unsigned : 5; … … 95 95 typedef union { 96 96 struct { 97 #ifdef __BE__97 #ifdef BIG_ENDIAN 98 98 unsigned : 7; 99 99 unsigned mask : 12; … … 110 110 typedef union { 111 111 struct { 112 #ifdef __BE__112 #ifdef BIG_ENDIAN 113 113 unsigned p : 1; 114 114 unsigned : 27; -
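
The unions above order their bitfields differently depending on byte order, so the struct view and the raw-word view stay consistent on both big- and little-endian MIPS. A stand-alone illustration of the same idea for the entry-lo layout; the little-endian branch, the WORDS_BIGENDIAN macro and the c/d/v/g field widths are assumptions based on the standard MIPS EntryLo format, not copied from the kernel:

/* Endianness-dependent bitfield layout (C11 anonymous struct). */
#include <stdint.h>
#include <stdio.h>

typedef union {
	struct {
#ifdef WORDS_BIGENDIAN        /* stand-in for the kernel's macro */
		unsigned : 2;         /* zero */
		unsigned pfn : 24;    /* frame number */
		unsigned c : 3;
		unsigned d : 1;
		unsigned v : 1;
		unsigned g : 1;
#else
		unsigned g : 1;
		unsigned v : 1;
		unsigned d : 1;
		unsigned c : 3;
		unsigned pfn : 24;
		unsigned : 2;
#endif
	};
	uint32_t value;
} entry_lo_t;

int main(void)
{
	entry_lo_t lo = { .value = 0 };
	lo.pfn = 0x1234;
	lo.v = 1;
	printf("raw word: 0x%08x\n", lo.value);
	return 0;
}
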
uspace/app/bdsh/cmds/modules/bdd/bdd.c
rad4b32c r7e266ff 112 112 while (size > 0) { 113 113 rc = block_get(&block, handle, boff, 0); 114 if (rc != EOK) { 115 printf("Error: could not get block %u, device %u.\n", 116 boff, handle); 117 return CMD_FAILURE; 118 } 114 assert(rc == EOK); 119 115 blk = (uint8_t *) block->data; 120 116 … … 146 142 147 143 rc = block_put(block); 148 if (rc != EOK) { 149 printf("Error: could not put block %p.\n", 150 block); 151 return CMD_FAILURE; 152 } 144 assert(rc == EOK); 153 145 154 146 if (size > rows * BPR) -
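
The two sides of this hunk differ in how a failed block_get() or block_put() is handled: one reports the error to the user and returns CMD_FAILURE, the other asserts success. A stand-alone toy of the reporting variant, with block_get() reduced to a stub and the error value invented:

#include <stdio.h>

#define EOK   0
#define EIO (-5)   /* sample error code */

static int block_get(int boff)
{
	return (boff == 3) ? EIO : EOK;   /* pretend block 3 is unreadable */
}

int main(void)
{
	for (int boff = 0; boff < 5; boff++) {
		int rc = block_get(boff);
		if (rc != EOK) {
			/* report instead of assert(rc == EOK) */
			printf("Error: could not get block %d.\n", boff);
			return 1;
		}
	}
	return 0;
}
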
uspace/lib/libblock/libblock.c
rad4b32c r7e266ff 345 345 link_t *l; 346 346 unsigned long key = boff; 347 int rc ;347 int rc = EOK; 348 348 349 349 devcon = devcon_search(dev_handle); … … 355 355 356 356 retry: 357 rc = EOK;358 b = NULL;359 360 357 fibril_mutex_lock(&cache->lock); 361 358 l = hash_table_find(&cache->block_hash, &key); … … 397 394 unsigned long temp_key; 398 395 recycle: 399 if (list_empty(&cache->free_head)) { 400 fibril_mutex_unlock(&cache->lock); 401 rc = ENOMEM; 402 goto out; 403 } 396 assert(!list_empty(&cache->free_head)); 404 397 l = cache->free_head.next; 405 398 b = list_get_instance(l, block_t, free_link); … … 484 477 485 478 fibril_mutex_unlock(&b->lock); 486 }487 out:488 if ((rc != EOK) && b) {489 assert(b->toxic);490 (void) block_put(b);491 b = NULL;492 479 } 493 480 *block = b; -
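
In the cache hunk above, one side asserts that the free list is non-empty before recycling a block, while the other unlocks and fails with ENOMEM. A stand-alone toy of the graceful variant; the list and error codes are simplified stand-ins:

#include <stdio.h>

#define EOK      0
#define ENOMEM (-3)   /* sample error code */

typedef struct block {
	struct block *next;
} block_t;

static block_t *free_head;   /* cache free list */

/* Recycle a cached block if one is available; fail with ENOMEM
 * instead of asserting the list non-empty. */
static int block_recycle(block_t **out)
{
	if (free_head == NULL)
		return ENOMEM;
	*out = free_head;
	free_head = free_head->next;
	return EOK;
}

int main(void)
{
	block_t one = { .next = NULL };
	block_t *b;

	free_head = &one;
	printf("first: %d\n", block_recycle(&b));    /* EOK */
	printf("second: %d\n", block_recycle(&b));   /* ENOMEM, list empty */
	return 0;
}
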
uspace/lib/libc/arch/ia64/include/atomic.h
rad4b32c r7e266ff 27 27 */ 28 28 29 /** @addtogroup libcia64 29 /** @addtogroup libcia64 30 30 * @{ 31 31 */ … … 36 36 #define LIBC_ia64_ATOMIC_H_ 37 37 38 static inline void atomic_inc(atomic_t *val) 38 /** Atomic addition. 39 * 40 * @param val Atomic value. 41 * @param imm Value to add. 42 * 43 * @return Value before addition. 44 */ 45 static inline long atomic_add(atomic_t *val, int imm) 39 46 { 40 47 long v; 41 42 asm volatile (43 "fetchadd8.rel %[v] = %[count], 1\n"44 : [v] "=r" (v),45 [count] "+m" (val->count)46 );47 }48 48 49 static inline void atomic_dec(atomic_t *val) 50 { 51 long v; 52 53 asm volatile ( 54 "fetchadd8.rel %[v] = %[count], -1\n" 55 : [v] "=r" (v), 56 [count] "+m" (val->count) 57 ); 58 } 59 60 static inline long atomic_preinc(atomic_t *val) 61 { 62 long v; 63 64 asm volatile ( 65 "fetchadd8.rel %[v] = %[count], 1\n" 66 : [v] "=r" (v), 67 [count] "+m" (val->count) 68 ); 69 70 return (v + 1); 71 } 72 73 static inline long atomic_predec(atomic_t *val) 74 { 75 long v; 76 77 asm volatile ( 78 "fetchadd8.rel %[v] = %[count], -1\n" 79 : [v] "=r" (v), 80 [count] "+m" (val->count) 81 ); 82 83 return (v - 1); 84 } 85 86 static inline long atomic_postinc(atomic_t *val) 87 { 88 long v; 89 90 asm volatile ( 91 "fetchadd8.rel %[v] = %[count], 1\n" 92 : [v] "=r" (v), 93 [count] "+m" (val->count) 94 ); 95 49 asm volatile ("fetchadd8.rel %0 = %1, %2\n" : "=r" (v), "+m" (val->count) : "i" (imm)); 50 96 51 return v; 97 52 } 98 53 99 static inline long atomic_postdec(atomic_t *val) 100 { 101 long v; 102 103 asm volatile ( 104 "fetchadd8.rel %[v] = %[count], -1\n" 105 : [v] "=r" (v), 106 [count] "+m" (val->count) 107 ); 108 109 return v; 110 } 54 static inline void atomic_inc(atomic_t *val) { atomic_add(val, 1); } 55 static inline void atomic_dec(atomic_t *val) { atomic_add(val, -1); } 56 57 static inline long atomic_preinc(atomic_t *val) { return atomic_add(val, 1) + 1; } 58 static inline long atomic_predec(atomic_t *val) { return atomic_add(val, -1) - 1; } 59 60 static inline long atomic_postinc(atomic_t *val) { return atomic_add(val, 1); } 61 static inline long atomic_postdec(atomic_t *val) { return atomic_add(val, -1); } 111 62 112 63 #endif -
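
The rewritten header derives all six operations from a single atomic_add() that returns the value before the addition, which is what ia64's fetchadd8 yields (the instruction accepts only a handful of immediate increments, ±1 among them, hence the "i" constraint). The same derivation in portable C11 atomics as a stand-alone sketch:

#include <stdatomic.h>
#include <stdio.h>

typedef struct {
	atomic_long count;
} atomic_t;

/* Returns the value *before* the addition, as fetchadd8 does. */
static long atomic_add(atomic_t *val, long imm)
{
	return atomic_fetch_add(&val->count, imm);
}

static long atomic_preinc(atomic_t *val)  { return atomic_add(val, 1) + 1; }
static long atomic_postinc(atomic_t *val) { return atomic_add(val, 1); }

int main(void)
{
	atomic_t a = { 0 };
	long pre = atomic_preinc(&a);    /* count 0 -> 1, returns 1 */
	long post = atomic_postinc(&a);  /* count 1 -> 2, returns 1 */
	printf("pre=%ld post=%ld count=%ld\n", pre, post,
	    atomic_load(&a.count));
	return 0;
}
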
uspace/srv/fs/fat/fat_fat.c
rad4b32c r7e266ff 61 61 * @param dev_handle Device handle of the device with the file. 62 62 * @param firstc First cluster to start the walk with. 63 * @param lastc If non-NULL, output argument hodling the last cluster 64 * number visited. 65 * @param numc If non-NULL, output argument holding the number of 66 * clusters seen during the walk. 63 * @param lastc If non-NULL, output argument hodling the last cluster number visited. 67 64 * @param max_clusters Maximum number of clusters to visit. 68 65 * 69 * @return EOK on success or a negative error code.70 */ 71 int66 * @return Number of clusters seen during the walk. 67 */ 68 uint16_t 72 69 fat_cluster_walk(fat_bs_t *bs, dev_handle_t dev_handle, fat_cluster_t firstc, 73 fat_cluster_t *lastc, uint16_t *numc, uint16_tmax_clusters)70 fat_cluster_t *lastc, uint16_t max_clusters) 74 71 { 75 72 block_t *b; … … 87 84 if (lastc) 88 85 *lastc = firstc; 89 if (numc) 90 *numc = 0; 91 return EOK; 86 return 0; 92 87 } 93 88 … … 103 98 /* read FAT1 */ 104 99 rc = block_get(&b, dev_handle, rscnt + fsec, BLOCK_FLAGS_NONE); 105 if (rc != EOK) 106 return rc; 100 assert(rc == EOK); 107 101 clst = uint16_t_le2host(((fat_cluster_t *)b->data)[fidx]); 108 102 assert(clst != FAT_CLST_BAD); 109 103 rc = block_put(b); 110 if (rc != EOK) 111 return rc; 104 assert(rc == EOK); 112 105 clusters++; 113 106 } … … 115 108 if (lastc && clst < FAT_CLST_LAST1) 116 109 *lastc = clst; 117 if (numc) 118 *numc = clusters; 119 120 return EOK; 110 111 return clusters; 121 112 } 122 113 123 114 /** Read block from file located on a FAT file system. 124 115 * 125 * @param block Pointer to a block pointer for storing result.126 116 * @param bs Buffer holding the boot sector of the file system. 127 117 * @param dev_handle Device handle of the file system. … … 131 121 * @param flags Flags passed to libblock. 132 122 * 133 * @return EOK on success or a negative error code. 134 */ 135 int 136 _fat_block_get(block_t **block, fat_bs_t *bs, dev_handle_t dev_handle, 137 fat_cluster_t firstc, bn_t bn, int flags) 138 { 123 * @return Block structure holding the requested block. 124 */ 125 block_t * 126 _fat_block_get(fat_bs_t *bs, dev_handle_t dev_handle, fat_cluster_t firstc, 127 bn_t bn, int flags) 128 { 129 block_t *b; 139 130 unsigned bps; 140 131 unsigned rscnt; /* block address of the first FAT */ … … 143 134 unsigned sf; 144 135 unsigned ssa; /* size of the system area */ 145 uint16_t clusters; 146 unsigned max_clusters; 136 unsigned clusters, max_clusters; 147 137 fat_cluster_t lastc; 148 138 int rc; … … 160 150 /* root directory special case */ 161 151 assert(bn < rds); 162 rc = block_get( block, dev_handle, rscnt + bs->fatcnt * sf + bn,152 rc = block_get(&b, dev_handle, rscnt + bs->fatcnt * sf + bn, 163 153 flags); 164 return rc; 154 assert(rc == EOK); 155 return b; 165 156 } 166 157 167 158 max_clusters = bn / bs->spc; 168 rc = fat_cluster_walk(bs, dev_handle, firstc, &lastc, &clusters,159 clusters = fat_cluster_walk(bs, dev_handle, firstc, &lastc, 169 160 max_clusters); 170 if (rc != EOK)171 return rc;172 161 assert(clusters == max_clusters); 173 162 174 rc = block_get(block, dev_handle, 175 ssa + (lastc - FAT_CLST_FIRST) * bs->spc + bn % bs->spc, flags); 176 177 return rc; 163 rc = block_get(&b, dev_handle, ssa + 164 (lastc - FAT_CLST_FIRST) * bs->spc + bn % bs->spc, flags); 165 assert(rc == EOK); 166 167 return b; 178 168 } 179 169 … … 187 177 * this argument is ignored. 188 178 * @param pos Position in the last node block. 
189 * 190 * @return EOK on success or a negative error code. 191 */ 192 int fat_fill_gap(fat_bs_t *bs, fat_node_t *nodep, fat_cluster_t mcl, off_t pos) 179 */ 180 void fat_fill_gap(fat_bs_t *bs, fat_node_t *nodep, fat_cluster_t mcl, off_t pos) 193 181 { 194 182 uint16_t bps; … … 208 196 int flags = (o % bps == 0) ? 209 197 BLOCK_FLAGS_NOREAD : BLOCK_FLAGS_NONE; 210 rc = fat_block_get(&b, bs, nodep, o / bps, flags); 211 if (rc != EOK) 212 return rc; 198 b = fat_block_get(bs, nodep, o / bps, flags); 213 199 memset(b->data + o % bps, 0, bps - o % bps); 214 200 b->dirty = true; /* need to sync node */ 215 201 rc = block_put(b); 216 if (rc != EOK) 217 return rc; 202 assert(rc == EOK); 218 203 } 219 204 220 205 if (o >= pos) 221 return EOK;206 return; 222 207 223 208 /* zero out the initial part of the new cluster chain */ 224 209 for (o = boundary; o < pos; o += bps) { 225 rc = _fat_block_get(&b,bs, nodep->idx->dev_handle, mcl,210 b = _fat_block_get(bs, nodep->idx->dev_handle, mcl, 226 211 (o - boundary) / bps, BLOCK_FLAGS_NOREAD); 227 if (rc != EOK)228 return rc;229 212 memset(b->data, 0, min(bps, pos - o)); 230 213 b->dirty = true; /* need to sync node */ 231 214 rc = block_put(b); 232 if (rc != EOK) 233 return rc; 234 } 235 236 return EOK; 215 assert(rc == EOK); 216 } 237 217 } 238 218 … … 242 222 * @param dev_handle Device handle for the file system. 243 223 * @param clst Cluster which to get. 244 * @param value Output argument holding the value of the cluster. 245 * 246 * @return EOK or a negative error code. 247 */ 248 int 249 fat_get_cluster(fat_bs_t *bs, dev_handle_t dev_handle, fat_cluster_t clst, 250 fat_cluster_t *value) 224 * 225 * @return Value found in the cluster. 226 */ 227 fat_cluster_t 228 fat_get_cluster(fat_bs_t *bs, dev_handle_t dev_handle, fat_cluster_t clst) 251 229 { 252 230 block_t *b; 253 231 uint16_t bps; 254 232 uint16_t rscnt; 255 fat_cluster_t *cp ;233 fat_cluster_t *cp, value; 256 234 int rc; 257 235 … … 261 239 rc = block_get(&b, dev_handle, rscnt + 262 240 (clst * sizeof(fat_cluster_t)) / bps, BLOCK_FLAGS_NONE); 263 if (rc != EOK) 264 return rc; 241 assert(rc == EOK); 265 242 cp = (fat_cluster_t *)b->data + clst % (bps / sizeof(fat_cluster_t)); 266 *value = uint16_t_le2host(*cp);243 value = uint16_t_le2host(*cp); 267 244 rc = block_put(b); 268 269 return rc; 245 assert(rc == EOK); 246 247 return value; 270 248 } 271 249 … … 277 255 * @param clst Cluster which is to be set. 278 256 * @param value Value to set the cluster with. 279 * 280 * @return EOK on success or a negative error code. 281 */ 282 int 257 */ 258 void 283 259 fat_set_cluster(fat_bs_t *bs, dev_handle_t dev_handle, unsigned fatno, 284 260 fat_cluster_t clst, fat_cluster_t value) … … 298 274 rc = block_get(&b, dev_handle, rscnt + sf * fatno + 299 275 (clst * sizeof(fat_cluster_t)) / bps, BLOCK_FLAGS_NONE); 300 if (rc != EOK) 301 return rc; 276 assert(rc == EOK); 302 277 cp = (fat_cluster_t *)b->data + clst % (bps / sizeof(fat_cluster_t)); 303 278 *cp = host2uint16_t_le(value); 304 279 b->dirty = true; /* need to sync block */ 305 280 rc = block_put(b); 306 return rc;281 assert(rc == EOK); 307 282 } 308 283 … … 313 288 * @param lifo Chain of allocated clusters. 314 289 * @param nclsts Number of clusters in the lifo chain. 315 * 316 * @return EOK on success or a negative error code. 
317 */ 318 int fat_alloc_shadow_clusters(fat_bs_t *bs, dev_handle_t dev_handle, 290 */ 291 void fat_alloc_shadow_clusters(fat_bs_t *bs, dev_handle_t dev_handle, 319 292 fat_cluster_t *lifo, unsigned nclsts) 320 293 { 321 294 uint8_t fatno; 322 295 unsigned c; 323 int rc;324 296 325 297 for (fatno = FAT1 + 1; fatno < bs->fatcnt; fatno++) { 326 298 for (c = 0; c < nclsts; c++) { 327 rc =fat_set_cluster(bs, dev_handle, fatno, lifo[c],299 fat_set_cluster(bs, dev_handle, fatno, lifo[c], 328 300 c == 0 ? FAT_CLST_LAST1 : lifo[c - 1]); 329 if (rc != EOK)330 return rc;331 301 } 332 302 } 333 334 return EOK;335 303 } 336 304 … … 379 347 for (b = 0, cl = 0; b < sf; b++) { 380 348 rc = block_get(&blk, dev_handle, rscnt + b, BLOCK_FLAGS_NONE); 381 if (rc != EOK)382 goto error;383 349 for (c = 0; c < bps / sizeof(fat_cluster_t); c++, cl++) { 384 350 fat_cluster_t *clst = (fat_cluster_t *)blk->data + c; … … 396 362 /* we are almost done */ 397 363 rc = block_put(blk); 398 if (rc != EOK) 399 goto error; 364 assert(rc == EOK); 400 365 /* update the shadow copies of FAT */ 401 rc =fat_alloc_shadow_clusters(bs,366 fat_alloc_shadow_clusters(bs, 402 367 dev_handle, lifo, nclsts); 403 if (rc != EOK)404 goto error;405 368 *mcl = lifo[found - 1]; 406 369 *lcl = lifo[0]; … … 412 375 } 413 376 rc = block_put(blk); 414 if (rc != EOK) { 415 error: 416 fibril_mutex_unlock(&fat_alloc_lock); 417 free(lifo); 418 return rc; 419 } 377 assert(rc == EOK); 420 378 } 421 379 fibril_mutex_unlock(&fat_alloc_lock); … … 426 384 */ 427 385 while (found--) { 428 rc =fat_set_cluster(bs, dev_handle, FAT1, lifo[found],386 fat_set_cluster(bs, dev_handle, FAT1, lifo[found], 429 387 FAT_CLST_RES0); 430 if (rc != EOK) {431 free(lifo);432 return rc;433 }434 388 } 435 389 … … 443 397 * @param dev_handle Device handle of the file system. 444 398 * @param firstc First cluster in the chain which is to be freed. 445 * 446 * @return EOK on success or a negative return code. 447 */ 448 int 399 */ 400 void 449 401 fat_free_clusters(fat_bs_t *bs, dev_handle_t dev_handle, fat_cluster_t firstc) 450 402 { 451 403 unsigned fatno; 452 404 fat_cluster_t nextc; 453 int rc;454 405 455 406 /* Mark all clusters in the chain as free in all copies of FAT. */ 456 407 while (firstc < FAT_CLST_LAST1) { 457 408 assert(firstc >= FAT_CLST_FIRST && firstc < FAT_CLST_BAD); 458 rc = fat_get_cluster(bs, dev_handle, firstc, &nextc); 459 if (rc != EOK) 460 return rc; 461 for (fatno = FAT1; fatno < bs->fatcnt; fatno++) { 462 rc = fat_set_cluster(bs, dev_handle, fatno, firstc, 409 nextc = fat_get_cluster(bs, dev_handle, firstc); 410 for (fatno = FAT1; fatno < bs->fatcnt; fatno++) 411 fat_set_cluster(bs, dev_handle, fatno, firstc, 463 412 FAT_CLST_RES0); 464 if (rc != EOK)465 return rc;466 }467 468 413 firstc = nextc; 469 414 } 470 471 return EOK;472 415 } 473 416 … … 477 420 * @param nodep Node representing the file. 478 421 * @param mcl First cluster of the cluster chain to append. 479 * 480 * @return EOK on success or a negative error code. 
481 */ 482 int fat_append_clusters(fat_bs_t *bs, fat_node_t *nodep, fat_cluster_t mcl) 422 */ 423 void fat_append_clusters(fat_bs_t *bs, fat_node_t *nodep, fat_cluster_t mcl) 483 424 { 484 425 dev_handle_t dev_handle = nodep->idx->dev_handle; 485 426 fat_cluster_t lcl; 486 uint16_t numc;487 427 uint8_t fatno; 488 int rc; 489 490 rc = fat_cluster_walk(bs, dev_handle, nodep->firstc, &lcl, &numc, 491 (uint16_t) -1); 492 if (rc != EOK) 493 return rc; 494 495 if (numc == 0) { 428 429 if (fat_cluster_walk(bs, dev_handle, nodep->firstc, &lcl, 430 (uint16_t) -1) == 0) { 496 431 /* No clusters allocated to the node yet. */ 497 432 nodep->firstc = mcl; 498 433 nodep->dirty = true; /* need to sync node */ 499 return EOK; 500 } 501 502 for (fatno = FAT1; fatno < bs->fatcnt; fatno++) { 503 rc = fat_set_cluster(bs, nodep->idx->dev_handle, fatno, lcl, 504 mcl); 505 if (rc != EOK) 506 return rc; 507 } 508 509 return EOK; 434 return; 435 } 436 437 for (fatno = FAT1; fatno < bs->fatcnt; fatno++) 438 fat_set_cluster(bs, nodep->idx->dev_handle, fatno, lcl, mcl); 510 439 } 511 440 … … 517 446 * argument is FAT_CLST_RES0, then all clusters will 518 447 * be chopped off. 519 * 520 * @return EOK on success or a negative return code. 521 */ 522 int fat_chop_clusters(fat_bs_t *bs, fat_node_t *nodep, fat_cluster_t lastc) 523 { 524 int rc; 525 448 */ 449 void fat_chop_clusters(fat_bs_t *bs, fat_node_t *nodep, fat_cluster_t lastc) 450 { 526 451 dev_handle_t dev_handle = nodep->idx->dev_handle; 527 452 if (lastc == FAT_CLST_RES0) { 528 453 /* The node will have zero size and no clusters allocated. */ 529 rc = fat_free_clusters(bs, dev_handle, nodep->firstc); 530 if (rc != EOK) 531 return rc; 454 fat_free_clusters(bs, dev_handle, nodep->firstc); 532 455 nodep->firstc = FAT_CLST_RES0; 533 456 nodep->dirty = true; /* need to sync node */ … … 536 459 unsigned fatno; 537 460 538 rc = fat_get_cluster(bs, dev_handle, lastc, &nextc); 539 if (rc != EOK) 540 return rc; 461 nextc = fat_get_cluster(bs, dev_handle, lastc); 541 462 542 463 /* Terminate the cluster chain in all copies of FAT. */ 543 for (fatno = FAT1; fatno < bs->fatcnt; fatno++) { 544 rc = fat_set_cluster(bs, dev_handle, fatno, lastc, 545 FAT_CLST_LAST1); 546 if (rc != EOK) 547 return rc; 548 } 464 for (fatno = FAT1; fatno < bs->fatcnt; fatno++) 465 fat_set_cluster(bs, dev_handle, fatno, lastc, FAT_CLST_LAST1); 549 466 550 467 /* Free all following clusters. */ 551 rc = fat_free_clusters(bs, dev_handle, nextc); 552 if (rc != EOK) 553 return rc; 554 } 555 556 return EOK; 557 } 558 559 int 468 fat_free_clusters(bs, dev_handle, nextc); 469 } 470 } 471 472 void 560 473 fat_zero_cluster(struct fat_bs *bs, dev_handle_t dev_handle, fat_cluster_t c) 561 474 { … … 568 481 569 482 for (i = 0; i < bs->spc; i++) { 570 rc = _fat_block_get(&b, bs, dev_handle, c, i, 571 BLOCK_FLAGS_NOREAD); 572 if (rc != EOK) 573 return rc; 483 b = _fat_block_get(bs, dev_handle, c, i, BLOCK_FLAGS_NOREAD); 574 484 memset(b->data, 0, bps); 575 485 b->dirty = true; 576 486 rc = block_put(b); 577 if (rc != EOK) 578 return rc; 579 } 580 581 return EOK; 487 assert(rc == EOK); 488 } 582 489 } 583 490 -
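
Much of this file's churn is a conversion between two error-handling conventions: returning the result directly versus returning EOK or a negative code and passing the result through an output pointer. A stand-alone toy of the out-parameter convention; the function body and error value are invented for illustration:

#include <stdio.h>

#define EOK   0
#define EIO (-5)   /* sample error code */

/* The result travels through *value; the return value carries
 * EOK or a negative errno-style code, as in fat_get_cluster(). */
static int fat_get_cluster_model(unsigned clst, unsigned *value)
{
	if (clst > 100)
		return EIO;    /* pretend the block device failed */
	*value = clst + 1;     /* pretend next-cluster lookup */
	return EOK;
}

int main(void)
{
	unsigned next;
	int rc = fat_get_cluster_model(5, &next);
	if (rc != EOK) {
		printf("lookup failed: %d\n", rc);
		return 1;
	}
	printf("next cluster: %u\n", next);
	return 0;
}
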
uspace/srv/fs/fat/fat_fat.h
rad4b32c r7e266ff 59 59 typedef uint16_t fat_cluster_t; 60 60 61 #define fat_clusters_get( numc,bs, dh, fc) \62 fat_cluster_walk((bs), (dh), (fc), NULL, ( numc), (uint16_t) -1)63 extern int fat_cluster_walk(struct fat_bs *, dev_handle_t, fat_cluster_t,64 fat_cluster_t *, uint16_t *, uint16_t);61 #define fat_clusters_get(bs, dh, fc) \ 62 fat_cluster_walk((bs), (dh), (fc), NULL, (uint16_t) -1) 63 extern uint16_t fat_cluster_walk(struct fat_bs *, dev_handle_t, fat_cluster_t, 64 fat_cluster_t *, uint16_t); 65 65 66 #define fat_block_get(b, bs, np, bn, flags) \ 67 _fat_block_get((b), (bs), (np)->idx->dev_handle, (np)->firstc, (bn), \ 68 (flags)) 66 #define fat_block_get(bs, np, bn, flags) \ 67 _fat_block_get((bs), (np)->idx->dev_handle, (np)->firstc, (bn), (flags)) 69 68 70 extern int _fat_block_get(block_t **,struct fat_bs *, dev_handle_t,69 extern struct block *_fat_block_get(struct fat_bs *, dev_handle_t, 71 70 fat_cluster_t, bn_t, int); 72 71 73 extern intfat_append_clusters(struct fat_bs *, struct fat_node *,72 extern void fat_append_clusters(struct fat_bs *, struct fat_node *, 74 73 fat_cluster_t); 75 extern intfat_chop_clusters(struct fat_bs *, struct fat_node *,74 extern void fat_chop_clusters(struct fat_bs *, struct fat_node *, 76 75 fat_cluster_t); 77 76 extern int fat_alloc_clusters(struct fat_bs *, dev_handle_t, unsigned, 78 77 fat_cluster_t *, fat_cluster_t *); 79 extern intfat_free_clusters(struct fat_bs *, dev_handle_t, fat_cluster_t);80 extern intfat_alloc_shadow_clusters(struct fat_bs *, dev_handle_t,78 extern void fat_free_clusters(struct fat_bs *, dev_handle_t, fat_cluster_t); 79 extern void fat_alloc_shadow_clusters(struct fat_bs *, dev_handle_t, 81 80 fat_cluster_t *, unsigned); 82 extern int fat_get_cluster(struct fat_bs *, dev_handle_t, fat_cluster_t, 83 fat_cluster_t *); 84 extern int fat_set_cluster(struct fat_bs *, dev_handle_t, unsigned, 81 extern fat_cluster_t fat_get_cluster(struct fat_bs *, dev_handle_t, fat_cluster_t); 82 extern void fat_set_cluster(struct fat_bs *, dev_handle_t, unsigned, 85 83 fat_cluster_t, fat_cluster_t); 86 extern intfat_fill_gap(struct fat_bs *, struct fat_node *, fat_cluster_t,84 extern void fat_fill_gap(struct fat_bs *, struct fat_node *, fat_cluster_t, 87 85 off_t); 88 extern intfat_zero_cluster(struct fat_bs *, dev_handle_t, fat_cluster_t);86 extern void fat_zero_cluster(struct fat_bs *, dev_handle_t, fat_cluster_t); 89 87 90 88 #endif -
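
The header also shows the wrapper-macro convention: fat_block_get() is a macro that fills in the dev_handle and first cluster a node already carries before calling _fat_block_get(). A stand-alone toy of that pattern with simplified, made-up types:

#include <stdio.h>

typedef struct {
	int dev_handle;
	int firstc;
} node_t;

/* The general entry point takes everything explicitly. */
static int _block_get(int dev_handle, int firstc, int bn)
{
	return dev_handle * 1000 + firstc * 100 + bn;   /* fake block id */
}

/* The convenience macro supplies the fields the node carries. */
#define block_get(np, bn) \
	_block_get((np)->dev_handle, (np)->firstc, (bn))

int main(void)
{
	node_t n = { .dev_handle = 1, .firstc = 2 };
	printf("block id: %d\n", block_get(&n, 3));
	return 0;
}
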
uspace/srv/fs/fat/fat_ops.c
rad4b32c r7e266ff 94 94 95 95 /* Read the block that contains the dentry of interest. */ 96 rc = _fat_block_get(&b,bs, node->idx->dev_handle, node->idx->pfc,96 b = _fat_block_get(bs, node->idx->dev_handle, node->idx->pfc, 97 97 (node->idx->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE); 98 assert(rc == EOK);99 98 100 99 d = ((fat_dentry_t *)b->data) + (node->idx->pdi % dps); … … 203 202 204 203 /* Read the block that contains the dentry of interest. */ 205 rc = _fat_block_get(&b,bs, idxp->dev_handle, idxp->pfc,204 b = _fat_block_get(bs, idxp->dev_handle, idxp->pfc, 206 205 (idxp->pdi * sizeof(fat_dentry_t)) / bps, BLOCK_FLAGS_NONE); 207 assert( rc == EOK);206 assert(b); 208 207 209 208 d = ((fat_dentry_t *)b->data) + (idxp->pdi % dps); … … 220 219 * size of the directory by walking the FAT. 221 220 */ 222 uint16_t clusters; 223 rc = fat_clusters_get(&clusters, bs, idxp->dev_handle, 221 nodep->size = bps * spc * fat_clusters_get(bs, idxp->dev_handle, 224 222 uint16_t_le2host(d->firstc)); 225 assert(rc == EOK);226 nodep->size = bps * spc * clusters;227 223 } else { 228 224 nodep->type = FAT_FILE; … … 329 325 nodep = fat_node_get_new(); 330 326 if (!nodep) { 331 (void) fat_free_clusters(bs, dev_handle, mcl);327 fat_free_clusters(bs, dev_handle, mcl); 332 328 return NULL; 333 329 } 334 330 idxp = fat_idx_get_new(dev_handle); 335 331 if (!idxp) { 336 (void)fat_free_clusters(bs, dev_handle, mcl);332 fat_free_clusters(bs, dev_handle, mcl); 337 333 fat_node_put(FS_NODE(nodep)); 338 334 return NULL; … … 341 337 if (flags & L_DIRECTORY) { 342 338 /* Populate the new cluster with unused dentries. */ 343 rc = fat_zero_cluster(bs, dev_handle, mcl); 344 assert(rc == EOK); 339 fat_zero_cluster(bs, dev_handle, mcl); 345 340 nodep->type = FAT_DIRECTORY; 346 341 nodep->firstc = mcl; … … 366 361 fat_node_t *nodep = FAT_NODE(fn); 367 362 fat_bs_t *bs; 368 int rc = EOK;369 363 370 364 /* … … 385 379 assert(nodep->size); 386 380 /* Free all clusters allocated to the node. */ 387 rc = fat_free_clusters(bs, nodep->idx->dev_handle, 388 nodep->firstc); 381 fat_free_clusters(bs, nodep->idx->dev_handle, nodep->firstc); 389 382 } 390 383 … … 392 385 free(nodep->bp); 393 386 free(nodep); 394 return rc;387 return EOK; 395 388 } 396 389 … … 440 433 441 434 for (i = 0; i < blocks; i++) { 442 rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE); 443 assert(rc == EOK); 435 b = fat_block_get(bs, parentp, i, BLOCK_FLAGS_NONE); 444 436 for (j = 0; j < dps; j++) { 445 437 d = ((fat_dentry_t *)b->data) + j; … … 473 465 return rc; 474 466 } 475 rc = fat_zero_cluster(bs, parentp->idx->dev_handle, mcl); 476 assert(rc == EOK); 477 rc = fat_append_clusters(bs, parentp, mcl); 478 assert(rc == EOK); 467 fat_zero_cluster(bs, parentp->idx->dev_handle, mcl); 468 fat_append_clusters(bs, parentp, mcl); 479 469 parentp->size += bps * bs->spc; 480 470 parentp->dirty = true; /* need to sync node */ 481 rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE); 482 assert(rc == EOK); 471 b = fat_block_get(bs, parentp, i, BLOCK_FLAGS_NONE); 483 472 d = (fat_dentry_t *)b->data; 484 473 … … 505 494 * not use them anyway, so this is rather a sign of our good will. 
506 495 */ 507 rc = fat_block_get(&b, bs, childp, 0, BLOCK_FLAGS_NONE); 508 assert(rc == EOK); 496 b = fat_block_get(bs, childp, 0, BLOCK_FLAGS_NONE); 509 497 d = (fat_dentry_t *)b->data; 510 498 if (fat_classify_dentry(d) == FAT_DENTRY_LAST || … … 573 561 bps = uint16_t_le2host(bs->bps); 574 562 575 rc = _fat_block_get(&b,bs, childp->idx->dev_handle, childp->idx->pfc,563 b = _fat_block_get(bs, childp->idx->dev_handle, childp->idx->pfc, 576 564 (childp->idx->pdi * sizeof(fat_dentry_t)) / bps, 577 565 BLOCK_FLAGS_NONE); 578 assert(rc == EOK);579 566 d = (fat_dentry_t *)b->data + 580 567 (childp->idx->pdi % (bps / sizeof(fat_dentry_t))); … … 618 605 blocks = parentp->size / bps; 619 606 for (i = 0; i < blocks; i++) { 620 rc = fat_block_get(&b, bs, parentp, i, BLOCK_FLAGS_NONE); 621 assert(rc == EOK); 607 b = fat_block_get(bs, parentp, i, BLOCK_FLAGS_NONE); 622 608 for (j = 0; j < dps; j++) { 623 609 d = ((fat_dentry_t *)b->data) + j; … … 712 698 fat_dentry_t *d; 713 699 714 rc = fat_block_get(&b, bs, nodep, i, BLOCK_FLAGS_NONE); 715 assert(rc == EOK); 700 b = fat_block_get(bs, nodep, i, BLOCK_FLAGS_NONE); 716 701 for (j = 0; j < dps; j++) { 717 702 d = ((fat_dentry_t *)b->data) + j; … … 968 953 bytes = min(len, bps - pos % bps); 969 954 bytes = min(bytes, nodep->size - pos); 970 rc = fat_block_get(&b,bs, nodep, pos / bps,955 b = fat_block_get(bs, nodep, pos / bps, 971 956 BLOCK_FLAGS_NONE); 972 assert(rc == EOK);973 957 (void) ipc_data_read_finalize(callid, b->data + pos % bps, 974 958 bytes); … … 996 980 off_t o; 997 981 998 rc = fat_block_get(&b, bs, nodep, bnum, 999 BLOCK_FLAGS_NONE); 1000 assert(rc == EOK); 982 b = fat_block_get(bs, nodep, bnum, BLOCK_FLAGS_NONE); 1001 983 for (o = pos % (bps / sizeof(fat_dentry_t)); 1002 984 o < bps / sizeof(fat_dentry_t); … … 1093 1075 * next block size boundary. 1094 1076 */ 1095 rc = fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos); 1096 assert(rc == EOK); 1097 rc = fat_block_get(&b, bs, nodep, pos / bps, flags); 1098 assert(rc == EOK); 1077 fat_fill_gap(bs, nodep, FAT_CLST_RES0, pos); 1078 b = fat_block_get(bs, nodep, pos / bps, flags); 1099 1079 (void) ipc_data_write_finalize(callid, b->data + pos % bps, 1100 1080 bytes); … … 1129 1109 } 1130 1110 /* zero fill any gaps */ 1131 rc = fat_fill_gap(bs, nodep, mcl, pos); 1132 assert(rc == EOK); 1133 rc = _fat_block_get(&b, bs, dev_handle, lcl, (pos / bps) % spc, 1111 fat_fill_gap(bs, nodep, mcl, pos); 1112 b = _fat_block_get(bs, dev_handle, lcl, (pos / bps) % spc, 1134 1113 flags); 1135 assert(rc == EOK);1136 1114 (void) ipc_data_write_finalize(callid, b->data + pos % bps, 1137 1115 bytes); … … 1143 1121 * node's cluster chain. 
1144 1122 */ 1145 rc = fat_append_clusters(bs, nodep, mcl); 1146 assert(rc == EOK); 1123 fat_append_clusters(bs, nodep, mcl); 1147 1124 nodep->size = pos + bytes; 1148 1125 nodep->dirty = true; /* need to sync node */ … … 1197 1174 */ 1198 1175 if (size == 0) { 1199 rc = fat_chop_clusters(bs, nodep, FAT_CLST_RES0); 1200 if (rc != EOK) 1201 goto out; 1176 fat_chop_clusters(bs, nodep, FAT_CLST_RES0); 1202 1177 } else { 1203 1178 fat_cluster_t lastc; 1204 rc = fat_cluster_walk(bs, dev_handle, nodep->firstc, 1205 &lastc, NULL, (size - 1) / bpc); 1206 if (rc != EOK) 1207 goto out; 1208 rc = fat_chop_clusters(bs, nodep, lastc); 1209 if (rc != EOK) 1210 goto out; 1179 (void) fat_cluster_walk(bs, dev_handle, nodep->firstc, 1180 &lastc, (size - 1) / bpc); 1181 fat_chop_clusters(bs, nodep, lastc); 1211 1182 } 1212 1183 nodep->size = size; … … 1214 1185 rc = EOK; 1215 1186 } 1216 out:1217 1187 fat_node_put(fn); 1218 1188 ipc_answer_0(rid, rc);