Changeset 57da95c in mainline
- Timestamp:
- 2006-09-18T11:47:28Z (18 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 29b2bbf
- Parents:
- f1d1f5d3
- Files:
-
- 14 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/arch/amd64/include/mm/as.h
rf1d1f5d3 r57da95c 49 49 50 50 #define as_install_arch(as) 51 #define as_deinstall_arch(as) 51 52 #define as_invalidate_translation_cache(as, page, cnt) 52 53 -
kernel/arch/ia32/include/mm/as.h
rf1d1f5d3 r57da95c 49 49 50 50 #define as_install_arch(as) 51 #define as_deinstall_arch(as) 51 52 #define as_invalidate_translation_cache(as, page, cnt) 52 53 -
kernel/arch/ia64/include/mm/as.h
rf1d1f5d3 r57da95c 48 48 } as_arch_t; 49 49 50 #define as_deinstall_arch(as) 50 51 #define as_invalidate_translation_cache(as, page, cnt) 51 52 -
kernel/arch/mips32/include/mm/as.h
rf1d1f5d3 r57da95c 48 48 } as_arch_t; 49 49 50 #define as_deinstall_arch(as) 50 51 #define as_invalidate_translation_cache(as, page, cnt) 51 52 -
kernel/arch/ppc32/include/mm/as.h
rf1d1f5d3 r57da95c 48 48 } as_arch_t; 49 49 50 #define as_deinstall_arch(as) 50 51 #define as_invalidate_translation_cache(as, page, cnt) 51 52 -
kernel/arch/ppc64/include/mm/as.h
rf1d1f5d3 r57da95c 49 49 50 50 #define as_install_arch(as) 51 #define as_deinstall_arch(as) 51 52 #define as_invalidate_translation_cache(as, page, cnt) 52 53 -
kernel/arch/sparc64/include/atomic.h
rf1d1f5d3 r57da95c 57 57 a = *((uint64_t *) x); 58 58 b = a + i; 59 __asm__ volatile ("casx %0, %1, %2\n" : "+m" (*((uint64_t *)x)), "+r" (a), "+r" (b)); 59 __asm__ volatile ("casx %0, %2, %1\n" : "+m" (*((uint64_t *)x)), "+r" (b) : "r" (a)); 60 60 } while (a != b); 61 61 -
kernel/arch/sparc64/include/mm/tsb.h
rf1d1f5d3 r57da95c 37 37 38 38 #include <arch/mm/tte.h> 39 #include <arch/mm/mmu.h> 39 40 #include <arch/types.h> 40 41 #include <typedefs.h> … … 47 48 * in TLBs - only one TLB entry will do. 48 49 */ 49 #define ITSB_ENTRY_COUNT 2048 50 #define DTSB_ENTRY_COUNT 2048 50 #define TSB_SIZE 2 /* when changing this, change as.c as well */ 51 #define ITSB_ENTRY_COUNT (512*(1<<TSB_SIZE)) 52 #define DTSB_ENTRY_COUNT (512*(1<<TSB_SIZE)) 51 53 52 54 struct tsb_entry { … … 54 56 tte_data_t data; 55 57 } __attribute__ ((packed)); 58 typedef struct tsb_entry tsb_entry_t; 56 59 57 typedef struct tsb_entry tsb_entry_t; 60 /** TSB Base register. */ 61 union tsb_base_reg { 62 uint64_t value; 63 struct { 64 uint64_t base : 51; /**< TSB base address, bits 63:13. */ 65 unsigned split : 1; /**< Split vs. common TSB for 8K and 64K pages. 66 * HelenOS uses only 8K pages for user mappings, 67 * so we always set this to 0. 68 */ 69 unsigned : 9; 70 unsigned size : 3; /**< TSB size. Number of entries is 512*2^size. */ 71 } __attribute__ ((packed)); 72 }; 73 typedef union tsb_base_reg tsb_base_reg_t; 74 75 /** Read ITSB Base register. 76 * 77 * @return Content of the ITSB Base register. 78 */ 79 static inline uint64_t itsb_base_read(void) 80 { 81 return asi_u64_read(ASI_IMMU, VA_IMMU_TSB_BASE); 82 } 83 84 /** Read DTSB Base register. 85 * 86 * @return Content of the DTSB Base register. 87 */ 88 static inline uint64_t dtsb_base_read(void) 89 { 90 return asi_u64_read(ASI_DMMU, VA_DMMU_TSB_BASE); 91 } 92 93 /** Write ITSB Base register. 94 * 95 * @param v New content of the ITSB Base register. 96 */ 97 static inline void itsb_base_write(uint64_t v) 98 { 99 asi_u64_write(ASI_IMMU, VA_IMMU_TSB_BASE, v); 100 } 101 102 /** Write DTSB Base register. 103 * 104 * @param v New content of the DTSB Base register. 
105 */ 106 static inline void dtsb_base_write(uint64_t v) 107 { 108 asi_u64_write(ASI_DMMU, VA_DMMU_TSB_BASE, v); 109 } 58 110 59 111 extern void tsb_invalidate(as_t *as, uintptr_t page, count_t pages); -
kernel/arch/sparc64/src/mm/as.c
rf1d1f5d3 r57da95c 37 37 #include <genarch/mm/as_ht.h> 38 38 #include <genarch/mm/asid_fifo.h> 39 #include <debug.h> 40 41 #ifdef CONFIG_TSB 42 #include <arch/mm/tsb.h> 43 #endif 39 44 40 45 /** Architecture dependent address space init. */ … … 45 50 } 46 51 52 /** Perform sparc64-specific tasks when an address space becomes active on the processor. 53 * 54 * Install ASID and map TSBs. 55 * 56 * @param as Address space. 57 */ 47 58 void as_install_arch(as_t *as) 48 59 { 49 60 tlb_context_reg_t ctx; 61 62 /* 63 * Note that we don't lock the address space. 64 * That's correct - we can afford it here 65 * because we only read members that are 66 * currently read-only. 67 */ 50 68 51 69 /* … … 59 77 ctx.context = as->asid; 60 78 mmu_secondary_context_write(ctx.v); 79 80 #ifdef CONFIG_TSB 81 if (as != AS_KERNEL) { 82 uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH); 83 84 ASSERT(as->arch.itsb && as->arch.dtsb); 85 86 uintptr_t tsb = as->arch.itsb; 87 88 if (!overlaps(tsb, 8*PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) { 89 /* 90 * TSBs were allocated from memory not covered 91 * by the locked 4M kernel DTLB entry. We need 92 * to map both TSBs explicitly. 93 */ 94 dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb); 95 dtlb_insert_mapping(tsb, KA2PA(tsb), PAGESIZE_64K, true, true); 96 } 97 98 /* 99 * Setup TSB Base registers. 100 */ 101 tsb_base_reg_t tsb_base; 102 103 tsb_base.value = 0; 104 tsb_base.size = TSB_SIZE; 105 tsb_base.split = 0; 106 107 tsb_base.base = as->arch.itsb >> PAGE_WIDTH; 108 itsb_base_write(tsb_base.value); 109 tsb_base.base = as->arch.dtsb >> PAGE_WIDTH; 110 dtsb_base_write(tsb_base.value); 111 } 112 #endif 113 } 114 115 /** Perform sparc64-specific tasks when an address space is removed from the processor. 116 * 117 * Demap TSBs. 118 * 119 * @param as Address space. 120 */ 121 void as_deinstall_arch(as_t *as) 122 { 123 124 /* 125 * Note that we don't lock the address space. 
126 * That's correct - we can afford it here 127 * because we only read members that are 128 * currently read-only. 129 */ 130 131 #ifdef CONFIG_TSB 132 if (as != AS_KERNEL) { 133 uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH); 134 135 ASSERT(as->arch.itsb && as->arch.dtsb); 136 137 uintptr_t tsb = as->arch.itsb; 138 139 if (!overlaps(tsb, 8*PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) { 140 /* 141 * TSBs were allocated from memory not covered 142 * by the locked 4M kernel DTLB entry. We need 143 * to demap the entry installed by as_install_arch(). 144 */ 145 dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb); 146 } 147 148 } 149 #endif 61 150 } 62 151 -
kernel/arch/sparc64/src/proc/scheduler.c
rf1d1f5d3 r57da95c 121 121 if ((THREAD->flags & THREAD_FLAG_USPACE)) { 122 122 /* 123 * If this thread executes also in userspace, we have to force all 124 * its still-active userspace windows into the userspace window buffer 125 * and demap the buffer from DTLB. 123 * If this thread executes also in userspace, we have to 124 * demap the userspace window buffer from DTLB. 126 125 */ 127 126 ASSERT(THREAD->arch.uspace_window_buffer); -
kernel/arch/xen32/include/mm/as.h
rf1d1f5d3 r57da95c 49 49 50 50 #define as_install_arch(as) 51 #define as_deinstall_arch(as) 51 52 #define as_invalidate_translation_cache(as, page, cnt) 52 53 -
kernel/generic/include/mm/as.h
rf1d1f5d3 r57da95c 161 161 /** Data to be used by the backend. */ 162 162 mem_backend_data_t backend_data; 163 164 as_arch_t arch; 163 165 }; 164 166 … … 193 195 extern void as_install_arch(as_t *as); 194 196 #endif /* !def as_install_arch */ 197 #ifndef as_deinstall_arch 198 extern void as_deinstall_arch(as_t *as); 199 #endif /* !def as_deinstall_arch */ 195 200 196 201 /* Backend declarations. */ -
kernel/generic/src/mm/as.c
rf1d1f5d3 r57da95c 85 85 as_operations_t *as_operations = NULL; 86 86 87 /** 88 * Slab for as_t objects. 89 */ 90 static slab_cache_t *as_slab; 91 87 92 /** This lock protects inactive_as_with_asid_head list. It must be acquired before as_t mutex. */ 88 93 SPINLOCK_INITIALIZE(inactive_as_with_asid_lock); … … 106 111 { 107 112 as_arch_init(); 113 114 as_slab = slab_cache_create("as_slab", sizeof(as_t), 0, NULL, NULL, SLAB_CACHE_MAGDEFERRED); 115 108 116 AS_KERNEL = as_create(FLAG_AS_KERNEL); 109 117 if (!AS_KERNEL) … … 120 128 as_t *as; 121 129 122 as = (as_t *) malloc(sizeof(as_t), 0);130 as = (as_t *) slab_alloc(as_slab, 0); 123 131 link_initialize(&as->inactive_as_with_asid_link); 124 132 mutex_initialize(&as->lock); … … 183 191 interrupts_restore(ipl); 184 192 185 free(as);193 slab_free(as_slab, as); 186 194 } 187 195 … … 799 807 } 800 808 mutex_unlock(&old->lock); 809 810 /* 811 * Perform architecture-specific tasks when the address space 812 * is being removed from the CPU. 813 */ 814 as_deinstall_arch(old); 801 815 } 802 816 -
uspace/libc/arch/sparc64/include/atomic.h
rf1d1f5d3 r57da95c 52 52 53 53 do { 54 volatile uintptr_t x = (uint64_t) &val->count; 55 56 a = *((uint64_t *) x); 54 a = val->count; 57 55 b = a + i; 58 __asm__ volatile ("casx %0, %1, %2\n" : "+m" (*((uint64_t *)x)), "+r" (a), "+r" (b)); 56 __asm__ volatile ("casx %0, %2, %1\n" : "+m" (*val), "+r" (b) : "r" (a)); 59 57 } while (a != b); 60 58
Note:
See TracChangeset
for help on using the changeset viewer.