Changeset 7e4e532 in mainline
- Timestamp:
- 2006-02-08T23:37:38Z (19 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 874878a
- Parents:
- bb68433
- Files:
-
- 10 edited
Legend:
- Unmodified
- Added
- Removed
-
arch/ia32/include/mm/asid.h
rbb68433 r7e4e532 38 38 typedef int asid_t; 39 39 40 #define ASID_MAX_ARCH 040 #define ASID_MAX_ARCH 3 41 41 42 #define asid_ install(as)42 #define asid_get() (ASID_START+1) 43 43 44 44 #endif -
arch/ia64/src/mm/frame.c
rbb68433 r7e4e532 44 44 45 45 /* 46 47 46 * Workaround to prevent slab allocator from allocating frame 0. 47 * Remove the following statement when the kernel is no longer 48 48 * identity mapped. 49 49 */ 50 50 frame_mark_unavailable(0, 1); 51 51 -
arch/ppc32/include/mm/asid.h
rbb68433 r7e4e532 32 32 typedef int asid_t; 33 33 34 #define ASID_MAX_ARCH 034 #define ASID_MAX_ARCH 3 35 35 36 #define asid_ install(as)36 #define asid_get() (ASID_START+1) 37 37 38 38 #endif -
arch/sparc64/src/mm/frame.c
rbb68433 r7e4e532 34 34 void frame_arch_init(void) 35 35 { 36 zone_create(0, config.memory_size >> FRAME_WIDTH, ADDR2PFN(ALIGN_UP(config.base + config.kernel_size, FRAME_SIZE)), 0);36 zone_create(0, config.memory_size >> FRAME_WIDTH, 1, 0); 37 37 38 38 /* -
genarch/src/mm/asid.c
rbb68433 r7e4e532 44 44 * a while. 45 45 * 46 * This code depends on the fact that ASIDS_ALLOCABLE 47 * is greater than number of supported CPUs (i.e. the 48 * amount of concurrently active address spaces). 49 * 46 50 * Architectures that don't have hardware support for address 47 51 * spaces do not compile with this file. … … 58 62 59 63 /** 60 * asidlock protects both the asids_allocated counter 61 * and the list of address spaces that were already 62 * assigned ASID. 64 * asidlock protects the asids_allocated counter. 63 65 */ 64 66 SPINLOCK_INITIALIZE(asidlock); … … 66 68 static count_t asids_allocated = 0; 67 69 68 /**69 * List of address spaces with assigned ASID.70 * When the system runs short of allocable71 * ASIDS, inactive address spaces are guaranteed72 * to be at the beginning of the list.73 */74 LIST_INITIALIZE(as_with_asid_head);75 76 77 70 /** Allocate free address space identifier. 78 71 * 79 * This code depends on the fact that ASIDS_ALLOCABLE80 * is greater than number of supported CPUs.72 * Interrupts must be disabled and as_lock must be held 73 * prior to this call. 81 74 * 82 75 * @return New ASID. … … 84 77 asid_t asid_get(void) 85 78 { 86 ipl_t ipl;87 79 asid_t asid; 88 80 link_t *tmp; … … 93 85 */ 94 86 95 ipl = interrupts_disable();96 87 spinlock_lock(&asidlock); 97 88 if (asids_allocated == ASIDS_ALLOCABLE) { … … 107 98 * inactive address space. 
108 99 */ 109 tmp = as_with_asid_head.next;110 ASSERT(tmp != &as_with_asid_head);100 ASSERT(!list_empty(&inactive_as_with_asid_head)); 101 tmp = inactive_as_with_asid_head.next; 111 102 list_remove(tmp); 112 103 113 as = list_get_instance(tmp, as_t, as_with_asid_link);104 as = list_get_instance(tmp, as_t, inactive_as_with_asid_link); 114 105 spinlock_lock(&as->lock); 115 106 … … 146 137 147 138 spinlock_unlock(&asidlock); 148 interrupts_restore(ipl);149 139 150 140 return asid; … … 171 161 interrupts_restore(ipl); 172 162 } 173 174 /** Install ASID.175 *176 * This function is to be executed on each address space switch.177 *178 * @param as Address space.179 */180 void asid_install(as_t *as)181 {182 ipl_t ipl;183 184 ipl = interrupts_disable();185 spinlock_lock(&asidlock);186 spinlock_lock(&as->lock);187 188 if (as->asid != ASID_KERNEL) {189 if (as->asid != ASID_INVALID) {190 /*191 * This address space has valid ASID.192 * Remove 'as' from the list of address spaces193 * with assigned ASID, so that it can be later194 * appended to the tail of the same list.195 * This is to prevent stealing of ASIDs from196 * recently installed address spaces.197 */198 list_remove(&as->as_with_asid_link);199 } else {200 spinlock_unlock(&as->lock);201 spinlock_unlock(&asidlock);202 203 /*204 * This address space doesn't have ASID assigned.205 * It was stolen or the address space is being206 * installed for the first time.207 * Allocate new ASID for it.208 */209 as->asid = asid_get();210 spinlock_lock(&asidlock);211 spinlock_lock(&as->lock);212 }213 214 /*215 * Now it is sure that 'as' has ASID.216 * It is therefore appended to the list217 * of address spaces from which it can218 * be stolen.219 */220 list_append(&as->as_with_asid_link, &as_with_asid_head);221 }222 223 spinlock_unlock(&as->lock);224 spinlock_unlock(&asidlock);225 interrupts_restore(ipl);226 } -
genarch/src/mm/page_ht.c
rbb68433 r7e4e532 170 170 171 171 if (!hash_table_find(&page_ht, key)) { 172 t = (pte_t *) malloc(sizeof(pte_t) );172 t = (pte_t *) malloc(sizeof(pte_t), FRAME_ATOMIC); 173 173 ASSERT(t != NULL); 174 174 -
generic/include/mm/as.h
rbb68433 r7e4e532 77 77 struct as { 78 78 /** Protected by asidlock. Must be acquired before as->lock. */ 79 link_t as_with_asid_link;79 link_t inactive_as_with_asid_link; 80 80 81 81 SPINLOCK_DECLARE(lock); 82 83 /** Number of processors on which this address space is active. */ 84 count_t refcount; 85 82 86 link_t as_area_head; 83 87 … … 97 101 extern as_operations_t *as_operations; 98 102 103 extern spinlock_t as_lock; 104 extern link_t inactive_as_with_asid_head; 105 99 106 extern void as_init(void); 100 107 extern as_t *as_create(int flags); … … 102 109 extern void as_set_mapping(as_t *as, __address page, __address frame); 103 110 extern int as_page_fault(__address page); 104 extern void as_ install(as_t *m);111 extern void as_switch(as_t *old, as_t *new); 105 112 106 113 /* Interface to be implemented by architectures. */ -
generic/include/mm/asid.h
rbb68433 r7e4e532 46 46 47 47 extern spinlock_t asidlock; 48 extern link_t as_with_asid_head;49 48 49 #ifndef asid_get 50 50 extern asid_t asid_get(void); 51 #endif /* !def asid_get */ 51 52 extern void asid_put(asid_t asid); 52 53 -
generic/src/mm/as.c
rbb68433 r7e4e532 57 57 as_operations_t *as_operations = NULL; 58 58 59 /** Address space lock. It protects inactive_as_with_asid_head. */ 60 SPINLOCK_INITIALIZE(as_lock); 61 62 /** 63 * This list contains address spaces that are not active on any 64 * processor and that have valid ASID. 65 */ 66 LIST_INITIALIZE(inactive_as_with_asid_head); 67 59 68 /** Kernel address space. */ 60 69 as_t *AS_KERNEL = NULL; … … 80 89 81 90 as = (as_t *) malloc(sizeof(as_t), 0); 82 83 list_initialize(&as->as_with_asid_link); 91 link_initialize(&as->inactive_as_with_asid_link); 84 92 spinlock_initialize(&as->lock, "as_lock"); 85 93 list_initialize(&as->as_area_head); … … 90 98 as->asid = ASID_INVALID; 91 99 100 as->refcount = 0; 92 101 as->page_table = page_table_create(flags); 93 102 … … 268 277 } 269 278 270 /** Install address space on CPU. 271 * 272 * @param as Address space. 273 */ 274 void as_install(as_t *as) 279 /** Switch address spaces. 280 * 281 * @param old Old address space or NULL. 282 * @param new New address space. 283 */ 284 void as_switch(as_t *old, as_t *new) 275 285 { 276 286 ipl_t ipl; 277 278 asid_install(as); 287 bool needs_asid = false; 279 288 280 289 ipl = interrupts_disable(); 281 spinlock_lock(&as->lock); 282 SET_PTL0_ADDRESS(as->page_table); 283 spinlock_unlock(&as->lock); 290 spinlock_lock(&as_lock); 291 292 /* 293 * First, take care of the old address space. 294 */ 295 if (old) { 296 spinlock_lock(&old->lock); 297 ASSERT(old->refcount); 298 if((--old->refcount == 0) && (old != AS_KERNEL)) { 299 /* 300 * The old address space is no longer active on 301 * any processor. It can be appended to the 302 * list of inactive address spaces with assigned 303 * ASID. 304 */ 305 ASSERT(old->asid != ASID_INVALID); 306 list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head); 307 } 308 spinlock_unlock(&old->lock); 309 } 310 311 /* 312 * Second, prepare the new address space. 
313 */ 314 spinlock_lock(&new->lock); 315 if ((new->refcount++ == 0) && (new != AS_KERNEL)) { 316 if (new->asid != ASID_INVALID) 317 list_remove(&new->inactive_as_with_asid_link); 318 else 319 needs_asid = true; /* defer call to asid_get() until new->lock is released */ 320 } 321 SET_PTL0_ADDRESS(new->page_table); 322 spinlock_unlock(&new->lock); 323 324 if (needs_asid) { 325 /* 326 * Allocation of new ASID was deferred 327 * until now in order to avoid deadlock. 328 */ 329 asid_t asid; 330 331 asid = asid_get(); 332 spinlock_lock(&new->lock); 333 new->asid = asid; 334 spinlock_unlock(&new->lock); 335 } 336 spinlock_unlock(&as_lock); 284 337 interrupts_restore(ipl); 285 338 286 339 /* 287 340 * Perform architecture-specific steps. 288 341 * (e.g. write ASID to hardware register etc.) 289 342 */ 290 as_install_arch( as);291 292 AS = as;343 as_install_arch(new); 344 345 AS = new; 293 346 } 294 347 -
generic/src/proc/scheduler.c
rbb68433 r7e4e532 328 328 * Replace the old one with the new one. 329 329 */ 330 as_ install(as2);330 as_switch(as1, as2); 331 331 } 332 332 TASK = THREAD->task; … … 336 336 337 337 #ifdef SCHEDULER_VERBOSE 338 printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, CPU->nrdy);338 printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy)); 339 339 #endif 340 340
Note:
See TracChangeset
for help on using the changeset viewer.