Changeset 8e1ea655 in mainline
- Timestamp:
- 2006-02-05T21:51:19Z (19 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- c585827
- Parents:
- 5c9a08b
- Location:
- generic
- Files:
- 5 edited
Legend:
- Unmodified
- Added
- Removed
generic/include/mm/as.h
r5c9a08b r8e1ea655 50 50 51 51 #define FLAG_AS_KERNEL (1 << 0) /**< Kernel address space. */ 52 #define FLAG_AS_EARLYMALLOC (1 << 1) /**< Use early malloc */53 52 54 53 enum as_area_type { -
generic/include/mm/slab.h
r5c9a08b r8e1ea655 56 56 #define SLAB_CACHE_NOMAGAZINE 0x1 /**< Do not use per-cpu cache */ 57 57 #define SLAB_CACHE_SLINSIDE 0x2 /**< Have control structure inside SLAB */ 58 /** We add magazine cache later, if we have this flag */ 59 #define SLAB_CACHE_MAGDEFERRED (0x4 | SLAB_CACHE_NOMAGAZINE) 58 60 59 61 typedef struct { … … 63 65 void *objs[0]; /**< Slots in magazine */ 64 66 }slab_magazine_t; 67 68 typedef struct { 69 slab_magazine_t *current; 70 slab_magazine_t *last; 71 SPINLOCK_DECLARE(lock); 72 }slab_mag_cache_t; 73 65 74 66 75 typedef struct { … … 93 102 94 103 /** CPU cache */ 95 struct { 96 slab_magazine_t *current; 97 slab_magazine_t *last; 98 SPINLOCK_DECLARE(lock); 99 }mag_cache[0]; 104 slab_mag_cache_t *mag_cache; 100 105 }slab_cache_t; 101 106 … … 114 119 /** Initialize SLAB subsytem */ 115 120 extern void slab_cache_init(void); 121 extern void slab_enable_cpucache(void); 116 122 117 123 /* KConsole debug */ … … 121 127 extern void * kalloc(unsigned int size, int flags); 122 128 extern void kfree(void *obj); 123 124 129 #endif -
generic/src/main/main.c
r5c9a08b r8e1ea655 161 161 early_heap_init(config.heap_addr, config.heap_size + config.heap_delta); 162 162 frame_init(); 163 slab_cache_init(); 163 164 as_init(); 164 165 page_init(); … … 174 175 smp_init(); 175 176 /* Slab must be initialized AFTER we know the number of processors */ 176 slab_ cache_init();177 slab_enable_cpucache(); 177 178 178 179 printf("config.memory_size=%dM\n", config.memory_size/(1024*1024)); -
generic/src/mm/as.c
r5c9a08b r8e1ea655 66 66 { 67 67 as_arch_init(); 68 AS_KERNEL = as_create(FLAG_AS_KERNEL | FLAG_AS_EARLYMALLOC);68 AS_KERNEL = as_create(FLAG_AS_KERNEL); 69 69 if (!AS_KERNEL) 70 70 panic("can't create kernel address space\n"); … … 79 79 as_t *as; 80 80 81 if (flags & FLAG_AS_EARLYMALLOC) 82 as = (as_t *) early_malloc(sizeof(as_t)); 83 else 84 as = (as_t *) malloc(sizeof(as_t)); 81 as = (as_t *) malloc(sizeof(as_t)); 85 82 if (as) { 86 83 list_initialize(&as->as_with_asid_link); -
generic/src/mm/slab.c
r5c9a08b r8e1ea655 113 113 /** Cache for cache descriptors */ 114 114 static slab_cache_t slab_cache_cache; 115 115 /** Cache for magcache structure from cache_t */ 116 static slab_cache_t *cpu_cache = NULL; 116 117 /** Cache for external slab descriptors 117 118 * This time we want per-cpu cache, so do not make it static … … 235 236 236 237 ASSERT(slab->cache == cache); 237 ASSERT(slab->available < cache->objects);238 238 239 239 if (cache->destructor) … … 241 241 242 242 spinlock_lock(&cache->slablock); 243 ASSERT(slab->available < cache->objects); 243 244 244 245 *((int *)obj) = slab->nextavail; … … 537 538 } 538 539 540 /** 541 * Initialize mag_cache structure in slab cache 542 */ 543 static void make_magcache(slab_cache_t *cache) 544 { 545 int i; 546 547 ASSERT(cpu_cache); 548 cache->mag_cache = slab_alloc(cpu_cache, 0); 549 for (i=0; i < config.cpu_count; i++) { 550 memsetb((__address)&cache->mag_cache[i], 551 sizeof(cache->mag_cache[i]), 0); 552 spinlock_initialize(&cache->mag_cache[i].lock, 553 "slab_maglock_cpu"); 554 } 555 } 556 539 557 /** Initialize allocated memory as a slab cache */ 540 558 static void … … 547 565 int flags) 548 566 { 549 int i;550 567 int pages; 551 568 ipl_t ipl; … … 569 586 spinlock_initialize(&cache->slablock, "slab_lock"); 570 587 spinlock_initialize(&cache->maglock, "slab_maglock"); 571 if (! (cache->flags & SLAB_CACHE_NOMAGAZINE)) { 572 for (i=0; i < config.cpu_count; i++) { 573 memsetb((__address)&cache->mag_cache[i], 574 sizeof(cache->mag_cache[i]), 0); 575 spinlock_initialize(&cache->mag_cache[i].lock, 576 "slab_maglock_cpu"); 577 } 578 } 588 if (! (cache->flags & SLAB_CACHE_NOMAGAZINE)) 589 make_magcache(cache); 579 590 580 591 /* Compute slab sizes, object counts in slabs etc. 
*/ … … 697 708 panic("Destroying cache that is not empty."); 698 709 710 if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) 711 slab_free(cpu_cache, cache->mag_cache); 699 712 slab_free(&slab_cache_cache, cache); 700 713 } … … 811 824 _slab_cache_create(&slab_cache_cache, 812 825 "slab_cache", 813 sizeof(slab_cache_cache) + config.cpu_count*sizeof(slab_cache_cache.mag_cache[0]),826 sizeof(slab_cache_cache), 814 827 sizeof(__address), 815 828 NULL, NULL, … … 819 832 sizeof(slab_t), 820 833 0, NULL, NULL, 821 SLAB_CACHE_SLINSIDE );834 SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED); 822 835 823 836 /* Initialize structures for malloc */ … … 827 840 malloc_caches[i] = slab_cache_create(malloc_names[i], 828 841 size, 0, 829 NULL,NULL, 0);842 NULL,NULL, SLAB_CACHE_MAGDEFERRED); 830 843 } 831 844 #ifdef CONFIG_DEBUG … … 834 847 } 835 848 849 /** Enable cpu_cache 850 * 851 * Kernel calls this function, when it knows the real number of 852 * processors. 853 * Allocate slab for cpucache and enable it on all existing 854 * slabs that are SLAB_CACHE_MAGDEFERRED 855 */ 856 void slab_enable_cpucache(void) 857 { 858 link_t *cur; 859 slab_cache_t *s; 860 861 cpu_cache = slab_cache_create("magcpucache", 862 sizeof(slab_mag_cache_t) * config.cpu_count, 863 0, NULL, NULL, 864 SLAB_CACHE_NOMAGAZINE); 865 spinlock_lock(&slab_cache_lock); 866 867 for (cur=slab_cache_list.next; cur != &slab_cache_list;cur=cur->next){ 868 s = list_get_instance(cur, slab_cache_t, link); 869 if ((s->flags & SLAB_CACHE_MAGDEFERRED) != SLAB_CACHE_MAGDEFERRED) 870 continue; 871 make_magcache(s); 872 s->flags &= ~SLAB_CACHE_MAGDEFERRED; 873 } 874 875 spinlock_unlock(&slab_cache_lock); 876 } 877 836 878 /**************************************/ 837 879 /* kalloc/kfree functions */
Note:
See TracChangeset
for help on using the changeset viewer.