Changeset c7ec94a4 in mainline for genarch/src/mm/page_ht.c
- Timestamp: 2006-02-06T14:18:28Z (19 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: f5935ed
- Parents: 214f5bb
- File: 1 edited
Legend:
- Unmodified (context line, prefixed with a space)
- Added (prefixed with +)
- Removed (prefixed with -)
genarch/src/mm/page_ht.c
--- genarch/src/mm/page_ht.c	(r214f5bb)
+++ genarch/src/mm/page_ht.c	(rc7ec94a4)
@@ -29,4 +29,5 @@
 #include <genarch/mm/page_ht.h>
 #include <mm/page.h>
+#include <arch/mm/page.h>
 #include <mm/frame.h>
 #include <mm/heap.h>
@@ -40,4 +41,12 @@
 #include <debug.h>
 #include <memstr.h>
+#include <adt/hash_table.h>
+
+static index_t hash(__native key[]);
+static bool compare(__native key[], count_t keys, link_t *item);
+static void remove_callback(link_t *item);
+
+static void ht_mapping_insert(as_t *as, __address page, __address frame, int flags);
+static pte_t *ht_mapping_find(as_t *as, __address page);
 
 /**
@@ -48,10 +57,14 @@
 /**
- * Page hash table pointer.
+ * Page hash table.
  * The page hash table may be accessed only when page_ht_lock is held.
  */
-pte_t *page_ht = NULL;
-
-static void ht_mapping_insert(as_t *as, __address page, __address frame, int flags);
-static pte_t *ht_mapping_find(as_t *as, __address page);
+hash_table_t page_ht;
+
+/** Hash table operations for page hash table. */
+hash_table_operations_t ht_operations = {
+	.hash = hash,
+	.compare = compare,
+	.remove_callback = remove_callback
+};
 
 page_operations_t page_ht_operations = {
@@ -61,2 +74,76 @@
 
+/** Compute page hash table index.
+ *
+ * @param key Array of two keys (i.e. page and address space).
+ *
+ * @return Index into page hash table.
+ */
+index_t hash(__native key[])
+{
+	as_t *as = (as_t *) key[KEY_AS];
+	__address page = (__address) key[KEY_PAGE];
+	index_t index;
+	
+	/*
+	 * Virtual page addresses have roughly the same probability
+	 * of occurring. Least significant bits of VPN compose the
+	 * hash index.
+	 */
+	index = ((page >> PAGE_WIDTH) & (PAGE_HT_ENTRIES-1));
+	
+	/*
+	 * Address space structures are likely to be allocated from
+	 * similar addresses. Least significant bits compose the
+	 * hash index.
+	 */
+	index |= ((__native) as) & (PAGE_HT_ENTRIES-1);
+	
+	return index;
+}
+
+/** Compare page hash table item with page and/or address space.
+ *
+ * @param key Array of one or two keys (i.e. page and/or address space).
+ * @param keys Number of keys passed.
+ * @param item Item to compare the keys with.
+ *
+ * @return true on match, false otherwise.
+ */
+bool compare(__native key[], count_t keys, link_t *item)
+{
+	pte_t *t;
+
+	ASSERT(item);
+	ASSERT((keys > 0) && (keys <= PAGE_HT_KEYS));
+
+	/*
+	 * Convert item to PTE.
+	 */
+	t = list_get_instance(item, pte_t, link);
+
+	if (keys == PAGE_HT_KEYS) {
+		return (key[KEY_AS] == (__address) t->as) && (key[KEY_PAGE] == t->page);
+	} else {
+		return (key[KEY_AS] == (__address) t->as);
+	}
+}
+
+/** Callback on page hash table item removal.
+ *
+ * @param item Page hash table item being removed.
+ */
+void remove_callback(link_t *item)
+{
+	pte_t *t;
+
+	ASSERT(item);
+
+	/*
+	 * Convert item to PTE.
+	 */
+	t = list_get_instance(item, pte_t, link);
+
+	free(t);
+}
+
 /** Map page to frame using page hash table.
@@ -74,40 +161,17 @@
 void ht_mapping_insert(as_t *as, __address page, __address frame, int flags)
 {
-	pte_t *t, *u;
+	pte_t *t;
 	ipl_t ipl;
+	__native key[2] = { (__address) as, page };
 	
 	ipl = interrupts_disable();
 	spinlock_lock(&page_ht_lock);
 	
-	t = HT_HASH(page, as->asid);
-	if (!HT_SLOT_EMPTY(t)) {
-
-		/*
-		 * The slot is occupied.
-		 * Walk through the collision chain and append the mapping to its end.
-		 */
-
-		do {
-			u = t;
-			if (HT_COMPARE(page, as->asid, t)) {
-				/*
-				 * Nothing to do,
-				 * the record is already there.
-				 */
-				spinlock_unlock(&page_ht_lock);
-				interrupts_restore(ipl);
-				return;
-			}
-		} while ((t = HT_GET_NEXT(t)));
-
-		t = (pte_t *) malloc(sizeof(pte_t));	/* FIXME: use slab allocator for this */
-		if (!t)
-			panic("could not allocate memory\n");
-
-		HT_SET_NEXT(u, t);
+	if (!hash_table_find(&page_ht, key)) {
+		t = (pte_t *) malloc(sizeof(pte_t));
+		ASSERT(t != NULL);
+
+		hash_table_insert(&page_ht, key, &t->link);
 	}
-
-	HT_SET_RECORD(t, page, as->asid, frame, flags);
-	HT_SET_NEXT(t, NULL);
 	
 	spinlock_unlock(&page_ht_lock);
@@ -128,40 +192,15 @@
 pte_t *ht_mapping_find(as_t *as, __address page)
 {
-	pte_t *t;
+	link_t *hlp;
+	pte_t *t = NULL;
+	__native key[2] = { (__address) as, page };
 	
 	spinlock_lock(&page_ht_lock);
-	t = HT_HASH(page, as->asid);
-	if (!HT_SLOT_EMPTY(t)) {
-		while (!HT_COMPARE(page, as->asid, t) && HT_GET_NEXT(t))
-			t = HT_GET_NEXT(t);
-		t = HT_COMPARE(page, as->asid, t) ? t : NULL;
-	} else {
-		t = NULL;
-	}
+	
+	hlp = hash_table_find(&page_ht, key);
+	if (hlp)
+		t = list_get_instance(hlp, pte_t, link);
+	
 	spinlock_unlock(&page_ht_lock);
 	return t;
 }
-
-/** Invalidate page hash table.
- *
- * Interrupts must be disabled.
- */
-void ht_invalidate_all(void)
-{
-	pte_t *t, *u;
-	int i;
-	
-	spinlock_lock(&page_ht_lock);
-	for (i = 0; i < HT_ENTRIES; i++) {
-		if (!HT_SLOT_EMPTY(&page_ht[i])) {
-			t = HT_GET_NEXT(&page_ht[i]);
-			while (t) {
-				u = t;
-				t = HT_GET_NEXT(t);
-				free(u);	/* FIXME: use slab allocator for this */
-			}
-			HT_INVALIDATE_SLOT(&page_ht[i]);
-		}
-	}
-	spinlock_unlock(&page_ht_lock);
-}
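For readers unfamiliar with the two-key hashing scheme, the bucket index computed by the new hash() function can be reproduced in isolation. The sketch below is not part of the changeset: PAGE_WIDTH and PAGE_HT_ENTRIES are assumed demonstration values standing in for the kernel's own definitions, and ht_index() with its sample addresses is a hypothetical stand-alone mirror of hash().

/*
 * Standalone illustration of the bucket index computed by hash() above.
 * Not part of the changeset; PAGE_WIDTH and PAGE_HT_ENTRIES are assumed
 * demonstration values, the real ones come from the kernel headers.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define PAGE_WIDTH      12              /* assumed: 4 KiB pages */
#define PAGE_HT_ENTRIES (1 << 10)       /* assumed: 1024 hash table buckets */

/*
 * Mirror of hash(): low bits of the virtual page number OR'ed with
 * low bits of the address space pointer select the bucket.
 */
static size_t ht_index(uintptr_t as, uintptr_t page)
{
	size_t index;

	index = (page >> PAGE_WIDTH) & (PAGE_HT_ENTRIES - 1);
	index |= as & (PAGE_HT_ENTRIES - 1);

	return index;
}

int main(void)
{
	uintptr_t as = 0x80421000UL;    /* hypothetical as_t address */
	uintptr_t page = 0x7fff2000UL;  /* hypothetical virtual page address */

	printf("bucket = %zu\n", ht_index(as, page));
	return 0;
}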
Note: See TracChangeset for help on using the changeset viewer.