Changeset 961c0484 in mainline
- Timestamp: 2011-12-04T22:33:03Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: b4e59b3
- Parents: 375fc3f
- Location: kernel/generic
- Files: 2 edited
Legend: unmodified context lines are shown unprefixed, added lines are prefixed with "+", removed lines with "-", and "…" marks elided unchanged context.
kernel/generic/include/lib/ra.h
r375fc3f → r961c0484

 typedef struct {
-	link_t span_link;	/**< Link for the arena's list of spans. */
+	link_t span_link;	/**< Arena's list of spans link. */
 
 	list_t segments;	/**< List of span's segments. */
…
 
 	hash_table_t used;
+
+	uintptr_t base;		/**< Span base. */
+	size_t size;		/**< Span size. */
 } ra_span_t;
 
+/*
+ * We would like to achieve a good ratio of the size of one unit of the
+ * represented resource (e.g. a page) and sizeof(ra_segment_t). We therefore
+ * attempt to have as few redundant information in the segment as possible. For
+ * example, the size of the segment needs to be calculated from the segment
+ * base and the base of the following segment.
+ */
 typedef struct {
-	link_t segment_link;	/**< Link for the span's list of segments. */
-	link_t free_link;	/**< Link for the span's free list. */
-	link_t used_link;	/**< Link for the span's used hash table. */
+	link_t segment_link;	/**< Span's segment list link. */
+	link_t fu_link;		/**< Span's free list or used hash link. */
 
 	uintptr_t base;		/**< Segment base. */
-	uintptr_t size;		/**< Segment size. */
-
 } ra_segment_t;
 
 extern ra_arena_t *ra_arena_create(uintptr_t, size_t);
-extern void ra_span_add(ra_arena_t *, uintptr_t, size_t);
+extern bool ra_span_add(ra_arena_t *, uintptr_t, size_t);
 extern uintptr_t ra_alloc(ra_arena_t *, size_t, size_t);
 extern void ra_free(ra_arena_t *, uintptr_t, size_t);
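To make the interface changes concrete, here is a minimal usage sketch of the revised API. It is not part of the changeset: the ra_usage_example() wrapper and the numeric ranges are invented for illustration, and the code assumes the HelenOS kernel environment where these functions live.

#include <lib/ra.h>

/* Hypothetical caller, for illustration only. */
static void ra_usage_example(void)
{
	/* An arena managing some numeric resource; base must not be 0. */
	ra_arena_t *arena = ra_arena_create(0x1000, 0x8000);
	if (!arena)
		return;

	/* ra_span_add() now returns bool instead of asserting on failure. */
	if (!ra_span_add(arena, 0x10000, 0x4000))
		return;

	/* Allocate 256 units aligned on a 256-unit boundary. */
	uintptr_t base = ra_alloc(arena, 256, 256);
	if (base != 0)
		ra_free(arena, base, 256);
}

The base == 0 restriction and the 0-on-failure convention of ra_alloc() go hand in hand: since 0 signals allocation failure, no span may hand out resource 0.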
kernel/generic/src/lib/ra.c
r375fc3f → r961c0484

 #include <bitops.h>
 #include <debug.h>
+#include <panic.h>
+#include <adt/list.h>
+#include <adt/hash_table.h>
+#include <align.h>
+#include <macros.h>
+
+/*
+ * The last segment on the segment list will be a special sentinel segment which
+ * is neither in any free list nor in the used segment hash.
+ */
+#define IS_LAST_SEG(seg)	(!(seg)->fu_link.next)
 
 static hash_table_operations_t used_ops = {
…
 };
 
-static ra_segment_t *ra_segment_create(uintptr_t base, size_t size)
+/** Calculate the segment size. */
+static size_t ra_segment_size_get(ra_segment_t *seg)
+{
+	ra_segment_t *nextseg;
+
+	ASSERT(!IS_LAST_SEG(seg));
+
+	nextseg = list_get_instance(seg->segment_link.next, ra_segment_t,
+	    segment_link);
+	return nextseg->base - seg->base;
+}
+
+static ra_segment_t *ra_segment_create(uintptr_t base)
 {
 	ra_segment_t *seg;
…
 
 	link_initialize(&seg->segment_link);
-	link_initialize(&seg->free_link);
-	link_initialize(&seg->used_link);
+	link_initialize(&seg->fu_link);
 
 	seg->base = base;
-	seg->size = size;
 
 	return seg;
 }
 
-/*
 static void ra_segment_destroy(ra_segment_t *seg)
 {
 	free(seg);
 }
-*/
 
 static ra_span_t *ra_span_create(uintptr_t base, size_t size)
 {
 	ra_span_t *span;
-	ra_segment_t *seg;
+	ra_segment_t *seg, *lastseg;
 	unsigned int i;
…
 
 	span->max_order = fnzb(size);
+	span->base = base;
+	span->size = size;
 
 	span->free = (list_t *) malloc((span->max_order + 1) * sizeof(list_t),
…
 		return NULL;
 	}
-	seg = ra_segment_create(base, size);
+
+	/*
+	 * Create a segment to represent the entire size of the span.
+	 */
+	seg = ra_segment_create(base);
 	if (!seg) {
 		free(span->free);
…
 	}
 
+	/*
+	 * The last segment will be used as a sentinel at the end of the
+	 * segment list so that it is possible to calculate the size for
+	 * all other segments. It will not be placed in any free list or
+	 * in the used segment hash and adjacent segments will not be
+	 * coalesced with it.
+	 */
+	lastseg = ra_segment_create(base + size);
+	if (!lastseg) {
+		ra_segment_destroy(seg);
+		free(span->free);
+		free(span);
+		return NULL;
+	}
+	/* Make sure we have NULL here so that we can recognize the sentinel. */
+	lastseg->fu_link.next = NULL;
+
 	link_initialize(&span->span_link);
 	list_initialize(&span->segments);
…
 		list_initialize(&span->free[i]);
 
+	/* Insert the first segment into the list of segments. */
 	list_append(&seg->segment_link, &span->segments);
-	list_append(&seg->free_link, &span->free[span->max_order]);
+	/* Insert the last segment into the list of segments. */
+	list_append(&lastseg->segment_link, &span->segments);
+
+	/* Insert the first segment into the respective free list. */
+	list_append(&seg->fu_link, &span->free[span->max_order]);
 
 	return span;
 }
 
+/** Create arena with initial span. */
 ra_arena_t *ra_arena_create(uintptr_t base, size_t size)
 {
 	ra_arena_t *arena;
 	ra_span_t *span;
+
+	/*
+	 * At the moment, we can only create resources that don't include 0.
+	 * If 0 needs to be considered as a valid resource, we would need to
+	 * slightly change the API of the resource allocator.
+	 */
+	if (base == 0)
+		return NULL;
 
 	arena = (ra_arena_t *) malloc(sizeof(ra_arena_t), FRAME_ATOMIC);
…
 }
 
-void ra_span_add(ra_arena_t *arena, uintptr_t base, size_t size)
+/** Add additional span to arena. */
+bool ra_span_add(ra_arena_t *arena, uintptr_t base, size_t size)
 {
 	ra_span_t *span;
 
+	if (base == 0)
+		return false;
+
 	span = ra_span_create(base, size);
-	ASSERT(span);
+	if (!span)
+		return false;
 
 	/* TODO: check for overlaps */
 	list_append(&span->span_link, &arena->spans);
-}
-
+	return true;
+}
+
+static uintptr_t ra_span_alloc(ra_span_t *span, size_t size, size_t align)
+{
+	/*
+	 * We need to add the maximum of align - 1 to be able to compensate for
+	 * the worst case unaligned segment.
+	 */
+	size_t needed = size + align - 1;
+	size_t order = ispwr2(needed) ? fnzb(needed) : fnzb(needed) + 1;
+	ra_segment_t *pred = NULL;
+	ra_segment_t *succ = NULL;
+
+	/*
+	 * Find the free list of the smallest order which can satisfy this
+	 * request.
+	 */
+	for (; order <= span->max_order; order++) {
+		ra_segment_t *seg;
+		uintptr_t newbase;
+
+		if (list_empty(&span->free[order]))
+			continue;
+
+		/* Take the first segment from the free list. */
+		seg = list_get_instance(list_first(&span->free[order]),
+		    ra_segment_t, fu_link);
+
+		/*
+		 * See if we need to allocate new segments for the chopped-off
+		 * parts of this segment.
+		 */
+		if (!IS_ALIGNED(seg->base, align)) {
+			pred = ra_segment_create(seg->base);
+			if (!pred) {
+				/*
+				 * Fail as we are unable to split the segment.
+				 */
+				break;
+			}
+		}
+		newbase = ALIGN_UP(seg->base, align);
+		if (newbase + size != seg->base + ra_segment_size_get(seg)) {
+			ASSERT(newbase + size < seg->base +
+			    ra_segment_size_get(seg));
+			succ = ra_segment_create(newbase + size);
+			if (!succ) {
+				if (pred)
+					ra_segment_destroy(pred);
+				/*
+				 * Fail as we are unable to split the segment.
+				 */
+				break;
+			}
+		}
+
+
+		/* Put unneeded parts back. */
+		if (pred) {
+			size_t pred_order;
+
+			list_insert_before(&pred->segment_link,
+			    &seg->segment_link);
+			pred_order = fnzb(ra_segment_size_get(pred));
+			list_append(&pred->fu_link, &span->free[pred_order]);
+		}
+		if (succ) {
+			size_t succ_order;
+
+			list_insert_after(&succ->segment_link,
+			    &seg->segment_link);
+			succ_order = fnzb(ra_segment_size_get(succ));
+			list_append(&succ->fu_link, &span->free[succ_order]);
+		}
+
+		/* Now remove the found segment from the free list. */
+		list_remove(&seg->fu_link);
+		seg->base = newbase;
+
+		/* Hash-in the segment into the used hash. */
+		sysarg_t key = seg->base;
+		hash_table_insert(&span->used, &key, &seg->fu_link);
+
+		return newbase;
+	}
+
+	return 0;
+}
+
+static void ra_span_free(ra_span_t *span, size_t base, size_t size)
+{
+}
+
+/** Allocate resources from arena. */
 uintptr_t ra_alloc(ra_arena_t *arena, size_t size, size_t alignment)
 {
-	return 0;
-}
-
+	uintptr_t base = 0;
+
+	ASSERT(size >= 1);
+	ASSERT(alignment >= 1);
+	ASSERT(ispwr2(alignment));
+
+	list_foreach(arena->spans, cur) {
+		ra_span_t *span = list_get_instance(cur, ra_span_t, span_link);
+
+		base = ra_span_alloc(span, size, alignment);
+		if (base)
+			break;
+	}
+
+	return base;
+}
+
+/* Return resources to arena. */
 void ra_free(ra_arena_t *arena, uintptr_t base, size_t size)
 {
+	list_foreach(arena->spans, cur) {
+		ra_span_t *span = list_get_instance(cur, ra_span_t, span_link);
+
+		if (iswithin(span->base, span->size, base, size)) {
+			ra_span_free(span, base, size);
+			return;
+		}
+	}
+
+	panic("Freeing to wrong arena (base=%" PRIxn ", size=%" PRIdn ").",
+	    base, size);
 }
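One detail of ra_span_alloc() that is easy to miss is the over-allocation by align - 1 units before the free-list order is computed. The standalone sketch below is not from the changeset: the helper macros and fnzb() are simplified stand-ins for the kernel's <bitops.h> and <align.h> routines, and the request numbers are invented. It verifies the worst-case arithmetic: a segment of 2^order units always contains an aligned block of the requested size, even when maximally misaligned.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's bitops/align helpers (assumptions). */
#define ISPWR2(x)	(((x) & ((x) - 1)) == 0)
#define ALIGN_UP(a, b)	(((a) + ((b) - 1)) & ~((uintptr_t) (b) - 1))

/* Index of the first (most significant) non-zero bit. */
static unsigned fnzb(size_t v)
{
	unsigned b = 0;
	while (v >>= 1)
		b++;
	return b;
}

int main(void)
{
	size_t size = 96, align = 64;	/* invented request */

	/* Same order computation as ra_span_alloc(). */
	size_t needed = size + align - 1;	/* 96 + 63 = 159 */
	unsigned order = ISPWR2(needed) ? fnzb(needed) : fnzb(needed) + 1;

	/* Worst case: a maximally misaligned free segment of 2^order units. */
	uintptr_t segbase = 0x1041;	/* 1 past a 64-unit boundary */
	uintptr_t segend = segbase + ((uintptr_t) 1 << order);
	uintptr_t newbase = ALIGN_UP(segbase, align);

	printf("needed=%zu order=%u (segment of %zu units)\n",
	    needed, order, (size_t) 1 << order);
	printf("segment=[%#lx, %#lx) alloc=[%#lx, %#lx)\n",
	    (unsigned long) segbase, (unsigned long) segend,
	    (unsigned long) newbase, (unsigned long) (newbase + size));

	/* Even after losing align - 1 units to alignment, the block fits. */
	return (newbase + size <= segend) ? 0 : 1;
}

For the invented request above this prints order 8 (a 256-unit segment), with pred and succ segments created for the chopped-off parts around the aligned block. Note also that ra_span_free() is still an empty stub at this revision: the deallocation path only locates the owning span (or panics) without yet returning the segment to a free list.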