Changeset 82d515e9 in mainline

- Timestamp: 2017-12-05T11:30:02Z (7 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 9af1c61
- Parents: 9a09212
- git-author: Jakub Jermar <jakub@…> (2017-12-05 11:25:41)
- git-committer: Jakub Jermar <jakub@…> (2017-12-05 11:30:02)
- Location: kernel/generic
- Files: 16 edited
kernel/generic/include/ddi/irq.h (r9a09212 → r82d515e9)

@@ -132,5 +132,5 @@
 extern hash_table_t irq_uspace_hash_table;
 
-extern slab_cache_t *irq_slab;
+extern slab_cache_t *irq_cache;
 
 extern inr_t last_inr;
kernel/generic/include/ipc/ipc.h (r9a09212 → r82d515e9)

@@ -173,5 +173,5 @@
 } call_t;
 
-extern slab_cache_t *phone_slab;
+extern slab_cache_t *phone_cache;
 
 extern answerbox_t *ipc_phone_0;
kernel/generic/include/proc/thread.h (r9a09212 → r82d515e9)

@@ -272,5 +272,5 @@
 
 /** Fpu context slab cache. */
-extern slab_cache_t *fpu_context_slab;
+extern slab_cache_t *fpu_context_cache;
 
 /* Thread syscall prototypes. */
kernel/generic/src/adt/btree.c (r9a09212 → r82d515e9)

@@ -55,5 +55,5 @@
 #include <trace.h>
 
-static slab_cache_t *btree_node_slab;
+static slab_cache_t *btree_node_cache;
 
 #define ROOT_NODE(n) (!(n)->parent)
@@ -71,5 +71,5 @@
 void btree_init(void)
 {
-    btree_node_slab = slab_cache_create("btree_node_t",
+    btree_node_cache = slab_cache_create("btree_node_t",
         sizeof(btree_node_t), 0, NULL, NULL, SLAB_CACHE_MAGDEFERRED);
 }
@@ -109,5 +109,5 @@
 {
     list_initialize(&t->leaf_list);
-    t->root = (btree_node_t *) slab_alloc(btree_node_slab, 0);
+    t->root = (btree_node_t *) slab_alloc(btree_node_cache, 0);
     node_initialize(t->root);
     list_append(&t->root->leaf_link, &t->leaf_list);
@@ -130,4 +130,4 @@
     }
 
-    slab_free(btree_node_slab, root);
+    slab_free(btree_node_cache, root);
 }
@@ -516,5 +516,5 @@
      * Allocate and initialize new right sibling.
      */
-    rnode = (btree_node_t *) slab_alloc(btree_node_slab, 0);
+    rnode = (btree_node_t *) slab_alloc(btree_node_cache, 0);
     node_initialize(rnode);
     rnode->parent = node->parent;
@@ -595,5 +595,5 @@
      * We split the root node. Create new root.
      */
-    t->root = (btree_node_t *) slab_alloc(btree_node_slab, 0);
+    t->root = (btree_node_t *) slab_alloc(btree_node_cache, 0);
     node->parent = t->root;
     rnode->parent = t->root;
@@ -779,5 +779,5 @@
         t->root = node->subtree[0];
         t->root->parent = NULL;
-        slab_free(btree_node_slab, node);
+        slab_free(btree_node_cache, node);
     } else {
         /*
@@ -838,5 +838,5 @@
         idx = find_key_by_subtree(parent, rnode, true);
         assert((int) idx != -1);
-        slab_free(btree_node_slab, rnode);
+        slab_free(btree_node_cache, rnode);
         _btree_remove(t, parent->key[idx], parent);
     }
kernel/generic/src/cap/cap.c (r9a09212 → r82d515e9)

@@ -87,5 +87,5 @@
 #define CAPS_LAST (CAPS_SIZE - 1)
 
-static slab_cache_t *cap_slab;
+static slab_cache_t *cap_cache;
 
 static size_t caps_hash(const ht_link_t *item)
@@ -116,5 +116,5 @@
 void caps_init(void)
 {
-    cap_slab = slab_cache_create("cap_t", sizeof(cap_t), 0, NULL,
+    cap_cache = slab_cache_create("cap_t", sizeof(cap_t), 0, NULL,
         NULL, 0);
 }
@@ -277,5 +277,5 @@
      */
     if (!cap) {
-        cap = slab_alloc(cap_slab, FRAME_ATOMIC);
+        cap = slab_alloc(cap_cache, FRAME_ATOMIC);
         if (!cap) {
             mutex_unlock(&task->cap_info->lock);
@@ -284,5 +284,5 @@
         uintptr_t hbase;
         if (!ra_alloc(task->cap_info->handles, 1, 1, &hbase)) {
-            slab_free(cap_slab, cap);
+            slab_free(cap_cache, cap);
             mutex_unlock(&task->cap_info->lock);
             return ENOMEM;
@@ -371,5 +371,5 @@
     hash_table_remove_item(&task->cap_info->caps, &cap->caps_link);
     ra_free(task->cap_info->handles, handle, 1);
-    slab_free(cap_slab, cap);
+    slab_free(cap_cache, cap);
     mutex_unlock(&task->cap_info->lock);
 }
kernel/generic/src/console/cmd.c (r9a09212 → r82d515e9)

@@ -497,9 +497,9 @@
 };
 
-static int cmd_slabs(cmd_arg_t *argv);
-static cmd_info_t slabs_info = {
-    .name = "slabs",
+static int cmd_caches(cmd_arg_t *argv);
+static cmd_info_t caches_info = {
+    .name = "caches",
     .description = "List slab caches.",
-    .func = cmd_slabs,
+    .func = cmd_caches,
     .argc = 0
 };
@@ -605,4 +605,5 @@
     &call0_info,
     &mcall0_info,
+    &caches_info,
     &call1_info,
     &call2_info,
@@ -620,5 +621,4 @@
     &sched_info,
     &set4_info,
-    &slabs_info,
     &symaddr_info,
     &sysinfo_info,
@@ -1214,11 +1214,11 @@
 }
 
-/** Command for listings SLAB caches
+/** Command for listing slab allocator caches
  *
- * @param argv Ignores
+ * @param argv Ignored
  *
  * @return Always 1
  */
-int cmd_slabs(cmd_arg_t *argv)
+int cmd_caches(cmd_arg_t *argv)
 {
     slab_print_list();
kernel/generic/src/ddi/irq.c (r9a09212 → r82d515e9)

@@ -50,5 +50,5 @@
 #include <arch.h>
 
-slab_cache_t *irq_slab = NULL;
+slab_cache_t *irq_cache = NULL;
 
 /** Spinlock protecting the kernel IRQ hash table
@@ -97,7 +97,7 @@
     last_inr = inrs - 1;
 
-    irq_slab = slab_cache_create("irq_t", sizeof(irq_t), 0, NULL, NULL,
+    irq_cache = slab_cache_create("irq_t", sizeof(irq_t), 0, NULL, NULL,
         FRAME_ATOMIC);
-    assert(irq_slab);
+    assert(irq_cache);
 
     hash_table_create(&irq_uspace_hash_table, chains, 0, &irq_ht_ops);
kernel/generic/src/ipc/ipc.c (r9a09212 → r82d515e9)

@@ -66,8 +66,8 @@
 answerbox_t *ipc_phone_0 = NULL;
 
-static slab_cache_t *call_slab;
-static slab_cache_t *answerbox_slab;
+static slab_cache_t *call_cache;
+static slab_cache_t *answerbox_cache;
 
-slab_cache_t *phone_slab = NULL;
+slab_cache_t *phone_cache = NULL;
 
 /** Initialize a call structure.
@@ -95,4 +95,4 @@
     if (call->caller_phone)
         kobject_put(call->caller_phone->kobject);
-    slab_free(call_slab, call);
+    slab_free(call_cache, call);
 }
@@ -115,10 +115,10 @@
 call_t *ipc_call_alloc(unsigned int flags)
 {
-    call_t *call = slab_alloc(call_slab, flags);
+    call_t *call = slab_alloc(call_cache, flags);
     if (!call)
         return NULL;
     kobject_t *kobj = (kobject_t *) malloc(sizeof(kobject_t), flags);
     if (!kobj) {
-        slab_free(call_slab, call);
+        slab_free(call_cache, call);
         return NULL;
     }
@@ -210,4 +210,4 @@
 int ipc_call_sync(phone_t *phone, call_t *request)
 {
-    answerbox_t *mybox = slab_alloc(answerbox_slab, 0);
+    answerbox_t *mybox = slab_alloc(answerbox_cache, 0);
     ipc_answerbox_init(mybox, TASK);
@@ -218,5 +218,5 @@
     int rc = ipc_call(phone, request);
     if (rc != EOK) {
-        slab_free(answerbox_slab, mybox);
+        slab_free(answerbox_cache, mybox);
         return rc;
     }
@@ -265,5 +265,5 @@
     assert(!answer || request == answer);
 
-    slab_free(answerbox_slab, mybox);
+    slab_free(answerbox_cache, mybox);
     return rc;
 }
@@ -906,9 +906,9 @@
 void ipc_init(void)
 {
-    call_slab = slab_cache_create("call_t", sizeof(call_t), 0, NULL,
+    call_cache = slab_cache_create("call_t", sizeof(call_t), 0, NULL,
         NULL, 0);
-    phone_slab = slab_cache_create("phone_t", sizeof(phone_t), 0, NULL,
+    phone_cache = slab_cache_create("phone_t", sizeof(phone_t), 0, NULL,
         NULL, 0);
-    answerbox_slab = slab_cache_create("answerbox_t", sizeof(answerbox_t),
+    answerbox_cache = slab_cache_create("answerbox_t", sizeof(answerbox_t),
         0, NULL, NULL, 0);
 }
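All sixteen files are clients of the same three-call slab API; only the variable names change. As a reminder of the lifecycle those variables follow, here is a minimal sketch modeled on the hunks above. The example_t type, the example_cache pointer, and the surrounding functions are illustrative names only, and the <mm/slab.h> include path is an assumption, not something this changeset shows:

#include <mm/slab.h>

typedef struct {
    int payload;                    /* placeholder member */
} example_t;

/* One cache per object type, named after that type: the naming
 * convention this changeset adopts (cap_t -> cap_cache, etc.). */
static slab_cache_t *example_cache;

static void example_init(void)
{
    /* name, object size, alignment, constructor, destructor, flags */
    example_cache = slab_cache_create("example_t", sizeof(example_t),
        0, NULL, NULL, 0);
}

static example_t *example_new(void)
{
    /* FRAME_ATOMIC makes the allocation fail rather than block,
     * as in the cap.c, ipcrsc.c, and ipc/irq.c hunks. */
    return (example_t *) slab_alloc(example_cache, FRAME_ATOMIC);
}

static void example_destroy(example_t *obj)
{
    slab_free(example_cache, obj);
}

The rename leaves the word "slab" for the allocator itself and its per-cache slabs, while each subsystem variable now says what it actually is: a cache of objects.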
kernel/generic/src/ipc/ipcrsc.c (r9a09212 → r82d515e9)

@@ -153,4 +153,4 @@
 {
     phone_t *phone = (phone_t *) arg;
-    slab_free(phone_slab, phone);
+    slab_free(phone_cache, phone);
 }
@@ -173,5 +173,5 @@
     cap_handle_t handle = cap_alloc(task);
     if (handle >= 0) {
-        phone_t *phone = slab_alloc(phone_slab, FRAME_ATOMIC);
+        phone_t *phone = slab_alloc(phone_cache, FRAME_ATOMIC);
         if (!phone) {
             cap_free(TASK, handle);
@@ -181,5 +181,5 @@
         if (!kobject) {
             cap_free(TASK, handle);
-            slab_free(phone_slab, phone);
+            slab_free(phone_cache, phone);
             return ENOMEM;
         }
kernel/generic/src/ipc/irq.c (r9a09212 → r82d515e9)

@@ -294,4 +294,4 @@
     /* Free up the IRQ code and associated structures. */
     code_free(irq->notif_cfg.code);
-    slab_free(irq_slab, irq);
+    slab_free(irq_cache, irq);
 }
@@ -333,5 +333,5 @@
         return handle;
 
-    irq_t *irq = (irq_t *) slab_alloc(irq_slab, FRAME_ATOMIC);
+    irq_t *irq = (irq_t *) slab_alloc(irq_cache, FRAME_ATOMIC);
     if (!irq) {
         cap_free(TASK, handle);
@@ -342,5 +342,5 @@
     if (!kobject) {
         cap_free(TASK, handle);
-        slab_free(irq_slab, irq);
+        slab_free(irq_cache, irq);
         return ENOMEM;
     }
kernel/generic/src/mm/as.c (r9a09212 → r82d515e9)

@@ -90,5 +90,5 @@
  *
  */
-static slab_cache_t *as_slab;
+static slab_cache_t *as_cache;
 
 /** ASID subsystem lock.
@@ -131,4 +131,4 @@
     as_arch_init();
 
-    as_slab = slab_cache_create("as_t", sizeof(as_t), 0,
+    as_cache = slab_cache_create("as_t", sizeof(as_t), 0,
         as_constructor, as_destructor, SLAB_CACHE_MAGDEFERRED);
@@ -153,4 +153,4 @@
 as_t *as_create(unsigned int flags)
 {
-    as_t *as = (as_t *) slab_alloc(as_slab, 0);
+    as_t *as = (as_t *) slab_alloc(as_cache, 0);
     (void) as_create_arch(as, 0);
@@ -254,4 +254,4 @@
 #endif
 
-    slab_free(as_slab, as);
+    slab_free(as_cache, as);
 }
kernel/generic/src/mm/slab.c (r9a09212 → r82d515e9)

@@ -818,9 +818,7 @@
 }
 
-/* Print list of slabs
- *
- */
+/* Print list of caches */
 void slab_print_list(void)
 {
-    printf("[slab name ] [size  ] [pages ] [obj/pg] [slabs ]"
+    printf("[cache name ] [size  ] [pages ] [obj/pg] [slabs ]"
         " [cached] [alloc ] [ctl]\n");
kernel/generic/src/proc/scheduler.c (r9a09212 → r82d515e9)

@@ -163,5 +163,5 @@
         irq_spinlock_unlock(&CPU->lock, false);
         THREAD->saved_fpu_context =
-            (fpu_context_t *) slab_alloc(fpu_context_slab, 0);
+            (fpu_context_t *) slab_alloc(fpu_context_cache, 0);
 
         /* We may have switched CPUs during slab_alloc */
kernel/generic/src/proc/task.c (r9a09212 → r82d515e9)

@@ -79,5 +79,5 @@
 static task_id_t task_counter = 0;
 
-static slab_cache_t *task_slab;
+static slab_cache_t *task_cache;
 
 /* Forward declarations. */
@@ -93,5 +93,5 @@
     TASK = NULL;
     avltree_create(&tasks_tree);
-    task_slab = slab_cache_create("task_t", sizeof(task_t), 0,
+    task_cache = slab_cache_create("task_t", sizeof(task_t), 0,
         tsk_constructor, tsk_destructor, 0);
 }
@@ -206,4 +206,4 @@
 task_t *task_create(as_t *as, const char *name)
 {
-    task_t *task = (task_t *) slab_alloc(task_slab, 0);
+    task_t *task = (task_t *) slab_alloc(task_cache, 0);
     task_create_arch(task);
@@ -295,4 +295,4 @@
     as_release(task->as);
 
-    slab_free(task_slab, task);
+    slab_free(task_cache, task);
 }
kernel/generic/src/proc/thread.c (r9a09212 → r82d515e9)

@@ -103,7 +103,7 @@
 static thread_id_t last_tid = 0;
 
-static slab_cache_t *thread_slab;
+static slab_cache_t *thread_cache;
 
 #ifdef CONFIG_FPU
-slab_cache_t *fpu_context_slab;
+slab_cache_t *fpu_context_cache;
 #endif
@@ -169,5 +169,5 @@
     thread->saved_fpu_context = NULL;
 #else /* CONFIG_FPU_LAZY */
-    thread->saved_fpu_context = slab_alloc(fpu_context_slab, kmflags);
+    thread->saved_fpu_context = slab_alloc(fpu_context_cache, kmflags);
     if (!thread->saved_fpu_context)
         return -1;
@@ -199,5 +199,5 @@
 #ifdef CONFIG_FPU
     if (thread->saved_fpu_context)
-        slab_free(fpu_context_slab, thread->saved_fpu_context);
+        slab_free(fpu_context_cache, thread->saved_fpu_context);
 #endif
     return -1;
@@ -225,4 +225,4 @@
 #ifdef CONFIG_FPU
     if (thread->saved_fpu_context)
-        slab_free(fpu_context_slab, thread->saved_fpu_context);
+        slab_free(fpu_context_cache, thread->saved_fpu_context);
 #endif
@@ -241,9 +241,9 @@
 
     atomic_set(&nrdy, 0);
-    thread_slab = slab_cache_create("thread_t", sizeof(thread_t), 0,
+    thread_cache = slab_cache_create("thread_t", sizeof(thread_t), 0,
         thr_constructor, thr_destructor, 0);
 
 #ifdef CONFIG_FPU
-    fpu_context_slab = slab_cache_create("fpu_context_t",
+    fpu_context_cache = slab_cache_create("fpu_context_t",
         sizeof(fpu_context_t), FPU_CONTEXT_ALIGN, NULL, NULL, 0);
 #endif
@@ -341,5 +341,5 @@
     thread_flags_t flags, const char *name)
 {
-    thread_t *thread = (thread_t *) slab_alloc(thread_slab, 0);
+    thread_t *thread = (thread_t *) slab_alloc(thread_cache, 0);
     if (!thread)
         return NULL;
@@ -457,4 +457,4 @@
      */
     task_release(thread->task);
-    slab_free(thread_slab, thread);
+    slab_free(thread_cache, thread);
 }
@@ -974,4 +974,4 @@
      * We can safely deallocate it.
      */
-    slab_free(thread_slab, thread);
+    slab_free(thread_cache, thread);
     free(kernel_uarg);
kernel/generic/src/sysinfo/sysinfo.c (r9a09212 → r82d515e9)

@@ -53,5 +53,5 @@
 
 /** Sysinfo SLAB cache */
-static slab_cache_t *sysinfo_item_slab;
+static slab_cache_t *sysinfo_item_cache;
 
 /** Sysinfo lock */
@@ -98,5 +98,5 @@
 void sysinfo_init(void)
 {
-    sysinfo_item_slab = slab_cache_create("sysinfo_item_t",
+    sysinfo_item_cache = slab_cache_create("sysinfo_item_t",
         sizeof(sysinfo_item_t), 0, sysinfo_item_constructor,
         sysinfo_item_destructor, SLAB_CACHE_MAGDEFERRED);
@@ -204,4 +204,4 @@
 
             *psubtree =
-                (sysinfo_item_t *) slab_alloc(sysinfo_item_slab, 0);
+                (sysinfo_item_t *) slab_alloc(sysinfo_item_cache, 0);
             assert(*psubtree);
@@ -268,4 +268,4 @@
 
         sysinfo_item_t *item =
-            (sysinfo_item_t *) slab_alloc(sysinfo_item_slab, 0);
+            (sysinfo_item_t *) slab_alloc(sysinfo_item_cache, 0);
         assert(item);
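Together with the cmd.c and mm/slab.c hunks above, the user-visible effect is that the kernel console command for listing these caches is registered as caches instead of slabs, still backed by slab_print_list(). A hypothetical session sketch follows; only the command name and the header line come from this changeset, while the prompt and the elided rows are illustrative:

kconsole> caches
[cache name ] [size  ] [pages ] [obj/pg] [slabs ] [cached] [alloc ] [ctl]
...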