Changes in / [c2ab3f4:b8f7ea78] in mainline
- Location: kernel/generic/src
- Files: 6 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/generic/src/ddi/ddi.c
rc2ab3f4 rb8f7ea78 46 46 #include <mm/frame.h> 47 47 #include <mm/as.h> 48 #include <synch/ mutex.h>48 #include <synch/spinlock.h> 49 49 #include <syscall/copy.h> 50 50 #include <adt/btree.h> … … 54 54 55 55 /** This lock protects the parea_btree. */ 56 static mutex_t parea_lock;56 SPINLOCK_INITIALIZE(parea_lock); 57 57 58 58 /** B+tree with enabled physical memory areas. */ … … 63 63 { 64 64 btree_create(&parea_btree); 65 mutex_initialize(&parea_lock, MUTEX_PASSIVE);66 65 } 67 66 … … 73 72 void ddi_parea_register(parea_t *parea) 74 73 { 75 mutex_lock(&parea_lock); 74 ipl_t ipl = interrupts_disable(); 75 spinlock_lock(&parea_lock); 76 76 77 77 /* … … 80 80 btree_insert(&parea_btree, (btree_key_t) parea->pbase, parea, NULL); 81 81 82 mutex_unlock(&parea_lock); 82 spinlock_unlock(&parea_lock); 83 interrupts_restore(ipl); 83 84 } 84 85 … … 140 141 spinlock_unlock(&zones.lock); 141 142 142 mutex_lock(&parea_lock);143 spinlock_lock(&parea_lock); 143 144 btree_node_t *nodep; 144 145 parea_t *parea = (parea_t *) btree_search(&parea_btree, … … 146 147 147 148 if ((!parea) || (parea->frames < pages)) { 148 mutex_unlock(&parea_lock);149 spinlock_unlock(&parea_lock); 149 150 goto err; 150 151 } 151 152 152 mutex_unlock(&parea_lock);153 spinlock_unlock(&parea_lock); 153 154 goto map; 154 155 } … … 160 161 161 162 map: 162 interrupts_restore(ipl);163 163 spinlock_lock(&TASK->lock); 164 164 165 if (!as_area_create(TASK->as, flags, pages * PAGE_SIZE, vp, 165 166 AS_AREA_ATTR_NONE, &phys_backend, &backend_data)) { … … 168 169 * We report it using ENOMEM. 169 170 */ 171 spinlock_unlock(&TASK->lock); 172 interrupts_restore(ipl); 170 173 return ENOMEM; 171 174 } … … 174 177 * Mapping is created on-demand during page fault. 175 178 */ 179 180 spinlock_unlock(&TASK->lock); 181 interrupts_restore(ipl); 176 182 return 0; 177 183 } -
kernel/generic/src/mm/frame.c
rc2ab3f4 rb8f7ea78 1033 1033 spinlock_unlock(&zones.lock); 1034 1034 interrupts_restore(ipl); 1035 1036 if (!THREAD)1037 panic("Cannot wait for memory to become available.");1038 1035 1039 1036 /* -
kernel/generic/src/mm/slab.c
rc2ab3f4 rb8f7ea78 555 555 * Initialize mag_cache structure in slab cache 556 556 */ 557 static boolmake_magcache(slab_cache_t *cache)557 static void make_magcache(slab_cache_t *cache) 558 558 { 559 559 unsigned int i; … … 562 562 563 563 cache->mag_cache = malloc(sizeof(slab_mag_cache_t) * config.cpu_count, 564 FRAME_ATOMIC); 565 if (!cache->mag_cache) 566 return false; 567 564 0); 568 565 for (i = 0; i < config.cpu_count; i++) { 569 566 memsetb(&cache->mag_cache[i], sizeof(cache->mag_cache[i]), 0); … … 571 568 "slab_maglock_cpu"); 572 569 } 573 return true;574 570 } 575 571 … … 601 597 spinlock_initialize(&cache->maglock, "slab_maglock"); 602 598 if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) 603 (void)make_magcache(cache);599 make_magcache(cache); 604 600 605 601 /* Compute slab sizes, object counts in slabs etc. */ … … 927 923 SLAB_CACHE_MAGDEFERRED) 928 924 continue; 929 (void)make_magcache(s);925 make_magcache(s); 930 926 s->flags &= ~SLAB_CACHE_MAGDEFERRED; 931 927 } -
kernel/generic/src/synch/mutex.c
rc2ab3f4 rb8f7ea78 40 40 #include <synch/synch.h> 41 41 #include <debug.h> 42 #include <arch.h>43 42 44 43 /** Initialize mutex. … … 70 69 int rc; 71 70 72 if (mtx->type == MUTEX_PASSIVE && THREAD) {71 if (mtx->type == MUTEX_PASSIVE) { 73 72 rc = _semaphore_down_timeout(&mtx->sem, usec, flags); 74 73 } else { 75 ASSERT(mtx->type == MUTEX_ACTIVE || !THREAD);74 ASSERT(mtx->type == MUTEX_ACTIVE); 76 75 ASSERT(usec == SYNCH_NO_TIMEOUT); 77 76 ASSERT(!(flags & SYNCH_FLAGS_INTERRUPTIBLE)); -
kernel/generic/src/sysinfo/stats.c
rc2ab3f4 rb8f7ea78 38 38 #include <sysinfo/stats.h> 39 39 #include <sysinfo/sysinfo.h> 40 #include <synch/spinlock.h>41 #include <synch/mutex.h>42 40 #include <time/clock.h> 43 41 #include <mm/frame.h> … … 70 68 static load_t avenrdy[LOAD_STEPS] = {0, 0, 0}; 71 69 72 /** Load calculation lock */73 static mutex_t load_lock;70 /** Load calculation spinlock */ 71 SPINLOCK_STATIC_INITIALIZE_NAME(load_lock, "load_lock"); 74 72 75 73 /** Get system uptime … … 346 344 347 345 /* Interrupts are already disabled */ 348 spinlock_lock(& thread->lock);346 spinlock_lock(&(thread->lock)); 349 347 350 348 /* Record the statistics and increment the iterator */ … … 352 350 (*iterator)++; 353 351 354 spinlock_unlock(& thread->lock);352 spinlock_unlock(&(thread->lock)); 355 353 356 354 return true; … … 617 615 } 618 616 619 /* To always get consistent values acquire the mutex */ 620 mutex_lock(&load_lock); 617 /* To always get consistent values acquire the spinlock */ 618 ipl_t ipl = interrupts_disable(); 619 spinlock_lock(&load_lock); 621 620 622 621 unsigned int i; … … 624 623 stats_load[i] = avenrdy[i] << LOAD_FIXED_SHIFT; 625 624 626 mutex_unlock(&load_lock); 625 spinlock_unlock(&load_lock); 626 interrupts_restore(ipl); 627 627 628 628 return ((void *) stats_load); … … 655 655 656 656 /* Mutually exclude with get_stats_load() */ 657 mutex_lock(&load_lock); 657 ipl_t ipl = interrupts_disable(); 658 spinlock_lock(&load_lock); 658 659 659 660 unsigned int i; … … 661 662 avenrdy[i] = load_calc(avenrdy[i], load_exp[i], ready); 662 663 663 mutex_unlock(&load_lock); 664 spinlock_unlock(&load_lock); 665 interrupts_restore(ipl); 664 666 665 667 thread_sleep(LOAD_INTERVAL); … … 672 674 void stats_init(void) 673 675 { 674 mutex_initialize(&load_lock, MUTEX_PASSIVE);675 676 676 sysinfo_set_item_fn_val("system.uptime", NULL, get_stats_uptime); 677 677 sysinfo_set_item_fn_data("system.cpus", NULL, get_stats_cpus); -
kernel/generic/src/sysinfo/sysinfo.c
rc2ab3f4 rb8f7ea78 37 37 #include <print.h> 38 38 #include <syscall/copy.h> 39 #include <synch/ mutex.h>39 #include <synch/spinlock.h> 40 40 #include <arch/asm.h> 41 41 #include <errno.h> … … 52 52 static slab_cache_t *sysinfo_item_slab; 53 53 54 /** Sysinfo lock */55 static mutex_t sysinfo_lock;54 /** Sysinfo spinlock */ 55 SPINLOCK_STATIC_INITIALIZE_NAME(sysinfo_lock, "sysinfo_lock"); 56 56 57 57 /** Sysinfo item constructor … … 98 98 sizeof(sysinfo_item_t), 0, sysinfo_item_constructor, 99 99 sysinfo_item_destructor, SLAB_CACHE_MAGDEFERRED); 100 101 mutex_initialize(&sysinfo_lock, MUTEX_ACTIVE);102 100 } 103 101 104 102 /** Recursively find an item in sysinfo tree 105 103 * 106 * Should be called with sysinfo_lock held. 104 * Should be called with interrupts disabled 105 * and sysinfo_lock held. 107 106 * 108 107 * @param name Current sysinfo path suffix. … … 169 168 /** Recursively create items in sysinfo tree 170 169 * 171 * Should be called with sysinfo_lock held. 170 * Should be called with interrupts disabled 171 * and sysinfo_lock held. 172 172 * 173 173 * @param name Current sysinfo path suffix. 
… … 299 299 { 300 300 /* Protect sysinfo tree consistency */ 301 mutex_lock(&sysinfo_lock); 301 ipl_t ipl = interrupts_disable(); 302 spinlock_lock(&sysinfo_lock); 302 303 303 304 if (root == NULL) … … 310 311 } 311 312 312 mutex_unlock(&sysinfo_lock); 313 spinlock_unlock(&sysinfo_lock); 314 interrupts_restore(ipl); 313 315 } 314 316 … … 330 332 { 331 333 /* Protect sysinfo tree consistency */ 332 mutex_lock(&sysinfo_lock); 334 ipl_t ipl = interrupts_disable(); 335 spinlock_lock(&sysinfo_lock); 333 336 334 337 if (root == NULL) … … 342 345 } 343 346 344 mutex_unlock(&sysinfo_lock); 347 spinlock_unlock(&sysinfo_lock); 348 interrupts_restore(ipl); 345 349 } 346 350 … … 357 361 { 358 362 /* Protect sysinfo tree consistency */ 359 mutex_lock(&sysinfo_lock); 363 ipl_t ipl = interrupts_disable(); 364 spinlock_lock(&sysinfo_lock); 360 365 361 366 if (root == NULL) … … 368 373 } 369 374 370 mutex_unlock(&sysinfo_lock); 375 spinlock_unlock(&sysinfo_lock); 376 interrupts_restore(ipl); 371 377 } 372 378 … … 388 394 { 389 395 /* Protect sysinfo tree consistency */ 390 mutex_lock(&sysinfo_lock); 396 ipl_t ipl = interrupts_disable(); 397 spinlock_lock(&sysinfo_lock); 391 398 392 399 if (root == NULL) … … 399 406 } 400 407 401 mutex_unlock(&sysinfo_lock); 408 spinlock_unlock(&sysinfo_lock); 409 interrupts_restore(ipl); 402 410 } 403 411 … … 412 420 { 413 421 /* Protect sysinfo tree consistency */ 414 mutex_lock(&sysinfo_lock); 422 ipl_t ipl = interrupts_disable(); 423 spinlock_lock(&sysinfo_lock); 415 424 416 425 if (root == NULL) … … 421 430 item->val_type = SYSINFO_VAL_UNDEFINED; 422 431 423 mutex_unlock(&sysinfo_lock); 432 spinlock_unlock(&sysinfo_lock); 433 interrupts_restore(ipl); 424 434 } 425 435 … … 436 446 { 437 447 /* Protect sysinfo tree consistency */ 438 mutex_lock(&sysinfo_lock); 448 ipl_t ipl = interrupts_disable(); 449 spinlock_lock(&sysinfo_lock); 439 450 440 451 if (root == NULL) … … 450 461 } 451 462 452 mutex_unlock(&sysinfo_lock); 463 
spinlock_unlock(&sysinfo_lock); 464 interrupts_restore(ipl); 453 465 } 454 466 … … 467 479 /** Dump the structure of sysinfo tree 468 480 * 469 * Should be called with sysinfo_lock held. 481 * Should be called with interrupts disabled 482 * and sysinfo_lock held. Because this routine 483 * might take a reasonable long time to proceed, 484 * having the spinlock held is not optimal, but 485 * there is no better simple solution. 470 486 * 471 487 * @param root Root item of the current (sub)tree. … … 543 559 /* Avoid other functions to mess with sysinfo 544 560 while we are dumping it */ 545 mutex_lock(&sysinfo_lock); 561 ipl_t ipl = interrupts_disable(); 562 spinlock_lock(&sysinfo_lock); 546 563 547 564 if (root == NULL) … … 550 567 sysinfo_dump_internal(root, 0); 551 568 552 mutex_unlock(&sysinfo_lock); 569 spinlock_unlock(&sysinfo_lock); 570 interrupts_restore(ipl); 553 571 } 554 572 555 573 /** Return sysinfo item value determined by name 556 574 * 557 * Should be called with sysinfo_lock held. 575 * Should be called with interrupts disabled 576 * and sysinfo_lock held. 558 577 * 559 578 * @param name Sysinfo path. … … 640 659 * are reading it. 641 660 */ 642 mutex_lock(&sysinfo_lock); 661 ipl_t ipl = interrupts_disable(); 662 spinlock_lock(&sysinfo_lock); 643 663 ret = sysinfo_get_item(path, NULL, dry_run); 644 mutex_unlock(&sysinfo_lock); 664 spinlock_unlock(&sysinfo_lock); 665 interrupts_restore(ipl); 645 666 } 646 667 free(path);
Note: See TracChangeset for help on using the changeset viewer.