Changes in kernel/generic/src/sysinfo/stats.c [07d4271:fe7bcf1] in mainline
- Files: 1 edited
Legend:
- Unmodified
- Added
- Removed
-
kernel/generic/src/sysinfo/stats.c
r07d4271 rfe7bcf1 119 119 size_t i; 120 120 for (i = 0; i < config.cpu_count; i++) { 121 irq_spinlock_lock(&cpus[i].lock, true); 122 121 123 stats_cpus[i].id = cpus[i].id; 122 124 stats_cpus[i].active = cpus[i].active; 123 125 stats_cpus[i].frequency_mhz = cpus[i].frequency_mhz; 124 125 stats_cpus[i].busy_cycles = atomic_time_read(&cpus[i].busy_cycles); 126 stats_cpus[i].idle_cycles = atomic_time_read(&cpus[i].idle_cycles); 126 stats_cpus[i].busy_cycles = cpus[i].busy_cycles; 127 stats_cpus[i].idle_cycles = cpus[i].idle_cycles; 128 129 irq_spinlock_unlock(&cpus[i].lock, true); 127 130 } 128 131 … … 221 224 stats_task->virtmem = get_task_virtmem(task->as); 222 225 stats_task->resmem = get_task_resmem(task->as); 223 stats_task->threads = atomic_load(&task-> lifecount);226 stats_task->threads = atomic_load(&task->refcount); 224 227 task_get_accounting(task, &(stats_task->ucycles), 225 228 &(stats_task->kcycles)); … … 299 302 { 300 303 assert(interrupts_disabled()); 304 assert(irq_spinlock_locked(&thread->lock)); 301 305 302 306 stats_thread->thread_id = thread->tid; 303 307 stats_thread->task_id = thread->task->taskid; 304 stats_thread->state = atomic_get_unordered(&thread->state); 305 stats_thread->priority = atomic_get_unordered(&thread->priority); 306 stats_thread->ucycles = atomic_time_read(&thread->ucycles); 307 stats_thread->kcycles = atomic_time_read(&thread->kcycles); 308 309 cpu_t *cpu = atomic_get_unordered(&thread->cpu); 310 311 if (cpu != NULL) { 308 stats_thread->state = thread->state; 309 stats_thread->priority = thread->priority; 310 stats_thread->ucycles = thread->ucycles; 311 stats_thread->kcycles = thread->kcycles; 312 313 if (thread->cpu != NULL) { 312 314 stats_thread->on_cpu = true; 313 stats_thread->cpu = cpu->id;315 stats_thread->cpu = thread->cpu->id; 314 316 } else 315 317 stats_thread->on_cpu = false; … … 330 332 bool dry_run, void *data) 331 333 { 332 /* Messing with threads structures */334 /* Messing with threads structures, avoid deadlock 
*/ 333 335 irq_spinlock_lock(&threads_lock, true); 334 336 … … 362 364 thread_t *thread = thread_first(); 363 365 while (thread != NULL) { 366 /* Interrupts are already disabled */ 367 irq_spinlock_lock(&thread->lock, false); 368 364 369 /* Record the statistics and increment the index */ 365 370 produce_stats_thread(thread, &stats_threads[i]); 366 371 i++; 372 373 irq_spinlock_unlock(&thread->lock, false); 367 374 368 375 thread = thread_next(thread); … … 511 518 { 512 519 /* Initially no return value */ 513 sysinfo_return_t ret = { 514 .tag = SYSINFO_VAL_UNDEFINED, 515 }; 520 sysinfo_return_t ret; 521 ret.tag = SYSINFO_VAL_UNDEFINED; 516 522 517 523 /* Parse the task ID */ … … 520 526 return ret; 521 527 528 /* Messing with task structures, avoid deadlock */ 529 irq_spinlock_lock(&tasks_lock, true); 530 522 531 task_t *task = task_find_by_id(task_id); 523 if (!task) 532 if (task == NULL) { 533 /* No task with this ID */ 534 irq_spinlock_unlock(&tasks_lock, true); 524 535 return ret; 536 } 525 537 526 538 if (dry_run) { … … 528 540 ret.data.data = NULL; 529 541 ret.data.size = sizeof(stats_task_t); 542 543 irq_spinlock_unlock(&tasks_lock, true); 530 544 } else { 531 545 /* Allocate stats_task_t structure */ 532 stats_task_t *stats_task = malloc(sizeof(stats_task_t)); 533 534 if (stats_task != NULL) { 535 /* Correct return value */ 536 ret.tag = SYSINFO_VAL_FUNCTION_DATA; 537 ret.data.data = stats_task; 538 ret.data.size = sizeof(stats_task_t); 539 540 irq_spinlock_lock(&task->lock, true); 541 produce_stats_task(task, stats_task); 542 irq_spinlock_unlock(&task->lock, true); 546 stats_task_t *stats_task = 547 (stats_task_t *) malloc(sizeof(stats_task_t)); 548 if (stats_task == NULL) { 549 irq_spinlock_unlock(&tasks_lock, true); 550 return ret; 543 551 } 544 } 545 546 task_release(task); 552 553 /* Correct return value */ 554 ret.tag = SYSINFO_VAL_FUNCTION_DATA; 555 ret.data.data = (void *) stats_task; 556 ret.data.size = sizeof(stats_task_t); 557 558 /* 
Hand-over-hand locking */ 559 irq_spinlock_exchange(&tasks_lock, &task->lock); 560 561 produce_stats_task(task, stats_task); 562 563 irq_spinlock_unlock(&task->lock, true); 564 } 565 547 566 return ret; 548 567 } … … 578 597 return ret; 579 598 580 /* Messing with threads structures */599 /* Messing with threads structures, avoid deadlock */ 581 600 irq_spinlock_lock(&threads_lock, true); 582 601 … … 608 627 ret.data.size = sizeof(stats_thread_t); 609 628 629 /* Hand-over-hand locking */ 630 irq_spinlock_exchange(&threads_lock, &thread->lock); 631 610 632 produce_stats_thread(thread, stats_thread); 611 633 612 irq_spinlock_unlock(&thread s_lock, true);634 irq_spinlock_unlock(&thread->lock, true); 613 635 } 614 636 … … 825 847 void kload(void *arg) 826 848 { 849 thread_detach(THREAD); 850 827 851 while (true) { 828 852 size_t ready = atomic_load(&nrdy);
Note: See TracChangeset for help on using the changeset viewer.