Changes in kernel/generic/src/sysinfo/stats.c [6e121b8:8f80c77] in mainline
File:
- 1 edited

Legend:
- Unmodified (context)
- Added (+)
- Removed (-)
kernel/generic/src/sysinfo/stats.c
--- kernel/generic/src/sysinfo/stats.c (r6e121b8)
+++ kernel/generic/src/sysinfo/stats.c (r8f80c77)
@@ -110,10 +110,7 @@
 	}
 
-	/* Each CPU structure is locked separatelly */
-	ipl_t ipl = interrupts_disable();
-
 	size_t i;
 	for (i = 0; i < config.cpu_count; i++) {
-		spinlock_lock(&cpus[i].lock);
+		irq_spinlock_lock(&cpus[i].lock, true);
 
 		stats_cpus[i].id = cpus[i].id;
@@ -123,8 +120,6 @@
 		stats_cpus[i].idle_ticks = cpus[i].idle_ticks;
 
-		spinlock_unlock(&cpus[i].lock);
-	}
-
-	interrupts_restore(ipl);
+		irq_spinlock_unlock(&cpus[i].lock, true);
+	}
 
 	return ((void *) stats_cpus);
@@ -200,6 +195,4 @@
  *
  * Summarize task information into task statistics.
- * Task lock should be held and interrupts disabled
- * before executing this function.
  *
  * @param task Task.
@@ -209,4 +202,7 @@
 static void produce_stats_task(task_t *task, stats_task_t *stats_task)
 {
+	ASSERT(interrupts_disabled());
+	ASSERT(irq_spinlock_locked(&task->lock));
+
 	stats_task->task_id = task->taskid;
 	str_cpy(stats_task->name, TASK_NAME_BUFLEN, task->name);
@@ -235,5 +231,5 @@
 
 	/* Interrupts are already disabled */
-	spinlock_lock(&(task->lock));
+	irq_spinlock_lock(&(task->lock), false);
 
 	/* Record the statistics and increment the iterator */
@@ -241,5 +237,5 @@
 	(*iterator)++;
 
-	spinlock_unlock(&(task->lock));
+	irq_spinlock_unlock(&(task->lock), false);
 
 	return true;
@@ -260,6 +256,5 @@
 {
 	/* Messing with task structures, avoid deadlock */
-	ipl_t ipl = interrupts_disable();
-	spinlock_lock(&tasks_lock);
+	irq_spinlock_lock(&tasks_lock, true);
 
 	/* First walk the task tree to count the tasks */
@@ -269,7 +264,5 @@
 	if (count == 0) {
 		/* No tasks found (strange) */
-		spinlock_unlock(&tasks_lock);
-		interrupts_restore(ipl);
-
+		irq_spinlock_unlock(&tasks_lock, true);
 		*size = 0;
 		return NULL;
@@ -278,6 +271,5 @@
 	*size = sizeof(stats_task_t) * count;
 	if (dry_run) {
-		spinlock_unlock(&tasks_lock);
-		interrupts_restore(ipl);
+		irq_spinlock_unlock(&tasks_lock, true);
 		return NULL;
 	}
@@ -286,7 +278,5 @@
 	if (stats_tasks == NULL) {
 		/* No free space for allocation */
-		spinlock_unlock(&tasks_lock);
-		interrupts_restore(ipl);
-
+		irq_spinlock_unlock(&tasks_lock, true);
 		*size = 0;
 		return NULL;
@@ -297,6 +287,5 @@
 	avltree_walk(&tasks_tree, task_serialize_walker, (void *) &iterator);
 
-	spinlock_unlock(&tasks_lock);
-	interrupts_restore(ipl);
+	irq_spinlock_unlock(&tasks_lock, true);
 
 	return ((void *) stats_tasks);
@@ -306,6 +295,4 @@
  *
  * Summarize thread information into thread statistics.
- * Thread lock should be held and interrupts disabled
- * before executing this function.
  *
  * @param thread Thread.
@@ -315,4 +302,7 @@
 static void produce_stats_thread(thread_t *thread, stats_thread_t *stats_thread)
 {
+	ASSERT(interrupts_disabled());
+	ASSERT(irq_spinlock_locked(&thread->lock));
+
 	stats_thread->thread_id = thread->tid;
 	stats_thread->task_id = thread->task->taskid;
@@ -346,5 +336,5 @@
 
 	/* Interrupts are already disabled */
-	spinlock_lock(&thread->lock);
+	irq_spinlock_lock(&thread->lock, false);
 
 	/* Record the statistics and increment the iterator */
@@ -352,5 +342,5 @@
 	(*iterator)++;
 
-	spinlock_unlock(&thread->lock);
+	irq_spinlock_unlock(&thread->lock, false);
 
 	return true;
@@ -371,6 +361,5 @@
 {
 	/* Messing with threads structures, avoid deadlock */
-	ipl_t ipl = interrupts_disable();
-	spinlock_lock(&threads_lock);
+	irq_spinlock_lock(&threads_lock, true);
 
 	/* First walk the thread tree to count the threads */
@@ -380,7 +369,5 @@
 	if (count == 0) {
 		/* No threads found (strange) */
-		spinlock_unlock(&threads_lock);
-		interrupts_restore(ipl);
-
+		irq_spinlock_unlock(&threads_lock, true);
 		*size = 0;
 		return NULL;
@@ -389,6 +376,5 @@
 	*size = sizeof(stats_thread_t) * count;
 	if (dry_run) {
-		spinlock_unlock(&threads_lock);
-		interrupts_restore(ipl);
+		irq_spinlock_unlock(&threads_lock, true);
 		return NULL;
 	}
@@ -397,7 +383,5 @@
 	if (stats_threads == NULL) {
 		/* No free space for allocation */
-		spinlock_unlock(&threads_lock);
-		interrupts_restore(ipl);
-
+		irq_spinlock_unlock(&threads_lock, true);
 		*size = 0;
 		return NULL;
@@ -408,6 +392,5 @@
 	avltree_walk(&threads_tree, thread_serialize_walker, (void *) &iterator);
 
-	spinlock_unlock(&threads_lock);
-	interrupts_restore(ipl);
+	irq_spinlock_unlock(&threads_lock, true);
 
 	return ((void *) stats_threads);
@@ -443,12 +426,10 @@
 
 	/* Messing with task structures, avoid deadlock */
-	ipl_t ipl = interrupts_disable();
-	spinlock_lock(&tasks_lock);
+	irq_spinlock_lock(&tasks_lock, true);
 
 	task_t *task = task_find_by_id(task_id);
 	if (task == NULL) {
 		/* No task with this ID */
-		spinlock_unlock(&tasks_lock);
-		interrupts_restore(ipl);
+		irq_spinlock_unlock(&tasks_lock, true);
 		return ret;
 	}
@@ -459,4 +440,4 @@
 		ret.data.size = sizeof(stats_task_t);
 
-		spinlock_unlock(&tasks_lock);
+		irq_spinlock_unlock(&tasks_lock, true);
 	} else {
@@ -465,6 +446,5 @@
 		    (stats_task_t *) malloc(sizeof(stats_task_t), FRAME_ATOMIC);
 		if (stats_task == NULL) {
-			spinlock_unlock(&tasks_lock);
-			interrupts_restore(ipl);
+			irq_spinlock_unlock(&tasks_lock, true);
 			return ret;
 		}
@@ -474,15 +454,12 @@
 		ret.data.data = (void *) stats_task;
 		ret.data.size = sizeof(stats_task_t);
 
 		/* Hand-over-hand locking */
-		spinlock_lock(&task->lock);
-		spinlock_unlock(&tasks_lock);
+		irq_spinlock_exchange(&tasks_lock, &task->lock);
 
 		produce_stats_task(task, stats_task);
 
-		spinlock_unlock(&task->lock);
-	}
-
-	interrupts_restore(ipl);
+		irq_spinlock_unlock(&task->lock, true);
+	}
 
 	return ret;
@@ -518,12 +495,10 @@
 
 	/* Messing with threads structures, avoid deadlock */
-	ipl_t ipl = interrupts_disable();
-	spinlock_lock(&threads_lock);
+	irq_spinlock_lock(&threads_lock, true);
 
 	thread_t *thread = thread_find_by_id(thread_id);
 	if (thread == NULL) {
 		/* No thread with this ID */
-		spinlock_unlock(&threads_lock);
-		interrupts_restore(ipl);
+		irq_spinlock_unlock(&threads_lock, true);
 		return ret;
 	}
@@ -534,4 +509,4 @@
 		ret.data.size = sizeof(stats_thread_t);
 
-		spinlock_unlock(&threads_lock);
+		irq_spinlock_unlock(&threads_lock, true);
 	} else {
@@ -540,6 +515,5 @@
 		    (stats_thread_t *) malloc(sizeof(stats_thread_t), FRAME_ATOMIC);
 		if (stats_thread == NULL) {
-			spinlock_unlock(&threads_lock);
-			interrupts_restore(ipl);
+			irq_spinlock_unlock(&threads_lock, true);
 			return ret;
 		}
@@ -551,13 +525,10 @@
 
 		/* Hand-over-hand locking */
-		spinlock_lock(&thread->lock);
-		spinlock_unlock(&threads_lock);
+		irq_spinlock_exchange(&threads_lock, &thread->lock);
 
 		produce_stats_thread(thread, stats_thread);
 
-		spinlock_unlock(&thread->lock);
-	}
-
-	interrupts_restore(ipl);
+		irq_spinlock_unlock(&thread->lock, true);
+	}
 
 	return ret;
@@ -673,5 +644,5 @@
 {
 	mutex_initialize(&load_lock, MUTEX_PASSIVE);
 
 	sysinfo_set_item_fn_val("system.uptime", NULL, get_stats_uptime);
 	sysinfo_set_item_fn_data("system.cpus", NULL, get_stats_cpus);
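In short, the changeset replaces the manual interrupts_disable()/interrupts_restore() pairing around plain spinlocks with the irq_spinlock API, which keeps track of the previous interrupt level inside the lock, and turns the doc-comment preconditions of produce_stats_task()/produce_stats_thread() into ASSERT checks. The following sketch only illustrates the two idioms for a reader of this diff; it is not code from the changeset, and the function name and the task_id_t parameter type are assumed for illustration. Everything else (tasks_lock, task_find_by_id() and the irq_spinlock calls) appears in the diff above.

/*
 * Illustrative sketch of the locking conversion, assuming the HelenOS
 * kernel environment used by stats.c.
 */
static void locking_idiom_sketch(task_id_t task_id)
{
	/*
	 * Old idiom (r6e121b8): the caller saves and restores the interrupt
	 * level by hand around a plain spinlock:
	 *
	 *	ipl_t ipl = interrupts_disable();
	 *	spinlock_lock(&tasks_lock);
	 *	...
	 *	spinlock_unlock(&tasks_lock);
	 *	interrupts_restore(ipl);
	 */

	/*
	 * New idiom (r8f80c77): the second argument of irq_spinlock_lock()
	 * says whether the lock should disable interrupts itself (true) or
	 * whether the caller already runs with interrupts disabled (false).
	 */
	irq_spinlock_lock(&tasks_lock, true);

	task_t *task = task_find_by_id(task_id);
	if (task == NULL) {
		irq_spinlock_unlock(&tasks_lock, true);
		return;
	}

	/*
	 * Hand-over-hand locking: irq_spinlock_exchange() takes task->lock
	 * and then releases tasks_lock, replacing the former explicit
	 * spinlock_lock(&task->lock); spinlock_unlock(&tasks_lock); pair.
	 */
	irq_spinlock_exchange(&tasks_lock, &task->lock);

	/* ... read per-task statistics while holding task->lock ... */

	irq_spinlock_unlock(&task->lock, true);
}

The ASSERT(interrupts_disabled()) and ASSERT(irq_spinlock_locked(...)) lines added to the produce_stats_* helpers enforce, when assertions are compiled in, the same preconditions that the removed doc-comment sentences used to state only informally.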