Changeset da1bafb in mainline for kernel/generic/src/sysinfo/stats.c
- Timestamp: 2010-05-24T18:57:31Z (14 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 0095368
- Parents: 666f492
- File: 1 edited
Legend:
- Unmodified lines are shown without a prefix
- Added lines are prefixed with "+"
- Removed lines are prefixed with "-"
- "…" marks runs of unchanged lines omitted from the view
kernel/generic/src/sysinfo/stats.c
r666f492 → rda1bafb

    }

-   /* Each CPU structure is locked separatelly */
-   ipl_t ipl = interrupts_disable();
-
    size_t i;
    for (i = 0; i < config.cpu_count; i++) {
-       spinlock_lock(&cpus[i].lock);
+       irq_spinlock_lock(&cpus[i].lock, true);

        stats_cpus[i].id = cpus[i].id;
…
        stats_cpus[i].idle_ticks = cpus[i].idle_ticks;

-       spinlock_unlock(&cpus[i].lock);
-   }
-
-   interrupts_restore(ipl);
+       irq_spinlock_unlock(&cpus[i].lock, true);
+   }

    return ((void *) stats_cpus);
…

    /* Interrupts are already disabled */
-   spinlock_lock(&(task->lock));
+   irq_spinlock_lock(&(task->lock), false);

    /* Record the statistics and increment the iterator */
…
    (*iterator)++;

-   spinlock_unlock(&(task->lock));
+   irq_spinlock_unlock(&(task->lock), false);

    return true;
…
{
    /* Messing with task structures, avoid deadlock */
-   ipl_t ipl = interrupts_disable();
-   spinlock_lock(&tasks_lock);
+   irq_spinlock_lock(&tasks_lock, true);

    /* First walk the task tree to count the tasks */
…
    if (count == 0) {
        /* No tasks found (strange) */
-       spinlock_unlock(&tasks_lock);
-       interrupts_restore(ipl);
-
+       irq_spinlock_unlock(&tasks_lock, true);
        *size = 0;
        return NULL;
…
    *size = sizeof(stats_task_t) * count;
    if (dry_run) {
-       spinlock_unlock(&tasks_lock);
-       interrupts_restore(ipl);
+       irq_spinlock_unlock(&tasks_lock, true);
        return NULL;
    }
…
    if (stats_tasks == NULL) {
        /* No free space for allocation */
-       spinlock_unlock(&tasks_lock);
-       interrupts_restore(ipl);
-
+       irq_spinlock_unlock(&tasks_lock, true);
        *size = 0;
        return NULL;
…
    avltree_walk(&tasks_tree, task_serialize_walker, (void *) &iterator);

-   spinlock_unlock(&tasks_lock);
-   interrupts_restore(ipl);
+   irq_spinlock_unlock(&tasks_lock, true);

    return ((void *) stats_tasks);
…

    /* Interrupts are already disabled */
-   spinlock_lock(&thread->lock);
+   irq_spinlock_lock(&thread->lock, false);

    /* Record the statistics and increment the iterator */
…
    (*iterator)++;

-   spinlock_unlock(&thread->lock);
+   irq_spinlock_unlock(&thread->lock, false);

    return true;
…
{
    /* Messing with threads structures, avoid deadlock */
-   ipl_t ipl = interrupts_disable();
-   spinlock_lock(&threads_lock);
+   irq_spinlock_lock(&threads_lock, true);

    /* First walk the thread tree to count the threads */
…
    if (count == 0) {
        /* No threads found (strange) */
-       spinlock_unlock(&threads_lock);
-       interrupts_restore(ipl);
-
+       irq_spinlock_unlock(&threads_lock, true);
        *size = 0;
        return NULL;
…
    *size = sizeof(stats_thread_t) * count;
    if (dry_run) {
-       spinlock_unlock(&threads_lock);
-       interrupts_restore(ipl);
+       irq_spinlock_unlock(&threads_lock, true);
        return NULL;
    }
…
    if (stats_threads == NULL) {
        /* No free space for allocation */
-       spinlock_unlock(&threads_lock);
-       interrupts_restore(ipl);
-
+       irq_spinlock_unlock(&threads_lock, true);
        *size = 0;
        return NULL;
…
    avltree_walk(&threads_tree, thread_serialize_walker, (void *) &iterator);

-   spinlock_unlock(&threads_lock);
-   interrupts_restore(ipl);
+   irq_spinlock_unlock(&threads_lock, true);

    return ((void *) stats_threads);
…

    /* Messing with task structures, avoid deadlock */
-   ipl_t ipl = interrupts_disable();
-   spinlock_lock(&tasks_lock);
+   irq_spinlock_lock(&tasks_lock, true);

    task_t *task = task_find_by_id(task_id);
    if (task == NULL) {
        /* No task with this ID */
-       spinlock_unlock(&tasks_lock);
-       interrupts_restore(ipl);
+       irq_spinlock_unlock(&tasks_lock, true);
        return ret;
    }
…
        ret.data.size = sizeof(stats_task_t);

-       spinlock_unlock(&tasks_lock);
+       irq_spinlock_unlock(&tasks_lock, true);
    } else {
        /* Allocate stats_task_t structure */
…
            (stats_task_t *) malloc(sizeof(stats_task_t), FRAME_ATOMIC);
        if (stats_task == NULL) {
-           spinlock_unlock(&tasks_lock);
-           interrupts_restore(ipl);
+           irq_spinlock_unlock(&tasks_lock, true);
            return ret;
        }
…
        ret.data.data = (void *) stats_task;
        ret.data.size = sizeof(stats_task_t);

        /* Hand-over-hand locking */
-       spinlock_lock(&task->lock);
-       spinlock_unlock(&tasks_lock);
+       irq_spinlock_exchange(&tasks_lock, &task->lock);

        produce_stats_task(task, stats_task);

-       spinlock_unlock(&task->lock);
-   }
-
-   interrupts_restore(ipl);
+       irq_spinlock_unlock(&task->lock, true);
+   }

    return ret;
…

    /* Messing with threads structures, avoid deadlock */
-   ipl_t ipl = interrupts_disable();
-   spinlock_lock(&threads_lock);
+   irq_spinlock_lock(&threads_lock, true);

    thread_t *thread = thread_find_by_id(thread_id);
    if (thread == NULL) {
        /* No thread with this ID */
-       spinlock_unlock(&threads_lock);
-       interrupts_restore(ipl);
+       irq_spinlock_unlock(&threads_lock, true);
        return ret;
    }
…
        ret.data.size = sizeof(stats_thread_t);

-       spinlock_unlock(&threads_lock);
+       irq_spinlock_unlock(&threads_lock, true);
    } else {
        /* Allocate stats_thread_t structure */
…
            (stats_thread_t *) malloc(sizeof(stats_thread_t), FRAME_ATOMIC);
        if (stats_thread == NULL) {
-           spinlock_unlock(&threads_lock);
-           interrupts_restore(ipl);
+           irq_spinlock_unlock(&threads_lock, true);
            return ret;
        }
…

        /* Hand-over-hand locking */
-       spinlock_lock(&thread->lock);
-       spinlock_unlock(&threads_lock);
+       irq_spinlock_exchange(&threads_lock, &thread->lock);

        produce_stats_thread(thread, stats_thread);

-       spinlock_unlock(&thread->lock);
-   }
-
-   interrupts_restore(ipl);
+       irq_spinlock_unlock(&thread->lock, true);
+   }

    return ret;
…
{
    mutex_initialize(&load_lock, MUTEX_PASSIVE);

    sysinfo_set_item_fn_val("system.uptime", NULL, get_stats_uptime);
    sysinfo_set_item_fn_data("system.cpus", NULL, get_stats_cpus);
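The whole changeset follows one pattern: every explicit interrupts_disable()/interrupts_restore() pair wrapped around a plain spinlock_lock()/spinlock_unlock() pair is collapsed into a single irq_spinlock_lock()/irq_spinlock_unlock() call (with the boolean argument saying whether the lock should also manage the interrupt level), and the hand-over-hand lock/unlock sequence becomes irq_spinlock_exchange(). The sketch below is only an illustration of that idea, not the actual HelenOS implementation: the struct layout and the field names (lock, ipl) are assumptions made for this example, and the kernel-internal types and primitives (spinlock_t, ipl_t, interrupts_disable, interrupts_restore, spinlock_lock, spinlock_unlock) are taken as given from the old code shown above.

/*
 * Illustrative sketch only -- NOT the real irq_spinlock_t from HelenOS.
 * The wrapper bundles a plain spinlock with the interrupt level saved by
 * the last lock operation, so call sites no longer keep a local ipl_t.
 */
typedef struct {
    spinlock_t lock;   /* underlying plain spinlock (assumed field) */
    ipl_t ipl;         /* interrupt level saved by the last lock (assumed field) */
} irq_spinlock_t;

static inline void irq_spinlock_lock(irq_spinlock_t *ispin, bool irq_dis)
{
    if (irq_dis) {
        /* What the old call sites did explicitly via interrupts_disable(). */
        ipl_t ipl = interrupts_disable();
        spinlock_lock(&ispin->lock);
        ispin->ipl = ipl;
    } else {
        /* Caller guarantees interrupts are already disabled. */
        spinlock_lock(&ispin->lock);
    }
}

static inline void irq_spinlock_unlock(irq_spinlock_t *ispin, bool irq_res)
{
    if (irq_res) {
        /* Read the saved level before releasing the lock, then restore it. */
        ipl_t ipl = ispin->ipl;
        spinlock_unlock(&ispin->lock);
        interrupts_restore(ipl);
    } else {
        spinlock_unlock(&ispin->lock);
    }
}

/* Hand-over-hand locking: take the inner lock before dropping the outer one. */
static inline void irq_spinlock_exchange(irq_spinlock_t *unlock, irq_spinlock_t *lock)
{
    spinlock_lock(&lock->lock);
    /* The saved interrupt level migrates to the lock we keep holding. */
    lock->ipl = unlock->ipl;
    spinlock_unlock(&unlock->lock);
}

Under these assumptions the usage in the diff reads naturally: get_stats_task() takes tasks_lock with irq_spinlock_lock(&tasks_lock, true), hands the lock (and the saved interrupt level) over to task->lock with irq_spinlock_exchange(), and the final irq_spinlock_unlock(&task->lock, true) restores interrupts, which is why the trailing interrupts_restore(ipl) calls disappear.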