Changes in kernel/generic/src/sysinfo/stats.c [8f80c77:6e121b8] in mainline
File:
- kernel/generic/src/sysinfo/stats.c (1 edited)
Legend:
- Unmodified (context, no prefix)
- Added (prefixed with +)
- Removed (prefixed with -)
kernel/generic/src/sysinfo/stats.c
--- r8f80c77
+++ r6e121b8

     }
 
+    /* Each CPU structure is locked separatelly */
+    ipl_t ipl = interrupts_disable();
+
     size_t i;
     for (i = 0; i < config.cpu_count; i++) {
-        irq_spinlock_lock(&cpus[i].lock, true);
+        spinlock_lock(&cpus[i].lock);
 
         stats_cpus[i].id = cpus[i].id;
…
         stats_cpus[i].idle_ticks = cpus[i].idle_ticks;
 
-        irq_spinlock_unlock(&cpus[i].lock, true);
-    }
+        spinlock_unlock(&cpus[i].lock);
+    }
+
+    interrupts_restore(ipl);
 
     return ((void *) stats_cpus);
…
 *
 * Summarize task information into task statistics.
+ * Task lock should be held and interrupts disabled
+ * before executing this function.
 *
 * @param task Task.
…
 static void produce_stats_task(task_t *task, stats_task_t *stats_task)
 {
-    ASSERT(interrupts_disabled());
-    ASSERT(irq_spinlock_locked(&task->lock));
-
     stats_task->task_id = task->taskid;
     str_cpy(stats_task->name, TASK_NAME_BUFLEN, task->name);
…
 
     /* Interrupts are already disabled */
-    irq_spinlock_lock(&(task->lock), false);
+    spinlock_lock(&(task->lock));
 
     /* Record the statistics and increment the iterator */
…
     (*iterator)++;
 
-    irq_spinlock_unlock(&(task->lock), false);
+    spinlock_unlock(&(task->lock));
 
     return true;
…
 {
     /* Messing with task structures, avoid deadlock */
-    irq_spinlock_lock(&tasks_lock, true);
+    ipl_t ipl = interrupts_disable();
+    spinlock_lock(&tasks_lock);
 
     /* First walk the task tree to count the tasks */
…
     if (count == 0) {
         /* No tasks found (strange) */
-        irq_spinlock_unlock(&tasks_lock, true);
+        spinlock_unlock(&tasks_lock);
+        interrupts_restore(ipl);
+
         *size = 0;
         return NULL;
…
     *size = sizeof(stats_task_t) * count;
     if (dry_run) {
-        irq_spinlock_unlock(&tasks_lock, true);
+        spinlock_unlock(&tasks_lock);
+        interrupts_restore(ipl);
         return NULL;
     }
…
     if (stats_tasks == NULL) {
         /* No free space for allocation */
-        irq_spinlock_unlock(&tasks_lock, true);
+        spinlock_unlock(&tasks_lock);
+        interrupts_restore(ipl);
+
         *size = 0;
         return NULL;
…
     avltree_walk(&tasks_tree, task_serialize_walker, (void *) &iterator);
 
-    irq_spinlock_unlock(&tasks_lock, true);
+    spinlock_unlock(&tasks_lock);
+    interrupts_restore(ipl);
 
     return ((void *) stats_tasks);
…
 *
 * Summarize thread information into thread statistics.
+ * Thread lock should be held and interrupts disabled
+ * before executing this function.
 *
 * @param thread Thread.
…
 static void produce_stats_thread(thread_t *thread, stats_thread_t *stats_thread)
 {
-    ASSERT(interrupts_disabled());
-    ASSERT(irq_spinlock_locked(&thread->lock));
-
     stats_thread->thread_id = thread->tid;
     stats_thread->task_id = thread->task->taskid;
…
 
     /* Interrupts are already disabled */
-    irq_spinlock_lock(&thread->lock, false);
+    spinlock_lock(&thread->lock);
 
     /* Record the statistics and increment the iterator */
…
     (*iterator)++;
 
-    irq_spinlock_unlock(&thread->lock, false);
+    spinlock_unlock(&thread->lock);
 
     return true;
…
 {
     /* Messing with threads structures, avoid deadlock */
-    irq_spinlock_lock(&threads_lock, true);
+    ipl_t ipl = interrupts_disable();
+    spinlock_lock(&threads_lock);
 
     /* First walk the thread tree to count the threads */
…
     if (count == 0) {
         /* No threads found (strange) */
-        irq_spinlock_unlock(&threads_lock, true);
+        spinlock_unlock(&threads_lock);
+        interrupts_restore(ipl);
+
         *size = 0;
         return NULL;
…
     *size = sizeof(stats_thread_t) * count;
     if (dry_run) {
-        irq_spinlock_unlock(&threads_lock, true);
+        spinlock_unlock(&threads_lock);
+        interrupts_restore(ipl);
         return NULL;
     }
…
     if (stats_threads == NULL) {
         /* No free space for allocation */
-        irq_spinlock_unlock(&threads_lock, true);
+        spinlock_unlock(&threads_lock);
+        interrupts_restore(ipl);
+
         *size = 0;
         return NULL;
…
     avltree_walk(&threads_tree, thread_serialize_walker, (void *) &iterator);
 
-    irq_spinlock_unlock(&threads_lock, true);
+    spinlock_unlock(&threads_lock);
+    interrupts_restore(ipl);
 
     return ((void *) stats_threads);
…
 
     /* Messing with task structures, avoid deadlock */
-    irq_spinlock_lock(&tasks_lock, true);
+    ipl_t ipl = interrupts_disable();
+    spinlock_lock(&tasks_lock);
 
     task_t *task = task_find_by_id(task_id);
     if (task == NULL) {
         /* No task with this ID */
-        irq_spinlock_unlock(&tasks_lock, true);
+        spinlock_unlock(&tasks_lock);
+        interrupts_restore(ipl);
         return ret;
     }
…
         ret.data.size = sizeof(stats_task_t);
 
-        irq_spinlock_unlock(&tasks_lock, true);
+        spinlock_unlock(&tasks_lock);
     } else {
         /* Allocate stats_task_t structure */
…
             (stats_task_t *) malloc(sizeof(stats_task_t), FRAME_ATOMIC);
         if (stats_task == NULL) {
-            irq_spinlock_unlock(&tasks_lock, true);
+            spinlock_unlock(&tasks_lock);
+            interrupts_restore(ipl);
             return ret;
         }
…
         ret.data.data = (void *) stats_task;
         ret.data.size = sizeof(stats_task_t);
 
         /* Hand-over-hand locking */
-        irq_spinlock_exchange(&tasks_lock, &task->lock);
+        spinlock_lock(&task->lock);
+        spinlock_unlock(&tasks_lock);
 
         produce_stats_task(task, stats_task);
 
-        irq_spinlock_unlock(&task->lock, true);
-    }
+        spinlock_unlock(&task->lock);
+    }
+
+    interrupts_restore(ipl);
 
     return ret;
…
 
     /* Messing with threads structures, avoid deadlock */
-    irq_spinlock_lock(&threads_lock, true);
+    ipl_t ipl = interrupts_disable();
+    spinlock_lock(&threads_lock);
 
     thread_t *thread = thread_find_by_id(thread_id);
     if (thread == NULL) {
         /* No thread with this ID */
-        irq_spinlock_unlock(&threads_lock, true);
+        spinlock_unlock(&threads_lock);
+        interrupts_restore(ipl);
         return ret;
     }
…
         ret.data.size = sizeof(stats_thread_t);
 
-        irq_spinlock_unlock(&threads_lock, true);
+        spinlock_unlock(&threads_lock);
     } else {
         /* Allocate stats_thread_t structure */
…
             (stats_thread_t *) malloc(sizeof(stats_thread_t), FRAME_ATOMIC);
         if (stats_thread == NULL) {
-            irq_spinlock_unlock(&threads_lock, true);
+            spinlock_unlock(&threads_lock);
+            interrupts_restore(ipl);
             return ret;
         }
…
 
         /* Hand-over-hand locking */
-        irq_spinlock_exchange(&threads_lock, &thread->lock);
+        spinlock_lock(&thread->lock);
+        spinlock_unlock(&threads_lock);
 
         produce_stats_thread(thread, stats_thread);
 
-        irq_spinlock_unlock(&thread->lock, true);
-    }
+        spinlock_unlock(&thread->lock);
+    }
+
+    interrupts_restore(ipl);
 
     return ret;
…
 {
     mutex_initialize(&load_lock, MUTEX_PASSIVE);
 
     sysinfo_set_item_fn_val("system.uptime", NULL, get_stats_uptime);
     sysinfo_set_item_fn_data("system.cpus", NULL, get_stats_cpus);
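A note on the locking change above: on the r8f80c77 side the code used the irq_spinlock_* wrappers, whose boolean argument asks the wrapper to disable and later restore interrupts itself; on the r6e121b8 side the same effect is spelled out with plain spinlock_* calls bracketed by explicit interrupts_disable()/interrupts_restore(). The sketch below contrasts the two styles on the per-CPU snapshot loop. It is illustrative only: snapshot_cpu_old() and snapshot_cpu_new() are hypothetical helpers, and only the fields visible in the diff are copied.

    /* Old style (r8f80c77): the wrapper handles the interrupt state. */
    static void snapshot_cpu_old(size_t i, stats_cpu_t *stats)
    {
        irq_spinlock_lock(&cpus[i].lock, true);    /* true: also disable interrupts */
        stats->id = cpus[i].id;
        stats->idle_ticks = cpus[i].idle_ticks;
        irq_spinlock_unlock(&cpus[i].lock, true);  /* true: restore interrupts */
    }

    /* New style (r6e121b8): interrupt state handled explicitly by the caller. */
    static void snapshot_cpu_new(size_t i, stats_cpu_t *stats)
    {
        ipl_t ipl = interrupts_disable();   /* remember the previous level */
        spinlock_lock(&cpus[i].lock);
        stats->id = cpus[i].id;
        stats->idle_ticks = cpus[i].idle_ticks;
        spinlock_unlock(&cpus[i].lock);
        interrupts_restore(ipl);            /* restore, never blindly enable */
    }

Either way the critical section runs with interrupts off, which produce_stats_task() and produce_stats_thread() now state as a documented precondition instead of asserting it.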
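The other pattern worth calling out is the hand-over-hand locking in get_stats_task() and get_stats_thread(). The old side used irq_spinlock_exchange(&tasks_lock, &task->lock); the new side makes the ordering explicit: take the per-object lock first, and only then drop the container lock, so the task (or thread) cannot be torn down in the gap between the two. Below is a condensed sketch of the new-side flow, with the statistics copying and the sysinfo plumbing omitted.

    ipl_t ipl = interrupts_disable();
    spinlock_lock(&tasks_lock);                /* protects the task tree */

    task_t *task = task_find_by_id(task_id);
    if (task != NULL) {
        /* Hand-over-hand: grab the task lock before releasing tasks_lock,
         * so the task cannot disappear between the two operations. */
        spinlock_lock(&task->lock);
        spinlock_unlock(&tasks_lock);

        /* ... copy the per-task statistics here (see produce_stats_task) ... */

        spinlock_unlock(&task->lock);
    } else {
        spinlock_unlock(&tasks_lock);
    }

    interrupts_restore(ipl);

The same shape appears in get_stats_thread() with threads_lock and thread->lock.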