Changes in kernel/generic/src/udebug/udebug_ops.c [a35b458:5a6cc679] in mainline
File: kernel/generic/src/udebug/udebug_ops.c (1 edited)
Legend: unprefixed lines are unmodified in both revisions; lines prefixed with '-' were removed and lines prefixed with '+' were added; '…' marks lines omitted from the listing. Removed/added pairs that read identically below appear to differ only in whitespace, which this text rendering does not show.
kernel/generic/src/udebug/udebug_ops.c
ra35b458 → r5a6cc679

{
    mutex_lock(&TASK->udebug.lock);

    /* thread_exists() must be called with threads_lock held */
    irq_spinlock_lock(&threads_lock, true);

    if (!thread_exists(thread)) {
        irq_spinlock_unlock(&threads_lock, true);
        …
        return ENOENT;
    }

    /* thread->lock is enough to ensure the thread's existence */
    irq_spinlock_exchange(&threads_lock, &thread->lock);

    /* Verify that 'thread' is a userspace thread. */
    if (!thread->uspace) {
        …
        return ENOENT;
    }

    /* Verify debugging state. */
    if (thread->udebug.active != true) {
        …
        return ENOENT;
    }

    /*
     * Since the thread has active == true, TASK->udebug.lock
     …
     */
    irq_spinlock_unlock(&thread->lock, true);

    /* Only mutex TASK->udebug.lock left. */

    /* Now verify that the thread belongs to the current task. */
    if (thread->task != TASK) {
        …
        return ENOENT;
    }

    /*
     * Now we need to grab the thread's debug lock for synchronization
     …
     */
    mutex_lock(&thread->udebug.lock);

    /* The big task mutex is no longer needed. */
    mutex_unlock(&TASK->udebug.lock);

    if (thread->udebug.go != being_go) {
        /* Not in debugging session or undesired GO state. */
        …
        return EINVAL;
    }

    /* Only thread->udebug.lock left. */

    return EOK;    /* All went well. */
}
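The hunk above is the verification sequence that, when it returns EOK, leaves thread->udebug.lock held; the per-thread operations later in the changeset release it again with _thread_op_end(). For orientation, a condensed happy-path sketch of that lock hand-off follows. The function name and signature are assumptions for illustration (the diff does not show the header), and the elided error-path lines are simplified:

/*
 * Condensed sketch only; the real code is in the hunk above.
 * Name, signature and simplified error paths are assumptions.
 */
static errno_t thread_op_begin_sketch(thread_t *thread, bool being_go)
{
    mutex_lock(&TASK->udebug.lock);

    /* The existence check must run under threads_lock... */
    irq_spinlock_lock(&threads_lock, true);
    if (!thread_exists(thread)) {
        irq_spinlock_unlock(&threads_lock, true);
        mutex_unlock(&TASK->udebug.lock);
        return ENOENT;
    }

    /*
     * ...then threads_lock is traded for thread->lock, which is
     * enough to keep the thread alive while its state is checked.
     */
    irq_spinlock_exchange(&threads_lock, &thread->lock);

    /* Checks of the thread's uspace/active/task state are omitted here. */
    irq_spinlock_unlock(&thread->lock, true);

    /*
     * Finally the per-thread debug mutex is taken and the task-wide
     * mutex dropped, so only thread->udebug.lock is held on success.
     */
    mutex_lock(&thread->udebug.lock);
    mutex_unlock(&TASK->udebug.lock);

    if (thread->udebug.go != being_go) {
        mutex_unlock(&thread->udebug.lock);
        return EINVAL;
    }

    return EOK;
}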
…

{
    LOG("Debugging task %" PRIu64, TASK->taskid);
-
-    mutex_lock(&TASK->udebug.lock);
-
+
+    mutex_lock(&TASK->udebug.lock);
+
    if (TASK->udebug.dt_state != UDEBUG_TS_INACTIVE) {
        mutex_unlock(&TASK->udebug.lock);
        return EBUSY;
    }

    TASK->udebug.dt_state = UDEBUG_TS_BEGINNING;
    TASK->udebug.begin_call = call;
    TASK->udebug.debugger = call->sender;

    if (TASK->udebug.not_stoppable_count == 0) {
        TASK->udebug.dt_state = UDEBUG_TS_ACTIVE;
        …
    } else
        *active = false;    /* only in beginning state */

    /* Set udebug.active on all of the task's userspace threads. */

    list_foreach(TASK->threads, th_link, thread_t, thread) {
        mutex_lock(&thread->udebug.lock);
        …
        mutex_unlock(&thread->udebug.lock);
    }

    mutex_unlock(&TASK->udebug.lock);
    return EOK;

…

{
    LOG("Task %" PRIu64, TASK->taskid);

    mutex_lock(&TASK->udebug.lock);
    errno_t rc = udebug_task_cleanup(TASK);
    mutex_unlock(&TASK->udebug.lock);

    return rc;
}

…

{
    LOG("mask = 0x%x", mask);
-
-    mutex_lock(&TASK->udebug.lock);
-
+
+    mutex_lock(&TASK->udebug.lock);
+
    if (TASK->udebug.dt_state != UDEBUG_TS_ACTIVE) {
        mutex_unlock(&TASK->udebug.lock);
        return EINVAL;
    }

    TASK->udebug.evmask = mask;
    mutex_unlock(&TASK->udebug.lock);

    return EOK;
}

…

    if (rc != EOK)
        return rc;

    thread->udebug.go_call = call;
    thread->udebug.go = true;
    thread->udebug.cur_event = 0;    /* none */

    /*
     * Neither thread's lock nor threads_lock may be held during wakeup.
     …
     */
    waitq_wakeup(&thread->udebug.go_wq, WAKEUP_FIRST);

    _thread_op_end(thread);

    return EOK;
}

…

{
    LOG("udebug_stop()");

    /*
     * On success, this will lock thread->udebug.lock. Note that this
     …
    if (rc != EOK)
        return rc;

    /* Take GO away from the thread. */
    thread->udebug.go = false;

    if (thread->udebug.stoppable != true) {
        /* Answer will be sent when the thread becomes stoppable. */
        …
        return EOK;
    }

    /*
     * Answer GO call.
     *
     */

    /* Make sure nobody takes this call away from us. */
    call = thread->udebug.go_call;
    thread->udebug.go_call = NULL;

    IPC_SET_RETVAL(call->data, 0);
    IPC_SET_ARG1(call->data, UDEBUG_EVENT_STOP);

    THREAD->udebug.cur_event = UDEBUG_EVENT_STOP;

    _thread_op_end(thread);

    mutex_lock(&TASK->udebug.lock);
    ipc_answer(&TASK->answerbox, call);
    mutex_unlock(&TASK->udebug.lock);

    return EOK;
}

…

{
    LOG("udebug_thread_read()");

    /* Allocate a buffer to hold thread IDs */
    sysarg_t *id_buffer = malloc(buf_size + 1, 0);
-
-    mutex_lock(&TASK->udebug.lock);
-
+
+    mutex_lock(&TASK->udebug.lock);
+
    /* Verify task state */
    if (TASK->udebug.dt_state != UDEBUG_TS_ACTIVE) {
        …
        return EINVAL;
    }

    irq_spinlock_lock(&TASK->lock, true);

    /* Copy down the thread IDs */

    size_t max_ids = buf_size / sizeof(sysarg_t);
    size_t copied_ids = 0;
    size_t extra_ids = 0;

    /* FIXME: make sure the thread isn't past debug shutdown... */
    list_foreach(TASK->threads, th_link, thread_t, thread) {
        …
        bool uspace = thread->uspace;
        irq_spinlock_unlock(&thread->lock, false);

        /* Not interested in kernel threads. */
        if (!uspace)
            continue;

        if (copied_ids < max_ids) {
            /* Using thread struct pointer as identification hash */
            …
            extra_ids++;
    }

    irq_spinlock_unlock(&TASK->lock, true);
-
-    mutex_unlock(&TASK->udebug.lock);
-
+
+    mutex_unlock(&TASK->udebug.lock);
+
    *buffer = id_buffer;
    *stored = copied_ids * sizeof(sysarg_t);
    *needed = (copied_ids + extra_ids) * sizeof(sysarg_t);

    return EOK;
}
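udebug_thread_read() above reports its results through three output parameters: *buffer receives the allocated ID array, *stored the number of bytes actually filled in, and *needed the size that would hold IDs for all of the task's userspace threads (copied_ids plus extra_ids entries). A hypothetical caller-side sketch of that convention follows; the parameter order of udebug_thread_read() and the retry idea are assumptions for illustration, not taken from this changeset:

/* Hypothetical kernel-context helper, for illustration only. */
static errno_t list_debugged_threads_sketch(void)
{
    void *ids = NULL;
    size_t stored = 0;
    size_t needed = 0;

    /* Ask for at most 16 thread IDs (parameter order assumed). */
    errno_t rc = udebug_thread_read(&ids, 16 * sizeof(sysarg_t), &stored, &needed);
    if (rc != EOK)
        return rc;

    if (needed > stored) {
        /*
         * The task has more userspace threads than fit into the
         * requested size; a caller could free the buffer and retry
         * with a buffer of 'needed' bytes.
         */
    }

    /* Each of the stored / sizeof(sysarg_t) entries is one thread hash. */
    free(ids);
    return EOK;
}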
…

{
    size_t name_size = str_size(TASK->name) + 1;

    *data = malloc(name_size, 0);
    *data_size = name_size;

    memcpy(*data, TASK->name, name_size);

    return EOK;
}

…

    if (rc != EOK)
        return rc;

    /* Additionally we need to verify that we are inside a syscall. */
    if ((thread->udebug.cur_event != UDEBUG_EVENT_SYSCALL_B) &&
        …
        return EINVAL;
    }

    /* Prepare a buffer to hold the arguments. */
    sysarg_t *arg_buffer = malloc(6 * sizeof(sysarg_t), 0);

    /* Copy to a local buffer before releasing the lock. */
    memcpy(arg_buffer, thread->udebug.syscall_args, 6 * sizeof(sysarg_t));

    _thread_op_end(thread);

    *buffer = arg_buffer;
    return EOK;

…

    if (rc != EOK)
        return rc;

    istate_t *state = thread->udebug.uspace_state;
    if (state == NULL) {
        …
        return EBUSY;
    }

    /* Prepare a buffer to hold the data. */
    istate_t *state_buf = malloc(sizeof(istate_t), 0);

    /* Copy to the allocated buffer */
    memcpy(state_buf, state, sizeof(istate_t));

    _thread_op_end(thread);

    *buffer = (void *) state_buf;
    return EOK;

…

    /* Verify task state */
    mutex_lock(&TASK->udebug.lock);

    if (TASK->udebug.dt_state != UDEBUG_TS_ACTIVE) {
        mutex_unlock(&TASK->udebug.lock);
        return EBUSY;
    }

    void *data_buffer = malloc(n, 0);

    /*
     * NOTE: this is not strictly from a syscall... but that shouldn't
     …
    errno_t rc = copy_from_uspace(data_buffer, (void *) uspace_addr, n);
    mutex_unlock(&TASK->udebug.lock);

    if (rc != EOK)
        return rc;

    *buffer = data_buffer;
    return EOK;
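Four of the hunks above finish by calling _thread_op_end() after working with thread->udebug state; each is entered through the verification step sketched earlier, which locks thread->udebug.lock on success (see also the comment in the udebug_stop() hunk). A minimal sketch of that begin/end pairing follows; the name of the begin-side helper and the meaning of its second argument are assumptions based on the first hunk, since the diff only shows its body and the _thread_op_end() calls:

/*
 * Sketch of the pattern shared by the per-thread operations above.
 * _thread_op_begin() is an assumed name for the verification helper
 * whose body appears in the first hunk; 'false' stands for the
 * expected value of thread->udebug.go.
 */
static errno_t example_thread_op(thread_t *thread)
{
    /* Verify the thread; on success thread->udebug.lock is held. */
    errno_t rc = _thread_op_begin(thread, false);
    if (rc != EOK)
        return rc;

    /* ... read or update thread->udebug state here ... */

    /* Release thread->udebug.lock again. */
    _thread_op_end(thread);
    return EOK;
}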