Changes in kernel/generic/src/udebug/udebug_ops.c [da1bafb:19f857a] in mainline
kernel/generic/src/udebug/udebug_ops.c
--- kernel/generic/src/udebug/udebug_ops.c (rda1bafb)
+++ kernel/generic/src/udebug/udebug_ops.c (r19f857a)
@@ -33 +33 @@
 /**
  * @file
- * @brief
+ * @brief Udebug operations.
  *
  * Udebug operations on tasks and threads are implemented here. The
@@ -39 +39 @@
  * when servicing udebug IPC messages.
  */

 #include <debug.h>
 #include <proc/task.h>
@@ -53 +53 @@
 #include <memstr.h>

-/** Prepare a thread for a debugging operation.
+/**
+ * Prepare a thread for a debugging operation.
  *
  * Simply put, return thread t with t->udebug.lock held,
@@ -72 +73 @@
  * the t->lock spinlock to the t->udebug.lock mutex.
  *
- * @param thread    Pointer, need not at all be valid.
- * @param being_go  Required thread state.
+ * @param t         Pointer, need not at all be valid.
+ * @param being_go  Required thread state.
  *
  * Returns EOK if all went well, or an error code otherwise.
- *
- */
-static int _thread_op_begin(thread_t *thread, bool being_go)
-{
-    mutex_lock(&TASK->udebug.lock);
-
+ */
+static int _thread_op_begin(thread_t *t, bool being_go)
+{
+    ipl_t ipl;
+
+    mutex_lock(&TASK->udebug.lock);
+
     /* thread_exists() must be called with threads_lock held */
-    irq_spinlock_lock(&threads_lock, true);
-
-    if (!thread_exists(thread)) {
-        irq_spinlock_unlock(&threads_lock, true);
+    ipl = interrupts_disable();
+    spinlock_lock(&threads_lock);
+
+    if (!thread_exists(t)) {
+        spinlock_unlock(&threads_lock);
+        interrupts_restore(ipl);
         mutex_unlock(&TASK->udebug.lock);
         return ENOENT;
     }
-
-    /* thread->lock is enough to ensure the thread's existence */
-    irq_spinlock_exchange(&threads_lock, &thread->lock);
-
-    /* Verify that 'thread' is a userspace thread. */
-    if ((thread->flags & THREAD_FLAG_USPACE) == 0) {
+
+    /* t->lock is enough to ensure the thread's existence */
+    spinlock_lock(&t->lock);
+    spinlock_unlock(&threads_lock);
+
+    /* Verify that 't' is a userspace thread. */
+    if ((t->flags & THREAD_FLAG_USPACE) == 0) {
         /* It's not, deny its existence */
-        irq_spinlock_unlock(&thread->lock, true);
+        spinlock_unlock(&t->lock);
+        interrupts_restore(ipl);
         mutex_unlock(&TASK->udebug.lock);
         return ENOENT;
     }

     /* Verify debugging state. */
-    if (thread->udebug.active != true) {
+    if (t->udebug.active != true) {
         /* Not in debugging session or undesired GO state */
-        irq_spinlock_unlock(&thread->lock, true);
+        spinlock_unlock(&t->lock);
+        interrupts_restore(ipl);
         mutex_unlock(&TASK->udebug.lock);
         return ENOENT;
     }

     /*
      * Since the thread has active == true, TASK->udebug.lock
      * is enough to ensure its existence and that active remains
      * true.
-     *
      */
-    irq_spinlock_unlock(&thread->lock, true);
-
+    spinlock_unlock(&t->lock);
+    interrupts_restore(ipl);
+
     /* Only mutex TASK->udebug.lock left. */

     /* Now verify that the thread belongs to the current task. */
-    if (thread->task != TASK) {
+    if (t->task != TASK) {
         /* No such thread belonging this task*/
         mutex_unlock(&TASK->udebug.lock);
         return ENOENT;
     }

     /*
      * Now we need to grab the thread's debug lock for synchronization
      * of the threads stoppability/stop state.
-     *
      */
-    mutex_lock(&thread->udebug.lock);
-
+    mutex_lock(&t->udebug.lock);
+
     /* The big task mutex is no longer needed. */
     mutex_unlock(&TASK->udebug.lock);
-
-    if (thread->udebug.go != being_go) {
+
+    if (t->udebug.go != being_go) {
         /* Not in debugging session or undesired GO state. */
-        mutex_unlock(&thread->udebug.lock);
+        mutex_unlock(&t->udebug.lock);
         return EINVAL;
     }
-
-    /* Only thread->udebug.lock left. */
-
-    return EOK;
+
+    /* Only t->udebug.lock left. */
+
+    return EOK;    /* All went well. */
 }

 /** End debugging operation on a thread. */
-static void _thread_op_end(thread_t *thread)
-{
-    mutex_unlock(&thread->udebug.lock);
+static void _thread_op_end(thread_t *t)
+{
+    mutex_unlock(&t->udebug.lock);
 }

@@ -165 +171 @@
  * all the threads become stoppable (i.e. they can be considered stopped).
  *
- * @param call The BEGIN call we are servicing.
- *
- * @return 0 (OK, but not done yet), 1 (done) or negative error code.
- *
+ * @param call  The BEGIN call we are servicing.
+ * @return 0 (OK, but not done yet), 1 (done) or negative error code.
  */
 int udebug_begin(call_t *call)
 {
-    LOG("Debugging task %" PRIu64, TASK->taskid);
-
-    mutex_lock(&TASK->udebug.lock);
-
+    int reply;
+
+    thread_t *t;
+    link_t *cur;
+
+    LOG("Debugging task %llu", TASK->taskid);
+    mutex_lock(&TASK->udebug.lock);
+
     if (TASK->udebug.dt_state != UDEBUG_TS_INACTIVE) {
         mutex_unlock(&TASK->udebug.lock);
         return EBUSY;
     }

     TASK->udebug.dt_state = UDEBUG_TS_BEGINNING;
     TASK->udebug.begin_call = call;
     TASK->udebug.debugger = call->sender;
-
-    int reply;

     if (TASK->udebug.not_stoppable_count == 0) {
         TASK->udebug.dt_state = UDEBUG_TS_ACTIVE;
         TASK->udebug.begin_call = NULL;
-        reply = 1;  /* immediate reply */
-    } else
-        reply = 0;  /* no reply */
+        reply = 1;  /* immediate reply */
+    } else {
+        reply = 0;  /* no reply */
+    }

     /* Set udebug.active on all of the task's userspace threads. */
-
-    link_t *cur;
+
     for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
-        thread_t *thread = list_get_instance(cur, thread_t, th_link);
-
-        mutex_lock(&thread->udebug.lock);
-        if ((thread->flags & THREAD_FLAG_USPACE) != 0) {
-            thread->udebug.active = true;
-            mutex_unlock(&thread->udebug.lock);
-            condvar_broadcast(&thread->udebug.active_cv);
-        } else
-            mutex_unlock(&thread->udebug.lock);
-    }
-
+        t = list_get_instance(cur, thread_t, th_link);
+
+        mutex_lock(&t->udebug.lock);
+        if ((t->flags & THREAD_FLAG_USPACE) != 0) {
+            t->udebug.active = true;
+            mutex_unlock(&t->udebug.lock);
+            condvar_broadcast(&t->udebug.active_cv);
+        } else {
+            mutex_unlock(&t->udebug.lock);
+        }
+    }
+
     mutex_unlock(&TASK->udebug.lock);
     return reply;
@@ -216 +223 @@
  *
  * Closes the debugging session for the current task.
- *
  * @return Zero on success or negative error code.
- *
  */
 int udebug_end(void)
 {
+    int rc;
+
     LOG("Task %" PRIu64, TASK->taskid);
-
-    mutex_lock(&TASK->udebug.lock);
-    int rc = udebug_task_cleanup(TASK);
-    mutex_unlock(&TASK->udebug.lock);
-
+
+    mutex_lock(&TASK->udebug.lock);
+    rc = udebug_task_cleanup(TASK);
+    mutex_unlock(&TASK->udebug.lock);
+
     return rc;
 }
@@ -235 +242 @@
  * Sets the event mask that determines which events are enabled.
  *
- * @param mask Or combination of events that should be enabled.
- *
- * @return Zero on success or negative error code.
- *
+ * @param mask  Or combination of events that should be enabled.
+ * @return Zero on success or negative error code.
  */
 int udebug_set_evmask(udebug_evmask_t mask)
 {
     LOG("mask = 0x%x", mask);
-
-    mutex_lock(&TASK->udebug.lock);
-
+
+    mutex_lock(&TASK->udebug.lock);
+
     if (TASK->udebug.dt_state != UDEBUG_TS_ACTIVE) {
         mutex_unlock(&TASK->udebug.lock);
         return EINVAL;
     }

     TASK->udebug.evmask = mask;
     mutex_unlock(&TASK->udebug.lock);

     return 0;
 }
@@ -263 +268 @@
  * a debugging event or STOP occurs, at which point the thread loses GO.
  *
- * @param thread The thread to operate on (unlocked and need not be valid).
- * @param call   The GO call that we are servicing.
- *
- */
-int udebug_go(thread_t *thread, call_t *call)
-{
-    /* On success, this will lock thread->udebug.lock. */
-    int rc = _thread_op_begin(thread, false);
-    if (rc != EOK)
+ * @param t     The thread to operate on (unlocked and need not be valid).
+ * @param call  The GO call that we are servicing.
+ */
+int udebug_go(thread_t *t, call_t *call)
+{
+    int rc;
+
+    /* On success, this will lock t->udebug.lock. */
+    rc = _thread_op_begin(t, false);
+    if (rc != EOK) {
         return rc;
-
-    thread->udebug.go_call = call;
-    thread->udebug.go = true;
-    thread->udebug.cur_event = 0;  /* none */
-
+    }
+
+    t->udebug.go_call = call;
+    t->udebug.go = true;
+    t->udebug.cur_event = 0;  /* none */
+
     /*
-     * Neither thread's lock nor threads_lock may be held during wakeup.
-     *
+     * Neither t's lock nor threads_lock may be held during wakeup.
      */
-    waitq_wakeup(&thread->udebug.go_wq, WAKEUP_FIRST);
-
-    _thread_op_end(thread);
-
+    waitq_wakeup(&t->udebug.go_wq, WAKEUP_FIRST);
+
+    _thread_op_end(t);
+
     return 0;
 }
@@ -294 +300 @@
  * can be considered stopped).
  *
- * @param thread The thread to operate on (unlocked and need not be valid).
- * @param call   The GO call that we are servicing.
- *
- */
-int udebug_stop(thread_t *thread, call_t *call)
-{
+ * @param t     The thread to operate on (unlocked and need not be valid).
+ * @param call  The GO call that we are servicing.
+ */
+int udebug_stop(thread_t *t, call_t *call)
+{
+    int rc;
+
     LOG("udebug_stop()");

     /*
-     * On success, this will lock thread->udebug.lock. Note that this
-     * makes sure the thread is not stopped.
-     *
+     * On success, this will lock t->udebug.lock. Note that this makes sure
+     * the thread is not stopped.
      */
-    int rc = _thread_op_begin(thread, true);
-    if (rc != EOK)
+    rc = _thread_op_begin(t, true);
+    if (rc != EOK) {
         return rc;
-
+    }
+
     /* Take GO away from the thread. */
-    thread->udebug.go = false;
-
-    if (thread->udebug.stoppable != true) {
+    t->udebug.go = false;
+
+    if (t->udebug.stoppable != true) {
         /* Answer will be sent when the thread becomes stoppable. */
-        _thread_op_end(thread);
+        _thread_op_end(t);
         return 0;
     }

     /*
      * Answer GO call.
-     *
      */

     /* Make sure nobody takes this call away from us. */
-    call = thread->udebug.go_call;
-    thread->udebug.go_call = NULL;
-
+    call = t->udebug.go_call;
+    t->udebug.go_call = NULL;
+
     IPC_SET_RETVAL(call->data, 0);
     IPC_SET_ARG1(call->data, UDEBUG_EVENT_STOP);

     THREAD->udebug.cur_event = UDEBUG_EVENT_STOP;
-
-    _thread_op_end(thread);
-
+
+    _thread_op_end(t);
+
     mutex_lock(&TASK->udebug.lock);
     ipc_answer(&TASK->answerbox, call);
     mutex_unlock(&TASK->udebug.lock);

     return 0;
 }
@@ -359 +365 @@
  * a maximum size for the userspace buffer.
  *
- * @param buffer   The buffer for storing thread hashes.
- * @param buf_size Buffer size in bytes.
- * @param stored   The actual number of bytes copied will be stored here.
- * @param needed   Total number of hashes that could have been saved.
- *
+ * @param buffer    The buffer for storing thread hashes.
+ * @param buf_size  Buffer size in bytes.
+ * @param stored    The actual number of bytes copied will be stored here.
+ * @param needed    Total number of hashes that could have been saved.
  */
 int udebug_thread_read(void **buffer, size_t buf_size, size_t *stored,
     size_t *needed)
 {
+    thread_t *t;
+    link_t *cur;
+    unative_t tid;
+    size_t copied_ids;
+    size_t extra_ids;
+    ipl_t ipl;
+    unative_t *id_buffer;
+    int flags;
+    size_t max_ids;
+
     LOG("udebug_thread_read()");

     /* Allocate a buffer to hold thread IDs */
-    unative_t *id_buffer = malloc(buf_size + 1, 0);
-
-    mutex_lock(&TASK->udebug.lock);
-
+    id_buffer = malloc(buf_size + 1, 0);
+
+    mutex_lock(&TASK->udebug.lock);
+
     /* Verify task state */
     if (TASK->udebug.dt_state != UDEBUG_TS_ACTIVE) {
@@ -380 +395 @@
         return EINVAL;
     }
-
-    irq_spinlock_lock(&TASK->lock, true);
-
+
+    ipl = interrupts_disable();
+    spinlock_lock(&TASK->lock);
     /* Copy down the thread IDs */
-
-    size_t max_ids = buf_size / sizeof(unative_t);
-    size_t copied_ids = 0;
-    size_t extra_ids = 0;
-
+
+    max_ids = buf_size / sizeof(unative_t);
+    copied_ids = 0;
+    extra_ids = 0;
+
     /* FIXME: make sure the thread isn't past debug shutdown... */
-    link_t *cur;
     for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
-        thread_t *thread = list_get_instance(cur, thread_t, th_link);
-
-        irq_spinlock_lock(&thread->lock, false);
-        int flags = thread->flags;
-        irq_spinlock_unlock(&thread->lock, false);
-
+        t = list_get_instance(cur, thread_t, th_link);
+
+        spinlock_lock(&t->lock);
+        flags = t->flags;
+        spinlock_unlock(&t->lock);
+
         /* Not interested in kernel threads. */
         if ((flags & THREAD_FLAG_USPACE) == 0)
             continue;

         if (copied_ids < max_ids) {
             /* Using thread struct pointer as identification hash */
-            id_buffer[copied_ids++] = (unative_t) thread;
-        } else
+            tid = (unative_t) t;
+            id_buffer[copied_ids++] = tid;
+        } else {
             extra_ids++;
-    }
-
-    irq_spinlock_unlock(&TASK->lock, true);
-
-    mutex_unlock(&TASK->udebug.lock);
-
+        }
+    }
+
+    spinlock_unlock(&TASK->lock);
+    interrupts_restore(ipl);
+
+    mutex_unlock(&TASK->udebug.lock);
+
     *buffer = id_buffer;
     *stored = copied_ids * sizeof(unative_t);
     *needed = (copied_ids + extra_ids) * sizeof(unative_t);

     return 0;
 }
@@ -425 +442 @@
  * Also returns the size of the data.
  *
- * @param data      Place to store pointer to newly allocated block.
- * @param data_size Place to store size of the data.
- *
- * @returns EOK.
- *
+ * @param data       Place to store pointer to newly allocated block.
+ * @param data_size  Place to store size of the data.
+ *
+ * @returns EOK.
  */
 int udebug_name_read(char **data, size_t *data_size)
 {
-    size_t name_size = str_size(TASK->name) + 1;
-
+    size_t name_size;
+
+    name_size = str_size(TASK->name) + 1;
     *data = malloc(name_size, 0);
     *data_size = name_size;

     memcpy(*data, TASK->name, name_size);

     return 0;
 }
@@ -453 +470 @@
  * this function will fail with an EINVAL error code.
  *
- * @param thread Thread where call arguments are to be read.
- * @param buffer Place to store pointer to new buffer.
- *
- * @return EOK on success, ENOENT if @a t is invalid, EINVAL
- *         if thread state is not valid for this operation.
- *
- */
-int udebug_args_read(thread_t *thread, void **buffer)
-{
+ * @param t       Thread where call arguments are to be read.
+ * @param buffer  Place to store pointer to new buffer.
+ * @return EOK on success, ENOENT if @a t is invalid, EINVAL
+ *         if thread state is not valid for this operation.
+ */
+int udebug_args_read(thread_t *t, void **buffer)
+{
+    int rc;
+    unative_t *arg_buffer;
+
     /* Prepare a buffer to hold the arguments. */
-    unative_t *arg_buffer = malloc(6 * sizeof(unative_t), 0);
-
+    arg_buffer = malloc(6 * sizeof(unative_t), 0);
+
     /* On success, this will lock t->udebug.lock. */
-    int rc = _thread_op_begin(thread, false);
-    if (rc != EOK)
+    rc = _thread_op_begin(t, false);
+    if (rc != EOK) {
         return rc;
-
+    }
+
     /* Additionally we need to verify that we are inside a syscall. */
-    if ((thread->udebug.cur_event != UDEBUG_EVENT_SYSCALL_B) &&
-        (thread->udebug.cur_event != UDEBUG_EVENT_SYSCALL_E)) {
-        _thread_op_end(thread);
+    if (t->udebug.cur_event != UDEBUG_EVENT_SYSCALL_B &&
+        t->udebug.cur_event != UDEBUG_EVENT_SYSCALL_E) {
+        _thread_op_end(t);
         return EINVAL;
     }

     /* Copy to a local buffer before releasing the lock. */
-    memcpy(arg_buffer, thread->udebug.syscall_args, 6 * sizeof(unative_t));
-
-    _thread_op_end(thread);
-
+    memcpy(arg_buffer, t->udebug.syscall_args, 6 * sizeof(unative_t));
+
+    _thread_op_end(t);
+
     *buffer = arg_buffer;
     return 0;
@@ -495 +514 @@
  * call (as opposed to an exception). This is an implementation limit.
  *
- * @param thread Thread whose state is to be read.
- * @param buffer Place to store pointer to new buffer.
- *
- * @return EOK on success, ENOENT if @a t is invalid, EINVAL
- *         if thread is not in valid state, EBUSY if istate
- *         is not available.
- *
- */
-int udebug_regs_read(thread_t *thread, void **buffer)
-{
+ * @param t       Thread whose state is to be read.
+ * @param buffer  Place to store pointer to new buffer.
+ * @return EOK on success, ENOENT if @a t is invalid, EINVAL
+ *         if thread is not in valid state, EBUSY if istate
+ *         is not available.
+ */
+int udebug_regs_read(thread_t *t, void **buffer)
+{
+    istate_t *state, *state_buf;
+    int rc;
+
     /* Prepare a buffer to hold the data. */
-    istate_t *state_buf = malloc(sizeof(istate_t), 0);
-
+    state_buf = malloc(sizeof(istate_t), 0);
+
     /* On success, this will lock t->udebug.lock */
-    int rc = _thread_op_begin(thread, false);
-    if (rc != EOK)
+    rc = _thread_op_begin(t, false);
+    if (rc != EOK) {
         return rc;
-
-    istate_t *state = thread->udebug.uspace_state;
+    }
+
+    state = t->udebug.uspace_state;
     if (state == NULL) {
-        _thread_op_end(thread);
+        _thread_op_end(t);
         return EBUSY;
     }

     /* Copy to the allocated buffer */
     memcpy(state_buf, state, sizeof(istate_t));
-
-    _thread_op_end(thread);
-
+
+    _thread_op_end(t);
+
     *buffer = (void *) state_buf;
     return 0;
@@ -534 +555 @@
  * and a pointer to it is written into @a buffer.
  *
- * @param uspace_addr Address from where to start reading.
- * @param n           Number of bytes to read.
- * @param buffer      For storing a pointer to the allocated buffer.
- *
+ * @param uspace_addr  Address from where to start reading.
+ * @param n            Number of bytes to read.
+ * @param buffer       For storing a pointer to the allocated buffer.
  */
 int udebug_mem_read(unative_t uspace_addr, size_t n, void **buffer)
 {
+    void *data_buffer;
+    int rc;
+
     /* Verify task state */
     mutex_lock(&TASK->udebug.lock);

     if (TASK->udebug.dt_state != UDEBUG_TS_ACTIVE) {
         mutex_unlock(&TASK->udebug.lock);
         return EBUSY;
     }
-
-    void *data_buffer = malloc(n, 0);
-
-    /*
-     * NOTE: this is not strictly from a syscall... but that shouldn't
-     * be a problem
-     *
-     */
-    int rc = copy_from_uspace(data_buffer, (void *) uspace_addr, n);
-    mutex_unlock(&TASK->udebug.lock);
-
-    if (rc != 0)
-        return rc;
-
+
+    data_buffer = malloc(n, 0);
+
+    /* NOTE: this is not strictly from a syscall... but that shouldn't
+     * be a problem */
+    rc = copy_from_uspace(data_buffer, (void *)uspace_addr, n);
+    mutex_unlock(&TASK->udebug.lock);
+
+    if (rc != 0) return rc;
+
     *buffer = data_buffer;
     return 0;
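Apart from renaming the thread parameter between thread and t and hoisting local declarations, the substance of this changeset is the switch between two idioms for taking a spinlock that must be held with interrupts disabled. The fragment below is only an illustrative sketch of the two patterns as they appear in _thread_op_begin() above; the function names are made up for illustration, the fragment is not a buildable translation unit on its own (it assumes the kernel's thread_t, threads_lock, and the spinlock/irq_spinlock primitives shown in the diff), and each variant belongs to the revision in which threads_lock has the matching lock type.

/* r19f857a side: interrupt state is saved and restored explicitly
 * around plain spinlock operations. */
static void probe_thread_explicit_ipl(thread_t *t)
{
    ipl_t ipl = interrupts_disable();  /* save current state, disable interrupts */
    spinlock_lock(&threads_lock);

    /* Hand-over-hand: take t->lock, then drop threads_lock;
     * t->lock alone now keeps the thread alive. */
    spinlock_lock(&t->lock);
    spinlock_unlock(&threads_lock);

    /* ... inspect the thread under t->lock ... */

    spinlock_unlock(&t->lock);
    interrupts_restore(ipl);           /* restore the saved interrupt state */
}

/* rda1bafb side: the irq_spinlock_* wrappers bundle the interrupt
 * handling with the lock operation itself. */
static void probe_thread_irq_spinlock(thread_t *thread)
{
    /* 'true' corresponds to the interrupts_disable()/interrupts_restore()
     * pair of the explicit variant; no separate ipl_t is kept here. */
    irq_spinlock_lock(&threads_lock, true);

    /* Single call replacing the lock/unlock pair above: the held state
     * passes from threads_lock over to thread->lock. */
    irq_spinlock_exchange(&threads_lock, &thread->lock);

    /* ... inspect the thread under thread->lock ... */

    irq_spinlock_unlock(&thread->lock, true);
}

As the diff shows, the irq_spinlock variant removes the per-call-site ipl_t bookkeeping, which is what most of the added and removed lines in this file amount to.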