--- kernel/generic/src/udebug/udebug_ops.c (r1bfd3d3)
+++ kernel/generic/src/udebug/udebug_ops.c (r3698e44)
 /**
  * @file
- * @brief Udebug operations.
+ * @brief       Udebug operations.
  *
  * Udebug operations on tasks and threads are implemented here. The
[...]
  * when servicing udebug IPC messages.
  */
-
+
 #include <debug.h>
 #include <proc/task.h>
[...]
 #include <errno.h>
 #include <print.h>
-#include <str.h>
+#include <string.h>
 #include <syscall/copy.h>
 #include <ipc/ipc.h>
[...]
 #include <memstr.h>
 
-/** Prepare a thread for a debugging operation.
+/**
+ * Prepare a thread for a debugging operation.
  *
  * Simply put, return thread t with t->udebug.lock held,
[...]
  * the t->lock spinlock to the t->udebug.lock mutex.
  *
- * @param thread   Pointer, need not at all be valid.
- * @param being_go Required thread state.
+ * @param t             Pointer, need not at all be valid.
+ * @param being_go      Required thread state.
  *
  * Returns EOK if all went well, or an error code otherwise.
- *
- */
-static int _thread_op_begin(thread_t *thread, bool being_go)
-{
-        mutex_lock(&TASK->udebug.lock);
-
+ */
+static int _thread_op_begin(thread_t *t, bool being_go)
+{
+        task_id_t taskid;
+        ipl_t ipl;
+
+        taskid = TASK->taskid;
+
+        mutex_lock(&TASK->udebug.lock);
+
         /* thread_exists() must be called with threads_lock held */
-        irq_spinlock_lock(&threads_lock, true);
-
-        if (!thread_exists(thread)) {
-                irq_spinlock_unlock(&threads_lock, true);
+        ipl = interrupts_disable();
+        spinlock_lock(&threads_lock);
+
+        if (!thread_exists(t)) {
+                spinlock_unlock(&threads_lock);
+                interrupts_restore(ipl);
                 mutex_unlock(&TASK->udebug.lock);
                 return ENOENT;
         }
-
-        /* thread->lock is enough to ensure the thread's existence */
-        irq_spinlock_exchange(&threads_lock, &thread->lock);
-
-        /* Verify that 'thread' is a userspace thread. */
-        if ((thread->flags & THREAD_FLAG_USPACE) == 0) {
+
+        /* t->lock is enough to ensure the thread's existence */
+        spinlock_lock(&t->lock);
+        spinlock_unlock(&threads_lock);
+
+        /* Verify that 't' is a userspace thread. */
+        if ((t->flags & THREAD_FLAG_USPACE) == 0) {
                 /* It's not, deny its existence */
-                irq_spinlock_unlock(&thread->lock, true);
+                spinlock_unlock(&t->lock);
+                interrupts_restore(ipl);
                 mutex_unlock(&TASK->udebug.lock);
                 return ENOENT;
         }
-
+
         /* Verify debugging state. */
-        if (thread->udebug.active != true) {
+        if (t->udebug.active != true) {
                 /* Not in debugging session or undesired GO state */
-                irq_spinlock_unlock(&thread->lock, true);
+                spinlock_unlock(&t->lock);
+                interrupts_restore(ipl);
                 mutex_unlock(&TASK->udebug.lock);
                 return ENOENT;
         }
-
+
         /*
          * Since the thread has active == true, TASK->udebug.lock
          * is enough to ensure its existence and that active remains
         * true.
-         *
          */
-        irq_spinlock_unlock(&thread->lock, true);
-
+        spinlock_unlock(&t->lock);
+        interrupts_restore(ipl);
+
         /* Only mutex TASK->udebug.lock left. */
 
         /* Now verify that the thread belongs to the current task. */
-        if (thread->task != TASK) {
+        if (t->task != TASK) {
                 /* No such thread belonging this task*/
                 mutex_unlock(&TASK->udebug.lock);
                 return ENOENT;
         }
-
+
         /*
          * Now we need to grab the thread's debug lock for synchronization
          * of the threads stoppability/stop state.
-         *
          */
-        mutex_lock(&thread->udebug.lock);
-
+        mutex_lock(&t->udebug.lock);
+
         /* The big task mutex is no longer needed. */
         mutex_unlock(&TASK->udebug.lock);
-
-        if (thread->udebug.go != being_go) {
+
+        if (t->udebug.go != being_go) {
                 /* Not in debugging session or undesired GO state. */
-                mutex_unlock(&thread->udebug.lock);
+                mutex_unlock(&t->udebug.lock);
                 return EINVAL;
         }
-
-        /* Only thread->udebug.lock left. */
-
-        return EOK;  /* All went well. */
+
+        /* Only t->udebug.lock left. */
+
+        return EOK;     /* All went well. */
 }
 
 /** End debugging operation on a thread. */
-static void _thread_op_end(thread_t *thread)
-{
-        mutex_unlock(&thread->udebug.lock);
+static void _thread_op_end(thread_t *t)
+{
+        mutex_unlock(&t->udebug.lock);
 }
 
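The pair above is meant to bracket every per-thread operation in this file: on success _thread_op_begin() returns with t->udebug.lock held, and the caller releases it through _thread_op_end(). A minimal caller sketch of that pattern, mirroring what udebug_go() and the other operations below do (the operation body here is illustrative only, not part of the changeset):

    /* Illustrative only: the shape of a udebug per-thread operation. */
    static int example_thread_op(thread_t *t)
    {
            int rc;

            /* On success this leaves t->udebug.lock held. */
            rc = _thread_op_begin(t, false);
            if (rc != EOK)
                    return rc;

            /* ... safely inspect or modify t->udebug.* here ... */

            _thread_op_end(t);      /* Releases t->udebug.lock. */
            return EOK;
    }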
     
[...]
  * all the threads become stoppable (i.e. they can be considered stopped).
  *
- * @param call The BEGIN call we are servicing.
- *
- * @return 0 (OK, but not done yet), 1 (done) or negative error code.
- *
+ * @param call  The BEGIN call we are servicing.
+ * @return      0 (OK, but not done yet), 1 (done) or negative error code.
  */
 int udebug_begin(call_t *call)
 {
-        LOG("Debugging task %" PRIu64, TASK->taskid);
-
-        mutex_lock(&TASK->udebug.lock);
-
+        int reply;
+
+        thread_t *t;
+        link_t *cur;
+
+        LOG("Debugging task %llu", TASK->taskid);
+        mutex_lock(&TASK->udebug.lock);
+
         if (TASK->udebug.dt_state != UDEBUG_TS_INACTIVE) {
                 mutex_unlock(&TASK->udebug.lock);
                 return EBUSY;
         }
-
+
         TASK->udebug.dt_state = UDEBUG_TS_BEGINNING;
         TASK->udebug.begin_call = call;
         TASK->udebug.debugger = call->sender;
-
-        int reply;
-
+
         if (TASK->udebug.not_stoppable_count == 0) {
                 TASK->udebug.dt_state = UDEBUG_TS_ACTIVE;
                 TASK->udebug.begin_call = NULL;
-                reply = 1;  /* immediate reply */
-        } else
-                reply = 0;  /* no reply */
+                reply = 1; /* immediate reply */
+        } else {
+                reply = 0; /* no reply */
+        }
 
         /* Set udebug.active on all of the task's userspace threads. */
-
-        link_t *cur;
+
         for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
-                thread_t *thread = list_get_instance(cur, thread_t, th_link);
-
-                mutex_lock(&thread->udebug.lock);
-                if ((thread->flags & THREAD_FLAG_USPACE) != 0) {
-                        thread->udebug.active = true;
-                        mutex_unlock(&thread->udebug.lock);
-                        condvar_broadcast(&thread->udebug.active_cv);
-                } else
-                        mutex_unlock(&thread->udebug.lock);
-        }
-
+                t = list_get_instance(cur, thread_t, th_link);
+
+                mutex_lock(&t->udebug.lock);
+                if ((t->flags & THREAD_FLAG_USPACE) != 0) {
+                        t->udebug.active = true;
+                        mutex_unlock(&t->udebug.lock);
+                        condvar_broadcast(&t->udebug.active_cv);
+                } else {
+                        mutex_unlock(&t->udebug.lock);
+                }
+        }
+
         mutex_unlock(&TASK->udebug.lock);
         return reply;
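The 0/1 return value tells the IPC layer whether the BEGIN call can be answered right away. A hedged sketch of how a dispatcher might consume it (the wrapper name is hypothetical; only udebug_begin(), IPC_SET_RETVAL(), ipc_answer() and TASK->answerbox appear in the changeset itself):

    /* Hypothetical dispatcher fragment: a negative value is an error,
     * 1 means "answer now", 0 means the answer is sent later, once the
     * last thread becomes stoppable. */
    static void example_receive_begin(call_t *call)
    {
            int rc;

            rc = udebug_begin(call);
            if (rc < 0) {
                    IPC_SET_RETVAL(call->data, rc);
                    ipc_answer(&TASK->answerbox, call);
                    return;
            }

            if (rc == 1) {
                    /* Task was already stoppable; reply immediately. */
                    IPC_SET_RETVAL(call->data, 0);
                    ipc_answer(&TASK->answerbox, call);
            }
    }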
     
[...]
  *
  * Closes the debugging session for the current task.
- *
  * @return Zero on success or negative error code.
- *
  */
 int udebug_end(void)
 {
+        int rc;
+
         LOG("Task %" PRIu64, TASK->taskid);
-
-        mutex_lock(&TASK->udebug.lock);
-        int rc = udebug_task_cleanup(TASK);
-        mutex_unlock(&TASK->udebug.lock);
-
+
+        mutex_lock(&TASK->udebug.lock);
+        rc = udebug_task_cleanup(TASK);
+        mutex_unlock(&TASK->udebug.lock);
+
         return rc;
 }
[...]
  * Sets the event mask that determines which events are enabled.
  *
- * @param mask Or combination of events that should be enabled.
- *
- * @return Zero on success or negative error code.
- *
+ * @param mask  Or combination of events that should be enabled.
+ * @return      Zero on success or negative error code.
  */
 int udebug_set_evmask(udebug_evmask_t mask)
 {
         LOG("mask = 0x%x", mask);
-
-        mutex_lock(&TASK->udebug.lock);
-
+
+        mutex_lock(&TASK->udebug.lock);
+
         if (TASK->udebug.dt_state != UDEBUG_TS_ACTIVE) {
                 mutex_unlock(&TASK->udebug.lock);
                 return EINVAL;
         }
-
+
         TASK->udebug.evmask = mask;
         mutex_unlock(&TASK->udebug.lock);
-
+
         return 0;
 }
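Because the mask is an OR combination of event bits, a caller typically builds it from individual event constants before making this call. A small hedged sketch (UDEBUG_EVMASK is assumed here to be the usual event-to-bit macro and is not part of this changeset; the two syscall event constants do appear in udebug_args_read() below):

    /* Illustrative only: enable just the syscall entry/exit events. */
    static void example_enable_syscall_events(void)
    {
            udebug_evmask_t mask;

            mask = UDEBUG_EVMASK(UDEBUG_EVENT_SYSCALL_B) |
                UDEBUG_EVMASK(UDEBUG_EVENT_SYSCALL_E);
            (void) udebug_set_evmask(mask);
    }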
     
[...]
  * a debugging event or STOP occurs, at which point the thread loses GO.
  *
- * @param thread The thread to operate on (unlocked and need not be valid).
- * @param call   The GO call that we are servicing.
- *
- */
-int udebug_go(thread_t *thread, call_t *call)
-{
-        /* On success, this will lock thread->udebug.lock. */
-        int rc = _thread_op_begin(thread, false);
-        if (rc != EOK)
+ * @param t     The thread to operate on (unlocked and need not be valid).
+ * @param call  The GO call that we are servicing.
+ */
+int udebug_go(thread_t *t, call_t *call)
+{
+        int rc;
+
+        /* On success, this will lock t->udebug.lock. */
+        rc = _thread_op_begin(t, false);
+        if (rc != EOK) {
                 return rc;
-
-        thread->udebug.go_call = call;
-        thread->udebug.go = true;
-        thread->udebug.cur_event = 0;  /* none */
-
+        }
+
+        t->udebug.go_call = call;
+        t->udebug.go = true;
+        t->udebug.cur_event = 0;        /* none */
+
         /*
-         * Neither thread's lock nor threads_lock may be held during wakeup.
-         *
+         * Neither t's lock nor threads_lock may be held during wakeup.
          */
-        waitq_wakeup(&thread->udebug.go_wq, WAKEUP_FIRST);
-
-        _thread_op_end(thread);
-
+        waitq_wakeup(&t->udebug.go_wq, WAKEUP_FIRST);
+
+        _thread_op_end(t);
+
         return 0;
 }
[...]
  * can be considered stopped).
  *
- * @param thread The thread to operate on (unlocked and need not be valid).
- * @param call   The GO call that we are servicing.
- *
- */
-int udebug_stop(thread_t *thread, call_t *call)
-{
+ * @param t     The thread to operate on (unlocked and need not be valid).
+ * @param call  The GO call that we are servicing.
+ */
+int udebug_stop(thread_t *t, call_t *call)
+{
+        int rc;
+
         LOG("udebug_stop()");
-
+
         /*
-         * On success, this will lock thread->udebug.lock. Note that this
-         * makes sure the thread is not stopped.
-         *
+         * On success, this will lock t->udebug.lock. Note that this makes sure
+         * the thread is not stopped.
          */
-        int rc = _thread_op_begin(thread, true);
-        if (rc != EOK)
+        rc = _thread_op_begin(t, true);
+        if (rc != EOK) {
                 return rc;
-
+        }
+
         /* Take GO away from the thread. */
-        thread->udebug.go = false;
-
-        if (thread->udebug.stoppable != true) {
+        t->udebug.go = false;
+
+        if (t->udebug.stoppable != true) {
                 /* Answer will be sent when the thread becomes stoppable. */
-                _thread_op_end(thread);
+                _thread_op_end(t);
                 return 0;
         }
-
+
         /*
          * Answer GO call.
-         *
          */
-
+
         /* Make sure nobody takes this call away from us. */
-        call = thread->udebug.go_call;
-        thread->udebug.go_call = NULL;
-
+        call = t->udebug.go_call;
+        t->udebug.go_call = NULL;
+
         IPC_SET_RETVAL(call->data, 0);
         IPC_SET_ARG1(call->data, UDEBUG_EVENT_STOP);
-
+
         THREAD->udebug.cur_event = UDEBUG_EVENT_STOP;
-
-        _thread_op_end(thread);
-
+
+        _thread_op_end(t);
+
         mutex_lock(&TASK->udebug.lock);
         ipc_answer(&TASK->answerbox, call);
         mutex_unlock(&TASK->udebug.lock);
-
+
         return 0;
 }
[...]
  * a maximum size for the userspace buffer.
  *
- * @param buffer   The buffer for storing thread hashes.
- * @param buf_size Buffer size in bytes.
- * @param stored   The actual number of bytes copied will be stored here.
- * @param needed   Total number of hashes that could have been saved.
- *
+ * @param buffer        The buffer for storing thread hashes.
+ * @param buf_size      Buffer size in bytes.
+ * @param stored        The actual number of bytes copied will be stored here.
+ * @param needed        Total number of hashes that could have been saved.
  */
 int udebug_thread_read(void **buffer, size_t buf_size, size_t *stored,
     size_t *needed)
 {
+        thread_t *t;
+        link_t *cur;
+        unative_t tid;
+        size_t copied_ids;
+        size_t extra_ids;
+        ipl_t ipl;
+        unative_t *id_buffer;
+        int flags;
+        size_t max_ids;
+
         LOG("udebug_thread_read()");
-
+
         /* Allocate a buffer to hold thread IDs */
-        unative_t *id_buffer = malloc(buf_size + 1, 0);
-
-        mutex_lock(&TASK->udebug.lock);
-
+        id_buffer = malloc(buf_size + 1, 0);
+
+        mutex_lock(&TASK->udebug.lock);
+
         /* Verify task state */
         if (TASK->udebug.dt_state != UDEBUG_TS_ACTIVE) {
[...]
                 return EINVAL;
         }
-
-        irq_spinlock_lock(&TASK->lock, true);
-
+
+        ipl = interrupts_disable();
+        spinlock_lock(&TASK->lock);
         /* Copy down the thread IDs */
-
-        size_t max_ids = buf_size / sizeof(unative_t);
-        size_t copied_ids = 0;
-        size_t extra_ids = 0;
-
+
+        max_ids = buf_size / sizeof(unative_t);
+        copied_ids = 0;
+        extra_ids = 0;
+
         /* FIXME: make sure the thread isn't past debug shutdown... */
-        link_t *cur;
         for (cur = TASK->th_head.next; cur != &TASK->th_head; cur = cur->next) {
-                thread_t *thread = list_get_instance(cur, thread_t, th_link);
-
-                irq_spinlock_lock(&thread->lock, false);
-                int flags = thread->flags;
-                irq_spinlock_unlock(&thread->lock, false);
-
+                t = list_get_instance(cur, thread_t, th_link);
+
+                spinlock_lock(&t->lock);
+                flags = t->flags;
+                spinlock_unlock(&t->lock);
+
                 /* Not interested in kernel threads. */
                 if ((flags & THREAD_FLAG_USPACE) == 0)
                         continue;
-
+
                 if (copied_ids < max_ids) {
                         /* Using thread struct pointer as identification hash */
-                        id_buffer[copied_ids++] = (unative_t) thread;
-                } else
+                        tid = (unative_t) t;
+                        id_buffer[copied_ids++] = tid;
+                } else {
                         extra_ids++;
-        }
-
-        irq_spinlock_unlock(&TASK->lock, true);
-
-        mutex_unlock(&TASK->udebug.lock);
-
+                }
+        }
+
+        spinlock_unlock(&TASK->lock);
+        interrupts_restore(ipl);
+
+        mutex_unlock(&TASK->udebug.lock);
+
         *buffer = id_buffer;
         *stored = copied_ids * sizeof(unative_t);
         *needed = (copied_ids + extra_ids) * sizeof(unative_t);
-
+
         return 0;
 }
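The stored/needed pair lets the caller detect that its buffer was too small without losing information. A hedged caller sketch (the wrapper name is hypothetical; the retry policy is the one implied by the @param documentation above):

    /* Illustrative only: list thread hashes and detect truncation. */
    static int example_list_threads(size_t buf_size)
    {
            void *id_buffer;
            size_t stored, needed;
            int rc;

            rc = udebug_thread_read(&id_buffer, buf_size, &stored, &needed);
            if (rc != 0)
                    return rc;

            if (needed > stored) {
                    /* Buffer was too small; a caller would retry with a
                     * buffer of at least 'needed' bytes. */
            }

            free(id_buffer);
            return 0;
    }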
     
[...]
  * Also returns the size of the data.
  *
- * @param data      Place to store pointer to newly allocated block.
- * @param data_size Place to store size of the data.
- *
- * @return EOK.
- *
+ * @param data          Place to store pointer to newly allocated block.
+ * @param data_size     Place to store size of the data.
+ *
+ * @returns             EOK.
  */
 int udebug_name_read(char **data, size_t *data_size)
 {
-        size_t name_size = str_size(TASK->name) + 1;
-
+        size_t name_size;
+
+        name_size = str_size(TASK->name) + 1;
         *data = malloc(name_size, 0);
         *data_size = name_size;
-
+
         memcpy(*data, TASK->name, name_size);
-
+
         return 0;
 }
[...]
  * this function will fail with an EINVAL error code.
  *
- * @param thread Thread where call arguments are to be read.
- * @param buffer Place to store pointer to new buffer.
- *
- * @return EOK on success, ENOENT if @a t is invalid, EINVAL
- *         if thread state is not valid for this operation.
- *
- */
-int udebug_args_read(thread_t *thread, void **buffer)
-{
+ * @param t             Thread where call arguments are to be read.
+ * @param buffer        Place to store pointer to new buffer.
+ * @return              EOK on success, ENOENT if @a t is invalid, EINVAL
+ *                      if thread state is not valid for this operation.
+ */
+int udebug_args_read(thread_t *t, void **buffer)
+{
+        int rc;
+        unative_t *arg_buffer;
+
         /* Prepare a buffer to hold the arguments. */
-        unative_t *arg_buffer = malloc(6 * sizeof(unative_t), 0);
-
+        arg_buffer = malloc(6 * sizeof(unative_t), 0);
+
         /* On success, this will lock t->udebug.lock. */
-        int rc = _thread_op_begin(thread, false);
-        if (rc != EOK)
+        rc = _thread_op_begin(t, false);
+        if (rc != EOK) {
                 return rc;
-
+        }
+
         /* Additionally we need to verify that we are inside a syscall. */
-        if ((thread->udebug.cur_event != UDEBUG_EVENT_SYSCALL_B) &&
-            (thread->udebug.cur_event != UDEBUG_EVENT_SYSCALL_E)) {
-                _thread_op_end(thread);
+        if (t->udebug.cur_event != UDEBUG_EVENT_SYSCALL_B &&
+            t->udebug.cur_event != UDEBUG_EVENT_SYSCALL_E) {
+                _thread_op_end(t);
                 return EINVAL;
         }
-
+
         /* Copy to a local buffer before releasing the lock. */
-        memcpy(arg_buffer, thread->udebug.syscall_args, 6 * sizeof(unative_t));
-
-        _thread_op_end(thread);
-
+        memcpy(arg_buffer, t->udebug.syscall_args, 6 * sizeof(unative_t));
+
+        _thread_op_end(t);
+
         *buffer = arg_buffer;
         return 0;
[...]
  * call (as opposed to an exception). This is an implementation limit.
  *
- * @param thread Thread whose state is to be read.
- * @param buffer Place to store pointer to new buffer.
- *
- * @return EOK on success, ENOENT if @a t is invalid, EINVAL
- *         if thread is not in valid state, EBUSY if istate
- *         is not available.
- *
- */
-int udebug_regs_read(thread_t *thread, void **buffer)
-{
+ * @param t             Thread whose state is to be read.
+ * @param buffer        Place to store pointer to new buffer.
+ * @return              EOK on success, ENOENT if @a t is invalid, EINVAL
+ *                      if thread is not in valid state, EBUSY if istate
+ *                      is not available.
+ */
+int udebug_regs_read(thread_t *t, void **buffer)
+{
+        istate_t *state, *state_buf;
+        int rc;
+
         /* Prepare a buffer to hold the data. */
-        istate_t *state_buf = malloc(sizeof(istate_t), 0);
-
+        state_buf = malloc(sizeof(istate_t), 0);
+
         /* On success, this will lock t->udebug.lock */
-        int rc = _thread_op_begin(thread, false);
-        if (rc != EOK)
+        rc = _thread_op_begin(t, false);
+        if (rc != EOK) {
                 return rc;
-
-        istate_t *state = thread->udebug.uspace_state;
+        }
+
+        state = t->udebug.uspace_state;
         if (state == NULL) {
-                _thread_op_end(thread);
+                _thread_op_end(t);
                 return EBUSY;
         }
-
+
         /* Copy to the allocated buffer */
         memcpy(state_buf, state, sizeof(istate_t));
-
-        _thread_op_end(thread);
-
+
+        _thread_op_end(t);
+
         *buffer = (void *) state_buf;
         return 0;
[...]
  * and a pointer to it is written into @a buffer.
  *
- * @param uspace_addr Address from where to start reading.
- * @param n           Number of bytes to read.
- * @param buffer      For storing a pointer to the allocated buffer.
- *
+ * @param uspace_addr   Address from where to start reading.
+ * @param n             Number of bytes to read.
+ * @param buffer        For storing a pointer to the allocated buffer.
  */
 int udebug_mem_read(unative_t uspace_addr, size_t n, void **buffer)
 {
+        void *data_buffer;
+        int rc;
+
         /* Verify task state */
         mutex_lock(&TASK->udebug.lock);
-
+
         if (TASK->udebug.dt_state != UDEBUG_TS_ACTIVE) {
                 mutex_unlock(&TASK->udebug.lock);
                 return EBUSY;
         }
-
-        void *data_buffer = malloc(n, 0);
-
-        /*
-         * NOTE: this is not strictly from a syscall... but that shouldn't
-         * be a problem
-         *
-         */
-        int rc = copy_from_uspace(data_buffer, (void *) uspace_addr, n);
-        mutex_unlock(&TASK->udebug.lock);
-
-        if (rc != 0)
-                return rc;
-
+
+        data_buffer = malloc(n, 0);
+
+        /* NOTE: this is not strictly from a syscall... but that shouldn't
+         * be a problem */
+        rc = copy_from_uspace(data_buffer, (void *)uspace_addr, n);
+        mutex_unlock(&TASK->udebug.lock);
+
+        if (rc != 0) return rc;
+
         *buffer = data_buffer;
         return 0;
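The returned block is kernel-allocated, so whoever calls udebug_mem_read() owns it and must free it after use. A hedged caller sketch (the wrapper name and what is done with the data are illustrative; only udebug_mem_read() and free() are taken as given):

    /* Illustrative only: peek at n bytes of the debugged task's memory. */
    static int example_peek_memory(unative_t addr, size_t n)
    {
            void *buf;
            int rc;

            rc = udebug_mem_read(addr, n, &buf);
            if (rc != 0)
                    return rc;

            /* ... forward 'buf' to the debugger (e.g. via an IPC answer) ... */

            free(buf);
            return 0;
    }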