  • uspace/lib/usbhost/src/endpoint.c

Diff of r1d758fc (left) against rf527f58 (right): lines prefixed with '-' appear only in r1d758fc, lines prefixed with '+' only in rf527f58, and '…' marks unchanged lines folded by the viewer.

 /*
  * Copyright (c) 2011 Jan Vesely
- * Copyright (c) 2017 Ondrej Hlavaty <aearsis@eideo.cz>
  * All rights reserved.
  *
…
  */

+#include <usb/host/endpoint.h>
+
 #include <assert.h>
+#include <stdlib.h>
 #include <atomic.h>
-#include <mem.h>
-#include <stdlib.h>
-#include <str_error.h>
-#include <usb/debug.h>
-#include <usb/descriptor.h>
-#include <usb/host/hcd.h>
-#include <usb/host/utility.h>

-#include "usb_transfer_batch.h"
-#include "bus.h"
-
-#include "endpoint.h"
-
-/**
- * Initialize provided endpoint structure.
+/** Allocate ad initialize endpoint_t structure.
+ * @param address USB address.
+ * @param endpoint USB endpoint number.
+ * @param direction Communication direction.
+ * @param type USB transfer type.
+ * @param speed Communication speed.
+ * @param max_packet_size Maximum size of data packets.
+ * @param bw Required bandwidth.
+ * @return Pointer to initialized endpoint_t structure, NULL on failure.
  */
-void endpoint_init(endpoint_t *ep, device_t *dev, const usb_endpoint_descriptors_t *desc)
+endpoint_t * endpoint_create(usb_address_t address, usb_endpoint_t endpoint,
+    usb_direction_t direction, usb_transfer_type_t type, usb_speed_t speed,
+    size_t max_packet_size, unsigned packets, size_t bw,
+    usb_address_t tt_address, unsigned tt_p)
 {
-        memset(ep, 0, sizeof(endpoint_t));
-
-        assert(dev);
-        ep->device = dev;
-
-        atomic_set(&ep->refcnt, 0);
-        fibril_condvar_initialize(&ep->avail);
-
-        ep->endpoint = USB_ED_GET_EP(desc->endpoint);
-        ep->direction = USB_ED_GET_DIR(desc->endpoint);
-        ep->transfer_type = USB_ED_GET_TRANSFER_TYPE(desc->endpoint);
-        ep->max_packet_size = USB_ED_GET_MPS(desc->endpoint);
-        ep->packets_per_uframe = USB_ED_GET_ADD_OPPS(desc->endpoint) + 1;
-
-        /** Direction both is our construct never present in descriptors */
-        if (ep->transfer_type == USB_TRANSFER_CONTROL)
-                ep->direction = USB_DIRECTION_BOTH;
-
-        ep->max_transfer_size = ep->max_packet_size * ep->packets_per_uframe;
-        ep->transfer_buffer_policy = DMA_POLICY_STRICT;
-        ep->required_transfer_buffer_policy = DMA_POLICY_STRICT;
+        endpoint_t *instance = malloc(sizeof(endpoint_t));
+        if (instance) {
+                atomic_set(&instance->refcnt, 0);
+                instance->address = address;
+                instance->endpoint = endpoint;
+                instance->direction = direction;
+                instance->transfer_type = type;
+                instance->speed = speed;
+                instance->max_packet_size = max_packet_size;
+                instance->packets = packets;
+                instance->bandwidth = bw;
+                instance->toggle = 0;
+                instance->active = false;
+                instance->tt.address = tt_address;
+                instance->tt.port = tt_p;
+                instance->hc_data.data = NULL;
+                instance->hc_data.toggle_get = NULL;
+                instance->hc_data.toggle_set = NULL;
+                link_initialize(&instance->link);
+                fibril_mutex_initialize(&instance->guard);
+                fibril_condvar_initialize(&instance->avail);
+        }
+        return instance;
 }

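For orientation, a minimal usage sketch of the rf527f58 variant shown above (not part of the changeset). The enumerator values, the parameter choices and the way "no transaction translator" is encoded are assumptions for illustration only:

    #include <usb/host/endpoint.h>

    /* Sketch: a full-speed interrupt IN endpoint 1 on address 2, 8-byte
     * packets, 1 packet per (micro)frame, bandwidth 8; tt_address/tt_p of 0
     * are placeholders, since the "no TT" encoding is not shown in this diff. */
    static endpoint_t *example_make_ep(void)
    {
            endpoint_t *ep = endpoint_create(2, 1, USB_DIRECTION_IN,
                USB_TRANSFER_INTERRUPT, USB_SPEED_FULL, 8, 1, 8, 0, 0);
            if (ep)
                    endpoint_add_ref(ep);   /* hold a reference while the pointer is kept */
            return ep;
    }

    static void example_drop_ep(endpoint_t *ep)
    {
            endpoint_del_ref(ep);           /* the last reference calls endpoint_destroy() */
    }
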
-/**
- * Get the bus endpoint belongs to.
+/** Properly dispose of endpoint_t structure.
+ * @param instance endpoint_t structure.
  */
-static inline const bus_ops_t *get_bus_ops(endpoint_t *ep)
+void endpoint_destroy(endpoint_t *instance)
 {
-        return ep->device->bus->ops;
+        assert(instance);
+        assert(!instance->active);
+        assert(instance->hc_data.data == NULL);
+        free(instance);
 }

-/**
- * Increase the reference count on endpoint.
- */
-void endpoint_add_ref(endpoint_t *ep)
+void endpoint_add_ref(endpoint_t *instance)
 {
-        atomic_inc(&ep->refcnt);
+        atomic_inc(&instance->refcnt);
 }

-/**
- * Call the desctruction callback. Default behavior is to free the memory directly.
- */
-static inline void endpoint_destroy(endpoint_t *ep)
+void endpoint_del_ref(endpoint_t *instance)
 {
-        const bus_ops_t *ops = get_bus_ops(ep);
-        if (ops->endpoint_destroy) {
-                ops->endpoint_destroy(ep);
-        } else {
-                assert(ep->active_batch == NULL);
-
-                /* Assume mostly the eps will be allocated by malloc. */
-                free(ep);
-        }
+        if (atomic_predec(&instance->refcnt) == 0)
+                endpoint_destroy(instance);
 }

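The reference-counting contract is the same on both sides of the diff: a stored endpoint pointer holds a reference, and the last endpoint_del_ref() tears the structure down (via endpoint_destroy() on the rf527f58 side). A sketch of the pattern, assuming the standard HelenOS <adt/list.h> helpers and a driver-private list; the link member is the one initialized by endpoint_create() above:

    #include <adt/list.h>
    #include <usb/host/endpoint.h>

    static void example_store(list_t *endpoint_list, endpoint_t *ep)
    {
            endpoint_add_ref(ep);           /* the list now owns one reference */
            list_append(&ep->link, endpoint_list);
    }

    static void example_unstore(endpoint_t *ep)
    {
            list_remove(&ep->link);
            endpoint_del_ref(ep);           /* frees ep if this was the last reference */
    }
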
-/**
- * Decrease the reference count.
+/** Set device specific data and hooks.
+ * @param instance endpoint_t structure.
+ * @param data device specific data.
+ * @param toggle_get Hook to call when retrieving value of toggle bit.
+ * @param toggle_set Hook to call when setting the value of toggle bit.
  */
-void endpoint_del_ref(endpoint_t *ep)
+void endpoint_set_hc_data(endpoint_t *instance,
+    void *data, int (*toggle_get)(void *), void (*toggle_set)(void *, int))
 {
-        if (atomic_predec(&ep->refcnt) == 0) {
-                endpoint_destroy(ep);
-        }
+        assert(instance);
+        fibril_mutex_lock(&instance->guard);
+        instance->hc_data.data = data;
+        instance->hc_data.toggle_get = toggle_get;
+        instance->hc_data.toggle_set = toggle_set;
+        fibril_mutex_unlock(&instance->guard);
 }

-/**
- * Mark the endpoint as online. Supply a guard to be used for this endpoint
- * synchronization.
+/** Clear device specific data and hooks.
+ * @param instance endpoint_t structure.
+ * @note This function does not free memory pointed to by data pointer.
  */
-void endpoint_set_online(endpoint_t *ep, fibril_mutex_t *guard)
+void endpoint_clear_hc_data(endpoint_t *instance)
 {
-        ep->guard = guard;
-        ep->online = true;
+        assert(instance);
+        endpoint_set_hc_data(instance, NULL, NULL, NULL);
 }

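The hc_data hooks in the rf527f58 variant let a host controller driver keep the toggle state in its own hardware-specific structures. A sketch of registering such hooks against the signatures above; the example_hc_ep_t type and both callbacks are hypothetical:

    #include <usb/host/endpoint.h>

    /* Hypothetical per-endpoint HC bookkeeping that owns the real toggle bit. */
    typedef struct {
            int toggle;
    } example_hc_ep_t;

    static int example_toggle_get(void *arg)
    {
            return ((example_hc_ep_t *) arg)->toggle;
    }

    static void example_toggle_set(void *arg, int toggle)
    {
            ((example_hc_ep_t *) arg)->toggle = toggle;
    }

    static void example_register(endpoint_t *ep, example_hc_ep_t *hc_ep)
    {
            /* Pointer types match those taken by endpoint_set_hc_data(). */
            endpoint_set_hc_data(ep, hc_ep, example_toggle_get, example_toggle_set);
    }

Before the HC structure is freed, endpoint_clear_hc_data() resets the hooks, which also satisfies the hc_data.data == NULL assertion in endpoint_destroy().
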
-/**
- * Mark the endpoint as offline. All other fibrils waiting to activate this
- * endpoint will be interrupted.
+/** Mark the endpoint as active and block access for further fibrils.
+ * @param instance endpoint_t structure.
  */
-void endpoint_set_offline_locked(endpoint_t *ep)
+void endpoint_use(endpoint_t *instance)
 {
-        assert(ep);
-        assert(fibril_mutex_is_locked(ep->guard));
-
-        ep->online = false;
-        fibril_condvar_broadcast(&ep->avail);
+        assert(instance);
+        /* Add reference for active endpoint. */
+        endpoint_add_ref(instance);
+        fibril_mutex_lock(&instance->guard);
+        while (instance->active)
+                fibril_condvar_wait(&instance->avail, &instance->guard);
+        instance->active = true;
+        fibril_mutex_unlock(&instance->guard);
 }

-/**
- * Wait until a transfer finishes. Can be used even when the endpoint is
- * offline (and is interrupted by the endpoint going offline).
+/** Mark the endpoint as inactive and allow access for further fibrils.
+ * @param instance endpoint_t structure.
  */
-void endpoint_wait_timeout_locked(endpoint_t *ep, suseconds_t timeout)
+void endpoint_release(endpoint_t *instance)
 {
-        assert(ep);
-        assert(fibril_mutex_is_locked(ep->guard));
-
-        if (ep->active_batch == NULL)
-                return;
-
-        fibril_condvar_wait_timeout(&ep->avail, ep->guard, timeout);
+        assert(instance);
+        fibril_mutex_lock(&instance->guard);
+        instance->active = false;
+        fibril_mutex_unlock(&instance->guard);
+        fibril_condvar_signal(&instance->avail);
+        /* Drop reference for active endpoint. */
+        endpoint_del_ref(instance);
 }

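In rf527f58, exclusive access to an endpoint is taken with endpoint_use() and returned with endpoint_release(); the pair also holds a reference for the duration. A sketch of the intended calling pattern (illustrative; the actual transfer submission lies outside this file):

    static void example_one_transfer(endpoint_t *ep)
    {
            endpoint_use(ep);       /* blocks while another fibril has the endpoint active */

            /* ... submit the transfer and wait for its completion here ... */

            endpoint_release(ep);   /* wakes one waiter and drops the reference */
    }
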
-/**
- * Mark the endpoint as active and block access for further fibrils. If the
- * endpoint is already active, it will block on ep->avail condvar.
- *
- * Call only under endpoint guard. After you activate the endpoint and release
- * the guard, you must assume that particular transfer is already
- * finished/aborted.
- *
- * Activation and deactivation is not done by the library to maximize
- * performance. The HC might want to prepare some memory buffers prior to
- * interfering with other world.
- *
- * @param batch Transfer batch this endpoint is blocked by.
+/** Get the value of toggle bit.
+ * @param instance endpoint_t structure.
+ * @note Will use provided hook.
  */
-int endpoint_activate_locked(endpoint_t *ep, usb_transfer_batch_t *batch)
+int endpoint_toggle_get(endpoint_t *instance)
 {
-        assert(ep);
-        assert(batch);
-        assert(batch->ep == ep);
-        assert(ep->guard);
-        assert(fibril_mutex_is_locked(ep->guard));
-
-        while (ep->online && ep->active_batch != NULL)
-                fibril_condvar_wait(&ep->avail, ep->guard);
-
-        if (!ep->online)
-                return EINTR;
-
-        assert(ep->active_batch == NULL);
-        ep->active_batch = batch;
-        return EOK;
+        assert(instance);
+        fibril_mutex_lock(&instance->guard);
+        if (instance->hc_data.toggle_get)
+                instance->toggle =
+                    instance->hc_data.toggle_get(instance->hc_data.data);
+        const int ret = instance->toggle;
+        fibril_mutex_unlock(&instance->guard);
+        return ret;
 }

-/**
- * Mark the endpoint as inactive and allow access for further fibrils.
+/** Set the value of toggle bit.
+ * @param instance endpoint_t structure.
+ * @note Will use provided hook.
  */
-void endpoint_deactivate_locked(endpoint_t *ep)
+void endpoint_toggle_set(endpoint_t *instance, int toggle)
 {
-        assert(ep);
-        assert(fibril_mutex_is_locked(ep->guard));
-
-        ep->active_batch = NULL;
-        fibril_condvar_signal(&ep->avail);
-}
-
-/**
- * Initiate a transfer on an endpoint. Creates a transfer batch, checks the
- * bandwidth requirements and schedules the batch.
- *
- * @param endpoint Endpoint for which to send the batch
- */
-errno_t endpoint_send_batch(endpoint_t *ep, const transfer_request_t *req)
-{
-        assert(ep);
-        assert(req);
-
-        if (ep->transfer_type == USB_TRANSFER_CONTROL) {
-                usb_log_debug("%s %d:%d %zu/%zuB, setup %#016" PRIx64, req->name,
-                    req->target.address, req->target.endpoint,
-                    req->size, ep->max_packet_size,
-                    req->setup);
-        } else {
-                usb_log_debug("%s %d:%d %zu/%zuB", req->name,
-                    req->target.address, req->target.endpoint,
-                    req->size, ep->max_packet_size);
-        }
-
-        device_t * const device = ep->device;
-        if (!device) {
-                usb_log_warning("Endpoint detached");
-                return EAGAIN;
-        }
-
-        const bus_ops_t *ops = device->bus->ops;
-        if (!ops->batch_schedule) {
-                usb_log_error("HCD does not implement scheduler.");
-                return ENOTSUP;
-        }
-
-        size_t size = req->size;
-        /*
-         * Limit transfers with reserved bandwidth to the amount reserved.
-         * OUT transfers are rejected, IN can be just trimmed in advance.
-         */
-        if (size > ep->max_transfer_size &&
-            (ep->transfer_type == USB_TRANSFER_INTERRUPT
-             || ep->transfer_type == USB_TRANSFER_ISOCHRONOUS)) {
-                if (req->dir == USB_DIRECTION_OUT)
-                        return ENOSPC;
-                else
-                        size = ep->max_transfer_size;
-        }
-
-        /* Offline devices don't schedule transfers other than on EP0. */
-        if (!device->online && ep->endpoint > 0)
-                return EAGAIN;
-
-        usb_transfer_batch_t *batch = usb_transfer_batch_create(ep);
-        if (!batch) {
-                usb_log_error("Failed to create transfer batch.");
-                return ENOMEM;
-        }
-
-        batch->target = req->target;
-        batch->setup.packed = req->setup;
-        batch->dir = req->dir;
-        batch->size = size;
-        batch->offset = req->offset;
-        batch->dma_buffer = req->buffer;
-
-        dma_buffer_acquire(&batch->dma_buffer);
-
-        if (batch->offset != 0) {
-                usb_log_debug("A transfer with nonzero offset requested.");
-                usb_transfer_batch_bounce(batch);
-        }
-
-        if (usb_transfer_batch_bounce_required(batch))
-                usb_transfer_batch_bounce(batch);
-
-        batch->on_complete = req->on_complete;
-        batch->on_complete_data = req->arg;
-
-        const int ret = ops->batch_schedule(batch);
-        if (ret != EOK) {
-                usb_log_warning("Batch %p failed to schedule: %s", batch, str_error(ret));
-                usb_transfer_batch_destroy(batch);
-        }
-
-        return ret;
+        assert(instance);
+        assert(toggle == 0 || toggle == 1);
+        fibril_mutex_lock(&instance->guard);
+        instance->toggle = toggle;
+        if (instance->hc_data.toggle_set)
+                instance->hc_data.toggle_set(instance->hc_data.data, toggle);
+        fibril_mutex_unlock(&instance->guard);
 }

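On the r1d758fc side the same serialization is done with endpoint_activate_locked() and endpoint_deactivate_locked() under the guard supplied by endpoint_set_online(), as the removed comment block above describes. A sketch of that pattern (illustrative; the batch is assumed to have been created for this endpoint, and how completion is signalled is simplified):

    static errno_t example_run_batch(endpoint_t *ep, usb_transfer_batch_t *batch)
    {
            /* ep->guard was supplied earlier via endpoint_set_online(). */
            fibril_mutex_lock(ep->guard);

            /* Returns EINTR if the endpoint went offline while waiting. */
            const int rc = endpoint_activate_locked(ep, batch);
            if (rc != EOK) {
                    fibril_mutex_unlock(ep->guard);
                    return rc;
            }
            fibril_mutex_unlock(ep->guard);

            /* ... the host controller processes the batch; once it completes ... */

            fibril_mutex_lock(ep->guard);
            endpoint_deactivate_locked(ep); /* clears active_batch, wakes one waiter */
            fibril_mutex_unlock(ep->guard);

            return EOK;
    }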