Changeset 1d758fc in mainline
- Timestamp: 2018-02-12T10:11:47Z (7 years ago)
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: 5fe3f954
- Parents: 2f762a7
- git-author: Ondřej Hlavatý <aearsis@…> (2018-02-05 03:28:50)
- git-committer: Ondřej Hlavatý <aearsis@…> (2018-02-12 10:11:47)
- Location: uspace
- Files: 27 edited
Legend:
- ' ' Unmodified
- '+' Added
- '-' Removed
uspace/drv/bus/usb/ehci/ehci_batch.c (r2f762a7 → r1d758fc)

         : 0;

-    const size_t size = ehci_batch->base.buffer_size;
+    const size_t size = ehci_batch->base.size;

     /* Add TD left over by the previous transfer */
…
     /* Assume all data got through */
-    ehci_batch->base.transferred_size = ehci_batch->base.buffer_size;
+    ehci_batch->base.transferred_size = ehci_batch->base.size;

     /* Check all TDs */
…
     }

-    assert(ehci_batch->base.transferred_size <= ehci_batch->base.buffer_size);
+    assert(ehci_batch->base.transferred_size <= ehci_batch->base.size);

     /* Clear TD pointers */
…
     /* Data stage */
     unsigned td_current = 1;
-    size_t remain_size = ehci_batch->base.buffer_size;
+    size_t remain_size = ehci_batch->base.size;
     uintptr_t buffer = dma_buffer_phys(&ehci_batch->base.dma_buffer,
         ehci_batch->data_buffer);
…
     size_t td_current = 0;
-    size_t remain_size = ehci_batch->base.buffer_size;
+    size_t remain_size = ehci_batch->base.size;
     uintptr_t buffer = dma_buffer_phys(&ehci_batch->base.dma_buffer,
         ehci_batch->data_buffer);
uspace/drv/bus/usb/ehci/ehci_rh.c (r2f762a7 → r1d758fc)

     batch->error = virthub_base_request(&instance->base, batch->target,
         batch->dir, (void*) batch->setup.buffer,
-        batch->dma_buffer.virt, batch->buffer_size,
+        batch->dma_buffer.virt, batch->size,
         &batch->transferred_size);
     if (batch->error == ENAK) {
…
     batch->error = virthub_base_request(&instance->base, batch->target,
         batch->dir, (void*) batch->setup.buffer,
-        batch->dma_buffer.virt, batch->buffer_size,
+        batch->dma_buffer.virt, batch->size,
         &batch->transferred_size);
     usb_transfer_batch_finish(batch);
uspace/drv/bus/usb/ohci/ohci_batch.c (r2f762a7 → r1d758fc)

         return ENOTSUP;

-    ohci_batch->td_count = (usb_batch->buffer_size + OHCI_TD_MAX_TRANSFER - 1)
+    ohci_batch->td_count = (usb_batch->size + OHCI_TD_MAX_TRANSFER - 1)
         / OHCI_TD_MAX_TRANSFER;
     /* Control transfer need Setup and Status stage */
…
     /* Assume all data got through */
-    usb_batch->transferred_size = usb_batch->buffer_size;
+    usb_batch->transferred_size = usb_batch->size;

     /* Check all TDs */
…
         }
     }
-    assert(usb_batch->transferred_size <= usb_batch->buffer_size);
+    assert(usb_batch->transferred_size <= usb_batch->size);

     /* Make sure that we are leaving the right TD behind */
…
     size_t td_current = 1;
     const char* buffer = ohci_batch->data_buffer;
-    size_t remain_size = ohci_batch->base.buffer_size;
+    size_t remain_size = ohci_batch->base.size;
     while (remain_size > 0) {
         const size_t transfer_size =
…
     size_t td_current = 0;
-    size_t remain_size = ohci_batch->base.buffer_size;
+    size_t remain_size = ohci_batch->base.size;
     char *buffer = ohci_batch->data_buffer;
     while (remain_size > 0) {
uspace/drv/bus/usb/ohci/ohci_rh.c (r2f762a7 → r1d758fc)

     batch->error = virthub_base_request(&instance->base, batch->target,
         batch->dir, &batch->setup.packet,
-        batch->dma_buffer.virt, batch->buffer_size, &batch->transferred_size);
+        batch->dma_buffer.virt, batch->size, &batch->transferred_size);
     if (batch->error == ENAK) {
         /* Lock the HC guard */
…
     batch->error = virthub_base_request(&instance->base, batch->target,
         batch->dir, &batch->setup.packet,
-        batch->dma_buffer.virt, batch->buffer_size, &batch->transferred_size);
+        batch->dma_buffer.virt, batch->size, &batch->transferred_size);
     usb_transfer_batch_finish(batch);
 }
uspace/drv/bus/usb/uhci/uhci_batch.c (r2f762a7 → r1d758fc)

     usb_transfer_batch_t *usb_batch = &uhci_batch->base;

-    uhci_batch->td_count = (usb_batch->buffer_size + usb_batch->ep->max_packet_size - 1)
+    uhci_batch->td_count = (usb_batch->size + usb_batch->ep->max_packet_size - 1)
         / usb_batch->ep->max_packet_size;
…
     }

-    assert(batch->transferred_size <= batch->buffer_size);
+    assert(batch->transferred_size <= batch->size);

     return true;
…
     size_t td = 0;
-    size_t remain_size = uhci_batch->base.buffer_size;
+    size_t remain_size = uhci_batch->base.size;
     char *buffer = uhci_transfer_batch_data_buffer(uhci_batch);
…
     size_t td = 1;
     unsigned toggle = 1;
-    size_t remain_size = uhci_batch->base.buffer_size;
+    size_t remain_size = uhci_batch->base.size;
     char *buffer = uhci_transfer_batch_data_buffer(uhci_batch);
uspace/drv/bus/usb/uhci/uhci_rh.c (r2f762a7 → r1d758fc)

     batch->error = virthub_base_request(&instance->base, batch->target,
         batch->dir, (void*) batch->setup.buffer,
-        batch->dma_buffer.virt, batch->buffer_size, &batch->transferred_size);
+        batch->dma_buffer.virt, batch->size, &batch->transferred_size);
     if (batch->error == ENAK)
         async_usleep(instance->base.endpoint_descriptor.poll_interval * 1000);
uspace/drv/bus/usb/vhc/transfer.c (r2f762a7 → r1d758fc)

         rc = usbvirt_control_read(dev,
             batch->setup.buffer, USB_SETUP_PACKET_SIZE,
-            batch->dma_buffer.virt, batch->buffer_size,
+            batch->dma_buffer.virt, batch->size,
             actual_data_size);
     } else {
…
         rc = usbvirt_control_write(dev,
             batch->setup.buffer, USB_SETUP_PACKET_SIZE,
-            batch->dma_buffer.virt, batch->buffer_size);
+            batch->dma_buffer.virt, batch->size);
         }
     } else {
…
         rc = usbvirt_data_in(dev, batch->ep->transfer_type,
             batch->ep->endpoint,
-            batch->dma_buffer.virt, batch->buffer_size,
+            batch->dma_buffer.virt, batch->size,
             actual_data_size);
     } else {
…
         rc = usbvirt_data_out(dev, batch->ep->transfer_type,
             batch->ep->endpoint,
-            batch->dma_buffer.virt, batch->buffer_size);
+            batch->dma_buffer.virt, batch->size);
         }
     }
…
         rc = usbvirt_ipc_send_control_read(sess,
             batch->setup.buffer, USB_SETUP_PACKET_SIZE,
-            batch->dma_buffer.virt, batch->buffer_size,
+            batch->dma_buffer.virt, batch->size,
             actual_data_size);
     } else {
…
         rc = usbvirt_ipc_send_control_write(sess,
             batch->setup.buffer, USB_SETUP_PACKET_SIZE,
-            batch->dma_buffer.virt, batch->buffer_size);
+            batch->dma_buffer.virt, batch->size);
         }
     } else {
…
         rc = usbvirt_ipc_send_data_in(sess, batch->ep->endpoint,
             batch->ep->transfer_type,
-            batch->dma_buffer.virt, batch->buffer_size,
+            batch->dma_buffer.virt, batch->size,
             actual_data_size);
     } else {
…
         rc = usbvirt_ipc_send_data_out(sess, batch->ep->endpoint,
             batch->ep->transfer_type,
-            batch->dma_buffer.virt, batch->buffer_size);
+            batch->dma_buffer.virt, batch->size);
         }
     }
uspace/drv/bus/usb/xhci/commands.c (r2f762a7 → r1d758fc)

     xhci_trb_clean(&cmd->_header.trb);

-    TRB_SET_ICTX(cmd->_header.trb, cmd->input_ctx.phys);
+    const uintptr_t phys = dma_buffer_phys_base(&cmd->input_ctx);
+    TRB_SET_ICTX(cmd->_header.trb, phys);

     /**
…
     assert(dma_buffer_is_set(&cmd->input_ctx));

-    TRB_SET_ICTX(cmd->_header.trb, cmd->input_ctx.phys);
+    const uintptr_t phys = dma_buffer_phys_base(&cmd->input_ctx);
+    TRB_SET_ICTX(cmd->_header.trb, phys);
 }
…
     xhci_trb_clean(&cmd->_header.trb);

-    TRB_SET_ICTX(cmd->_header.trb, cmd->input_ctx.phys);
+    const uintptr_t phys = dma_buffer_phys_base(&cmd->input_ctx);
+    TRB_SET_ICTX(cmd->_header.trb, phys);

     TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_EVALUATE_CONTEXT_CMD);
…
     xhci_trb_clean(&cmd->_header.trb);

-    TRB_SET_ICTX(cmd->_header.trb, cmd->bandwidth_ctx.phys);
+    const uintptr_t phys = dma_buffer_phys_base(&cmd->input_ctx);
+    TRB_SET_ICTX(cmd->_header.trb, phys);

     TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_GET_PORT_BANDWIDTH_CMD);
uspace/drv/bus/usb/xhci/endpoint.c (r2f762a7 → r1d758fc)

         goto err;

-    /* Driver can handle non-contiguous buffers */
-    ep->transfer_buffer_policy &= ~DMA_POLICY_CONTIGUOUS;
-
-    /* Driver can handle buffers crossing boundaries */
-    ep->transfer_buffer_policy &= ~DMA_POLICY_NOT_CROSSING;
+    unsigned flags = -1U;

     /* Some xHCs can handle 64-bit addresses */
     xhci_bus_t *bus = bus_to_xhci_bus(ep->device->bus);
     if (bus->hc->ac64)
-        ep->transfer_buffer_policy &= ~DMA_POLICY_4GiB;
+        flags &= ~DMA_POLICY_4GiB;
+
+    /* xHCI works best if it can fit 65k transfers in one TRB */
+    ep->transfer_buffer_policy = dma_policy_create(flags, 1 << 16);
+
+    /* But actually can do full scatter-gather. */
+    ep->required_transfer_buffer_policy = dma_policy_create(flags, PAGE_SIZE);

     return EOK;
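The net effect of this hunk is that the xHCI endpoint advertises 64 KiB physical chunks as the optimal buffer layout while only requiring page-sized chunks. The following standalone sketch is not part of the changeset; PAGE_SIZE and dma_policy_create() are local stand-ins mirroring the definitions introduced in uspace/lib/usb below, assuming 4 KiB pages. It shows the two resulting policies and the subset invariant that bus_endpoint_add() asserts later in this changeset:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins mirroring the patched dma_buffer.h (assumed 4 KiB pages). */
    #define PAGE_SIZE 4096UL
    #define DMA_POLICY_FLAGS_MASK      (PAGE_SIZE - 1)
    #define DMA_POLICY_CHUNK_SIZE_MASK (~DMA_POLICY_FLAGS_MASK)
    #define DMA_POLICY_4GiB            (1 << 0)

    /* Local mirror of dma_policy_create() from this changeset. */
    static uintptr_t dma_policy_create(unsigned flags, size_t chunk_size)
    {
        return ((chunk_size - 1) & DMA_POLICY_CHUNK_SIZE_MASK)
            | (flags & DMA_POLICY_FLAGS_MASK);
    }

    int main(void)
    {
        unsigned flags = -1U; /* start fully restrictive, as the driver does */
        int ac64 = 1;         /* pretend this xHC handles 64-bit addresses */

        if (ac64)
            flags &= ~DMA_POLICY_4GiB;

        uintptr_t optimal = dma_policy_create(flags, 1 << 16);    /* 64 KiB hint */
        uintptr_t required = dma_policy_create(flags, PAGE_SIZE); /* hard floor */

        /* bus_endpoint_add() asserts exactly this subset relation. */
        assert((required & ~optimal) == 0);
        printf("optimal=%#lx required=%#lx\n",
            (unsigned long) optimal, (unsigned long) required);
        return 0;
    }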
uspace/drv/bus/usb/xhci/hc.c (r2f762a7 → r1d758fc)

         return ETIMEOUT;

-    XHCI_REG_WR(hc->op_regs, XHCI_OP_DCBAAP, hc->dcbaa_dma.phys);
+    uintptr_t dcbaa_phys = dma_buffer_phys_base(&hc->dcbaa_dma);
+    XHCI_REG_WR(hc->op_regs, XHCI_OP_DCBAAP, dcbaa_phys);
     XHCI_REG_WR(hc->op_regs, XHCI_OP_MAX_SLOTS_EN, hc->max_slots);
…
     XHCI_REG_WR(intr0, XHCI_INTR_ERSTSZ, hc->event_ring.segment_count);
     XHCI_REG_WR(intr0, XHCI_INTR_ERDP, hc->event_ring.dequeue_ptr);
-    XHCI_REG_WR(intr0, XHCI_INTR_ERSTBA, hc->event_ring.erst.phys);
+
+    const uintptr_t erstba_phys = dma_buffer_phys_base(&hc->event_ring.erst);
+    XHCI_REG_WR(intr0, XHCI_INTR_ERSTBA, erstba_phys);

     if (hc->base.irq_cap > 0) {
…
     if (err == EOK) {
         dev->slot_id = cmd.slot_id;
-        hc->dcbaa[dev->slot_id] = host2xhci(64, dev->dev_ctx.phys);
+        hc->dcbaa[dev->slot_id] =
+            host2xhci(64, dma_buffer_phys_base(&dev->dev_ctx));
     }
uspace/drv/bus/usb/xhci/isoch.c (r2f762a7 → r1d758fc)

     xhci_trb_clean(&trb);

-    trb.parameter = it->data.phys;
+    trb.parameter = host2xhci(64, dma_buffer_phys_base(&it->data));
     TRB_CTRL_SET_XFER_LEN(trb, it->size);
     TRB_CTRL_SET_TD_SIZE(trb, 0);
…
     /* This shall be already checked by endpoint */
-    assert(transfer->batch.buffer_size <= ep->base.max_transfer_size);
+    assert(transfer->batch.size <= ep->base.max_transfer_size);

     fibril_mutex_lock(&isoch->guard);
…
     /* Prepare the transfer. */
-    it->size = transfer->batch.buffer_size;
+    it->size = transfer->batch.size;
     memcpy(it->data.virt, transfer->batch.dma_buffer.virt, it->size);
     it->state = ISOCH_FILLED;
…
     xhci_isoch_t * const isoch = ep->isoch;

-    if (transfer->batch.buffer_size < ep->base.max_transfer_size) {
+    if (transfer->batch.size < ep->base.max_transfer_size) {
         usb_log_error("Cannot schedule an undersized isochronous transfer.");
         return ELIMIT;
uspace/drv/bus/usb/xhci/scratchpad.c (r2f762a7 → r1d758fc)

     memset(hc->scratchpad_array.virt, 0, size);

-    uint64_t phys_begin = hc->scratchpad_array.phys + array_size;
+    const char *base = hc->scratchpad_array.virt + array_size;
     uint64_t *array = hc->scratchpad_array.virt;

-    for (unsigned i = 0; i < num_bufs; ++i)
-        array[i] = host2xhci(64, phys_begin + i * PAGE_SIZE);
+    for (unsigned i = 0; i < num_bufs; ++i) {
+        array[i] = host2xhci(64, dma_buffer_phys(&hc->scratchpad_array,
+            base + i * PAGE_SIZE));
+    }

-    hc->dcbaa[0] = host2xhci(64, hc->scratchpad_array.phys);
+    hc->dcbaa[0] = host2xhci(64, dma_buffer_phys_base(&hc->scratchpad_array));

     usb_log_debug("Allocated %d scratchpad buffers.", num_bufs);
uspace/drv/bus/usb/xhci/streams.c (r2f762a7 → r1d758fc)

     data->secondary_stream_ctx_array = data->secondary_stream_ctx_dma.virt;

-    XHCI_STREAM_DEQ_PTR_SET(*ctx, data->secondary_stream_ctx_dma.phys);
+    XHCI_STREAM_DEQ_PTR_SET(*ctx, dma_buffer_phys_base(&data->secondary_stream_ctx_dma));
     XHCI_STREAM_SCT_SET(*ctx, fnzb32(count) + 1);
…
     XHCI_EP_MAX_P_STREAMS_SET(*ctx, pstreams);
-    XHCI_EP_TR_DPTR_SET(*ctx, xhci_ep->primary_stream_ctx_dma.phys);
+    XHCI_EP_TR_DPTR_SET(*ctx, dma_buffer_phys_base(&xhci_ep->primary_stream_ctx_dma));
     XHCI_EP_LSA_SET(*ctx, lsa);
 }
uspace/drv/bus/usb/xhci/transfers.c (r2f762a7 → r1d758fc)

 static int calculate_trb_count(xhci_transfer_t *transfer)
 {
-    const size_t size = transfer->batch.buffer_size;
+    const size_t size = transfer->batch.size;
     return (size + PAGE_SIZE - 1) / PAGE_SIZE;
 }
…
     int stage_dir = REQUEST_TYPE_IS_DEVICE_TO_HOST(setup->request_type)
         ? STAGE_IN : STAGE_OUT;
-    size_t remaining = transfer->batch.buffer_size;
+    size_t remaining = transfer->batch.size;

     for (size_t i = 0; i < buffer_count; ++i) {
…
     const size_t buffer_count = calculate_trb_count(transfer);
     xhci_trb_t trbs[buffer_count];
-    size_t remaining = transfer->batch.buffer_size;
+    size_t remaining = transfer->batch.size;

     for (size_t i = 0; i < buffer_count; ++i) {
…
     const size_t buffer_count = calculate_trb_count(transfer);
     xhci_trb_t trbs[buffer_count + 1];
-    size_t remaining = transfer->batch.buffer_size;
+    size_t remaining = transfer->batch.size;

     for (size_t i = 0; i < buffer_count; ++i) {
…
     const size_t buffer_count = calculate_trb_count(transfer);
     xhci_trb_t trbs[buffer_count];
-    size_t remaining = transfer->batch.buffer_size;
+    size_t remaining = transfer->batch.size;

     for (size_t i = 0; i < buffer_count; ++i) {
…
     case XHCI_TRBC_SUCCESS:
         batch->error = EOK;
-        batch->transferred_size = batch->buffer_size - TRB_TRANSFER_LENGTH(*trb);
+        batch->transferred_size = batch->size - TRB_TRANSFER_LENGTH(*trb);
         break;
…
     }

-    assert(batch->transferred_size <= batch->buffer_size);
+    assert(batch->transferred_size <= batch->size);

     usb_transfer_batch_finish(batch);
uspace/drv/bus/usb/xhci/trb_ring.c (r2f762a7 → r1d758fc)

 static errno_t trb_segment_alloc(trb_segment_t **segment)
 {
-    dma_buffer_t dbuf;
-
-    const errno_t err = dma_buffer_alloc(&dbuf, PAGE_SIZE);
+    *segment = AS_AREA_ANY;
+    uintptr_t phys;
+
+    const int err = dmamem_map_anonymous(PAGE_SIZE,
+        DMAMEM_4GiB, AS_AREA_READ | AS_AREA_WRITE, 0,
+        &phys, (void **) segment);
     if (err)
         return err;

-    *segment = dbuf.virt;
     memset(*segment, 0, PAGE_SIZE);
-    (*segment)->phys = dbuf.phys;
+    (*segment)->phys = phys;
     usb_log_debug("Allocated new ring segment.");
     return EOK;
…
 static void trb_segment_free(trb_segment_t *segment)
 {
-    dma_buffer_t dbuf = { .virt = segment, .phys = segment->phys };
-    dma_buffer_free(&dbuf);
+    dmamem_unmap_anonymous(segment);
 }
uspace/lib/drv/generic/remote_usbhc.c (r2f762a7 → r1d758fc)

         ? AS_AREA_WRITE : AS_AREA_READ;

-    const errno_t ret = async_share_out_start(exch, req->base, flags);
+    const errno_t ret = async_share_out_start(exch, req->buffer.virt, flags);
     if (ret != EOK) {
         async_forget(opening_request);
…
         return;
     }
-    if (trans->request.base != NULL) {
-        as_area_destroy(trans->request.base);
+    if (trans->request.buffer.virt != NULL) {
+        as_area_destroy(trans->request.buffer.virt);
     }
…
     }

-    if ((err = async_share_out_finalize(data_callid, &trans->request.base)))
+    if ((err = async_share_out_finalize(data_callid, &trans->request.buffer.virt)))
         return err;

     /*
-     * As we're going to check the mapping, we must make sure the memory is
-     * actually mapped. We must do it right now, because the area might be
-     * read-only or write-only, and we may be unsure later.
+     * As we're going to get physical addresses of the mapping, we must make
+     * sure the memory is actually mapped. We must do it right now, because
+     * the area might be read-only or write-only, and we may be unsure
+     * later.
      */
     if (flags & AS_AREA_READ) {
         char foo = 0;
-        volatile const char *buf = trans->request.base + trans->request.offset;
+        volatile const char *buf = trans->request.buffer.virt + trans->request.offset;
         for (size_t i = 0; i < size; i += PAGE_SIZE)
             foo += buf[i];
     } else {
-        volatile char *buf = trans->request.base + trans->request.offset;
+        volatile char *buf = trans->request.buffer.virt + trans->request.offset;
         for (size_t i = 0; i < size; i += PAGE_SIZE)
             buf[i] = 0xff;
…
     } else {
         /* The value was valid on the other side, for us, its garbage. */
-        trans->request.base = NULL;
+        trans->request.buffer.virt = NULL;
     }
uspace/lib/drv/include/usbhc_iface.h (r2f762a7 → r1d758fc)

 // FIXME: DMA buffers shall be part of libdrv anyway.
-typedef unsigned dma_policy_t;
+typedef uintptr_t dma_policy_t;
+
+typedef struct dma_buffer {
+    void *virt;
+    dma_policy_t policy;
+} dma_buffer_t;

 typedef struct usb_pipe_desc {
…
     /**
-     * Base address of the buffer to share. Must be at least offset + size
-     * large. Is patched after being transmitted over IPC, so the pointer is
-     * still valid.
-     *
-     * Note that offset might be actually more than PAGE_SIZE.
+     * The DMA buffer to share. Must be at least offset + size large. Is
+     * patched after being transmitted over IPC, so the pointer is still
+     * valid.
      */
-    void *base;
+    dma_buffer_t buffer;
     size_t offset;  /**< Offset to the buffer */
     size_t size;    /**< Requested size. */
-    dma_policy_t buffer_policy;  /**< Properties of the buffer. */
 } usbhc_iface_transfer_request_t;
uspace/lib/usb/include/usb/dma_buffer.h (r2f762a7 → r1d758fc)

  * shared through IPC).
  *
- * Note that although allocated memory is always page-aligned, the buffer itself
- * may be only a part of it, justifying the existence of page-alignment and
- * page-crossing flags.
+ * Currently, it is possible to allocate either completely contiguous buffers
+ * (with dma_map_anonymous) or arbitrary memory (with as_area_create). Shall the
+ * kernel be updated, this is a subject of major optimization of memory usage.
+ * The other way to do it without the kernel is building a userspace IO vector
+ * in a similar way to how QEMU does it.
  *
- * Also, currently the buffers that are allocated are always contiguous and
- * page-aligned, regardless of whether the policy requires it. We blindly
- * believe this fact in dma_buffer_phys, which will yield wrong results if the
- * buffer is not contiguous.
+ * The structures themselves are defined in usbhc_iface, because they need to be
+ * passed through IPC.
  */
 #ifndef LIB_USB_DMA_BUFFER
 #define LIB_USB_DMA_BUFFER

+#include <as.h>
+#include <bitops.h>
+#include <errno.h>
 #include <stdint.h>
 #include <stdlib.h>
 #include <usbhc_iface.h>
-#include <errno.h>

-#define DMA_POLICY_4GiB         (1<<0)  /**< Must use only 32-bit addresses */
-#define DMA_POLICY_PAGE_ALIGNED (1<<1)  /**< The first pointer must be page-aligned */
-#define DMA_POLICY_CONTIGUOUS   (1<<2)  /**< Pages must follow each other physically */
-#define DMA_POLICY_NOT_CROSSING (1<<3)  /**< Buffer must not cross page boundary. (Implies buffer is no larger than page). */
+/**
+ * The DMA policy describes properties of the buffer. It is used in two
+ * different contexts. Either it represents requirements, which shall be
+ * satisfied to avoid copying the buffer to a more strict one. Or, it is the
+ * actual property of the buffer, which can be more strict than requested. It
+ * always holds that more bits set means a more restrictive policy, and that by
+ * computing a bitwise OR one gets the restriction that holds for both.
+ *
+ * The high bits of a DMA policy represent physical contiguity. If bit i is
+ * set, it means that chunks of a size 2^(i+1) are contiguous in memory. It
+ * shall never happen that bit i > j is set when j is not.
+ *
+ * The previous applies for i >= PAGE_WIDTH. Lower bits are used as bit flags.
+ */
+#define DMA_POLICY_FLAGS_MASK       (PAGE_SIZE - 1)
+#define DMA_POLICY_CHUNK_SIZE_MASK  (~DMA_POLICY_FLAGS_MASK)

-#define DMA_POLICY_STRICT   (-1U)
-#define DMA_POLICY_DEFAULT  DMA_POLICY_STRICT
+#define DMA_POLICY_4GiB     (1<<0)  /**< Must use only 32-bit addresses */

-typedef struct dma_buffer {
-    void *virt;
-    uintptr_t phys;
-} dma_buffer_t;
+#define DMA_POLICY_STRICT   (-1UL)
+#define DMA_POLICY_DEFAULT  DMA_POLICY_STRICT
+
+extern dma_policy_t dma_policy_create(unsigned, size_t);
+
+/**
+ * Get mask which defines bits of offset in chunk.
+ */
+static inline size_t dma_policy_chunk_mask(const dma_policy_t policy)
+{
+    return policy | DMA_POLICY_FLAGS_MASK;
+}

 extern errno_t dma_buffer_alloc(dma_buffer_t *db, size_t size);
 extern errno_t dma_buffer_alloc_policy(dma_buffer_t *, size_t, dma_policy_t);
 extern void dma_buffer_free(dma_buffer_t *);
-extern uintptr_t dma_buffer_phys(const dma_buffer_t *, void *);

-extern bool dma_buffer_check_policy(const void *, size_t, const dma_policy_t);
+extern uintptr_t dma_buffer_phys(const dma_buffer_t *, const void *);
+
+static inline uintptr_t dma_buffer_phys_base(const dma_buffer_t *db)
+{
+    return dma_buffer_phys(db, db->virt);
+}

 extern errno_t dma_buffer_lock(dma_buffer_t *, void *, size_t);
 extern void dma_buffer_unlock(dma_buffer_t *, size_t);

-static inline int dma_buffer_is_set(dma_buffer_t *db)
+extern void dma_buffer_acquire(dma_buffer_t *);
+extern void dma_buffer_release(dma_buffer_t *);
+
+static inline bool dma_buffer_is_set(const dma_buffer_t *db)
 {
     return !!db->virt;
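To make the bit layout concrete, here is a small self-contained program, not part of the changeset, with local copies of the macros above and an assumed 4 KiB PAGE_SIZE. It builds a policy for 64 KiB chunks and derives the chunk-offset mask, including the chunk_size == 0 case that dma_buffer_alloc() uses to mark a fully contiguous buffer:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-ins for the definitions above (assumed 4 KiB pages). */
    #define PAGE_SIZE 4096UL
    #define DMA_POLICY_FLAGS_MASK      (PAGE_SIZE - 1)
    #define DMA_POLICY_CHUNK_SIZE_MASK (~DMA_POLICY_FLAGS_MASK)
    #define DMA_POLICY_4GiB            (1 << 0)

    typedef uintptr_t dma_policy_t;

    /* Mirrors dma_policy_create() from this changeset. */
    static dma_policy_t dma_policy_create(unsigned flags, size_t chunk_size)
    {
        assert((chunk_size & (chunk_size - 1)) == 0); /* power of 2 */
        assert(chunk_size >= PAGE_SIZE || chunk_size == 0);

        return ((chunk_size - 1) & DMA_POLICY_CHUNK_SIZE_MASK)
            | (flags & DMA_POLICY_FLAGS_MASK);
    }

    static size_t dma_policy_chunk_mask(const dma_policy_t policy)
    {
        return policy | DMA_POLICY_FLAGS_MASK;
    }

    int main(void)
    {
        /* 64 KiB contiguous chunks, 32-bit addresses required. */
        dma_policy_t p = dma_policy_create(DMA_POLICY_4GiB, 1 << 16);

        /* High bits encode contiguity: the mask covers offsets 0..65535. */
        printf("policy = %#lx, chunk mask = %#zx\n",
            (unsigned long) p, dma_policy_chunk_mask(p));

        /* chunk_size == 0 means the whole buffer is contiguous: all ones. */
        dma_policy_t strict = dma_policy_create(0, 0);
        printf("strict chunk mask = %#zx\n", dma_policy_chunk_mask(strict));
        return 0;
    }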
uspace/lib/usb/src/dma_buffer.c (r2f762a7 → r1d758fc)

 #include "usb/dma_buffer.h"

+dma_policy_t dma_policy_create(unsigned flags, size_t chunk_size)
+{
+    assert((chunk_size & (chunk_size - 1)) == 0); /* Check if power of 2 */
+    assert(chunk_size >= PAGE_SIZE || chunk_size == 0);
+
+    return ((chunk_size - 1) & DMA_POLICY_CHUNK_SIZE_MASK)
+        | (flags & DMA_POLICY_FLAGS_MASK);
+}
+
+/**
+ * As the driver is typically using only a few buffers at once, we cache the
+ * physical mapping to avoid calling the kernel unnecessarily often. This cache
+ * is global for a task.
+ *
+ * TODO: "few" is currently limited to one.
+ */
+static struct {
+    const void *last;
+    uintptr_t phys;
+} phys_mapping_cache = { 0 };
+
+static void cache_insert(const void *v, uintptr_t p)
+{
+    phys_mapping_cache.last = v;
+    phys_mapping_cache.phys = p;
+}
+
+static void cache_evict(const void *v)
+{
+    if (phys_mapping_cache.last == v)
+        phys_mapping_cache.last = NULL;
+}
+
+static bool cache_find(const void *v, uintptr_t *p)
+{
+    *p = phys_mapping_cache.phys;
+    return phys_mapping_cache.last == v;
+}
+
 /**
  * Allocate a DMA buffer.
- *
- * XXX: Currently cannot make much use of missing constraints, as it always
- * allocates page-aligned contiguous buffer. We rely on it in dma_buffer_phys.
  *
  * @param[in] db dma_buffer_t structure to fill
…
     void *address = AS_AREA_ANY;

-    const int ret = dmamem_map_anonymous(real_size,
+    const int err = dmamem_map_anonymous(real_size,
         flags, AS_AREA_READ | AS_AREA_WRITE, 0,
         &phys, &address);
-
-    if (ret == EOK) {
-        /* Access the pages to force mapping */
-        volatile char *buf = address;
-        for (size_t i = 0; i < size; i += PAGE_SIZE)
-            buf[i] = 0xff;
-
-        db->virt = address;
-        db->phys = phys;
-    }
-    return ret;
+    if (err)
+        return err;
+
+    /* Access the pages to force mapping */
+    volatile char *buf = address;
+    for (size_t i = 0; i < size; i += PAGE_SIZE)
+        buf[i] = 0xff;
+
+    db->virt = address;
+    db->policy = dma_policy_create(policy, 0);
+    cache_insert(db->virt, phys);
+
+    return EOK;
…
         dmamem_unmap_anonymous(db->virt);
         db->virt = NULL;
-        db->phys = 0;
+        db->policy = 0;
     }
 }
…
  * @param[in] virt Pointer somewhere inside db
  */
-uintptr_t dma_buffer_phys(const dma_buffer_t *db, void *virt)
-{
-    return db->phys + (virt - db->virt);
-}
-
-/**
- * Check whether a memory area is compatible with a policy.
- *
- * Useful to skip copying when the buffer is already ready to be given to
- * hardware as is.
- *
- * Note that the "as_get_physical_mapping" fails when the page is not mapped
- * yet, and that the caller is responsible for forcing the mapping.
- */
-bool dma_buffer_check_policy(const void *buffer, size_t size, const dma_policy_t policy)
-{
-    uintptr_t addr = (uintptr_t) buffer;
-
-    const bool check_4gib = !!(policy & DMA_POLICY_4GiB);
-    const bool check_crossing = !!(policy & DMA_POLICY_NOT_CROSSING);
-    const bool check_alignment = !!(policy & DMA_POLICY_PAGE_ALIGNED);
-    const bool check_contiguous = !!(policy & DMA_POLICY_CONTIGUOUS);
-
-    /* Check the two conditions that are easy */
-    if (check_crossing && (addr + size - 1) / PAGE_SIZE != addr / PAGE_SIZE)
-        goto violated;
-
-    if (check_alignment && ((uintptr_t) buffer) % PAGE_SIZE)
-        goto violated;
-
-    /*
-     * For these conditions, we need to walk through pages and check
-     * physical address of each one
-     */
-    if (check_contiguous || check_4gib) {
-        const void *virt = buffer;
-        uintptr_t phys;
-
-        /* Get the mapping of the first page */
-        if (as_get_physical_mapping(virt, &phys))
-            goto error;
-
-        /* First page can already break 4GiB condition */
-        if (check_4gib && (phys & DMAMEM_4GiB) != 0)
-            goto violated;
-
-        while (size >= PAGE_SIZE) {
-            /* Move to the next page */
-            virt += PAGE_SIZE;
-            size -= PAGE_SIZE;
-
-            uintptr_t last_phys = phys;
-            if (as_get_physical_mapping(virt, &phys))
-                goto error;
-
-            if (check_contiguous && (phys - last_phys) != PAGE_SIZE)
-                goto violated;
-
-            if (check_4gib && (phys & DMAMEM_4GiB) != 0)
-                goto violated;
-        }
-    }
-
-    /* All checks passed */
-    return true;
-
-violated:
-error:
-    return false;
-}
+uintptr_t dma_buffer_phys(const dma_buffer_t *db, const void *virt)
+{
+    const size_t chunk_mask = dma_policy_chunk_mask(db->policy);
+    const uintptr_t offset = (virt - db->virt) & chunk_mask;
+    const void *chunk_base = virt - offset;
+
+    uintptr_t phys;
+
+    if (!cache_find(chunk_base, &phys)) {
+        if (as_get_physical_mapping(chunk_base, &phys))
+            return 0;
+        cache_insert(chunk_base, phys);
+    }
+
+    return phys + offset;
+}
+
+static bool dma_buffer_is_4gib(dma_buffer_t *db, size_t size)
+{
+    if (sizeof(uintptr_t) <= 32)
+        return true;
+
+    const size_t chunk_size = dma_policy_chunk_mask(db->policy) + 1;
+    const size_t chunks = chunk_size ? 1 : size / chunk_size;
+
+    for (size_t c = 0; c < chunks; c++) {
+        const void *addr = db->virt + (c * chunk_size);
+        const uintptr_t phys = dma_buffer_phys(db, addr);
+
+        if ((phys & DMAMEM_4GiB) != 0)
+            return false;
+    }
+
+    return true;
+}
…
 errno_t dma_buffer_lock(dma_buffer_t *db, void *virt, size_t size)
 {
-    db->virt = virt;
-    return dmamem_map(db->virt, size, 0, 0, &db->phys);
+    assert(virt);
+
+    uintptr_t phys;
+
+    const errno_t err = dmamem_map(virt, size, 0, 0, &phys);
+    if (err)
+        return err;
+
+    db->virt = virt;
+    db->policy = dma_policy_create(0, PAGE_SIZE);
+    cache_insert(virt, phys);
+
+    unsigned flags = -1U;
+    if (!dma_buffer_is_4gib(db, size))
+        flags &= ~DMA_POLICY_4GiB;
+    db->policy = dma_policy_create(flags, PAGE_SIZE);
+
+    return EOK;
 }
…
     dmamem_unmap(db->virt, size);
     db->virt = NULL;
-    db->phys = 0;
+    db->policy = 0;
 }
+
+/**
+ * Must be called when the buffer is received over IPC. Clears a potentially
+ * leftover value from a different buffer mapped to the same virtual address.
+ */
+void dma_buffer_acquire(dma_buffer_t *db)
+{
+    cache_evict(db->virt);
+}
+
+/**
+ * Counterpart of acquire.
+ */
+void dma_buffer_release(dma_buffer_t *db)
+{
+    cache_evict(db->virt);
+}
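The single-entry cache above trades generality for one saved kernel entry per repeated lookup. A toy model, not part of the changeset and with as_get_physical_mapping() stubbed out and physical addresses invented, demonstrates the hit path and why dma_buffer_acquire()/release() must evict the entry when a virtual address may be reused by a different buffer:

    #include <stdint.h>
    #include <stdio.h>

    /* Toy model of the one-entry physical-mapping cache in dma_buffer.c. */
    static struct {
        const void *last;
        uintptr_t phys;
    } cache;

    static int kernel_calls;

    /* Stub standing in for as_get_physical_mapping(); address is invented. */
    static int stub_as_get_physical_mapping(const void *virt, uintptr_t *phys)
    {
        kernel_calls++;
        *phys = 0x80000000u | ((uintptr_t) virt & 0xfffff000u);
        return 0;
    }

    static uintptr_t lookup(const void *chunk_base)
    {
        uintptr_t phys;
        if (cache.last == chunk_base)
            return cache.phys;        /* hit: no kernel entry */
        stub_as_get_physical_mapping(chunk_base, &phys);
        cache.last = chunk_base;      /* cache_insert() */
        cache.phys = phys;
        return phys;
    }

    static void evict(const void *virt)   /* cache_evict() */
    {
        if (cache.last == virt)
            cache.last = NULL;
    }

    int main(void)
    {
        static char buf[4096];
        lookup(buf);
        lookup(buf);                                  /* served from the cache */
        printf("kernel calls: %d\n", kernel_calls);   /* prints 1 */

        evict(buf);                   /* what dma_buffer_acquire() does */
        lookup(buf);                  /* must ask the "kernel" again */
        printf("kernel calls: %d\n", kernel_calls);   /* prints 2 */
        return 0;
    }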
uspace/lib/usbdev/src/pipes.c (r2f762a7 → r1d758fc)

     /* Only control writes make sense without buffer */
-    if ((t->dir != USB_DIRECTION_OUT || !t->is_control)
-        && (t->req.base == NULL || t->req.size == 0))
+    if ((t->dir != USB_DIRECTION_OUT || !t->is_control) && t->req.size == 0)
         return EINVAL;

     /* Nonzero size requires buffer */
-    if (t->req.base == NULL && t->req.size != 0)
+    if (!dma_buffer_is_set(&t->req.buffer) && t->req.size != 0)
         return EINVAL;
…
 /**
  * Setup the transfer request inside transfer according to dma buffer provided.
+ *
+ * TODO: The buffer could have been allocated as a more strict one. Currently,
+ * we assume that the policy is just the requested one.
  */
 static void setup_dma_buffer(transfer_t *t, void *base, void *ptr, size_t size)
 {
-    t->req.base = base;
+    t->req.buffer.virt = base;
+    t->req.buffer.policy = t->pipe->desc.transfer_buffer_policy;
     t->req.offset = ptr - base;
     t->req.size = size;
-    t->req.buffer_policy = t->pipe->desc.transfer_buffer_policy;
 }
…
 static errno_t transfer_wrap_dma(transfer_t *t, void *buf, size_t size)
 {
+    if (size == 0) {
+        setup_dma_buffer(t, NULL, NULL, 0);
+        return transfer_common(t);
+    }
+
     void *dma_buf = usb_pipe_alloc_buffer(t->pipe, size);
     setup_dma_buffer(t, dma_buf, dma_buf, size);
…
     .direction = USB_DIRECTION_BOTH,
     .max_transfer_size = CTRL_PIPE_MIN_PACKET_SIZE,
+    .transfer_buffer_policy = DMA_POLICY_STRICT,
     };
uspace/lib/usbhost/include/usb/host/bus.h (r2f762a7 → r1d758fc)

 int bus_device_offline(device_t *);

-int bus_device_send_batch(device_t *, usb_target_t,
-    usb_direction_t direction, char *, size_t, uint64_t,
-    usbhc_iface_transfer_callback_t, void *, const char *);
+/**
+ * A proforma to USB transfer batch. As opposed to transfer batch, which is
+ * supposed to be a dynamic structure, this one is static and descriptive only.
+ * Its fields are copied to the final batch.
+ */
+typedef struct transfer_request {
+    usb_target_t target;
+    usb_direction_t dir;
+
+    dma_buffer_t buffer;
+    size_t offset, size;
+    uint64_t setup;
+
+    usbhc_iface_transfer_callback_t on_complete;
+    void *arg;
+
+    const char *name;
+} transfer_request_t;
+
+int bus_issue_transfer(device_t *, const transfer_request_t *);

 errno_t bus_device_send_batch_sync(device_t *, usb_target_t,
uspace/lib/usbhost/include/usb/host/endpoint.h (r2f762a7 → r1d758fc)

 typedef struct bus bus_t;
 typedef struct device device_t;
+typedef struct transfer_request transfer_request_t;
 typedef struct usb_transfer_batch usb_transfer_batch_t;
…
     /** Maximum size of one transfer */
     size_t max_transfer_size;
-    /** Policy for transfer buffers */
-    dma_policy_t transfer_buffer_policy;
+
+    /* Policies for transfer buffers */
+    dma_policy_t transfer_buffer_policy;          /**< A hint for optimal performance. */
+    dma_policy_t required_transfer_buffer_policy; /**< Enforced by the library. */

     /**
…
 extern void endpoint_deactivate_locked(endpoint_t *);

-int endpoint_send_batch(endpoint_t *, usb_target_t, usb_direction_t,
-    char *, size_t, uint64_t, usbhc_iface_transfer_callback_t, void *,
-    const char *);
+int endpoint_send_batch(endpoint_t *, const transfer_request_t *);

 static inline bus_t *endpoint_get_bus(endpoint_t *ep)
uspace/lib/usbhost/include/usb/host/usb_transfer_batch.h (r2f762a7 → r1d758fc)

     } setup;

+    /** DMA buffer with enforced policy */
+    dma_buffer_t dma_buffer;
+    /** Size of memory buffer */
+    size_t offset, size;
+
     /**
      * In case a bounce buffer is allocated, the original buffer must be
      * stored to be filled after the IN transaction is finished.
      */
-    char *buffer;
-    /** Size of memory buffer */
-    size_t buffer_size;
-
-    /** DMA buffer with enforced policy */
-    dma_buffer_t dma_buffer;
+    char *original_buffer;
     bool is_bounced;
…
     usb_str_transfer_type_short((batch).ep->transfer_type), \
     usb_str_direction((batch).dir), \
-    (batch).buffer_size, (batch).ep->max_packet_size
+    (batch).size, (batch).ep->max_packet_size

 /** Wrapper for bus operation. */
…
 void usb_transfer_batch_init(usb_transfer_batch_t *, endpoint_t *);

+/** Buffer handling */
+bool usb_transfer_batch_bounce_required(usb_transfer_batch_t *);
 errno_t usb_transfer_batch_bounce(usb_transfer_batch_t *);
-/** Buffer preparation */
-errno_t usb_transfer_batch_prepare_buffer(usb_transfer_batch_t *, char *);

 /** Batch finalization. */
uspace/lib/usbhost/src/bus.c (r2f762a7 → r1d758fc)

 #include <str_error.h>
 #include <usb/debug.h>
+#include <usb/dma_buffer.h>

 #include "endpoint.h"
…
         endpoint_init(ep, device, desc);
     }
+
+    assert((ep->required_transfer_buffer_policy & ~ep->transfer_buffer_policy) == 0);

     /* Bus reference */
…
 /**
- * Initiate a transfer on the bus. Finds the target endpoint, creates
- * a transfer batch and schedules it.
- *
- * @param device Device for which to send the batch
- * @param target The target of the transfer.
- * @param direction A direction of the transfer.
- * @param data A pointer to the data buffer.
- * @param size Size of the data buffer.
- * @param setup_data Data to use in the setup stage (Control communication type)
- * @param on_complete Callback which is called after the batch is complete
- * @param arg Callback parameter.
- * @param name Communication identifier (for nicer output).
+ * Assert some conditions on transfer request. As the request is an entity of
+ * HC driver only, we can force these conditions harder. Invalid values from
+ * devices shall be caught on DDF interface already.
+ */
+static void check_request(const transfer_request_t *request)
+{
+    assert(usb_target_is_valid(&request->target));
+    assert(request->dir != USB_DIRECTION_BOTH);
+    /* Non-zero offset => size is non-zero */
+    assert(request->offset == 0 || request->size != 0);
+    /* Non-zero size => buffer is set */
+    assert(request->size == 0 || dma_buffer_is_set(&request->buffer));
+    /* Non-null arg => callback is set */
+    assert(request->arg == NULL || request->on_complete != NULL);
+    assert(request->name);
+}
+
+/**
+ * Initiate a transfer with given device.
+ *
  * @return Error code.
  */
-int bus_device_send_batch(device_t *device, usb_target_t target,
-    usb_direction_t direction, char *data, size_t size, uint64_t setup_data,
-    usbhc_iface_transfer_callback_t on_complete, void *arg, const char *name)
-{
-    assert(device->address == target.address);
+int bus_issue_transfer(device_t *device, const transfer_request_t *request)
+{
+    assert(device);
+    assert(request);
+
+    check_request(request);
+    assert(device->address == request->target.address);

     /* Temporary reference */
-    endpoint_t *ep = bus_find_endpoint(device, target.endpoint, direction);
+    endpoint_t *ep = bus_find_endpoint(device, request->target.endpoint, request->dir);
     if (ep == NULL) {
         usb_log_error("Endpoint(%d:%d) not registered for %s.",
-            device->address, target.endpoint, name);
+            device->address, request->target.endpoint, request->name);
         return ENOENT;
     }
…
     assert(ep->device == device);

-    /*
-     * This method is already callable from HC only, so we can force these
-     * conditions harder.
-     * Invalid values from devices shall be caught on DDF interface already.
-     */
-    assert(usb_target_is_valid(&target));
-    assert(direction != USB_DIRECTION_BOTH);
-    assert(size == 0 || data != NULL);
-    assert(arg == NULL || on_complete != NULL);
-    assert(name);
-
-    const int err = endpoint_send_batch(ep, target, direction,
-        data, size, setup_data, on_complete, arg, name);
+    const int err = endpoint_send_batch(ep, request);

     /* Temporary reference */
…
     const char *name, size_t *transferred_size)
 {
+    int err;
     sync_data_t sd = { .done = false };
     fibril_mutex_initialize(&sd.done_mtx);
     fibril_condvar_initialize(&sd.done_cv);

-    const int ret = bus_device_send_batch(device, target, direction,
-        data, size, setup_data, sync_transfer_complete, &sd, name);
-    if (ret != EOK)
-        return ret;
+    transfer_request_t request = {
+        .target = target,
+        .dir = direction,
+        .offset = ((uintptr_t) data) % PAGE_SIZE,
+        .size = size,
+        .setup = setup_data,
+        .on_complete = sync_transfer_complete,
+        .arg = &sd,
+        .name = name,
+    };
+
+    if (data &&
+        (err = dma_buffer_lock(&request.buffer, data - request.offset, size)))
+        return err;
+
+    if ((err = bus_issue_transfer(device, &request))) {
+        dma_buffer_unlock(&request.buffer, size);
+        return err;
+    }

     /*
…
     fibril_mutex_unlock(&sd.done_mtx);

+    dma_buffer_unlock(&request.buffer, size);
+
     if (transferred_size)
         *transferred_size = sd.transferred_size;
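The sync wrapper above now locks the caller's buffer in place instead of copying it: the virtual address is rounded down to a page boundary and the remainder is recorded in request.offset. A minimal sketch of that arithmetic, not part of the changeset and assuming 4 KiB pages, with dma_buffer_lock() itself not reproduced:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL /* assumed page size */

    int main(void)
    {
        static char pool[3 * 4096];
        char *data = pool + 5000;   /* a caller buffer starting mid-page */
        size_t size = 6000;

        /* Same computation as in bus_device_send_batch_sync(): lock from
         * the enclosing page boundary, remember where the data starts. */
        uintptr_t offset = ((uintptr_t) data) % PAGE_SIZE;
        char *lock_base = data - offset;

        printf("lock %zu bytes at %p, transfer offset %zu\n",
            size, (void *) lock_base, (size_t) offset);
        return 0;
    }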
uspace/lib/usbhost/src/ddf_helpers.c (r2f762a7 → r1d758fc)

 #include <usb/descriptor.h>
 #include <usb/usb.h>
+#include <usb/dma_buffer.h>
 #include <usb_iface.h>
 #include <usbhc_iface.h>
…
  * @return Error code.
  */
-static errno_t transfer(ddf_fun_t *fun, const usbhc_iface_transfer_request_t *req,
+static errno_t transfer(ddf_fun_t *fun,
+    const usbhc_iface_transfer_request_t *ifreq,
     usbhc_iface_transfer_callback_t callback, void *arg)
 {
…
     const usb_target_t target = {{
         .address = dev->address,
-        .endpoint = req->endpoint,
-        .stream = req->stream,
+        .endpoint = ifreq->endpoint,
+        .stream = ifreq->stream,
     }};
…
         return EINVAL;

-    if (req->size > 0 && req->base == NULL)
-        return EBADMEM;
+    if (ifreq->offset > 0 && ifreq->size == 0)
+        return EINVAL;
+
+    if (ifreq->size > 0 && !dma_buffer_is_set(&ifreq->buffer))
+        return EBADMEM;
…
         return EBADMEM;

-    const char *name = (req->dir == USB_DIRECTION_IN) ? "READ" : "WRITE";
-
-    char *buffer = req->base + req->offset;
-
-    return bus_device_send_batch(dev, target, req->dir,
-        buffer, req->size, req->setup,
-        callback, arg, name);
+    const transfer_request_t request = {
+        .target = target,
+        .dir = ifreq->dir,
+        .buffer = ifreq->buffer,
+        .offset = ifreq->offset,
+        .size = ifreq->size,
+        .setup = ifreq->setup,
+        .on_complete = callback,
+        .arg = arg,
+        .name = (ifreq->dir == USB_DIRECTION_IN) ? "READ" : "WRITE",
+    };
+
+    return bus_issue_transfer(dev, &request);
 }
uspace/lib/usbhost/src/endpoint.c (r2f762a7 → r1d758fc)

     ep->max_transfer_size = ep->max_packet_size * ep->packets_per_uframe;
     ep->transfer_buffer_policy = DMA_POLICY_STRICT;
+    ep->required_transfer_buffer_policy = DMA_POLICY_STRICT;
 }
…
  *
  * @param endpoint Endpoint for which to send the batch
- * @param target The target of the transfer.
- * @param direction A direction of the transfer.
- * @param data A pointer to the data buffer.
- * @param size Size of the data buffer.
- * @param setup_data Data to use in the setup stage (Control communication type)
- * @param on_complete Callback which is called after the batch is complete
- * @param arg Callback parameter.
- * @param name Communication identifier (for nicer output).
- */
-errno_t endpoint_send_batch(endpoint_t *ep, usb_target_t target,
-    usb_direction_t direction, char *data, size_t size, uint64_t setup_data,
-    usbhc_iface_transfer_callback_t on_complete, void *arg, const char *name)
-{
-    if (!ep)
-        return EBADMEM;
+ */
+errno_t endpoint_send_batch(endpoint_t *ep, const transfer_request_t *req)
+{
+    assert(ep);
+    assert(req);

     if (ep->transfer_type == USB_TRANSFER_CONTROL) {
-        usb_log_debug("%s %d:%d %zu/%zuB, setup %#016" PRIx64, name,
-            target.address, target.endpoint, size, ep->max_packet_size,
-            setup_data);
+        usb_log_debug("%s %d:%d %zu/%zuB, setup %#016" PRIx64, req->name,
+            req->target.address, req->target.endpoint,
+            req->size, ep->max_packet_size,
+            req->setup);
     } else {
-        usb_log_debug("%s %d:%d %zu/%zuB", name, target.address,
-            target.endpoint, size, ep->max_packet_size);
+        usb_log_debug("%s %d:%d %zu/%zuB", req->name,
+            req->target.address, req->target.endpoint,
+            req->size, ep->max_packet_size);
     }
…
     }

+    size_t size = req->size;
     /*
      * Limit transfers with reserved bandwidth to the amount reserved.
      * OUT transfers are rejected, IN can be just trimmed in advance.
      */
-    if ((ep->transfer_type == USB_TRANSFER_INTERRUPT || ep->transfer_type == USB_TRANSFER_ISOCHRONOUS) && size > ep->max_transfer_size) {
-        if (direction == USB_DIRECTION_OUT)
+    if (size > ep->max_transfer_size &&
+        (ep->transfer_type == USB_TRANSFER_INTERRUPT
+        || ep->transfer_type == USB_TRANSFER_ISOCHRONOUS)) {
+        if (req->dir == USB_DIRECTION_OUT)
             return ENOSPC;
         else
             size = ep->max_transfer_size;
-
     }
…
     }

-    batch->target = target;
-    batch->setup.packed = setup_data;
-    batch->dir = direction;
-    batch->buffer_size = size;
-
-    errno_t err;
-    if ((err = usb_transfer_batch_prepare_buffer(batch, data))) {
-        usb_log_warning("Failed to prepare buffer for batch: %s", str_error(err));
-        usb_transfer_batch_destroy(batch);
-        return err;
-    }
-
-    batch->on_complete = on_complete;
-    batch->on_complete_data = arg;
+    batch->target = req->target;
+    batch->setup.packed = req->setup;
+    batch->dir = req->dir;
+    batch->size = size;
+    batch->offset = req->offset;
+    batch->dma_buffer = req->buffer;
+
+    dma_buffer_acquire(&batch->dma_buffer);
+
+    if (batch->offset != 0) {
+        usb_log_debug("A transfer with nonzero offset requested.");
+        usb_transfer_batch_bounce(batch);
+    }
+
+    if (usb_transfer_batch_bounce_required(batch))
+        usb_transfer_batch_bounce(batch);
+
+    batch->on_complete = req->on_complete;
+    batch->on_complete_data = req->arg;

     const int ret = ops->batch_schedule(batch);
uspace/lib/usbhost/src/usb_transfer_batch.c (r2f762a7 → r1d758fc)

 }

+bool usb_transfer_batch_bounce_required(usb_transfer_batch_t *batch)
+{
+    if (!batch->size)
+        return false;
+
+    unsigned flags = batch->dma_buffer.policy & DMA_POLICY_FLAGS_MASK;
+    unsigned required_flags =
+        batch->ep->required_transfer_buffer_policy & DMA_POLICY_FLAGS_MASK;
+
+    if (required_flags & ~flags)
+        return true;
+
+    size_t chunk_mask = dma_policy_chunk_mask(batch->dma_buffer.policy);
+    size_t required_chunk_mask =
+        dma_policy_chunk_mask(batch->ep->required_transfer_buffer_policy);
+
+    /* If the chunks are at least as large as required, we're good */
+    if ((required_chunk_mask & ~chunk_mask) == 0)
+        return false;
+
+    size_t start_chunk = batch->offset & ~chunk_mask;
+    size_t end_chunk = (batch->offset + batch->size - 1) & ~chunk_mask;
+
+    /* The requested area crosses a chunk boundary */
+    if (start_chunk != end_chunk)
+        return true;
+
+    return false;
+}
+
 errno_t usb_transfer_batch_bounce(usb_transfer_batch_t *batch)
 {
…
     assert(!batch->is_bounced);

-    if (dma_buffer_is_set(&batch->dma_buffer))
-        dma_buffer_unlock(&batch->dma_buffer, batch->buffer_size);
+    dma_buffer_release(&batch->dma_buffer);
+
+    batch->original_buffer = batch->dma_buffer.virt + batch->offset;

     usb_log_debug("Batch(%p): Buffer cannot be used directly, "
…
     const errno_t err = dma_buffer_alloc_policy(&batch->dma_buffer,
-        batch->buffer_size, batch->ep->transfer_buffer_policy);
+        batch->size, batch->ep->transfer_buffer_policy);
     if (err)
         return err;
…
     /* Copy the data out */
     if (batch->dir == USB_DIRECTION_OUT)
-        memcpy(batch->dma_buffer.virt, batch->buffer, batch->buffer_size);
+        memcpy(batch->dma_buffer.virt,
+            batch->original_buffer,
+            batch->size);

     batch->is_bounced = true;
+    batch->offset = 0;
+
     return err;
 }
-
-/**
- * Prepare a DMA buffer according to endpoint policy.
- *
- * If the buffer is suitable to be used directly, it is. Otherwise, a bounce
- * buffer is created.
- */
-errno_t usb_transfer_batch_prepare_buffer(usb_transfer_batch_t *batch, char *buf)
-{
-    /* Empty transfers do not need a buffer */
-    if (batch->buffer_size == 0)
-        return EOK;
-
-    batch->buffer = buf;
-
-    const dma_policy_t policy = batch->ep->transfer_buffer_policy;
-
-    /*
-     * We don't have enough information (yet, WIP) to know if we can skip
-     * the bounce, so check the conditions carefully.
-     */
-    if (!dma_buffer_check_policy(buf, batch->buffer_size, policy))
-        return usb_transfer_batch_bounce(batch);
-
-    /* Fill the buffer with virtual address and lock it for DMA */
-    return dma_buffer_lock(&batch->dma_buffer, buf, batch->buffer_size);
-}
…
     batch, USB_TRANSFER_BATCH_ARGS(*batch));

-    if (batch->error == EOK && batch->buffer_size > 0) {
-        if (!batch->is_bounced) {
-            /* Unlock the buffer for DMA */
-            dma_buffer_unlock(&batch->dma_buffer,
-                batch->buffer_size);
-        }
-        else {
-            /* We were forced to use bounce buffer, copy it back */
-            if (batch->dir == USB_DIRECTION_IN)
-                memcpy(batch->buffer,
-                    batch->dma_buffer.virt,
-                    batch->transferred_size);
-
-            dma_buffer_free(&batch->dma_buffer);
-        }
-    }
+    if (batch->error == EOK && batch->size > 0) {
+        if (batch->is_bounced) {
+            /* We were forced to use bounce buffer, copy it back */
+            if (batch->dir == USB_DIRECTION_IN)
+                memcpy(batch->original_buffer,
+                    batch->dma_buffer.virt,
+                    batch->transferred_size);
+
+            dma_buffer_free(&batch->dma_buffer);
+        }
+        else {
+            dma_buffer_release(&batch->dma_buffer);
+        }
+    }
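With this change, deciding whether a batch needs a bounce buffer reduces to bit arithmetic on the two policies. The following standalone sketch, not part of the changeset, replays the chunk-crossing test from usb_transfer_batch_bounce_required() with invented numbers: a page-chunked buffer (what dma_buffer_lock() produces) against an endpoint whose required policy stays DMA_POLICY_STRICT, as on UHCI:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL /* assumed page size */
    #define DMA_POLICY_FLAGS_MASK (PAGE_SIZE - 1)

    /* Mirrors the chunk logic of usb_transfer_batch_bounce_required();
     * the policies and transfer geometry below are invented. */
    static bool bounce_required(size_t buf_policy, size_t req_policy,
        size_t offset, size_t size)
    {
        size_t chunk_mask = buf_policy | DMA_POLICY_FLAGS_MASK;
        size_t required_chunk_mask = req_policy | DMA_POLICY_FLAGS_MASK;

        /* Buffer chunks at least as large as required: no bounce. */
        if ((required_chunk_mask & ~chunk_mask) == 0)
            return false;

        /* Otherwise the transfer must fit into a single buffer chunk. */
        size_t start_chunk = offset & ~chunk_mask;
        size_t end_chunk = (offset + size - 1) & ~chunk_mask;
        return start_chunk != end_chunk;
    }

    int main(void)
    {
        size_t page_chunks = 0;      /* 4 KiB chunks: no high bits set */
        size_t strict = (size_t) -1; /* DMA_POLICY_STRICT */

        /* 4 KiB transfer starting mid-page crosses a chunk: bounce (1). */
        printf("%d\n", bounce_required(page_chunks, strict, 0x800, 0x1000));
        /* Small transfer within one page: no bounce (0). */
        printf("%d\n", bounce_required(page_chunks, strict, 0x100, 0x200));
        return 0;
    }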