Changeset eb928c4 in mainline
- Timestamp:
- 2018-01-08T00:07:00Z (7 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- 1102eca
- Parents:
- ecbad17
- git-author:
- Ondřej Hlavatý <aearsis@…> (2018-01-08 00:05:39)
- git-committer:
- Ondřej Hlavatý <aearsis@…> (2018-01-08 00:07:00)
- Location:
- uspace/drv/bus/usb/xhci
- Files:
- 11 edited
uspace/drv/bus/usb/xhci/bus.c
recbad17 reb928c4 57 57 static endpoint_t *endpoint_create(device_t *, const usb_endpoint_descriptors_t *); 58 58 59 /** Assign address and control endpoint to a new XHCI device. 59 /** Ops receive generic bus_t pointer. */ 60 static inline xhci_bus_t *bus_to_xhci_bus(bus_t *bus_base) 61 { 62 assert(bus_base); 63 return (xhci_bus_t *) bus_base; 64 } 65 66 /** 67 * Assign address and control endpoint to a new XHCI device. Once this function 68 * successfully returns, the device is online. 69 * 60 70 * @param[in] bus XHCI bus, in which the address is assigned. 61 * @param[in] dev New device to address and configure. 62 * 71 * @param[in] dev New device to address and configure./e 63 72 * @return Error code. 64 73 */ … … 103 112 } 104 113 105 /** Retrieve and set maximum packet size for endpoint zero of a XHCI device. 114 /** 115 * Retrieve and set maximum packet size for endpoint zero of a XHCI device. 116 * 106 117 * @param[in] hc Host controller, which manages the device. 107 118 * @param[in] dev Device with operational endpoint zero. 108 *109 119 * @return Error code. 110 120 */ … … 134 144 } 135 145 136 /** Respond to a new device on the XHCI bus. Address it, negotiate packet size 146 /** 147 * Respond to a new device on the XHCI bus. Address it, negotiate packet size 137 148 * and retrieve USB descriptors. 149 * 138 150 * @param[in] bus XHCI bus, where the new device emerged. 139 151 * @param[in] dev XHCI device, which has appeared on the bus. … … 141 153 * @return Error code. 142 154 */ 143 int xhci_bus_enumerate_device(xhci_bus_t *bus, device_t *dev) 144 { 145 int err; 155 static int device_enumerate(device_t *dev) 156 { 157 int err; 158 xhci_bus_t *bus = bus_to_xhci_bus(dev->bus); 146 159 xhci_device_t *xhci_dev = xhci_device_get(dev); 147 160 … … 192 205 static int endpoint_unregister(endpoint_t *); 193 206 194 /** Remove device from XHCI bus. Transition it to the offline state, abort all 207 /** 208 * Remove device from XHCI bus. Transition it to the offline state, abort all 195 209 * ongoing transfers and unregister all of its endpoints. 210 * 211 * Bus callback. 212 * 196 213 * @param[in] bus XHCI bus, from which the device is removed. 197 214 * @param[in] dev XHCI device, which is removed from the bus. 198 *199 215 * @return Error code. 200 216 */ 201 int xhci_bus_remove_device(xhci_bus_t *bus, device_t *dev) 202 { 203 int err; 217 static int device_remove(device_t *dev) 218 { 219 int err; 220 xhci_bus_t *bus = bus_to_xhci_bus(dev->bus); 204 221 xhci_device_t *xhci_dev = xhci_device_get(dev); 205 222 … … 255 272 } 256 273 257 /** Ops receive generic bus_t pointer. */ 258 static inline xhci_bus_t *bus_to_xhci_bus(bus_t *bus_base) 259 { 260 assert(bus_base); 261 return (xhci_bus_t *) bus_base; 262 } 263 264 // TODO: Fill in docstrings for XHCI bus callbacks once generic bus callbacks get their docstrings. 265 266 static int device_enumerate(device_t *dev) 267 { 268 xhci_bus_t *bus = bus_to_xhci_bus(dev->bus); 269 return xhci_bus_enumerate_device(bus, dev); 270 } 271 272 static int device_remove(device_t *dev) 273 { 274 xhci_bus_t *bus = bus_to_xhci_bus(dev->bus); 275 return xhci_bus_remove_device(bus, dev); 276 } 277 274 /** 275 * Reverts things device_offline did, getting the device back up. 276 * 277 * Bus callback. 278 */ 278 279 static int device_online(device_t *dev_base) 279 280 { … … 304 305 } 305 306 307 /** 308 * Make given device offline. Offline the DDF function, tear down all 309 * endpoints, issue Deconfigure Device command to xHC. 310 * 311 * Bus callback. 
312 */ 306 313 static int device_offline(device_t *dev_base) 307 314 { … … 353 360 } 354 361 362 /** 363 * Create a new xHCI endpoint structure. 364 * 365 * Bus callback. 366 */ 355 367 static endpoint_t *endpoint_create(device_t *dev, const usb_endpoint_descriptors_t *desc) 356 368 { … … 367 379 } 368 380 381 /** 382 * Destroy given xHCI endpoint structure. 383 * 384 * Bus callback. 385 */ 369 386 static void endpoint_destroy(endpoint_t *ep) 370 387 { … … 375 392 } 376 393 394 /** 395 * Register an andpoint to the bus. Allocate its transfer ring(s) and inform 396 * xHC about it. 397 * 398 * Bus callback. 399 */ 377 400 static int endpoint_register(endpoint_t *ep_base) 378 401 { … … 401 424 } 402 425 426 /** 427 * Unregister an endpoint. If the device is still available, inform the xHC 428 * about it. Destroy resources allocated when registering. 429 * 430 * Bus callback. 431 */ 403 432 static int endpoint_unregister(endpoint_t *ep_base) 404 433 { … … 426 455 } 427 456 428 static usb_transfer_batch_t *batch_create(endpoint_t *ep) 429 { 430 xhci_transfer_t *transfer = xhci_transfer_create(ep); 431 return &transfer->batch; 432 } 433 434 static void batch_destroy(usb_transfer_batch_t *batch) 435 { 436 xhci_transfer_destroy(xhci_transfer_from_batch(batch)); 437 } 438 439 /** Structure binding XHCI static callbacks to generic bus callbacks. */ 457 /** 458 * Schedule a batch for xHC. 459 * 460 * Bus callback. 461 */ 462 static int batch_schedule(usb_transfer_batch_t *batch) 463 { 464 assert(batch); 465 xhci_hc_t *hc = bus_to_hc(endpoint_get_bus(batch->ep)); 466 467 if (!batch->target.address) { 468 usb_log_error("Attempted to schedule transfer to address 0."); 469 return EINVAL; 470 } 471 472 return xhci_transfer_schedule(hc, batch); 473 } 474 440 475 static const bus_ops_t xhci_bus_ops = { 441 // TODO: Is it good idea to use this macro? It blurrs the fact that the callbacks and static functions are called the same.442 #define BIND_OP(op) .op = op,443 BIND_OP(device_enumerate)444 BIND_OP(device_remove)445 BIND_OP(device_online)446 BIND_OP(device_offline)447 448 BIND_OP(endpoint_create)449 BIND_OP(endpoint_destroy)450 BIND_OP(endpoint_register)451 BIND_OP(endpoint_unregister)452 453 BIND_OP(batch_create)454 BIND_OP(batch_destroy)455 #undef BIND_OP456 457 476 .interrupt = hc_interrupt, 458 477 .status = hc_status, 459 .batch_schedule = hc_schedule, 478 479 .device_enumerate = device_enumerate, 480 .device_remove = device_remove, 481 .device_online = device_online, 482 .device_offline = device_offline, 483 484 .endpoint_create = endpoint_create, 485 .endpoint_destroy = endpoint_destroy, 486 .endpoint_register = endpoint_register, 487 .endpoint_unregister = endpoint_unregister, 488 489 .batch_schedule = batch_schedule, 490 .batch_create = xhci_transfer_create, 491 .batch_destroy = xhci_transfer_destroy, 460 492 }; 461 493 … … 486 518 void xhci_bus_fini(xhci_bus_t *bus) 487 519 { 488 // FIXME: Deallocate bus->devices_by_slot?489 // FIXME: Should there be some bus_fini() to call?520 // FIXME: Ensure there are no more devices? 521 free(bus->devices_by_slot); 490 522 // FIXME: Something else we forgot? 491 523 } -
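Editor's note: the bus.c hunks above turn the XHCI-specific entry points into static bus callbacks that receive generic bus_t / device_t pointers and downcast them through bus_to_xhci_bus(). Below is a minimal, self-contained sketch of that embedding-and-downcast pattern; the names (my_bus_t, device_enumerate_cb, "demo") are hypothetical and not the driver's actual code. The cast is only valid because the generic structure is the first member of the driver-specific one, which is the contract the real bus_to_xhci_bus() relies on.

    #include <assert.h>
    #include <stdio.h>

    /* Stand-in for the generic bus structure the framework hands to callbacks. */
    typedef struct bus {
        const char *name;
    } bus_t;

    /* Driver-specific bus embeds the generic one as its first member. */
    typedef struct my_bus {
        bus_t base;            /* must be first for the cast below to be valid */
        int driver_private;    /* driver-specific state */
    } my_bus_t;

    static inline my_bus_t *bus_to_my_bus(bus_t *bus_base)
    {
        assert(bus_base);
        /* Safe because base is the first member of my_bus_t. */
        return (my_bus_t *) bus_base;
    }

    /* A callback in the style of device_enumerate(): generic pointer in, downcast inside. */
    static int device_enumerate_cb(bus_t *bus_base)
    {
        my_bus_t *bus = bus_to_my_bus(bus_base);
        printf("enumerating on %s, private = %d\n", bus->base.name, bus->driver_private);
        return 0;
    }

    int main(void)
    {
        my_bus_t bus = { .base = { .name = "demo" }, .driver_private = 42 };
        return device_enumerate_cb(&bus.base);
    }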
uspace/drv/bus/usb/xhci/commands.c
recbad17 reb928c4 70 70 } 71 71 72 /** 73 * Initialize the command subsystem. Allocates the comand ring. 74 * 75 * Does not configure the CR pointer to the hardware, because the xHC will be 76 * reset before starting. 77 */ 72 78 int xhci_init_commands(xhci_hc_t *hc) 73 79 { … … 89 95 } 90 96 97 /** 98 * Finish the command subsystem. Stops the hardware from running commands, then 99 * deallocates the ring. 100 */ 91 101 void xhci_fini_commands(xhci_hc_t *hc) 92 102 { 103 assert(hc); 93 104 xhci_stop_command_ring(hc); 94 assert(hc); 95 } 96 105 106 xhci_cmd_ring_t *cr = get_cmd_ring(hc); 107 108 fibril_mutex_lock(&cr->guard); 109 xhci_trb_ring_fini(&cr->trb_ring); 110 fibril_mutex_unlock(&cr->guard); 111 } 112 113 /** 114 * Initialize a command structure for the given command. 115 */ 97 116 void xhci_cmd_init(xhci_cmd_t *cmd, xhci_cmd_type_t type) 98 117 { … … 107 126 } 108 127 128 /** 129 * Finish the command structure. Some command invocation includes allocating 130 * a context structure. To have the convenience in calling commands, this 131 * method deallocates all resources. 132 */ 109 133 void xhci_cmd_fini(xhci_cmd_t *cmd) 110 134 { … … 119 143 } 120 144 121 /** Call with guard locked. */ 145 /** 146 * Find a command issued by TRB at @c phys inside the command list. 147 * 148 * Call with guard locked only. 149 */ 122 150 static inline xhci_cmd_t *find_command(xhci_hc_t *hc, uint64_t phys) 123 151 { … … 140 168 } 141 169 142 static inline int enqueue_command(xhci_hc_t *hc, xhci_cmd_t *cmd, unsigned doorbell, unsigned target) 170 /** 171 * Enqueue a command on the TRB ring. Ring the doorbell to initiate processing. 172 * Register the command as waiting for completion inside the command list. 173 */ 174 static inline int enqueue_command(xhci_hc_t *hc, xhci_cmd_t *cmd) 143 175 { 144 176 xhci_cmd_ring_t *cr = get_cmd_ring(hc); … … 168 200 } 169 201 202 /** 203 * Stop the command ring. Stop processing commands, block issuing new ones. 204 * Wait until hardware acknowledges it is stopped. 205 */ 170 206 void xhci_stop_command_ring(xhci_hc_t *hc) 171 207 { … … 187 223 } 188 224 225 /** 226 * Abort currently processed command. Note that it is only aborted when the 227 * command is "blocking" - see section 4.6.1.2 of xHCI spec. 228 */ 189 229 static void abort_command_ring(xhci_hc_t *hc) 190 230 { … … 236 276 }; 237 277 278 /** 279 * Report an error according to command completion code. 280 */ 238 281 static void report_error(int code) 239 282 { … … 244 287 } 245 288 289 /** 290 * Handle a command completion. Feed the fibril waiting for result. 291 * 292 * @param trb The COMMAND_COMPLETION TRB found in event ring. 
293 */ 246 294 int xhci_handle_command_completion(xhci_hc_t *hc, xhci_trb_t *trb) 247 295 { … … 343 391 TRB_SET_TYPE(cmd->_header.trb, XHCI_TRB_TYPE_NO_OP_CMD); 344 392 345 return enqueue_command(hc, cmd , 0, 0);393 return enqueue_command(hc, cmd); 346 394 } 347 395 … … 355 403 cmd->_header.trb.control |= host2xhci(32, XHCI_REG_RD(hc->xecp, XHCI_EC_SP_SLOT_TYPE) << 16); 356 404 357 return enqueue_command(hc, cmd , 0, 0);405 return enqueue_command(hc, cmd); 358 406 } 359 407 … … 368 416 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id); 369 417 370 return enqueue_command(hc, cmd , 0, 0);418 return enqueue_command(hc, cmd); 371 419 } 372 420 … … 398 446 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id); 399 447 400 return enqueue_command(hc, cmd , 0, 0);448 return enqueue_command(hc, cmd); 401 449 } 402 450 … … 419 467 TRB_SET_DC(cmd->_header.trb, cmd->deconfigure); 420 468 421 return enqueue_command(hc, cmd , 0, 0);469 return enqueue_command(hc, cmd); 422 470 } 423 471 … … 441 489 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id); 442 490 443 return enqueue_command(hc, cmd , 0, 0);491 return enqueue_command(hc, cmd); 444 492 } 445 493 … … 460 508 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id); 461 509 462 return enqueue_command(hc, cmd , 0, 0);510 return enqueue_command(hc, cmd); 463 511 } 464 512 … … 475 523 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id); 476 524 477 return enqueue_command(hc, cmd , 0, 0);525 return enqueue_command(hc, cmd); 478 526 } 479 527 … … 495 543 */ 496 544 497 return enqueue_command(hc, cmd , 0, 0);545 return enqueue_command(hc, cmd); 498 546 } 499 547 … … 508 556 TRB_SET_SLOT(cmd->_header.trb, cmd->slot_id); 509 557 510 return enqueue_command(hc, cmd , 0, 0);558 return enqueue_command(hc, cmd); 511 559 } 512 560 … … 524 572 TRB_SET_DEV_SPEED(cmd->_header.trb, cmd->device_speed); 525 573 526 return enqueue_command(hc, cmd , 0, 0);574 return enqueue_command(hc, cmd); 527 575 } 528 576 … … 541 589 [XHCI_CMD_SET_TR_DEQUEUE_POINTER] = set_tr_dequeue_pointer_cmd, 542 590 [XHCI_CMD_RESET_DEVICE] = reset_device_cmd, 543 // TODO: Force event (optional normative, for VMM, section 4.6.12).544 591 [XHCI_CMD_FORCE_EVENT] = NULL, 545 // TODO: Negotiate bandwidth (optional normative, section 4.6.13).546 592 [XHCI_CMD_NEGOTIATE_BANDWIDTH] = NULL, 547 // TODO: Set latency tolerance value (optional normative, section 4.6.14).548 593 [XHCI_CMD_SET_LATENCY_TOLERANCE_VALUE] = NULL, 549 // TODO: Get port bandwidth (mandatory, but needs root hub implementation, section 4.6.15).550 594 [XHCI_CMD_GET_PORT_BANDWIDTH] = get_port_bandwidth_cmd, 551 // TODO: Force header (mandatory, but needs root hub implementation, section 4.6.16).552 595 [XHCI_CMD_FORCE_HEADER] = NULL, 553 596 [XHCI_CMD_NO_OP] = no_op_cmd 554 597 }; 555 598 599 /** 600 * Try to abort currently processed command. This is tricky, because 601 * calling fibril is not necessarily the one which issued the blocked command. 602 * Also, the trickiness intensifies by the fact that stopping a CR is denoted by 603 * event, which is again handled in different fibril. but, once we go to sleep 604 * on waiting for that event, another fibril may wake up and try to abort the 605 * blocked command. 606 * 607 * So, we mark the command ring as being restarted, wait for it to stop, and 608 * then start it again. If there was a blocked command, it will be satisfied by 609 * COMMAND_ABORTED event. 610 */ 556 611 static int try_abort_current_command(xhci_hc_t *hc) 557 612 { … … 603 658 } 604 659 660 /** 661 * Wait, until the command is completed. 
The completion is triggered by 662 * COMMAND_COMPLETION event. As we do not want to rely on HW completing the 663 * command in timely manner, we timeout. Note that we can't just return an 664 * error after the timeout pass - it may be other command blocking the ring, 665 * and ours can be completed afterwards. Therefore, it is not guaranteed that 666 * this function will return in XHCI_COMMAND_TIMEOUT. It will continue waiting 667 * until COMMAND_COMPLETION event arrives. 668 */ 605 669 static int wait_for_cmd_completion(xhci_hc_t *hc, xhci_cmd_t *cmd) 606 670 { … … 630 694 } 631 695 632 /** Issue command and block the current fibril until it is completed or timeout 633 * expires. Nothing is deallocated. Caller should always execute `xhci_cmd_fini`. 696 /** 697 * Issue command and block the current fibril until it is completed or timeout 698 * expires. Nothing is deallocated. Caller should always execute `xhci_cmd_fini`. 634 699 */ 635 700 int xhci_cmd_sync(xhci_hc_t *hc, xhci_cmd_t *cmd) … … 658 723 } 659 724 660 /** Does the same thing as `xhci_cmd_sync` and executes `xhci_cmd_fini`. This 661 * is a useful shorthand for issuing commands without out parameters. 725 /** 726 * Does the same thing as `xhci_cmd_sync` and executes `xhci_cmd_fini`. This 727 * is a useful shorthand for issuing commands without out parameters. 662 728 */ 663 729 int xhci_cmd_sync_fini(xhci_hc_t *hc, xhci_cmd_t *cmd) … … 669 735 } 670 736 671 /** Does the same thing as `xhci_cmd_sync_fini` without blocking the current 672 * fibril. The command is copied to stack memory and `fini` is called upon its completion. 737 /** 738 * Does the same thing as `xhci_cmd_sync_fini` without blocking the current 739 * fibril. The command is copied to stack memory and `fini` is called upon its completion. 673 740 */ 674 741 int xhci_cmd_async_fini(xhci_hc_t *hc, xhci_cmd_t *stack_cmd) -
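Editor's note: the new docstrings above describe the synchronous command convention: initialize the command structure, let xhci_cmd_sync() block the fibril until the COMMAND_COMPLETION event arrives (possibly past the timeout), and always call xhci_cmd_fini(). The following is a hedged usage sketch of that convention, not buildable standalone; it assumes the driver-internal commands.h, and the XHCI_CMD_ENABLE_SLOT constant and slot_id field are taken from names used elsewhere in this changeset rather than verified signatures.

    #include <errno.h>
    #include "commands.h"   /* driver-internal header; assumption, not standalone */

    /* Usage sketch mirroring hc_enable_slot() as described above. */
    static int example_enable_slot(xhci_hc_t *hc, uint32_t *slot_id)
    {
        xhci_cmd_t cmd;
        xhci_cmd_init(&cmd, XHCI_CMD_ENABLE_SLOT);

        /* Blocks the current fibril until the command completes. */
        const int err = xhci_cmd_sync(hc, &cmd);
        if (err == EOK)
            *slot_id = cmd.slot_id;

        /* The caller always finalizes the command structure. */
        xhci_cmd_fini(&cmd);
        return err;
    }

When a command has no out parameters, xhci_cmd_sync_fini() collapses the last two steps, as the docstring above notes.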
uspace/drv/bus/usb/xhci/debug.c
recbad17 reb928c4 31 31 */ 32 32 /** @file 33 * Memory-mapped register structuresof the xHC.33 * Various functions to examine current state of the xHC. 34 34 */ 35 35 … … 100 100 } 101 101 102 /** 103 * Dumps registers of one port. 104 */ 102 105 void xhci_dump_port(const xhci_port_regs_t *port) 103 106 { … … 126 129 } 127 130 131 /** 132 * Dumps all registers that define state of the HC. 133 */ 128 134 void xhci_dump_state(const xhci_hc_t *hc) 129 135 { … … 173 179 } 174 180 181 /** 182 * Dump registers of all ports. 183 */ 175 184 void xhci_dump_ports(const xhci_hc_t *hc) 176 185 { … … 221 230 }; 222 231 232 /** 233 * Stringify XHCI_TRB_TYPE_*. 234 */ 223 235 const char *xhci_trb_str_type(unsigned type) 224 236 { … … 232 244 } 233 245 246 /** 247 * Dump a TRB. 248 */ 234 249 void xhci_dump_trb(const xhci_trb_t *trb) 235 250 { … … 252 267 }; 253 268 269 /** 270 * Dump Extended Capability ID. 271 */ 254 272 const char *xhci_ec_str_id(unsigned id) 255 273 { … … 263 281 } 264 282 283 /** 284 * Dump Protocol Speed ID. 285 */ 265 286 static void xhci_dump_psi(const xhci_psi_t *psi) 266 287 { … … 276 297 } 277 298 299 /** 300 * Dump given Extended Capability. 301 */ 278 302 void xhci_dump_extcap(const xhci_extcap_t *ec) 279 303 { -
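Editor's note: the stringification helpers documented above (xhci_trb_str_type, xhci_ec_str_id) follow a simple bounded table lookup with a fallback for unknown values. A self-contained sketch of that pattern, with hypothetical names and only a few table entries:

    #include <stdio.h>

    /* Sparse name table; unlisted indices stay NULL. */
    static const char *names[] = {
        [0] = "RESERVED",
        [1] = "NORMAL",
        [2] = "SETUP_STAGE",
        [4] = "ISOCH",
    };

    static const char *str_type(unsigned type)
    {
        /* Bounds check plus NULL check cover both out-of-range and gaps. */
        if (type < sizeof(names) / sizeof(names[0]) && names[type] != NULL)
            return names[type];
        return "<unknown>";
    }

    int main(void)
    {
        printf("%s %s %s\n", str_type(1), str_type(3), str_type(99));
        return 0;
    }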
uspace/drv/bus/usb/xhci/endpoint.c
recbad17 reb928c4 45 45 #include "endpoint.h" 46 46 47 /** Initialize new XHCI endpoint. 47 /** 48 * Initialize new XHCI endpoint. 48 49 * @param[in] xhci_ep Allocated XHCI endpoint to initialize. 49 50 * @param[in] dev Device, to which the endpoint belongs. … … 98 99 } 99 100 100 /** Finalize XHCI endpoint. 101 /** 102 * Finalize XHCI endpoint. 101 103 * @param[in] xhci_ep XHCI endpoint to finalize. 102 104 */ … … 108 110 } 109 111 110 /** Determine the type of a XHCI endpoint. 112 /** 113 * Determine the type of a XHCI endpoint. 111 114 * @param[in] ep XHCI endpoint to query. 112 115 * … … 137 140 } 138 141 139 /** Test whether an XHCI endpoint uses streams. 142 /** 143 * Test whether an XHCI endpoint uses streams. 140 144 * @param[in] xhci_ep XHCI endpoint to query. 141 145 * -
uspace/drv/bus/usb/xhci/hc.c
recbad17 reb928c4 66 66 /** 67 67 * Walk the list of extended capabilities. 68 * 69 * The most interesting thing hidden in extended capabilities is the mapping of 70 * ports to protocol versions and speeds. 68 71 */ 69 72 static int hc_parse_ec(xhci_hc_t *hc) … … 145 148 } 146 149 150 /** 151 * Initialize MMIO spaces of xHC. 152 */ 147 153 int hc_init_mmio(xhci_hc_t *hc, const hw_res_list_parsed_t *hw_res) 148 154 { … … 195 201 } 196 202 203 /** 204 * Initialize structures kept in allocated memory. 205 */ 197 206 int hc_init_memory(xhci_hc_t *hc, ddf_dev_t *device) 198 207 { … … 338 347 } 339 348 349 /** 350 * Claim xHC from BIOS. Implements handoff as per Section 4.22.1 of xHCI spec. 351 */ 340 352 int hc_claim(xhci_hc_t *hc, ddf_dev_t *dev) 341 353 { … … 344 356 return EOK; 345 357 346 /* Section 4.22.1 */347 358 /* TODO: Test this with USB3-aware BIOS */ 348 359 usb_log_debug2("LEGSUP: bios: %x, os: %x", hc->legsup->sem_bios, hc->legsup->sem_os); … … 363 374 } 364 375 376 /** 377 * Ask the xHC to reset its state. Implements sequence 378 */ 365 379 static int hc_reset(xhci_hc_t *hc) 366 380 { … … 454 468 } 455 469 456 int hc_schedule(usb_transfer_batch_t *batch)457 {458 assert(batch);459 xhci_hc_t *hc = bus_to_hc(endpoint_get_bus(batch->ep));460 461 if (!batch->target.address) {462 usb_log_error("Attempted to schedule transfer to address 0.");463 return EINVAL;464 }465 466 return xhci_transfer_schedule(hc, batch);467 }468 469 470 typedef int (*event_handler) (xhci_hc_t *, xhci_trb_t *trb); 470 471 … … 484 485 } 485 486 487 /** 488 * Dequeue from event ring and handle dequeued events. 489 * 490 * As there can be events, that blocks on waiting for subsequent events, 491 * we solve this problem by first copying the event TRBs from the event ring, 492 * then asserting EHB and only after, handling the events. 493 * 494 * Whenever the event handling blocks, it switches fibril, and incoming 495 * IPC notification will create new event handling fibril for us. 496 */ 486 497 static void hc_run_event_ring(xhci_hc_t *hc, xhci_event_ring_t *event_ring, xhci_interrupter_regs_t *intr) 487 498 { … … 541 552 } 542 553 554 /** 555 * Handle an interrupt request from xHC. Resolve all situations that trigger an 556 * interrupt separately. 557 * 558 * Note that all RW1C bits in USBSTS register are cleared at the time of 559 * handling the interrupt in irq_code. This method is the top-half. 560 * 561 * @param status contents of USBSTS register at the time of the interrupt. 562 */ 543 563 void hc_interrupt(bus_t *bus, uint32_t status) 544 564 { … … 573 593 } 574 594 595 /** 596 * Tear down all in-memory structures. 597 */ 575 598 void hc_fini(xhci_hc_t *hc) 576 599 { … … 585 608 } 586 609 610 /** 611 * Ring a xHC Doorbell. Implements section 4.7. 612 */ 587 613 int hc_ring_doorbell(xhci_hc_t *hc, unsigned doorbell, unsigned target) 588 614 { … … 594 620 } 595 621 622 /** 623 * Issue an Enable Slot command, returning the obtained Slot ID. 624 * 625 * @param slot_id Pointer where to store the obtained Slot ID. 626 */ 596 627 int hc_enable_slot(xhci_hc_t *hc, uint32_t *slot_id) 597 628 { … … 615 646 } 616 647 648 /** 649 * Issue a Disable Slot command for a slot occupied by device. 650 * 651 * Frees the device context 652 */ 617 653 int hc_disable_slot(xhci_hc_t *hc, xhci_device_t *dev) 618 654 { … … 634 670 } 635 671 672 /** 673 * Prepare an empty Endpoint Input Context inside a dma buffer. 
674 */ 636 675 static int create_configure_ep_input_ctx(dma_buffer_t *dma_buf) 637 676 { … … 649 688 } 650 689 690 /** 691 * Initialize a device, assigning it an address. Implements section 4.3.4. 692 * 693 * @param dev Device to assing an address (unconfigured yet) 694 * @param ep0 EP0 of device TODO remove, can be fetched from dev 695 */ 651 696 int hc_address_device(xhci_hc_t *hc, xhci_device_t *dev, xhci_endpoint_t *ep0) 652 697 { … … 713 758 } 714 759 760 /** 761 * Issue a Configure Device command for a device in slot. 762 * 763 * @param slot_id Slot ID assigned to the device. 764 */ 715 765 int hc_configure_device(xhci_hc_t *hc, uint32_t slot_id) 716 766 { … … 726 776 } 727 777 778 /** 779 * Issue a Deconfigure Device command for a device in slot. 780 * 781 * @param slot_id Slot ID assigned to the device. 782 */ 728 783 int hc_deconfigure_device(xhci_hc_t *hc, uint32_t slot_id) 729 784 { … … 732 787 } 733 788 789 /** 790 * Instruct xHC to add an endpoint with supplied endpoint context. 791 * 792 * @param slot_id Slot ID assigned to the device. 793 * @param ep_idx Endpoint index (number + direction) in question 794 * @param ep_ctx Endpoint context of the endpoint 795 */ 734 796 int hc_add_endpoint(xhci_hc_t *hc, uint32_t slot_id, uint8_t ep_idx, xhci_ep_ctx_t *ep_ctx) 735 797 { … … 748 810 } 749 811 812 /** 813 * Instruct xHC to drop an endpoint. 814 * 815 * @param slot_id Slot ID assigned to the device. 816 * @param ep_idx Endpoint index (number + direction) in question 817 */ 750 818 int hc_drop_endpoint(xhci_hc_t *hc, uint32_t slot_id, uint8_t ep_idx) 751 819 { … … 763 831 } 764 832 833 /** 834 * Instruct xHC to update information about an endpoint, using supplied 835 * endpoint context. 836 * 837 * @param slot_id Slot ID assigned to the device. 838 * @param ep_idx Endpoint index (number + direction) in question 839 * @param ep_ctx Endpoint context of the endpoint 840 */ 765 841 int hc_update_endpoint(xhci_hc_t *hc, uint32_t slot_id, uint8_t ep_idx, xhci_ep_ctx_t *ep_ctx) 766 842 { -
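Editor's note: the new comment on hc_run_event_ring() explains the two-phase approach: copy the event TRBs out of the hardware ring first, then assert EHB, and only afterwards run the handlers, which may block and switch fibrils. A self-contained sketch of that copy-first, handle-later idea, with hypothetical names and a stubbed ring:

    #include <stddef.h>
    #include <stdio.h>

    #define BATCH 16

    typedef struct {
        unsigned type;
    } event_t;

    /* Stand-in for dequeuing one event from the hardware ring;
     * returns 0 on success, -1 when the ring is empty. */
    static int ring_dequeue(event_t *ev)
    {
        (void) ev;
        return -1;
    }

    static void handle_event(const event_t *ev)
    {
        /* May block (e.g. wait for a command); safe here because the
         * hardware ring is no longer being held. */
        printf("event type %u\n", ev->type);
    }

    static void run_event_ring(void)
    {
        event_t copies[BATCH];
        size_t count = 0;

        /* Phase 1: drain the ring into local memory. */
        while (count < BATCH && ring_dequeue(&copies[count]) == 0)
            ++count;

        /* (The real driver updates the dequeue pointer and sets EHB here.) */

        /* Phase 2: handle the local copies, possibly blocking. */
        for (size_t i = 0; i < count; ++i)
            handle_event(&copies[i]);
    }

    int main(void)
    {
        run_event_ring();
        return 0;
    }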
uspace/drv/bus/usb/xhci/hc.h
recbad17 reb928c4
115 115   int hc_update_endpoint(xhci_hc_t *, uint32_t, uint8_t, xhci_ep_ctx_t *);
116 116
117     - int hc_schedule(usb_transfer_batch_t *batch);
118 117   int hc_status(bus_t *, uint32_t *);
119 118   void hc_interrupt(bus_t *, uint32_t);
uspace/drv/bus/usb/xhci/rh.c
recbad17 reb928c4 61 61 XHCI_REG_MASK(XHCI_PORT_CEC); 62 62 63 /** 64 * Initialize the roothub subsystem. 65 */ 63 66 int xhci_rh_init(xhci_rh_t *rh, xhci_hc_t *hc) 64 67 { … … 81 84 } 82 85 83 /** Create a device node for device directly connected to RH. 86 /** 87 * Finalize the RH subsystem. 88 */ 89 int xhci_rh_fini(xhci_rh_t *rh) 90 { 91 assert(rh); 92 free(rh->devices_by_port); 93 return EOK; 94 } 95 96 /** 97 * Create and setup a device directly connected to RH. As the xHCI is not using 98 * a virtual usbhub device for RH, this routine is called for devices directly. 84 99 */ 85 100 static int rh_setup_device(xhci_rh_t *rh, uint8_t port_id) … … 89 104 90 105 assert(rh->devices_by_port[port_id - 1] == NULL); 91 92 xhci_bus_t *bus = &rh->hc->bus;93 106 94 107 device_t *dev = hcd_ddf_fun_create(&rh->hc->base); … … 107 120 dev->speed = port_speed->usb_speed; 108 121 109 if ((err = xhci_bus_enumerate_device(bus,dev))) {122 if ((err = bus_device_enumerate(dev))) { 110 123 usb_log_error("Failed to enumerate USB device: %s", str_error(err)); 111 124 return err; … … 134 147 } 135 148 149 /** 150 * Handle a device connection. USB 3+ devices are set up directly, USB 2 and 151 * below first need to have their port reset. 152 */ 136 153 static int handle_connected_device(xhci_rh_t *rh, uint8_t port_id) 137 154 { … … 161 178 usb_log_debug("USB 2 device attached, issuing reset."); 162 179 xhci_rh_reset_port(rh, port_id); 163 /*164 FIXME: we need to wait for the event triggered by the reset165 and then alloc_dev()... can't it be done directly instead of166 going around?167 */168 180 return EOK; 169 181 } 170 182 } 171 183 172 /** Deal with a detached device. 184 /** 185 * Deal with a detached device. 173 186 */ 174 187 static int handle_disconnected_device(xhci_rh_t *rh, uint8_t port_id) … … 194 207 195 208 /* Remove device from XHCI bus. */ 196 if ((err = xhci_bus_remove_device(&rh->hc->bus,&dev->base))) {209 if ((err = bus_device_remove(&dev->base))) { 197 210 usb_log_warning("Failed to remove device " XHCI_DEV_FMT " from XHCI bus: %s", 198 211 XHCI_DEV_ARGS(*dev), str_error(err)); … … 202 215 } 203 216 204 /** Handle an incoming Port Change Detected Event. 217 /** 218 * Handle an incoming Port Change Detected Event. 205 219 */ 206 220 int xhci_rh_handle_port_status_change_event(xhci_hc_t *hc, xhci_trb_t *trb) … … 219 233 } 220 234 235 /** 236 * Handle all changes on all ports. 237 */ 221 238 void xhci_rh_handle_port_change(xhci_rh_t *rh) 222 239 { … … 303 320 } 304 321 305 static inline int get_hub_available_bandwidth(xhci_hc_t *hc, xhci_device_t* dev, uint8_t speed, xhci_port_bandwidth_ctx_t *ctx) 306 { 307 int err = EOK; 308 309 // TODO: find a correct place for this function + API 310 // We need speed, because a root hub device has both USB 2 and USB 3 speeds 311 // and the command can query only one of them 312 // ctx is an out parameter as of now 313 assert(dev); 314 assert(ctx); 315 316 xhci_cmd_t cmd; 317 xhci_cmd_init(&cmd, XHCI_CMD_GET_PORT_BANDWIDTH); 318 319 if ((err = dma_buffer_alloc(&cmd.bandwidth_ctx, sizeof(xhci_port_bandwidth_ctx_t)))) 320 goto end; 321 322 cmd.device_speed = speed; 323 324 if ((err = xhci_cmd_sync(hc, &cmd))) 325 goto end; 326 327 memcpy(ctx, cmd.bandwidth_ctx.virt, sizeof(xhci_port_bandwidth_ctx_t)); 328 329 end: 330 xhci_cmd_fini(&cmd); 331 return err; 332 } 333 322 /** 323 * Get a port speed for a given port id. 
324 */ 334 325 const xhci_port_speed_t *xhci_rh_get_port_speed(xhci_rh_t *rh, uint8_t port) 335 326 { … … 340 331 } 341 332 333 /** 334 * Issue a port reset for a given port. 335 */ 342 336 int xhci_rh_reset_port(xhci_rh_t* rh, uint8_t port) 343 337 { … … 349 343 } 350 344 351 int xhci_rh_fini(xhci_rh_t *rh)352 {353 assert(rh);354 free(rh->devices_by_port);355 return EOK;356 }357 358 345 /** 359 346 * @} -
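Editor's note: the root-hub hunks above describe the connect path: USB 3+ devices are set up immediately, while USB 2 and older devices first get a port reset and are enumerated once the reset event arrives. A hedged sketch of that decision follows, reusing names that appear in this changeset (xhci_rh_get_port_speed, rh_setup_device, xhci_rh_reset_port) and written as if it lived inside rh.c; whether the speed structure exposes the protocol major version under the field name used here is an assumption.

    /* Hedged sketch of handle_connected_device() as described above. */
    static int example_handle_connected(xhci_rh_t *rh, uint8_t port_id)
    {
        const xhci_port_speed_t *speed = xhci_rh_get_port_speed(rh, port_id);

        /* Assumption: the structure carries the USB protocol major version. */
        if (speed->major == 3) {
            /* USB 3 ports are reset and addressed by the xHC itself,
             * so the device can be set up right away. */
            return rh_setup_device(rh, port_id);
        }

        /* USB 2 and below: issue the reset now; setup continues from the
         * port-change handler once the reset event arrives. */
        return xhci_rh_reset_port(rh, port_id);
    }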
uspace/drv/bus/usb/xhci/scratchpad.c
recbad17 reb928c4 41 41 #include "scratchpad.h" 42 42 43 /** 44 * Get the number of scratchpad buffers needed. 45 */ 43 46 static inline unsigned xhci_scratchpad_count(xhci_hc_t *hc) 44 47 { … … 51 54 } 52 55 56 /** 57 * Allocate all scratchpad buffers, and configure the xHC. 58 */ 53 59 int xhci_scratchpad_alloc(xhci_hc_t *hc) 54 60 { … … 79 85 } 80 86 87 /** 88 * Deallocate the scratchpads and deconfigure xHC. 89 */ 81 90 void xhci_scratchpad_free(xhci_hc_t *hc) 82 91 { -
uspace/drv/bus/usb/xhci/transfers.c
recbad17 reb928c4 94 94 95 95 /** 96 * There can currently be only one active transfer, because97 * usb_transfer_batch_init locks the endpoint by endpoint_use.98 * Therefore, we store the only active transfer per endpoint there.99 */ 100 xhci_transfer_t* xhci_transfer_create(endpoint_t* ep)96 * Create a xHCI-specific transfer batch. 97 * 98 * Bus callback. 99 */ 100 usb_transfer_batch_t * xhci_transfer_create(endpoint_t* ep) 101 101 { 102 102 xhci_transfer_t *transfer = calloc(1, sizeof(xhci_transfer_t)); … … 105 105 106 106 usb_transfer_batch_init(&transfer->batch, ep); 107 return transfer; 108 } 109 110 void xhci_transfer_destroy(xhci_transfer_t* transfer) 111 { 112 assert(transfer); 107 return &transfer->batch; 108 } 109 110 /** 111 * Destroy a xHCI transfer. 112 */ 113 void xhci_transfer_destroy(usb_transfer_batch_t* batch) 114 { 115 xhci_transfer_t *transfer = xhci_transfer_from_batch(batch); 113 116 114 117 dma_buffer_free(&transfer->hc_buffer); -
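Editor's note: xhci_transfer_create() and xhci_transfer_destroy() now take and return the generic usb_transfer_batch_t so they can be plugged into the bus ops table directly; this works because the batch is embedded in the larger xhci_transfer_t. A self-contained sketch of that create/destroy pairing, with hypothetical names:

    #include <stddef.h>
    #include <stdlib.h>

    typedef struct batch {
        int dir;
    } batch_t;

    typedef struct transfer {
        batch_t batch;       /* generic part handed to the bus framework */
        void *hc_buffer;     /* driver-private part */
    } transfer_t;

    /* Allocate the driver structure, hand out the embedded generic part. */
    static batch_t *transfer_create(void)
    {
        transfer_t *t = calloc(1, sizeof(*t));
        return t ? &t->batch : NULL;
    }

    /* Convert back; offsetof() keeps this correct even if batch is not first. */
    static transfer_t *transfer_from_batch(batch_t *b)
    {
        return (transfer_t *) ((char *) b - offsetof(transfer_t, batch));
    }

    static void transfer_destroy(batch_t *b)
    {
        transfer_t *t = transfer_from_batch(b);
        free(t->hc_buffer);
        free(t);
    }

    int main(void)
    {
        batch_t *b = transfer_create();
        if (b)
            transfer_destroy(b);
        return 0;
    }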
uspace/drv/bus/usb/xhci/transfers.h
recbad17 reb928c4
 64  64   } xhci_isoch_transfer_t;
 65  65
 66     - xhci_transfer_t* xhci_transfer_create(endpoint_t *);
     66 + usb_transfer_batch_t* xhci_transfer_create(endpoint_t *);
 67  67   int xhci_transfer_schedule(xhci_hc_t *, usb_transfer_batch_t *);
 68  68   int xhci_handle_transfer_event(xhci_hc_t *, xhci_trb_t *);
 69     - void xhci_transfer_destroy(xhci_transfer_t *);
     69 + void xhci_transfer_destroy(usb_transfer_batch_t *);
 70  70
 71  71   static inline xhci_transfer_t *xhci_transfer_from_batch(usb_transfer_batch_t *batch)
uspace/drv/bus/usb/xhci/trb_ring.c
recbad17 reb928c4 52 52 53 53 54 /** 55 * Get the first TRB of a segment. 56 */ 54 57 static inline xhci_trb_t *segment_begin(trb_segment_t *segment) 55 58 { … … 57 60 } 58 61 62 /** 63 * Get the one-past-end TRB of a segment. 64 */ 59 65 static inline xhci_trb_t *segment_end(trb_segment_t *segment) 60 66 { … … 91 97 /** 92 98 * Initializes the ring with one segment. 93 * Event when it fails, the structure needs to be finalized.94 99 */ 95 100 int xhci_trb_ring_init(xhci_trb_ring_t *ring) … … 117 122 fibril_mutex_initialize(&ring->guard); 118 123 119 usb_log_debug2("Initialized new TRB ring."); 120 121 return EOK; 122 } 123 124 return EOK; 125 } 126 127 /** 128 * Free all segments inside the ring. 129 */ 124 130 void xhci_trb_ring_fini(xhci_trb_ring_t *ring) 125 131 { … … 150 156 } 151 157 158 /** 159 * Get the physical address of the enqueue pointer. 160 */ 152 161 static uintptr_t trb_ring_enqueue_phys(xhci_trb_ring_t *ring) 153 162 { … … 156 165 } 157 166 167 /** 168 * Decides whether the TRB will trigger an interrupt after being processed. 169 */ 158 170 static bool trb_generates_interrupt(xhci_trb_t *trb) 159 171 { … … 163 175 164 176 /** 165 * Enqueue TD scomposed of TRBs.177 * Enqueue TD composed of TRBs. 166 178 * 167 179 * This will copy specified number of TRBs chained together into the ring. The … … 256 268 /** 257 269 * Initializes an event ring. 258 * Even when it fails, the structure needs to be finalized.259 270 */ 260 271 int xhci_event_ring_init(xhci_event_ring_t *ring) … … 275 286 ring->dequeue_ptr = segment->phys; 276 287 277 if (dma_buffer_alloc(&ring->erst, PAGE_SIZE)) 288 if (dma_buffer_alloc(&ring->erst, PAGE_SIZE)) { 289 xhci_event_ring_fini(ring); 278 290 return ENOMEM; 291 } 279 292 xhci_erst_entry_t *erst = ring->erst.virt; 280 293 … … 301 314 } 302 315 316 /** 317 * Get the physical address of the dequeue pointer. 318 */ 303 319 static uintptr_t event_ring_dequeue_phys(xhci_event_ring_t *ring) 304 320 {
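Editor's note: the trb_ring.c hunks drop the old "even when it fails, the structure needs to be finalized" contract; xhci_event_ring_init() now cleans up after itself when a later allocation fails. A self-contained sketch of that cleanup-on-failure convention, with hypothetical names:

    #include <stdlib.h>

    typedef struct ring {
        void *segments;
        void *erst;
    } ring_t;

    static void ring_fini(ring_t *r)
    {
        free(r->erst);
        free(r->segments);
        r->erst = NULL;
        r->segments = NULL;
    }

    static int ring_init(ring_t *r)
    {
        r->segments = malloc(4096);
        if (!r->segments)
            return -1;

        r->erst = malloc(4096);
        if (!r->erst) {
            /* Undo the part that succeeded before reporting failure,
             * so the caller never finalizes a half-built structure. */
            ring_fini(r);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        ring_t r = { 0 };
        if (ring_init(&r) == 0)
            ring_fini(&r);
        return 0;
    }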