Changeset cb89430 in mainline
- Timestamp: 2017-06-22T13:59:15Z
- Branches: lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children: e4d7363
- Parents: 62ba2cbe
- Location: uspace
- Files: 10 edited
uspace/drv/bus/usb/xhci/debug.c
r62ba2cbe rcb89430 37 37 #include <usb/debug.h> 38 38 39 #include "hw_struct/trb.h" 39 40 #include "debug.h" 40 41 #include "hc.h" … … 66 67 DUMP_REG(cap, XHCI_CAP_VERSION); 67 68 DUMP_REG(cap, XHCI_CAP_MAX_SLOTS); 69 DUMP_REG(cap, XHCI_CAP_MAX_INTRS); 70 DUMP_REG(cap, XHCI_CAP_MAX_PORTS); 68 71 DUMP_REG(cap, XHCI_CAP_IST); 69 72 DUMP_REG(cap, XHCI_CAP_ERST_MAX); 73 usb_log_debug2(PX "%u", "Max Scratchpad bufs", xhci_get_max_spbuf(cap)); 70 74 DUMP_REG(cap, XHCI_CAP_SPR); 71 75 DUMP_REG(cap, XHCI_CAP_U1EL); … … 124 128 usb_log_debug2("Operational registers:"); 125 129 126 DUMP_REG(hc->op_regs, XHCI_OP_RS);127 130 DUMP_REG(hc->op_regs, XHCI_OP_RS); 128 131 DUMP_REG(hc->op_regs, XHCI_OP_HCRST); … … 152 155 DUMP_REG(hc->op_regs, XHCI_OP_CRCR_LO); 153 156 DUMP_REG(hc->op_regs, XHCI_OP_CRCR_HI); 154 157 DUMP_REG(hc->op_regs, XHCI_OP_DCBAAP_LO); 158 DUMP_REG(hc->op_regs, XHCI_OP_DCBAAP_HI); 159 DUMP_REG(hc->rt_regs, XHCI_RT_MFINDEX); 160 161 usb_log_debug2("Interrupter 0 state:"); 162 DUMP_REG(&hc->rt_regs->ir[0], XHCI_INTR_IP); 163 DUMP_REG(&hc->rt_regs->ir[0], XHCI_INTR_IE); 164 DUMP_REG(&hc->rt_regs->ir[0], XHCI_INTR_IMI); 165 DUMP_REG(&hc->rt_regs->ir[0], XHCI_INTR_IMC); 166 DUMP_REG(&hc->rt_regs->ir[0], XHCI_INTR_ERSTSZ); 167 DUMP_REG(&hc->rt_regs->ir[0], XHCI_INTR_ERSTBA_LO); 168 DUMP_REG(&hc->rt_regs->ir[0], XHCI_INTR_ERSTBA_HI); 169 DUMP_REG(&hc->rt_regs->ir[0], XHCI_INTR_ERDP_LO); 170 DUMP_REG(&hc->rt_regs->ir[0], XHCI_INTR_ERDP_HI); 171 } 172 173 void xhci_dump_ports(xhci_hc_t *hc) 174 { 155 175 const size_t num_ports = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_MAX_PORTS); 156 176 for (size_t i = 0; i < num_ports; i++) { … … 160 180 } 161 181 } 182 183 static const char *trb_types [] = { 184 [0] = "<empty>", 185 #define TRB(t) [XHCI_TRB_TYPE_##t] = #t 186 TRB(NORMAL), 187 TRB(SETUP_STAGE), 188 TRB(DATA_STAGE), 189 TRB(STATUS_STAGE), 190 TRB(ISOCH), 191 TRB(LINK), 192 TRB(EVENT_DATA), 193 TRB(NO_OP), 194 TRB(ENABLE_SLOT_CMD), 195 TRB(DISABLE_SLOT_CMD), 196 TRB(ADDRESS_DEVICE_CMD), 197 TRB(CONFIGURE_ENDPOINT_CMD), 198 TRB(EVALUATE_CONTEXT_CMD), 199 TRB(RESET_ENDPOINT_CMD), 200 TRB(STOP_ENDPOINT_CMD), 201 TRB(SET_TR_DEQUEUE_POINTER_CMD), 202 TRB(RESET_DEVICE_CMD), 203 TRB(FORCE_EVENT_CMD), 204 TRB(NEGOTIATE_BANDWIDTH_CMD), 205 TRB(SET_LATENCY_TOLERANCE_VALUE_CMD), 206 TRB(GET_PORT_BANDWIDTH_CMD), 207 TRB(FORCE_HEADER_CMD), 208 TRB(NO_OP_CMD), 209 TRB(TRANSFER_EVENT), 210 TRB(COMMAND_COMPLETION_EVENT), 211 TRB(PORT_STATUS_CHANGE_EVENT), 212 TRB(BANDWIDTH_REQUEST_EVENT), 213 TRB(DOORBELL_EVENT), 214 TRB(HOST_CONTROLLER_EVENT), 215 TRB(DEVICE_NOTIFICATION_EVENT), 216 TRB(MFINDEX_WRAP_EVENT), 217 #undef TRB 218 [XHCI_TRB_TYPE_MAX] = NULL, 219 }; 220 221 const char *xhci_trb_str_type(unsigned type) 222 { 223 static char type_buf [20]; 224 225 if (type < XHCI_TRB_TYPE_MAX && trb_types[type] != NULL) 226 return trb_types[type]; 227 228 snprintf(type_buf, sizeof(type_buf), "<unknown (%u)>", type); 229 return type_buf; 230 } 231 232 void xhci_dump_trb(xhci_trb_t *trb) 233 { 234 usb_log_debug2("TRB(%p): type %s, cycle %u", trb, xhci_trb_str_type(TRB_TYPE(*trb)), TRB_CYCLE(*trb)); 235 } 236 162 237 /** 163 238 * @} -
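The debug additions above resolve TRB type numbers to names with a designated-initializer string table and fall back to a shared snprintf buffer for values outside the table. Below is a minimal standalone sketch of that pattern; the enum and names are invented for illustration and are not the real XHCI_TRB_TYPE_* constants:

    #include <stdio.h>

    enum fruit { FRUIT_APPLE = 1, FRUIT_PEAR = 2, FRUIT_MAX };

    static const char *fruit_names[] = {
        [FRUIT_APPLE] = "APPLE",
        [FRUIT_PEAR]  = "PEAR",
        [FRUIT_MAX]   = NULL,   /* gaps and the sentinel read back as NULL */
    };

    static const char *fruit_str(unsigned f)
    {
        static char buf[20];

        /* Known value: return the interned name directly. */
        if (f < FRUIT_MAX && fruit_names[f] != NULL)
            return fruit_names[f];

        /* Unknown value: format it into a shared static buffer. */
        snprintf(buf, sizeof(buf), "<unknown (%u)>", f);
        return buf;
    }

As in xhci_trb_str_type(), the fallback path shares one static buffer, so concurrent callers can clobber each other's string; that is an accepted trade-off for debug-only output.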
uspace/drv/bus/usb/xhci/debug.h
r62ba2cbe rcb89430 18 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,21 20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 22 21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, … … 38 37 #define XHCI_DEBUG_H 39 38 40 #include "hw_struct/regs.h" 39 struct xhci_hc; 40 struct xhci_cap_regs; 41 struct xhci_port_regs; 42 struct xhci_trb; 41 43 42 typedef struct xhci_hc xhci_hc_t; 44 void xhci_dump_cap_regs(const struct xhci_cap_regs *); 45 void xhci_dump_port(struct xhci_port_regs *); 46 void xhci_dump_state(struct xhci_hc *); 47 void xhci_dump_ports(struct xhci_hc *); 43 48 44 void xhci_dump_cap_regs(xhci_cap_regs_t *); 45 void xhci_dump_port(xhci_port_regs_t *); 46 void xhci_dump_state(xhci_hc_t *); 49 const char *xhci_trb_str_type(unsigned); 50 void xhci_dump_trb(struct xhci_trb *trb); 47 51 48 52 #endif -
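debug.h now forward-declares the structures it only handles through pointers instead of including hw_struct/regs.h, which keeps the debug interface independent of the register-layout headers. A tiny illustration of the same technique with a hypothetical driver (all names invented for the example):

    /* widget_debug.h -- hypothetical header using the same decoupling */
    #ifndef WIDGET_DEBUG_H
    #define WIDGET_DEBUG_H

    /* An incomplete type is enough for pointer parameters, so this
     * header needs no knowledge of the register structure layout. */
    struct widget_regs;

    void widget_dump_regs(const struct widget_regs *);

    #endif

Only translation units that actually dereference the registers include the full definition; everything else avoids the extra dependency and possible include cycles.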
uspace/drv/bus/usb/xhci/hc.c
r62ba2cbe rcb89430 35 35 36 36 #include <errno.h> 37 #include <str_error.h> 37 38 #include <usb/debug.h> 38 39 #include <usb/host/ddf_helpers.h> 40 #include <usb/host/utils/malloc32.h> 39 41 #include "debug.h" 40 42 #include "hc.h" 41 42 const ddf_hc_driver_t xhci_ddf_hc_driver = { 43 .hc_speed = USB_SPEED_SUPER, 44 .init = xhci_hc_init, 45 .fini = xhci_hc_fini, 46 .name = "XHCI-PCI", 47 .ops = { 48 .schedule = xhci_hc_schedule, 49 .irq_hook = xhci_hc_interrupt, 50 .status_hook = xhci_hc_status, 43 #include "hw_struct/trb.h" 44 45 static const irq_cmd_t irq_commands[] = { 46 { 47 .cmd = CMD_PIO_READ_32, 48 .dstarg = 1, 49 .addr = NULL 50 }, 51 { 52 .cmd = CMD_AND, 53 .srcarg = 1, 54 .dstarg = 2, 55 .value = 0 56 }, 57 { 58 .cmd = CMD_PREDICATE, 59 .srcarg = 2, 60 .value = 2 61 }, 62 { 63 .cmd = CMD_PIO_WRITE_A_32, 64 .srcarg = 1, 65 .addr = NULL 66 }, 67 { 68 .cmd = CMD_ACCEPT 51 69 } 52 70 }; 53 71 54 int xhci_hc_init(hcd_t *hcd, const hw_res_list_parsed_t *hw_res, bool irq) 72 /** 73 * Generates code to accept interrupts. The xHCI is designed primarily for 74 * MSI/MSI-X, but we use PCI Interrupt Pin. In this mode, all the Interrupters 75 * (except 0) are disabled. 76 */ 77 static int hc_gen_irq_code(irq_code_t *code, const hw_res_list_parsed_t *hw_res) 55 78 { 56 79 int err; 57 80 81 assert(code); 82 assert(hw_res); 83 84 if (hw_res->irqs.count != 1 || hw_res->mem_ranges.count != 1) { 85 usb_log_info("Unexpected HW resources to enable interrupts."); 86 return EINVAL; 87 } 88 89 addr_range_t mmio_range = hw_res->mem_ranges.ranges[0]; 90 91 if (RNGSZ(mmio_range) < sizeof(xhci_cap_regs_t)) 92 return EOVERFLOW; 93 94 95 xhci_cap_regs_t *cap_regs = NULL; 96 if ((err = pio_enable_range(&mmio_range, (void **)&cap_regs))) 97 return EIO; 98 99 code->ranges = malloc(sizeof(irq_pio_range_t)); 100 if (code->ranges == NULL) 101 return ENOMEM; 102 103 code->cmds = malloc(sizeof(irq_commands)); 104 if (code->cmds == NULL) { 105 free(code->ranges); 106 return ENOMEM; 107 } 108 109 code->rangecount = 1; 110 code->ranges[0] = (irq_pio_range_t) { 111 .base = RNGABS(mmio_range), 112 .size = RNGSZ(mmio_range), 113 }; 114 115 code->cmdcount = ARRAY_SIZE(irq_commands); 116 memcpy(code->cmds, irq_commands, sizeof(irq_commands)); 117 118 void *intr0_iman = RNGABSPTR(mmio_range) + XHCI_REG_RD(cap_regs, XHCI_CAP_RTSOFF) + offsetof(xhci_rt_regs_t, ir[0]); 119 code->cmds[0].addr = intr0_iman; 120 code->cmds[3].addr = intr0_iman; 121 code->cmds[1].value = host2xhci(32, 1); 122 123 return hw_res->irqs.irqs[0]; 124 } 125 126 static int hc_claim(ddf_dev_t *dev) 127 { 128 // TODO: implement handoff: section 4.22.1 129 return EOK; 130 } 131 132 static int hc_reset(xhci_hc_t *hc) 133 { 134 /* Stop the HC: set R/S to 0 */ 135 XHCI_REG_CLR(hc->op_regs, XHCI_OP_RS, 1); 136 137 /* Wait 16 ms until the HC is halted */ 138 async_usleep(16000); 139 assert(XHCI_REG_RD(hc->op_regs, XHCI_OP_HCH)); 140 141 /* Reset */ 142 XHCI_REG_SET(hc->op_regs, XHCI_OP_HCRST, 1); 143 144 /* Wait until the reset is complete */ 145 while (XHCI_REG_RD(hc->op_regs, XHCI_OP_HCRST)) 146 async_usleep(1000); 147 148 return EOK; 149 } 150 151 /** 152 * Initialize the HC: section 4.2 153 */ 154 static int hc_start(xhci_hc_t *hc, bool irq) 155 { 156 int err; 157 158 if ((err = hc_reset(hc))) 159 return err; 160 161 while (XHCI_REG_RD(hc->op_regs, XHCI_OP_CNR)) 162 async_usleep(1000); 163 164 uint64_t dcbaaptr = addr_to_phys(hc->event_ring.erst); 165 XHCI_REG_WR(hc->op_regs, XHCI_OP_DCBAAP_LO, LOWER32(dcbaaptr)); 166 XHCI_REG_WR(hc->op_regs, 
XHCI_OP_DCBAAP_HI, UPPER32(dcbaaptr)); 167 XHCI_REG_WR(hc->op_regs, XHCI_OP_MAX_SLOTS_EN, 0); 168 169 uint64_t crptr = xhci_trb_ring_get_dequeue_ptr(&hc->command_ring); 170 XHCI_REG_WR(hc->op_regs, XHCI_OP_CRCR_LO, LOWER32(crptr) >> 6); 171 XHCI_REG_WR(hc->op_regs, XHCI_OP_CRCR_HI, UPPER32(crptr)); 172 173 uint64_t erstptr = addr_to_phys(hc->event_ring.erst); 174 xhci_interrupter_regs_t *intr0 = &hc->rt_regs->ir[0]; 175 XHCI_REG_WR(intr0, XHCI_INTR_ERSTSZ, hc->event_ring.segment_count); 176 XHCI_REG_WR(intr0, XHCI_INTR_ERDP_LO, LOWER32(erstptr)); 177 XHCI_REG_WR(intr0, XHCI_INTR_ERDP_HI, UPPER32(erstptr)); 178 XHCI_REG_WR(intr0, XHCI_INTR_ERSTBA_LO, LOWER32(erstptr)); 179 XHCI_REG_WR(intr0, XHCI_INTR_ERSTBA_HI, UPPER32(erstptr)); 180 181 // TODO: Setup scratchpad buffers 182 183 if (irq) { 184 XHCI_REG_SET(intr0, XHCI_INTR_IE, 1); 185 XHCI_REG_SET(hc->op_regs, XHCI_OP_INTE, 1); 186 } 187 188 XHCI_REG_SET(hc->op_regs, XHCI_OP_RS, 1); 189 190 return EOK; 191 } 192 193 static int hc_init(hcd_t *hcd, const hw_res_list_parsed_t *hw_res, bool irq) 194 { 195 int err; 196 58 197 assert(hcd); 198 assert(hw_res); 59 199 assert(hcd_get_driver_data(hcd) == NULL); 60 200 201 /* Initialize the MMIO ranges */ 61 202 if (hw_res->mem_ranges.count != 1) { 62 usb_log_ debug("Unexpected MMIO area, bailing out.");203 usb_log_error("Unexpected MMIO area, bailing out."); 63 204 return EINVAL; 64 205 } 206 207 addr_range_t mmio_range = hw_res->mem_ranges.ranges[0]; 208 209 usb_log_debug("MMIO area at %p (size %zu), IRQ %d.\n", 210 RNGABSPTR(mmio_range), RNGSZ(mmio_range), hw_res->irqs.irqs[0]); 211 212 if (RNGSZ(mmio_range) < sizeof(xhci_cap_regs_t)) 213 return EOVERFLOW; 214 215 void *base; 216 if ((err = pio_enable_range(&mmio_range, &base))) 217 return err; 65 218 66 219 xhci_hc_t *hc = malloc(sizeof(xhci_hc_t)); … … 68 221 return ENOMEM; 69 222 70 addr_range_t mmio_range = hw_res->mem_ranges.ranges[0]; 71 72 usb_log_debug("MMIO area at %p (size %zu), IRQ %d.\n", 73 RNGABSPTR(mmio_range), RNGSZ(mmio_range), hw_res->irqs.irqs[0]); 74 75 if ((err = pio_enable_range(&mmio_range, (void **)&hc->cap_regs))) 223 hc->cap_regs = (xhci_cap_regs_t *) base; 224 hc->op_regs = (xhci_op_regs_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_LENGTH)); 225 hc->rt_regs = (xhci_rt_regs_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_RTSOFF)); 226 hc->db_arry = (xhci_doorbell_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_DBOFF)); 227 228 usb_log_debug2("Initialized MMIO reg areas:"); 229 usb_log_debug2("\tCapability regs: %p", hc->cap_regs); 230 usb_log_debug2("\tOperational regs: %p", hc->op_regs); 231 usb_log_debug2("\tRuntime regs: %p", hc->rt_regs); 232 usb_log_debug2("\tDoorbell array base: %p", hc->db_arry); 233 234 xhci_dump_cap_regs(hc->cap_regs); 235 236 hc->ac64 = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_AC64); 237 hc->max_slots = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_MAX_SLOTS); 238 239 hc->dcbaa = malloc32((1 + hc->max_slots) * sizeof(xhci_device_ctx_t)); 240 if (!hc->dcbaa) 76 241 goto err_hc; 77 242 78 xhci_dump_cap_regs(hc->cap_regs); 79 80 uintptr_t base = (uintptr_t) hc->cap_regs; 81 82 hc->op_regs = (xhci_op_regs_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_LENGTH)); 83 hc->rt_regs = (xhci_rt_regs_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_RTSOFF)); 84 hc->db_arry = (xhci_doorbell_t *) (base + XHCI_REG_RD(hc->cap_regs, XHCI_CAP_DBOFF)); 85 86 usb_log_debug("Initialized MMIO reg areas:"); 87 usb_log_debug("\tCapability regs: %p", hc->cap_regs); 88 usb_log_debug("\tOperational regs: %p", hc->op_regs); 89 
usb_log_debug("\tRuntime regs: %p", hc->rt_regs); 90 usb_log_debug("\tDoorbell array base: %p", hc->db_arry); 91 92 xhci_dump_state(hc); 243 if ((err = xhci_trb_ring_init(&hc->command_ring, hc))) 244 goto err_dcbaa; 245 246 if ((err = xhci_event_ring_init(&hc->event_ring, hc))) 247 goto err_cmd_ring; 248 249 // TODO: Allocate scratchpad buffers 93 250 94 251 hcd_set_implementation(hcd, hc, &xhci_ddf_hc_driver.ops); 95 252 96 // TODO: check if everything fits into the mmio_area 97 98 return EOK; 99 253 if ((err = hc_start(hc, irq))) 254 goto err_event_ring; 255 256 return EOK; 257 258 err_event_ring: 259 xhci_event_ring_fini(&hc->event_ring); 260 err_cmd_ring: 261 xhci_trb_ring_fini(&hc->command_ring); 262 err_dcbaa: 263 free32(hc->dcbaa); 100 264 err_hc: 101 265 free(hc); 266 hcd_set_implementation(hcd, NULL, NULL); 102 267 return err; 103 268 } 104 269 105 int xhci_hc_status(hcd_t *hcd, uint32_t *status) 106 { 107 usb_log_info("status"); 108 return ENOTSUP; 109 } 110 111 int xhci_hc_schedule(hcd_t *hcd, usb_transfer_batch_t *batch) 112 { 113 usb_log_info("schedule"); 114 return ENOTSUP; 115 } 116 117 void xhci_hc_interrupt(hcd_t *hcd, uint32_t status) 118 { 119 usb_log_info("Interrupted!"); 120 } 121 122 void xhci_hc_fini(hcd_t *hcd) 123 { 124 assert(hcd); 270 static int hc_status(hcd_t *hcd, uint32_t *status) 271 { 272 xhci_hc_t *hc = hcd_get_driver_data(hcd); 273 assert(hc); 274 assert(status); 275 276 *status = 0; 277 if (hc->op_regs) { 278 *status = XHCI_REG_RD(hc->op_regs, XHCI_OP_STATUS); 279 XHCI_REG_WR(hc->op_regs, XHCI_OP_STATUS, *status & XHCI_STATUS_ACK_MASK); 280 } 281 usb_log_debug2("HC(%p): Read status: %x", hc, *status); 282 return EOK; 283 } 284 285 static int ring_doorbell(xhci_hc_t *hc, unsigned doorbell, unsigned target) 286 { 287 uint32_t v = host2xhci(32, target & BIT_RRANGE(uint32_t, 7)); 288 pio_write_32(&hc->db_arry[doorbell], v); 289 return EOK; 290 } 291 292 static int send_no_op_command(xhci_hc_t *hc) 293 { 294 xhci_trb_t trb; 295 memset(&trb, 0, sizeof(trb)); 296 297 trb.control = host2xhci(32, XHCI_TRB_TYPE_NO_OP_CMD << 10); 298 299 xhci_trb_ring_enqueue(&hc->command_ring, &trb); 300 ring_doorbell(hc, 0, 0); 301 302 xhci_dump_trb(&trb); 303 usb_log_debug2("HC(%p): Sent TRB", hc); 304 return EOK; 305 } 306 307 static int hc_schedule(hcd_t *hcd, usb_transfer_batch_t *batch) 308 { 309 xhci_hc_t *hc = hcd_get_driver_data(hcd); 310 assert(hc); 311 312 xhci_dump_state(hc); 313 send_no_op_command(hc); 314 async_usleep(1000); 315 xhci_dump_state(hc); 316 317 xhci_dump_trb(hc->event_ring.dequeue_trb); 318 return EOK; 319 } 320 321 static void hc_run_event_ring(xhci_hc_t *hc, xhci_event_ring_t *event_ring, xhci_interrupter_regs_t *intr) 322 { 323 int err; 324 xhci_trb_t trb; 325 326 err = xhci_event_ring_dequeue(event_ring, &trb);; 327 328 switch (err) { 329 case EOK: 330 usb_log_debug2("Dequeued from event ring."); 331 xhci_dump_trb(&trb); 332 break; 333 334 case ENOENT: 335 usb_log_debug2("Event ring finished."); 336 break; 337 338 default: 339 usb_log_warning("Error while accessing event ring: %s", str_error(err)); 340 } 341 342 /* Update the ERDP to make room inthe ring */ 343 uint64_t erstptr = addr_to_phys(hc->event_ring.erst); 344 XHCI_REG_WR(intr, XHCI_INTR_ERDP_LO, LOWER32(erstptr)); 345 XHCI_REG_WR(intr, XHCI_INTR_ERDP_HI, UPPER32(erstptr)); 346 } 347 348 static void hc_interrupt(hcd_t *hcd, uint32_t status) 349 { 350 xhci_hc_t *hc = hcd_get_driver_data(hcd); 351 assert(hc); 352 353 if (status & XHCI_REG_MASK(XHCI_OP_HSE)) { 354 usb_log_error("Host controller 
error occured. Bad things gonna happen..."); 355 } 356 357 if (status & XHCI_REG_MASK(XHCI_OP_EINT)) { 358 usb_log_debug2("Event interrupt."); 359 360 xhci_interrupter_regs_t *intr0 = &hc->rt_regs->ir[0]; 361 362 if (XHCI_REG_RD(intr0, XHCI_INTR_IP)) { 363 XHCI_REG_SET(intr0, XHCI_INTR_IP, 1); 364 hc_run_event_ring(hc, &hc->event_ring, intr0); 365 } 366 } 367 368 if (status & XHCI_REG_MASK(XHCI_OP_PCD)) { 369 usb_log_error("Port change detected. Not implemented yet!"); 370 } 371 372 if (status & XHCI_REG_MASK(XHCI_OP_SRE)) { 373 usb_log_error("Save/Restore error occured. WTF, S/R mechanism not implemented!"); 374 } 375 } 376 377 static void hc_fini(hcd_t *hcd) 378 { 379 xhci_hc_t *hc = hcd_get_driver_data(hcd); 380 assert(hc); 381 125 382 usb_log_info("Finishing"); 126 383 127 xhci_hc_t *hc = hcd_get_driver_data(hcd); 384 xhci_trb_ring_fini(&hc->command_ring); 385 xhci_event_ring_fini(&hc->event_ring); 386 128 387 free(hc); 129 388 hcd_set_implementation(hcd, NULL, NULL); 130 389 } 131 390 391 const ddf_hc_driver_t xhci_ddf_hc_driver = { 392 .hc_speed = USB_SPEED_SUPER, 393 .irq_code_gen = hc_gen_irq_code, 394 .claim = hc_claim, 395 .init = hc_init, 396 .fini = hc_fini, 397 .name = "XHCI-PCI", 398 .ops = { 399 .schedule = hc_schedule, 400 .irq_hook = hc_interrupt, 401 .status_hook = hc_status, 402 } 403 }; 404 405 132 406 133 407 /** -
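The irq_commands table in hc.c is interpreted by the kernel at interrupt time; written as ordinary C, the four commands boil down to roughly the sketch below. This is illustrative only — the function name is invented, and it assumes the driver's usual PIO helpers (pio_read_32/pio_write_32) and the host2xhci conversion macro are available from the existing includes:

    /* Roughly the plain-C equivalent of irq_commands, operating on
     * interrupter 0's IMAN register. IMAN.IP is RW1C, so writing the
     * read value back acknowledges the pending interrupt. */
    static bool intr0_claim_and_ack(ioport32_t *iman)
    {
        uint32_t v = pio_read_32(iman);        /* CMD_PIO_READ_32 -> arg 1 */
        if (!(v & host2xhci(32, 1)))           /* CMD_AND + CMD_PREDICATE: is IMAN.IP set? */
            return false;                      /* not this device's interrupt */
        pio_write_32(iman, v);                 /* CMD_PIO_WRITE_A_32: write IP back to clear it */
        return true;                           /* CMD_ACCEPT */
    }

hc_gen_irq_code() patches the IMAN address into cmds[0] and cmds[3] and the IP mask into cmds[1] at runtime, because the MMIO base is only known after the PCI resources have been parsed.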
uspace/drv/bus/usb/xhci/hc.h
r62ba2cbe rcb89430 36 36 #include <usb/host/ddf_helpers.h> 37 37 #include "hw_struct/regs.h" 38 #include "hw_struct/context.h" 39 #include "trb_ring.h" 38 40 39 41 typedef struct xhci_hc { … … 42 44 xhci_rt_regs_t *rt_regs; 43 45 xhci_doorbell_t *db_arry; 46 47 xhci_trb_ring_t command_ring; 48 xhci_event_ring_t event_ring; 49 50 xhci_device_ctx_t *dcbaa; 51 52 unsigned max_slots; 53 bool ac64; 54 44 55 } xhci_hc_t; 45 56 46 57 extern const ddf_hc_driver_t xhci_ddf_hc_driver; 47 48 int xhci_hc_init(hcd_t *, const hw_res_list_parsed_t *, bool irq);49 int xhci_hc_gen_irq_code(irq_code_t *, const hw_res_list_parsed_t *);50 int xhci_hc_status(hcd_t *, uint32_t *);51 int xhci_hc_schedule(hcd_t *, usb_transfer_batch_t *);52 void xhci_hc_interrupt(hcd_t *, uint32_t);53 void xhci_hc_fini(hcd_t *);54 58 55 59 -
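The new dcbaa field is allocated in hc.c with one entry more than the number of slots. The reason (xHCI specification, section 6.1) is that entry 0 of the Device Context Base Address Array is not a device slot — it is reserved for the scratchpad buffer array pointer — so usable slots are indexed 1..max_slots. Sizing sketch as done in hc_init():

    /* Entry 0 is reserved by the spec (scratchpad array pointer),
     * device slots occupy indices 1 .. max_slots. */
    hc->max_slots = XHCI_REG_RD(hc->cap_regs, XHCI_CAP_MAX_SLOTS);
    hc->dcbaa = malloc32((1 + hc->max_slots) * sizeof(xhci_device_ctx_t));
    if (hc->dcbaa == NULL)
        return ENOMEM;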
uspace/drv/bus/usb/xhci/hw_struct/context.h
r62ba2cbe rcb89430 43 43 44 44 #include <stdint.h> 45 #include <common.h>45 #include "common.h" 46 46 47 47 /** … … 67 67 #define XHCI_EP_TR_DPTR(ctx) XHCI_DWORD_EXTRACT((ctx).data[2], 63, 4) 68 68 69 } ep_ctx_t __attribute__((packed));69 } __attribute__((packed)) xhci_ep_ctx_t; 70 70 71 71 /** … … 93 93 #define XHCI_SLOT_SLOT_STATE(ctx) XHCI_DWORD_EXTRACT((ctx).data[3], 31, 27) 94 94 95 } xhci_slot_ctx_t __attribute__((packed));95 } __attribute__((packed)) xhci_slot_ctx_t; 96 96 97 97 /** … … 101 101 xhci_slot_ctx_t slot_ctx; 102 102 xhci_ep_ctx_t endpoint_ctx [31]; 103 } xhci_device_ctx_t;103 } __attribute__((packed)) xhci_device_ctx_t; 104 104 105 105 /** … … 108 108 typedef struct xhci_stream_ctx { 109 109 uint64_t data [2]; 110 } xhci_stream_ctx_t __attribute__((packed));110 } __attribute__((packed)) xhci_stream_ctx_t; 111 111 112 112 #endif -
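The context structures' __attribute__((packed)) moved from after the typedef name to immediately after the closing brace. In the old position some compilers attach the attribute to the typedef identifier rather than to the struct type, or drop it with a warning, so the structures may silently keep their padding. A small illustration with invented structs:

    #include <stdint.h>

    typedef struct {
        uint8_t  tag;
        uint32_t value;
    } old_placement_t __attribute__((packed));   /* attribute trails the typedef name; may be ignored */

    typedef struct {
        uint8_t  tag;
        uint32_t value;
    } __attribute__((packed)) new_placement_t;   /* attribute clearly applies to the struct type */

    /* With the second form sizeof(new_placement_t) is 5 on common ABIs;
     * the first form can still come out as 8 when the compiler drops
     * the attribute (clang, for instance, warns about that placement). */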
uspace/drv/bus/usb/xhci/hw_struct/regs.h
r62ba2cbe rcb89430 43 43 #include "common.h" 44 44 45 /*46 */47 48 45 #define XHCI_PIO_CHANGE_UDELAY 5 49 50 #define host2xhci(size, val) host2uint##size##_t_le((val))51 #define xhci2host(size, val) uint##size##_t_le2host((val))52 46 53 47 /* … … 63 57 #define XHCI_REG_SET(reg_set, reg_spec, value) XHCI_REG_SET_INNER(reg_set, value, reg_spec) 64 58 #define XHCI_REG_CLR(reg_set, reg_spec, value) XHCI_REG_CLR_INNER(reg_set, value, reg_spec) 59 #define XHCI_REG_MASK(reg_spec) XHCI_REG_MASK_INNER(reg_spec) 60 #define XHCI_REG_SHIFT(reg_spec) XHCI_REG_SHIFT_INNER(reg_spec) 65 61 66 62 /* … … 78 74 #define XHCI_REG_CLR_INNER(reg_set, value, field, size, type, ...) \ 79 75 XHCI_REG_CLR_##type(&(reg_set)->field, value, size, ##__VA_ARGS__) 76 77 #define XHCI_REG_MASK_INNER(field, size, type, ...) \ 78 XHCI_REG_MASK_##type(size, ##__VA_ARGS__) 79 80 #define XHCI_REG_SHIFT_INNER(field, size, type, ...) \ 81 XHCI_REG_SHIFT_##type(size, ##__VA_ARGS__) 80 82 81 83 /* … … 86 88 #define XHCI_REG_SET_FIELD(ptr, value, size) pio_set_##size((ptr), host2xhci(size, value), XHCI_PIO_CHANGE_UDELAY); 87 89 #define XHCI_REG_CLR_FIELD(ptr, value, size) pio_clear_##size((ptr), host2xhci(size, value), XHCI_PIO_CHANGE_UDELAY); 90 #define XHCI_REG_MASK_FIELD(size) (~((uint##size##_t) 0)) 91 #define XHCI_REG_SHIFT_FIELD(size) (0) 88 92 89 93 /* … … 94 98 #define XHCI_REG_SET_FLAG(ptr, value, size, offset) XHCI_REG_SET_RANGE((ptr), (value), size, (offset), (offset)) 95 99 #define XHCI_REG_CLR_FLAG(ptr, value, size, offset) XHCI_REG_CLR_RANGE((ptr), (value), size, (offset), (offset)) 100 #define XHCI_REG_MASK_FLAG(size, offset) BIT_V(uint##size##_t, offset) 101 #define XHCI_REG_SHIFT_FLAG(size, offset) (offset) 96 102 97 103 /* … … 113 119 pio_clear_##size((ptr), host2xhci(size, BIT_RANGE_INSERT(uint##size##_t, (hi), (lo), (value))), \ 114 120 XHCI_PIO_CHANGE_UDELAY); 121 122 #define XHCI_REG_MASK_RANGE(size, hi, lo) BIT_RANGE(uint##size##_t, hi, lo) 123 #define XHCI_REG_SHIFT_RANGE(size, hi, lo) (lo) 115 124 116 125 /** HC capability registers: section 5.3 */ … … 215 224 #define XHCI_CAP_CIC hccparams2, 32, FLAG, 5 216 225 226 static inline unsigned xhci_get_max_spbuf(xhci_cap_regs_t *cap_regs) { 227 return XHCI_REG_RD(cap_regs, XHCI_CAP_MAX_SPBUF_HI) << 5 228 | XHCI_REG_RD(cap_regs, XHCI_CAP_MAX_SPBUF_LO); 229 } 230 217 231 /** 218 232 * XHCI Port Register Set: section 5.4, table 32 … … 395 409 #define XHCI_OP_CRCR_LO crcr_lo, 32, RANGE, 31, 6 396 410 #define XHCI_OP_CRCR_HI crcr_lo, 32, FIELD 411 #define XHCI_OP_DCBAAP_LO dcbaap_lo, 32, FIELD 412 #define XHCI_OP_DCBAAP_HI dcbaap_lo, 32, FIELD 413 #define XHCI_OP_MAX_SLOTS_EN config, 32, RANGE, 7, 0 414 #define XHCI_OP_U3E config, 32, FLAG, 8 415 #define XHCI_OP_CIE config, 32, FLAG, 9 416 417 /* Aggregating field to read & write whole status at once */ 418 #define XHCI_OP_STATUS usbsts, 32, RANGE, 12, 0 419 420 /* RW1C fields in usbsts */ 421 #define XHCI_STATUS_ACK_MASK 0x41C 397 422 398 423 /** … … 444 469 ioport32_t mfindex; 445 470 446 PADD32 [ 5];447 448 xhci_interrupter_regs_t ir [1024];471 PADD32 [7]; 472 473 xhci_interrupter_regs_t ir []; 449 474 } xhci_rt_regs_t; 450 475 … … 452 477 453 478 /** 454 * XHCI Doorbel Registers: section 5.6455 * 456 * These registers are write-only, thus convenience macros are useless.479 * XHCI Doorbell Registers: section 5.6 480 * 481 * These registers are to be written as a whole field. 457 482 */ 458 483 typedef ioport32_t xhci_doorbell_t; -
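The new XHCI_REG_MASK/XHCI_REG_SHIFT variants let code test flags in a register value that has already been read out, without another MMIO access. A usage sketch mirroring what hc.c does with the USBSTS snapshot, assuming an xhci_hc_t *hc in scope (all identifiers are the ones defined in this changeset):

    /* Read USBSTS once, acknowledge its RW1C bits, then inspect the
     * saved copy with generated masks instead of re-reading hardware. */
    uint32_t status = XHCI_REG_RD(hc->op_regs, XHCI_OP_STATUS);
    XHCI_REG_WR(hc->op_regs, XHCI_OP_STATUS, status & XHCI_STATUS_ACK_MASK);

    if (status & XHCI_REG_MASK(XHCI_OP_EINT)) {
        /* an interrupter posted an event */
    }
    if (status & XHCI_REG_MASK(XHCI_OP_HSE)) {
        /* host system error */
    }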
uspace/drv/bus/usb/xhci/hw_struct/trb.h
r62ba2cbe rcb89430 84 84 XHCI_TRB_TYPE_DEVICE_NOTIFICATION_EVENT, 85 85 XHCI_TRB_TYPE_MFINDEX_WRAP_EVENT, 86 87 XHCI_TRB_TYPE_MAX 86 88 }; 87 89 … … 93 95 xhci_dword_t status; 94 96 xhci_dword_t control; 95 } xhci_trb_t;97 } __attribute__((packed)) xhci_trb_t; 96 98 97 99 #define TRB_TYPE(trb) XHCI_DWORD_EXTRACT((trb).control, 15, 10) 100 #define TRB_CYCLE(trb) XHCI_DWORD_EXTRACT((trb).control, 0, 0) 98 101 #define TRB_LINK_TC(trb) XHCI_DWORD_EXTRACT((trb).control, 1, 1) 99 102 … … 114 117 static inline void xhci_trb_set_cycle(xhci_trb_t *trb, bool cycle) 115 118 { 116 xhci_dword_set_bits(&trb->control, cycle, 1, 1);119 xhci_dword_set_bits(&trb->control, cycle, 0, 0); 117 120 } 118 121 … … 137 140 } 138 141 139 140 142 /** 141 143 * Event Ring Segment Table: section 6.5 142 144 */ 143 145 typedef struct xhci_erst_entry { 144 xhci_qword_t rs_base_ptr; // sans bits 0-6 145 xhci_dword_t size; // only low 16 bits, the rest is reserved 146 xhci_qword_t rs_base_ptr; /* 64B aligned */ 147 xhci_dword_t size; /* only low 16 bits, the rest is RsvdZ */ 148 xhci_dword_t _reserved; 146 149 } xhci_erst_entry_t; 147 150 151 static inline void xhci_fill_erst_entry(xhci_erst_entry_t *entry, uintptr_t phys, int segments) 152 { 153 xhci_qword_set(&entry->rs_base_ptr, phys); 154 xhci_dword_set_bits(&entry->size, segments, 16, 0); 155 } 156 148 157 #endif -
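With TRB_CYCLE added (and xhci_trb_set_cycle now writing bit 0, where the cycle bit actually lives, instead of bit 1), decoding a TRB for logging becomes a one-liner. A usage sketch built only from helpers introduced in this changeset, assuming an xhci_hc_t *hc in scope:

    /* Fetch the next event, if any, and log its type and cycle bit. */
    xhci_trb_t ev;
    if (xhci_event_ring_dequeue(&hc->event_ring, &ev) == EOK) {
        usb_log_debug2("event TRB: %s, cycle %u",
            xhci_trb_str_type(TRB_TYPE(ev)), (unsigned) TRB_CYCLE(ev));
    }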
uspace/drv/bus/usb/xhci/trb_ring.c
r62ba2cbe rcb89430 32 32 #include <as.h> 33 33 #include <align.h> 34 #include <usb/debug.h> 35 #include <usb/host/utils/malloc32.h> 36 #include "hw_struct/trb.h" 34 37 #include "trb_ring.h" 35 38 … … 42 45 43 46 struct trb_segment { 44 xhci_trb_t trb_storage [SEGMENT_TRB_COUNT] __attribute__((packed));47 xhci_trb_t trb_storage [SEGMENT_TRB_COUNT]; 45 48 46 49 link_t segments_link; … … 59 62 } 60 63 64 /** 65 * Allocate and initialize new segment. 66 * 67 * TODO: When the HC supports 64-bit addressing, there's no need to restrict 68 * to DMAMEM_4GiB. 69 */ 61 70 static int trb_segment_allocate(trb_segment_t **segment) 62 71 { … … 72 81 memset(*segment, 0, PAGE_SIZE); 73 82 (*segment)->phys = phys; 83 84 usb_log_debug2("Allocated new ring segment."); 74 85 } 75 86 … … 77 88 } 78 89 79 int trb_ring_init(xhci_trb_ring_t *ring) 90 /** 91 * Initializes the ring with one segment. 92 * Event when it fails, the structure needs to be finalized. 93 */ 94 int xhci_trb_ring_init(xhci_trb_ring_t *ring, xhci_hc_t *hc) 80 95 { 81 96 struct trb_segment *segment; 82 97 int err; 83 98 99 list_initialize(&ring->segments); 100 84 101 if ((err = trb_segment_allocate(&segment)) != EOK) 85 102 return err; 86 103 87 list_initialize(&ring->segments);88 104 list_append(&segment->segments_link, &ring->segments); 105 ring->segment_count = 1; 89 106 90 107 xhci_trb_t *last = segment_end(segment) - 1; … … 97 114 ring->pcs = 1; 98 115 99 return EOK; 100 } 101 102 int trb_ring_fini(xhci_trb_ring_t *ring) 103 { 116 usb_log_debug("Initialized new TRB ring."); 117 118 return EOK; 119 } 120 121 int xhci_trb_ring_fini(xhci_trb_ring_t *ring) 122 { 123 list_foreach(ring->segments, segments_link, trb_segment_t, segment) 124 dmamem_unmap_anonymous(segment); 104 125 return EOK; 105 126 } … … 123 144 } 124 145 125 static uintptr_t xhci_trb_ring_enqueue_phys(xhci_trb_ring_t *ring)146 static uintptr_t trb_ring_enqueue_phys(xhci_trb_ring_t *ring) 126 147 { 127 148 uintptr_t trb_id = ring->enqueue_trb - segment_begin(ring->enqueue_segment); … … 129 150 } 130 151 131 int trb_ring_enqueue(xhci_trb_ring_t *ring, xhci_trb_t *td) 152 /** 153 * Enqueue a TD composed of TRBs. 154 * 155 * This will copy all TRBs chained together into the ring. The cycle flag in 156 * TRBs may be changed. 157 * 158 * The chained TRBs must be contiguous in memory, and must not contain Link TRBs. 159 * 160 * We cannot avoid the copying, because the TRB in ring should be updated atomically. 161 * 162 * @param td the first TRB of TD 163 * @return EOK on success, 164 * EAGAIN when the ring is too full to fit all TRBs (temporary) 165 */ 166 int xhci_trb_ring_enqueue(xhci_trb_ring_t *ring, xhci_trb_t *td) 132 167 { 133 168 xhci_trb_t * const saved_enqueue_trb = ring->enqueue_trb; … … 145 180 trb_ring_resolve_link(ring); 146 181 147 if ( xhci_trb_ring_enqueue_phys(ring) == ring->dequeue)182 if (trb_ring_enqueue_phys(ring) == ring->dequeue) 148 183 goto err_again; 149 184 } while (xhci_trb_is_chained(trb++)); … … 160 195 xhci_trb_copy(ring->enqueue_trb, trb); 161 196 197 usb_log_debug2("TRB ring(%p): Enqueued TRB %p", ring, trb); 162 198 ring->enqueue_trb++; 163 199 … … 166 202 xhci_trb_set_cycle(ring->enqueue_trb, ring->pcs); 167 203 168 if (TRB_LINK_TC(*ring->enqueue_trb)) 204 if (TRB_LINK_TC(*ring->enqueue_trb)) { 169 205 ring->pcs = !ring->pcs; 206 usb_log_debug2("TRB ring(%p): PCS toggled", ring); 207 } 170 208 171 209 trb_ring_resolve_link(ring); … … 180 218 return EAGAIN; 181 219 } 220 221 /** 222 * Initializes an event ring. 
223 * Even when it fails, the structure needs to be finalized. 224 */ 225 int xhci_event_ring_init(xhci_event_ring_t *ring, xhci_hc_t *hc) 226 { 227 struct trb_segment *segment; 228 int err; 229 230 list_initialize(&ring->segments); 231 232 if ((err = trb_segment_allocate(&segment)) != EOK) 233 return err; 234 235 list_append(&segment->segments_link, &ring->segments); 236 ring->segment_count = 1; 237 238 ring->dequeue_segment = segment; 239 ring->dequeue_trb = segment_begin(segment); 240 ring->dequeue_ptr = segment->phys; 241 242 ring->erst = malloc32(PAGE_SIZE); 243 if (ring->erst == NULL) 244 return ENOMEM; 245 246 xhci_fill_erst_entry(&ring->erst[0], segment->phys, SEGMENT_TRB_COUNT); 247 248 ring->ccs = 1; 249 250 usb_log_debug("Initialized event ring."); 251 252 return EOK; 253 } 254 255 int xhci_event_ring_fini(xhci_event_ring_t *ring) 256 { 257 list_foreach(ring->segments, segments_link, trb_segment_t, segment) 258 dmamem_unmap_anonymous(segment); 259 260 if (ring->erst) 261 free32(ring->erst); 262 263 return EOK; 264 } 265 266 static uintptr_t event_ring_dequeue_phys(xhci_event_ring_t *ring) 267 { 268 uintptr_t trb_id = ring->dequeue_trb - segment_begin(ring->dequeue_segment); 269 return ring->dequeue_segment->phys + trb_id * sizeof(xhci_trb_t); 270 } 271 272 /** 273 * Fill the event with next valid event from the ring. 274 * 275 * @param event pointer to event to be overwritten 276 * @return EOK on success, 277 * ENOENT when the ring is empty 278 */ 279 int xhci_event_ring_dequeue(xhci_event_ring_t *ring, xhci_trb_t *event) 280 { 281 /** 282 * The ERDP reported to the HC is a half-phase off the one we need to 283 * maintain. Therefore, we keep it extra. 284 */ 285 ring->dequeue_ptr = event_ring_dequeue_phys(ring); 286 287 if (TRB_CYCLE(*ring->dequeue_trb) != ring->ccs) 288 return ENOENT; /* The ring is empty. */ 289 290 memcpy(event, ring->dequeue_trb, sizeof(xhci_trb_t)); 291 292 ring->dequeue_trb++; 293 const unsigned index = ring->dequeue_trb - segment_begin(ring->dequeue_segment); 294 295 /* Wrapping around segment boundary */ 296 if (index >= SEGMENT_TRB_COUNT) { 297 link_t *next_segment = list_next(&ring->dequeue_segment->segments_link, &ring->segments); 298 299 /* Wrapping around table boundary */ 300 if (!next_segment) { 301 next_segment = list_first(&ring->segments); 302 ring->ccs = !ring->ccs; 303 } 304 305 ring->dequeue_segment = list_get_instance(next_segment, trb_segment_t, segments_link); 306 ring->dequeue_trb = segment_begin(ring->dequeue_segment); 307 } 308 309 310 return EOK; 311 } -
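xhci_event_ring_dequeue() returns ENOENT as soon as the cycle bit of the next TRB no longer matches the ring's CCS, i.e. the ring is drained. A consumer loop one might build on top of it looks like the sketch below; note that the commit's own hc_run_event_ring() processes just one TRB per interrupt, so the loop and the use of dequeue_ptr for ERDP are assumptions of this sketch (an xhci_hc_t *hc is assumed in scope):

    xhci_interrupter_regs_t *intr0 = &hc->rt_regs->ir[0];
    xhci_trb_t trb;

    /* Consume everything the controller has produced so far. */
    while (xhci_event_ring_dequeue(&hc->event_ring, &trb) == EOK) {
        usb_log_debug2("event: %s", xhci_trb_str_type(TRB_TYPE(trb)));
        /* ... dispatch on the TRB type ... */
    }

    /* Publish how far we got, so the ring does not appear full. */
    XHCI_REG_WR(intr0, XHCI_INTR_ERDP_LO, LOWER32(hc->event_ring.dequeue_ptr));
    XHCI_REG_WR(intr0, XHCI_INTR_ERDP_HI, UPPER32(hc->event_ring.dequeue_ptr));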
uspace/drv/bus/usb/xhci/trb_ring.h
r62ba2cbe rcb89430 45 45 #include <adt/list.h> 46 46 #include <libarch/config.h> 47 #include "hw_struct/trb.h"48 47 49 48 typedef struct trb_segment trb_segment_t; 49 typedef struct xhci_hc xhci_hc_t; 50 typedef struct xhci_trb xhci_trb_t; 51 typedef struct xhci_erst_entry xhci_erst_entry_t; 50 52 51 53 /** … … 54 56 typedef struct xhci_trb_ring { 55 57 list_t segments; /* List of assigned segments */ 58 int segment_count; /* Number of segments assigned */ 56 59 57 60 /* … … 67 70 } xhci_trb_ring_t; 68 71 69 /** 70 * Initializes the ring. 71 * Allocates one page as the first segment. 72 */ 73 int trb_ring_init(xhci_trb_ring_t *ring); 74 int trb_ring_fini(xhci_trb_ring_t *ring); 72 int xhci_trb_ring_init(xhci_trb_ring_t *, xhci_hc_t *); 73 int xhci_trb_ring_fini(xhci_trb_ring_t *); 74 int xhci_trb_ring_enqueue(xhci_trb_ring_t *, xhci_trb_t *); 75 75 76 76 /** 77 * Enqueue a TD composed of TRBs. 78 * 79 * This will copy all TRBs chained together into the ring. The cycle flag in 80 * TRBs may be changed. 81 * 82 * The chained TRBs must be contiguous in memory, and must not contain Link TRBs. 83 * 84 * We cannot avoid the copying, because the TRB in ring should be updated atomically. 85 * 86 * @param td the first TRB of TD 87 * @return EOK on success, 88 * EAGAIN when the ring is too full to fit all TRBs (temporary) 77 * Get the initial value to fill into CRCR. 89 78 */ 90 int trb_ring_enqueue(xhci_trb_ring_t *ring, xhci_trb_t *td); 79 static inline uintptr_t xhci_trb_ring_get_dequeue_ptr(xhci_trb_ring_t *ring) 80 { 81 return ring->dequeue; 82 } 91 83 92 84 /** … … 94 86 * pointer inside the ring. Otherwise, the ring will soon show up as full. 95 87 */ 96 void trb_ring_update_dequeue(xhci_trb_ring_t *ring, uintptr_t dequeue); 88 void xhci_trb_ring_update_dequeue(xhci_trb_ring_t *, uintptr_t); 89 uintptr_t xhci_trb_ring_get_dequeue_ptr(xhci_trb_ring_t *); 97 90 91 /** 92 * A TRB ring of which the software is a consumer (event rings). 93 */ 94 typedef struct xhci_event_ring { 95 list_t segments; /* List of assigned segments */ 96 int segment_count; /* Number of segments assigned */ 97 98 trb_segment_t *dequeue_segment; /* Current segment of the dequeue ptr */ 99 xhci_trb_t *dequeue_trb; /* Next TRB to be processed */ 100 uintptr_t dequeue_ptr; /* Physical address of the ERDP to be reported to the HC */ 101 102 xhci_erst_entry_t *erst; /* ERST given to the HC */ 103 104 bool ccs; /* Consumer Cycle State: section 4.9.2 */ 105 } xhci_event_ring_t; 106 107 int xhci_event_ring_init(xhci_event_ring_t *, xhci_hc_t *); 108 int xhci_event_ring_fini(xhci_event_ring_t *); 109 int xhci_event_ring_dequeue(xhci_event_ring_t *, xhci_trb_t *); 98 110 99 111 #endif -
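The producer-side API declared above is exercised in hc.c; the following sketch mirrors send_no_op_command() rather than adding new functionality, and shows the typical sequence — build a command TRB, enqueue it, then ring doorbell 0 to tell the controller the command ring has work (an xhci_hc_t *hc is assumed in scope):

    /* Build a No Op Command TRB and hand it to the controller. */
    xhci_trb_t trb;
    memset(&trb, 0, sizeof(trb));
    trb.control = host2xhci(32, XHCI_TRB_TYPE_NO_OP_CMD << 10);

    xhci_trb_ring_enqueue(&hc->command_ring, &trb);

    /* Doorbell 0 with target 0 = "command ring has new work". */
    pio_write_32(&hc->db_arry[0], host2xhci(32, 0));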
uspace/lib/usbhost/src/ddf_helpers.c
r62ba2cbe  rcb89430
781  781       ret = hcd_ddf_enable_interrupts(device);
782  782       if (ret != EOK) {
783       -        usb_log_error("Failed to register interrupt handler: %s.\n",
     783  +        usb_log_error("Failed to enable interrupts: %s.\n",
784  784               str_error(ret));
785  785           unregister_interrupt_handler(device, irq);