Lines matching +full:generic +full:- +full:xhci
(matched lines from drivers/usb/host/xhci-dbgcap.c; lines that did not match the query are elided as "...")

// SPDX-License-Identifier: GPL-2.0
/*
 * xhci-dbgcap.c - xHCI debug capability support
 ...
 */

#include <linux/dma-mapping.h>
...
#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-dbgcap.h"
/* in dbc_free_ctx() */
	dma_free_coherent(dev, ctx->size, ctx->bytes, ctx->dma);
/* in dbc_ring_free() */
	if (ring->first_seg && ring->first_seg->trbs) {
		dma_free_coherent(dev, TRB_SEGMENT_SIZE,
				  ring->first_seg->trbs,
				  ring->first_seg->dma);
		kfree(ring->first_seg);
	}
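
/*
 * xhci_dbc_populate_strings() fills the DbC string buffer with the
 * serial, product, and manufacturer USB string descriptors plus
 * string0 carrying the US-English LANGID (0x09, 0x04), accumulating
 * the descriptor lengths in string_length.
 */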
/* in xhci_dbc_populate_strings() */
	/* Serial string: */
	s_desc = (struct usb_string_descriptor *)strings->serial;
	utf8s_to_utf16s(DBC_STRING_SERIAL, strlen(DBC_STRING_SERIAL),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength = (strlen(DBC_STRING_SERIAL) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;
	string_length = s_desc->bLength;
	...
	/* Product string: */
	s_desc = (struct usb_string_descriptor *)strings->product;
	utf8s_to_utf16s(DBC_STRING_PRODUCT, strlen(DBC_STRING_PRODUCT),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength = (strlen(DBC_STRING_PRODUCT) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;
	string_length += s_desc->bLength;
	...
	/* Manufacturer string: */
	s_desc = (struct usb_string_descriptor *)strings->manufacturer;
	utf8s_to_utf16s(DBC_STRING_MANUFACTURER,
			strlen(DBC_STRING_MANUFACTURER),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength = (strlen(DBC_STRING_MANUFACTURER) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;
	string_length += s_desc->bLength;
	...
	/* String0: */
	strings->string0[0] = 4;
	strings->string0[1] = USB_DT_STRING;
	strings->string0[2] = 0x09;
	strings->string0[3] = 0x04;
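
/*
 * xhci_dbc_init_contexts() points the DbC info context at the string
 * descriptors' DMA addresses, programs both bulk endpoint contexts
 * (1024-byte max packet, max burst read from the DbC control
 * register), then writes the context pointer and the device
 * descriptor info into the DCCP and DEVINFO1/2 registers.
 */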
/* in xhci_dbc_init_contexts() */
	/* Populate info Context: */
	info = (struct dbc_info_context *)dbc->ctx->bytes;
	dma = dbc->string_dma;
	info->string0 = cpu_to_le64(dma);
	info->manufacturer = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH);
	info->product = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 2);
	info->serial = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
	info->length = cpu_to_le32(string_length);

	/* Populate bulk out endpoint context: */
	...
	max_burst = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
	...
	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_out->cycle_state);

	/* Populate bulk in endpoint context: */
	...
	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_in->cycle_state);

	/* Set DbC context and info registers: */
	lo_hi_writeq(dbc->ctx->dma, &dbc->regs->dccp);
	...
	writel(dev_info, &dbc->regs->devinfo1);
	...
	writel(dev_info, &dbc->regs->devinfo2);
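
/*
 * xhci_dbc_giveback() completes a request: it is unlinked from the
 * pending list, its buffer is DMA-unmapped, and its ->complete()
 * callback runs with dbc->lock dropped, hence the
 * __releases/__acquires annotations below.
 */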
/* in xhci_dbc_giveback() */
	__releases(&dbc->lock)
	__acquires(&dbc->lock)
{
	struct xhci_dbc *dbc = req->dbc;
	struct device *dev = dbc->dev;

	list_del_init(&req->list_pending);
	req->trb_dma = 0;
	req->trb = NULL;

	if (req->status == -EINPROGRESS)
		req->status = status;
	...
	dma_unmap_single(dev,
			 req->dma,
			 req->length,
			 ...

	/* Give back the transfer request: */
	spin_unlock(&dbc->lock);
	req->complete(dbc, req);
	spin_lock(&dbc->lock);
/* in xhci_dbc_flush_single_request() */
	union xhci_trb *trb = req->trb;

	trb->generic.field[0] = 0;
	trb->generic.field[1] = 0;
	trb->generic.field[2] = 0;
	trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
	trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));

	xhci_dbc_giveback(req, -ESHUTDOWN);
/* in xhci_dbc_flush_endpoint_requests() */
	list_for_each_entry_safe(req, tmp, &dep->list_pending, list_pending)
		xhci_dbc_flush_single_request(req);
/* in xhci_dbc_flush_requests() */
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_OUT]);
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_IN]);
/* in dbc_alloc_request() */
	req->dbc = dbc;
	INIT_LIST_HEAD(&req->list_pending);
	INIT_LIST_HEAD(&req->list_pool);
	req->direction = direction;
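
/*
 * xhci_dbc_queue_trb() writes one TRB at the enqueue pointer and
 * advances it; when the next TRB is the link TRB, the pointer wraps
 * to the start of the segment and the producer cycle state toggles.
 */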
/* in xhci_dbc_queue_trb() */
	trb = ring->enqueue;
	trb->generic.field[0] = cpu_to_le32(field1);
	trb->generic.field[1] = cpu_to_le32(field2);
	trb->generic.field[2] = cpu_to_le32(field3);
	trb->generic.field[3] = cpu_to_le32(field4);

	trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic);

	ring->num_trbs_free--;
	next = ++(ring->enqueue);
	if (TRB_TYPE_LINK_LE32(next->link.control)) {
		next->link.control ^= cpu_to_le32(TRB_CYCLE);
		ring->enqueue = ring->enq_seg->trbs;
		ring->cycle_state ^= 1;
	}
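
/*
 * xhci_dbc_queue_bulk_tx() checks for ring space, queues a transfer
 * TRB for the request, hands ownership to the controller by flipping
 * the TRB cycle bit last, and rings the DbC doorbell for the
 * endpoint's direction.
 */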
/* in xhci_dbc_queue_bulk_tx() */
	struct xhci_dbc *dbc = req->dbc;
	struct xhci_ring *ring = dep->ring;
	...
	num_trbs = count_trbs(req->dma, req->length);
	...
	if (ring->num_trbs_free < num_trbs)
		return -EBUSY;

	addr = req->dma;
	trb = ring->enqueue;
	cycle = ring->cycle_state;
	length = TRB_LEN(req->length);
	...
	req->trb = ring->enqueue;
	req->trb_dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
	...
	if (cycle)
		trb->generic.field[3] |= cpu_to_le32(TRB_CYCLE);
	else
		trb->generic.field[3] &= cpu_to_le32(~TRB_CYCLE);

	writel(DBC_DOOR_BELL_TARGET(dep->direction), &dbc->regs->doorbell);
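
/*
 * dbc_ep_do_queue() validates the request, DMA-maps its buffer,
 * queues the transfer TRBs, and links the request onto the
 * endpoint's pending list; mapping or queuing failures unwind and
 * return -EFAULT.
 */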
/* in dbc_ep_do_queue() */
	struct xhci_dbc *dbc = req->dbc;
	struct device *dev = dbc->dev;
	struct dbc_ep *dep = &dbc->eps[req->direction];

	if (!req->length || !req->buf)
		return -EINVAL;

	req->actual = 0;
	req->status = -EINPROGRESS;

	req->dma = dma_map_single(dev,
				  req->buf,
				  req->length,
				  ...
	if (dma_mapping_error(dev, req->dma)) {
		dev_err(dbc->dev, "failed to map buffer\n");
		return -EFAULT;
	}

	ret = xhci_dbc_queue_bulk_tx(dep, req);
	if (ret) {
		dev_err(dbc->dev, "failed to queue trbs\n");
		dma_unmap_single(dev,
				 req->dma,
				 req->length,
				 ...
		return -EFAULT;
	}

	list_add_tail(&req->list_pending, &dep->list_pending);
/* in dbc_ep_queue() */
	struct xhci_dbc *dbc = req->dbc;
	int ret = -ESHUTDOWN;

	if (!dbc)
		return -ENODEV;

	if (req->direction != BULK_IN &&
	    req->direction != BULK_OUT)
		return -EINVAL;

	spin_lock_irqsave(&dbc->lock, flags);
	if (dbc->state == DS_CONFIGURED)
		ret = dbc_ep_do_queue(req);
	spin_unlock_irqrestore(&dbc->lock, flags);

	mod_delayed_work(system_wq, &dbc->event_work, 0);
/* in xhci_dbc_do_eps_init() */
	dep = &dbc->eps[direction];
	dep->dbc = dbc;
	dep->direction = direction;
	dep->ring = direction ? dbc->ring_in : dbc->ring_out;

	INIT_LIST_HEAD(&dep->list_pending);
/* in xhci_dbc_eps_exit() */
	memset(dbc->eps, 0, sizeof(struct dbc_ep) * ARRAY_SIZE(dbc->eps));
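
/*
 * dbc_erst_alloc() builds a one-entry event ring segment table
 * covering the single TRBS_PER_SEGMENT-sized event ring segment.
 */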
/* in dbc_erst_alloc() */
	erst->entries = dma_alloc_coherent(dev, sizeof(struct xhci_erst_entry),
					   &erst->erst_dma_addr, flags);
	if (!erst->entries)
		return -ENOMEM;

	erst->num_entries = 1;
	erst->entries[0].seg_addr = cpu_to_le64(evt_ring->first_seg->dma);
	erst->entries[0].seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
	erst->entries[0].rsvd = 0;
/* in dbc_erst_free() */
	if (erst->entries)
		dma_free_coherent(dev, sizeof(struct xhci_erst_entry),
				  erst->entries, erst->erst_dma_addr);
	erst->entries = NULL;
/* in dbc_alloc_ctx() */
	/* xHCI spec 7.6.9: all three contexts (info, ep-out, ep-in), 64 bytes each */
	ctx->size = 3 * DBC_CONTEXT_SIZE;
	ctx->bytes = dma_alloc_coherent(dev, ctx->size, &ctx->dma, flags);
	if (!ctx->bytes) {
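
/*
 * xhci_dbc_ring_alloc() allocates a single-segment ring; for
 * non-event rings the last TRB is a link TRB pointing back at the
 * segment itself with LINK_TOGGLE set, which makes the ring circular.
 */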
/* in xhci_dbc_ring_alloc() */
	ring->num_segs = 1;
	ring->type = type;
	...
	ring->first_seg = seg;
	ring->last_seg = seg;
	seg->next = seg;

	seg->trbs = dma_alloc_coherent(dev, TRB_SEGMENT_SIZE, &dma, flags);
	if (!seg->trbs)
		...

	seg->dma = dma;

	/* Only the event ring does not use a link TRB: */
	if (type != TYPE_EVENT) {
		union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];

		trb->link.segment_ptr = cpu_to_le64(dma);
		trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
	}
	INIT_LIST_HEAD(&ring->td_list);
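
/*
 * xhci_dbc_mem_init() allocates the event, bulk-in, and bulk-out
 * rings, the ERST, the context, and the string buffer, programs the
 * event ring registers (size, base address, dequeue pointer),
 * populates the string descriptors, and moves the DbC to
 * DS_INITIALIZED; each failure unwinds whatever was allocated
 * before it.
 */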
/* in xhci_dbc_mem_init() */
	struct device *dev = dbc->dev;

	/* Allocate various rings for events and transfers: */
	dbc->ring_evt = xhci_dbc_ring_alloc(dev, TYPE_EVENT, flags);
	if (!dbc->ring_evt)
		...
	dbc->ring_in = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
	if (!dbc->ring_in)
		...
	dbc->ring_out = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
	if (!dbc->ring_out)
		...
	ret = dbc_erst_alloc(dev, dbc->ring_evt, &dbc->erst, flags);
	if (ret)
		...
	dbc->ctx = dbc_alloc_ctx(dev, flags); /* was sysdev, and is still */
	if (!dbc->ctx)
		...
	dbc->string_size = sizeof(struct dbc_str_descs);
	dbc->string = dma_alloc_coherent(dev, dbc->string_size,
					 &dbc->string_dma, flags);
	if (!dbc->string)
		...

	/* Setup event ring: */
	writel(dbc->erst.erst_size, &dbc->regs->ersts);
	lo_hi_writeq(dbc->erst.erst_dma_addr, &dbc->regs->erstba);
	deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
				   dbc->ring_evt->dequeue);
	lo_hi_writeq(deq, &dbc->regs->erdp);
	...
	string_length = xhci_dbc_populate_strings(dbc->string);
	...
	dbc->state = DS_INITIALIZED;
	...
	/* error unwind, in reverse order of allocation (labels elided): */
	dbc_free_ctx(dev, dbc->ctx);
	dbc->ctx = NULL;
	...
	dbc_erst_free(dev, &dbc->erst);
	...
	dbc_ring_free(dev, dbc->ring_out);
	dbc->ring_out = NULL;
	...
	dbc_ring_free(dev, dbc->ring_in);
	dbc->ring_in = NULL;
	...
	dbc_ring_free(dev, dbc->ring_evt);
	dbc->ring_evt = NULL;
	...
	return -ENOMEM;
/* in xhci_dbc_mem_cleanup() */
	if (dbc->string) {
		dma_free_coherent(dbc->dev, dbc->string_size,
				  dbc->string, dbc->string_dma);
		dbc->string = NULL;
	}

	dbc_free_ctx(dbc->dev, dbc->ctx);
	dbc->ctx = NULL;

	dbc_erst_free(dbc->dev, &dbc->erst);
	dbc_ring_free(dbc->dev, dbc->ring_out);
	dbc_ring_free(dbc->dev, dbc->ring_in);
	dbc_ring_free(dbc->dev, dbc->ring_evt);
	dbc->ring_in = NULL;
	dbc->ring_out = NULL;
	dbc->ring_evt = NULL;
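
/*
 * xhci_do_dbc_start() zeroes the control register and handshakes
 * until the controller acknowledges, then sets the enable bits and
 * handshakes again before entering DS_ENABLED.
 */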
/* in xhci_do_dbc_start() */
	if (dbc->state != DS_DISABLED)
		return -EINVAL;

	writel(0, &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     0, 1000);
	...
	ctrl = readl(&dbc->regs->control);
	writel(ctrl | DBC_CTRL_DBC_ENABLE | DBC_CTRL_PORT_ENABLE,
	       &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     DBC_CTRL_DBC_ENABLE, 1000);
	...
	dbc->state = DS_ENABLED;
/* in xhci_do_dbc_stop() */
	if (dbc->state == DS_DISABLED)
		return -1;

	writel(0, &dbc->regs->control);
	dbc->state = DS_DISABLED;
/* in xhci_dbc_start() */
	pm_runtime_get_sync(dbc->dev); /* note this was self.controller */

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_start(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);
	...
		pm_runtime_put(dbc->dev); /* note this was self.controller */
	...
	return mod_delayed_work(system_wq, &dbc->event_work, 1);
/* in xhci_dbc_stop() */
	switch (dbc->state) {
	...
		if (dbc->driver->disconnect)
			dbc->driver->disconnect(dbc);
	...
	}

	cancel_delayed_work_sync(&dbc->event_work);

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_stop(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);
	...
		pm_runtime_put_sync(dbc->dev); /* note, was self.controller */
/* in dbc_handle_port_status() */
	portsc = readl(&dbc->regs->portsc);
	if (portsc & DBC_PORTSC_CONN_CHANGE)
		dev_info(dbc->dev, "DbC port connect change\n");
	if (portsc & DBC_PORTSC_RESET_CHANGE)
		dev_info(dbc->dev, "DbC port reset change\n");
	if (portsc & DBC_PORTSC_LINK_CHANGE)
		dev_info(dbc->dev, "DbC port link status change\n");
	if (portsc & DBC_PORTSC_CONFIG_CHANGE)
		dev_info(dbc->dev, "DbC config error change\n");

	writel(portsc & ~DBC_PORTSC_RESET_CHANGE, &dbc->regs->portsc);
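
/*
 * dbc_handle_xfer_event() decodes the completion code, residual
 * length, and endpoint ID from the event TRB, finds the pending
 * request whose TRB DMA address matches the event's buffer pointer,
 * and gives it back with the number of bytes actually transferred.
 */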
/* in dbc_handle_xfer_event() */
	comp_code = GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
	remain_length = EVENT_TRB_LEN(le32_to_cpu(event->generic.field[2]));
	ep_id = TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
	...
	ring = dep->ring;
	...
		dev_warn(dbc->dev, "tx error %d detected\n", comp_code);
		status = -comp_code;
		break;
	default:
		dev_err(dbc->dev, "unknown tx error %d\n", comp_code);
		status = -comp_code;
		break;
	}

	/* Match the pending request: */
	list_for_each_entry(r, &dep->list_pending, list_pending) {
		if (r->trb_dma == event->trans_event.buffer) {
			req = r;
			break;
		}
	}
	...
	dev_warn(dbc->dev, "no matched request\n");
	...
	trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);

	ring->num_trbs_free++;
	req->actual = req->length - remain_length;
/* in inc_evt_deq() */
	/* If on the last TRB of the segment go back to the beginning */
	if (ring->dequeue == &ring->deq_seg->trbs[TRBS_PER_SEGMENT - 1]) {
		ring->cycle_state ^= 1;
		ring->dequeue = ring->deq_seg->trbs;
		return;
	}
	ring->dequeue++;
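
/*
 * xhci_dbc_do_handle_events() runs the DbC state machine: it polls
 * PORTSC/CTRL for connect, configure, unplug, reset, and stall
 * transitions, then consumes event TRBs while their cycle bit matches
 * the event ring's cycle state, dispatching port status and transfer
 * events before updating the ERDP register.
 */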
/* in xhci_dbc_do_handle_events() */
	switch (dbc->state) {
	...
	case DS_ENABLED:
		portsc = readl(&dbc->regs->portsc);
		if (portsc & DBC_PORTSC_CONN_STATUS) {
			dbc->state = DS_CONNECTED;
			dev_info(dbc->dev, "DbC connected\n");
		}
		...
	case DS_CONNECTED:
		ctrl = readl(&dbc->regs->control);
		if (ctrl & DBC_CTRL_DBC_RUN) {
			dbc->state = DS_CONFIGURED;
			dev_info(dbc->dev, "DbC configured\n");
			portsc = readl(&dbc->regs->portsc);
			writel(portsc, &dbc->regs->portsc);
		...
	case DS_CONFIGURED:
		/* Handle cable unplug event: */
		portsc = readl(&dbc->regs->portsc);
		...
			dev_info(dbc->dev, "DbC cable unplugged\n");
			dbc->state = DS_ENABLED;
		...
		/* Handle debug port reset event: */
		...
			dev_info(dbc->dev, "DbC port reset\n");
			writel(portsc, &dbc->regs->portsc);
			dbc->state = DS_ENABLED;
		...
		/* Handle endpoint stall event: */
		ctrl = readl(&dbc->regs->control);
		...
			dev_info(dbc->dev, "DbC Endpoint stall\n");
			dbc->state = DS_STALLED;
			...
			writel(ctrl, &dbc->regs->control);
			ctrl = readl(&dbc->regs->control);
		...
	case DS_STALLED:
		ctrl = readl(&dbc->regs->control);
		...
			dbc->state = DS_CONFIGURED;
		...
	default:
		dev_err(dbc->dev, "Unknown DbC state %d\n", dbc->state);
		...
	}

	/* Handle the events in the event ring: */
	evt = dbc->ring_evt->dequeue;
	while ((le32_to_cpu(evt->event_cmd.flags) & TRB_CYCLE) ==
	       dbc->ring_evt->cycle_state) {
		...
		trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic);

		switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) {
		...
		}

		inc_evt_deq(dbc->ring_evt);

		evt = dbc->ring_evt->dequeue;
		...
	}

	/* Update event ring dequeue pointer: */
	deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
				   dbc->ring_evt->dequeue);
	lo_hi_writeq(deq, &dbc->regs->erdp);
/* in xhci_dbc_handle_events() */
	spin_lock_irqsave(&dbc->lock, flags);
	evtr = xhci_dbc_do_handle_events(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);
	...
		if (dbc->driver->configure)
			dbc->driver->configure(dbc);
	...
		if (dbc->driver->disconnect)
			dbc->driver->disconnect(dbc);
	...
		dev_info(dbc->dev, "stop handling dbc events\n");
	...
	mod_delayed_work(system_wq, &dbc->event_work, 1);
static void xhci_do_dbc_exit(struct xhci_hcd *xhci)
{
	unsigned long flags;

	spin_lock_irqsave(&xhci->lock, flags);
	kfree(xhci->dbc);
	xhci->dbc = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
}
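
/*
 * xhci_do_dbc_init() locates the debug capability in the xHCI
 * extended capabilities, refuses to attach if DbC is already enabled
 * or already claimed, and otherwise allocates the xhci_dbc structure
 * and hooks it up to the host controller.
 */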
static int xhci_do_dbc_init(struct xhci_hcd *xhci)
{
	...
	base = &xhci->cap_regs->hc_capbase;
	dbc_cap_offs = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_DEBUG);
	if (!dbc_cap_offs)
		return -ENODEV;

	dbc = kzalloc(sizeof(*dbc), GFP_KERNEL);
	if (!dbc)
		return -ENOMEM;

	dbc->regs = base + dbc_cap_offs;

	/* Don't touch DbC from the xhci driver if it is already in use: */
	reg = readl(&dbc->regs->control);
	if (reg & DBC_CTRL_DBC_ENABLE) {
		kfree(dbc);
		return -EBUSY;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	if (xhci->dbc) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		kfree(dbc);
		return -EBUSY;
	}
	xhci->dbc = dbc;
	spin_unlock_irqrestore(&xhci->lock, flags);

	dbc->xhci = xhci;
	dbc->dev = xhci_to_hcd(xhci)->self.sysdev;
	INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
	spin_lock_init(&dbc->lock);
/* in dbc_show() */
	struct xhci_hcd *xhci;
	...
	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	switch (dbc->state) {
/* in dbc_store() */
	struct xhci_hcd *xhci;
	...
	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;
	...
	else
		return -EINVAL;
int xhci_dbc_init(struct xhci_hcd *xhci)
{
	...
	struct device *dev = xhci_to_hcd(xhci)->self.controller;

	ret = xhci_do_dbc_init(xhci);
	...
	ret = xhci_dbc_tty_probe(xhci);
	...
	xhci_dbc_tty_remove(xhci->dbc);
	...
	xhci_do_dbc_exit(xhci);
void xhci_dbc_exit(struct xhci_hcd *xhci)
{
	struct device *dev = xhci_to_hcd(xhci)->self.controller;

	if (!xhci->dbc)
		return;
	...
	xhci_dbc_tty_remove(xhci->dbc);
	xhci_dbc_stop(xhci->dbc);
	xhci_do_dbc_exit(xhci);
}
int xhci_dbc_suspend(struct xhci_hcd *xhci)
{
	struct xhci_dbc *dbc = xhci->dbc;
	...
	if (dbc->state == DS_CONFIGURED)
		dbc->resume_required = 1;
	...
int xhci_dbc_resume(struct xhci_hcd *xhci)
{
	...
	struct xhci_dbc *dbc = xhci->dbc;
	...
	if (dbc->resume_required) {
		dbc->resume_required = 0;
		xhci_dbc_start(dbc);
	}