// SPDX-License-Identifier: GPL-2.0
/**
 * xhci-dbgcap.c - xHCI debug capability support
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/nls.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-dbgcap.h"

static inline void *
dbc_dma_alloc_coherent(struct xhci_hcd *xhci, size_t size,
		       dma_addr_t *dma_handle, gfp_t flags)
{
	void *vaddr;

	vaddr = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
				   size, dma_handle, flags);
	/* dma_alloc_coherent() may fail; don't memset() a NULL buffer. */
	if (vaddr)
		memset(vaddr, 0, size);

	return vaddr;
}

static inline void
dbc_dma_free_coherent(struct xhci_hcd *xhci, size_t size,
		      void *cpu_addr, dma_addr_t dma_handle)
{
	dma_free_coherent(xhci_to_hcd(xhci)->self.sysdev,
			  size, cpu_addr, dma_handle);
}

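/*
 * Populate the serial, product and manufacturer string descriptors and
 * string0 (the LANGID descriptor; 0x0409 is US English). The returned
 * value packs the four descriptor lengths one per byte, matching the
 * layout of the DbC Info Context's length field.
 */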
static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
{
	struct usb_string_descriptor *s_desc;
	u32 string_length;

	/* Serial string: */
	s_desc = (struct usb_string_descriptor *)strings->serial;
	utf8s_to_utf16s(DBC_STRING_SERIAL, strlen(DBC_STRING_SERIAL),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength		= (strlen(DBC_STRING_SERIAL) + 1) * 2;
	s_desc->bDescriptorType	= USB_DT_STRING;
	string_length		= s_desc->bLength;
	string_length		<<= 8;

	/* Product string: */
	s_desc = (struct usb_string_descriptor *)strings->product;
	utf8s_to_utf16s(DBC_STRING_PRODUCT, strlen(DBC_STRING_PRODUCT),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength		= (strlen(DBC_STRING_PRODUCT) + 1) * 2;
	s_desc->bDescriptorType	= USB_DT_STRING;
	string_length		+= s_desc->bLength;
	string_length		<<= 8;

	/* Manufacturer string: */
	s_desc = (struct usb_string_descriptor *)strings->manufacturer;
	utf8s_to_utf16s(DBC_STRING_MANUFACTURER,
			strlen(DBC_STRING_MANUFACTURER),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength		= (strlen(DBC_STRING_MANUFACTURER) + 1) * 2;
	s_desc->bDescriptorType	= USB_DT_STRING;
	string_length		+= s_desc->bLength;
	string_length		<<= 8;

	/* String0: */
	strings->string0[0]	= 4;
	strings->string0[1]	= USB_DT_STRING;
	strings->string0[2]	= 0x09;
	strings->string0[3]	= 0x04;
	string_length		+= 4;

	return string_length;
}

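/*
 * Fill in the DbC Info Context with the DMA addresses of the string
 * descriptors, set up the bulk-out and bulk-in endpoint contexts, and
 * point the controller at them through the DbC context pointer (DCCP)
 * and device-info registers.
 */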
static void xhci_dbc_init_contexts(struct xhci_hcd *xhci, u32 string_length)
{
	struct xhci_dbc *dbc = xhci->dbc;
	struct dbc_info_context *info;
	struct xhci_ep_ctx *ep_ctx;
	u32 dev_info;
	dma_addr_t deq, dma;
	unsigned int max_burst;

	if (!dbc)
		return;

	/* Populate info Context: */
	info = (struct dbc_info_context *)dbc->ctx->bytes;
	dma = dbc->string_dma;
	info->string0		= cpu_to_le64(dma);
	info->manufacturer	= cpu_to_le64(dma + DBC_MAX_STRING_LENGTH);
	info->product		= cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 2);
	info->serial		= cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
	info->length		= cpu_to_le32(string_length);

	/* Populate bulk out endpoint context: */
	ep_ctx = dbc_bulkout_ctx(dbc);
	max_burst = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
	deq = dbc_bulkout_enq(dbc);
	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_out->cycle_state);

	/* Populate bulk in endpoint context: */
	ep_ctx = dbc_bulkin_ctx(dbc);
	deq = dbc_bulkin_enq(dbc);
	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_in->cycle_state);

	/* Set DbC context and info registers: */
	xhci_write_64(xhci, dbc->ctx->dma, &dbc->regs->dccp);

	/* writel() byte-swaps as needed; don't cpu_to_le32() the value. */
	dev_info = (DBC_VENDOR_ID << 16) | DBC_PROTOCOL;
	writel(dev_info, &dbc->regs->devinfo1);

	dev_info = (DBC_DEVICE_REV << 16) | DBC_PRODUCT_ID;
	writel(dev_info, &dbc->regs->devinfo2);
}

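/*
 * Complete a request and hand it back to its submitter. Called with
 * dbc->lock held; the lock is dropped around the ->complete() callback
 * so that the callback may requeue requests.
 */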
static void xhci_dbc_giveback(struct dbc_request *req, int status)
	__releases(&dbc->lock)
	__acquires(&dbc->lock)
{
	struct dbc_ep *dep = req->dep;
	struct xhci_dbc *dbc = dep->dbc;
	struct xhci_hcd *xhci = dbc->xhci;
	struct device *dev = xhci_to_hcd(dbc->xhci)->self.sysdev;

	list_del_init(&req->list_pending);
	req->trb_dma = 0;
	req->trb = NULL;

	if (req->status == -EINPROGRESS)
		req->status = status;

	trace_xhci_dbc_giveback_request(req);

	dma_unmap_single(dev,
			 req->dma,
			 req->length,
			 dbc_ep_dma_direction(dep));

	/* Give back the transfer request: */
	spin_unlock(&dbc->lock);
	req->complete(xhci, req);
	spin_lock(&dbc->lock);
}

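/*
 * Cancel a pending request: overwrite its TRB with a no-op (keeping
 * only the cycle bit) and complete it with -ESHUTDOWN.
 */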
static void xhci_dbc_flush_single_request(struct dbc_request *req)
{
	union xhci_trb *trb = req->trb;

	trb->generic.field[0]	= 0;
	trb->generic.field[1]	= 0;
	trb->generic.field[2]	= 0;
	trb->generic.field[3]	&= cpu_to_le32(TRB_CYCLE);
	trb->generic.field[3]	|= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));

	xhci_dbc_giveback(req, -ESHUTDOWN);
}

static void xhci_dbc_flush_endpoint_requests(struct dbc_ep *dep)
{
	struct dbc_request *req, *tmp;

	list_for_each_entry_safe(req, tmp, &dep->list_pending, list_pending)
		xhci_dbc_flush_single_request(req);
}

static void xhci_dbc_flush_requests(struct xhci_dbc *dbc)
{
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_OUT]);
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_IN]);
}

struct dbc_request *
dbc_alloc_request(struct dbc_ep *dep, gfp_t gfp_flags)
{
	struct dbc_request *req;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	req->dep = dep;
	INIT_LIST_HEAD(&req->list_pending);
	INIT_LIST_HEAD(&req->list_pool);
	req->direction = dep->direction;

	trace_xhci_dbc_alloc_request(req);

	return req;
}

void
dbc_free_request(struct dbc_ep *dep, struct dbc_request *req)
{
	trace_xhci_dbc_free_request(req);
	kfree(req);
}

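/*
 * Write one TRB at the ring's enqueue position and advance the pointer,
 * following the link TRB back to the start of the segment and toggling
 * the cycle state when the end of the ring is reached.
 */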
static void
xhci_dbc_queue_trb(struct xhci_ring *ring, u32 field1,
		   u32 field2, u32 field3, u32 field4)
{
	union xhci_trb *trb, *next;

	trb = ring->enqueue;
	trb->generic.field[0]	= cpu_to_le32(field1);
	trb->generic.field[1]	= cpu_to_le32(field2);
	trb->generic.field[2]	= cpu_to_le32(field3);
	trb->generic.field[3]	= cpu_to_le32(field4);

	trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic);

	ring->num_trbs_free--;
	next = ++(ring->enqueue);
	if (TRB_TYPE_LINK_LE32(next->link.control)) {
		next->link.control ^= cpu_to_le32(TRB_CYCLE);
		ring->enqueue = ring->enq_seg->trbs;
		ring->cycle_state ^= 1;
	}
}

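/*
 * Queue a single normal TRB for a bulk transfer. The TRB is written
 * with an inverted cycle bit so the controller will not execute it,
 * then the bit is flipped after a write barrier and the doorbell rung.
 */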
static int xhci_dbc_queue_bulk_tx(struct dbc_ep *dep,
				  struct dbc_request *req)
{
	u64 addr;
	union xhci_trb *trb;
	unsigned int num_trbs;
	struct xhci_dbc *dbc = dep->dbc;
	struct xhci_ring *ring = dep->ring;
	u32 length, control, cycle;

	num_trbs = count_trbs(req->dma, req->length);
	WARN_ON(num_trbs != 1);
	if (ring->num_trbs_free < num_trbs)
		return -EBUSY;

	addr	= req->dma;
	trb	= ring->enqueue;
	cycle	= ring->cycle_state;
	length	= TRB_LEN(req->length);
	control	= TRB_TYPE(TRB_NORMAL) | TRB_IOC;

	/* control is in CPU order; xhci_dbc_queue_trb() byte-swaps it. */
	if (cycle)
		control &= ~TRB_CYCLE;
	else
		control |= TRB_CYCLE;

	req->trb = ring->enqueue;
	req->trb_dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
	xhci_dbc_queue_trb(ring,
			   lower_32_bits(addr),
			   upper_32_bits(addr),
			   length, control);

	/*
	 * Add a barrier between writes of trb fields and flipping
	 * the cycle bit:
	 */
	wmb();

	if (cycle)
		trb->generic.field[3] |= cpu_to_le32(TRB_CYCLE);
	else
		trb->generic.field[3] &= cpu_to_le32(~TRB_CYCLE);

	writel(DBC_DOOR_BELL_TARGET(dep->direction), &dbc->regs->doorbell);

	return 0;
}

static int
dbc_ep_do_queue(struct dbc_ep *dep, struct dbc_request *req)
{
	int ret;
	struct device *dev;
	struct xhci_dbc *dbc = dep->dbc;
	struct xhci_hcd *xhci = dbc->xhci;

	dev = xhci_to_hcd(xhci)->self.sysdev;

	if (!req->length || !req->buf)
		return -EINVAL;

	req->actual = 0;
	req->status = -EINPROGRESS;

	req->dma = dma_map_single(dev, req->buf, req->length,
				  dbc_ep_dma_direction(dep));
	if (dma_mapping_error(dev, req->dma)) {
		xhci_err(xhci, "failed to map buffer\n");
		return -EFAULT;
	}

	ret = xhci_dbc_queue_bulk_tx(dep, req);
	if (ret) {
		xhci_err(xhci, "failed to queue trbs\n");
		dma_unmap_single(dev, req->dma, req->length,
				 dbc_ep_dma_direction(dep));
		return -EFAULT;
	}

	list_add_tail(&req->list_pending, &dep->list_pending);

	return 0;
}

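/*
 * Submit a request to a DbC endpoint. Requests are accepted only in
 * the DS_CONFIGURED state; the event work is kicked so completions are
 * processed promptly.
 */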
int dbc_ep_queue(struct dbc_ep *dep, struct dbc_request *req,
		 gfp_t gfp_flags)
{
	unsigned long flags;
	struct xhci_dbc *dbc = dep->dbc;
	int ret = -ESHUTDOWN;

	spin_lock_irqsave(&dbc->lock, flags);
	if (dbc->state == DS_CONFIGURED)
		ret = dbc_ep_do_queue(dep, req);
	spin_unlock_irqrestore(&dbc->lock, flags);

	mod_delayed_work(system_wq, &dbc->event_work, 0);

	trace_xhci_dbc_queue_request(req);

	return ret;
}

static inline void xhci_dbc_do_eps_init(struct xhci_hcd *xhci, bool direction)
{
	struct dbc_ep *dep;
	struct xhci_dbc *dbc = xhci->dbc;

	dep			= &dbc->eps[direction];
	dep->dbc		= dbc;
	dep->direction		= direction;
	dep->ring		= direction ? dbc->ring_in : dbc->ring_out;

	INIT_LIST_HEAD(&dep->list_pending);
}

static void xhci_dbc_eps_init(struct xhci_hcd *xhci)
{
	xhci_dbc_do_eps_init(xhci, BULK_OUT);
	xhci_dbc_do_eps_init(xhci, BULK_IN);
}

static void xhci_dbc_eps_exit(struct xhci_hcd *xhci)
{
	struct xhci_dbc *dbc = xhci->dbc;

	memset(dbc->eps, 0, sizeof(struct dbc_ep) * ARRAY_SIZE(dbc->eps));
}

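/*
 * Allocate everything the debug capability needs: the event and
 * transfer rings, the ERST, the context data structure and the string
 * table, then program the ERST registers and initialize the contexts.
 * Failures unwind in reverse order of allocation.
 */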
static int xhci_dbc_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
	int ret;
	dma_addr_t deq;
	u32 string_length;
	struct xhci_dbc *dbc = xhci->dbc;

	/* Allocate various rings for events and transfers: */
	dbc->ring_evt = xhci_ring_alloc(xhci, 1, 1, TYPE_EVENT, 0, flags);
	if (!dbc->ring_evt)
		goto evt_fail;

	dbc->ring_in = xhci_ring_alloc(xhci, 1, 1, TYPE_BULK, 0, flags);
	if (!dbc->ring_in)
		goto in_fail;

	dbc->ring_out = xhci_ring_alloc(xhci, 1, 1, TYPE_BULK, 0, flags);
	if (!dbc->ring_out)
		goto out_fail;

	/* Allocate and populate ERST: */
	ret = xhci_alloc_erst(xhci, dbc->ring_evt, &dbc->erst, flags);
	if (ret)
		goto erst_fail;

	/* Allocate context data structure: */
	dbc->ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
	if (!dbc->ctx)
		goto ctx_fail;

	/* Allocate the string table: */
	dbc->string_size = sizeof(struct dbc_str_descs);
	dbc->string = dbc_dma_alloc_coherent(xhci,
					     dbc->string_size,
					     &dbc->string_dma, flags);
	if (!dbc->string)
		goto string_fail;

	/* Setup ERST register: */
	writel(dbc->erst.erst_size, &dbc->regs->ersts);
	xhci_write_64(xhci, dbc->erst.erst_dma_addr, &dbc->regs->erstba);
	deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
				   dbc->ring_evt->dequeue);
	xhci_write_64(xhci, deq, &dbc->regs->erdp);

	/* Setup strings and contexts: */
	string_length = xhci_dbc_populate_strings(dbc->string);
	xhci_dbc_init_contexts(xhci, string_length);

	xhci_dbc_eps_init(xhci);
	dbc->state = DS_INITIALIZED;

	return 0;

string_fail:
	xhci_free_container_ctx(xhci, dbc->ctx);
	dbc->ctx = NULL;
ctx_fail:
	xhci_free_erst(xhci, &dbc->erst);
erst_fail:
	xhci_ring_free(xhci, dbc->ring_out);
	dbc->ring_out = NULL;
out_fail:
	xhci_ring_free(xhci, dbc->ring_in);
	dbc->ring_in = NULL;
in_fail:
	xhci_ring_free(xhci, dbc->ring_evt);
	dbc->ring_evt = NULL;
evt_fail:
	return -ENOMEM;
}

static void xhci_dbc_mem_cleanup(struct xhci_hcd *xhci)
{
	struct xhci_dbc *dbc = xhci->dbc;

	if (!dbc)
		return;

	xhci_dbc_eps_exit(xhci);

	if (dbc->string) {
		dbc_dma_free_coherent(xhci,
				      dbc->string_size,
				      dbc->string, dbc->string_dma);
		dbc->string = NULL;
	}

	xhci_free_container_ctx(xhci, dbc->ctx);
	dbc->ctx = NULL;

	xhci_free_erst(xhci, &dbc->erst);
	xhci_ring_free(xhci, dbc->ring_out);
	xhci_ring_free(xhci, dbc->ring_in);
	xhci_ring_free(xhci, dbc->ring_evt);
	dbc->ring_in = NULL;
	dbc->ring_out = NULL;
	dbc->ring_evt = NULL;
}

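/*
 * Enable the debug capability: wait for a previous enable to clear,
 * set up memory, then set the DbC-enable and port-enable bits and wait
 * for the controller to acknowledge. Runs under dbc->lock, hence the
 * GFP_ATOMIC allocation.
 */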
static int xhci_do_dbc_start(struct xhci_hcd *xhci)
{
	int ret;
	u32 ctrl;
	struct xhci_dbc *dbc = xhci->dbc;

	if (dbc->state != DS_DISABLED)
		return -EINVAL;

	writel(0, &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     0, 1000);
	if (ret)
		return ret;

	ret = xhci_dbc_mem_init(xhci, GFP_ATOMIC);
	if (ret)
		return ret;

	ctrl = readl(&dbc->regs->control);
	writel(ctrl | DBC_CTRL_DBC_ENABLE | DBC_CTRL_PORT_ENABLE,
	       &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     DBC_CTRL_DBC_ENABLE, 1000);
	if (ret)
		return ret;

	dbc->state = DS_ENABLED;

	return 0;
}

static void xhci_do_dbc_stop(struct xhci_hcd *xhci)
{
	struct xhci_dbc *dbc = xhci->dbc;

	if (dbc->state == DS_DISABLED)
		return;

	writel(0, &dbc->regs->control);
	xhci_dbc_mem_cleanup(xhci);
	dbc->state = DS_DISABLED;
}

static int xhci_dbc_start(struct xhci_hcd *xhci)
{
	int ret;
	unsigned long flags;
	struct xhci_dbc *dbc = xhci->dbc;

	WARN_ON(!dbc);

	pm_runtime_get_sync(xhci_to_hcd(xhci)->self.controller);

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_start(xhci);
	spin_unlock_irqrestore(&dbc->lock, flags);

	if (ret) {
		pm_runtime_put(xhci_to_hcd(xhci)->self.controller);
		return ret;
	}

	return mod_delayed_work(system_wq, &dbc->event_work, 1);
}

static void xhci_dbc_stop(struct xhci_hcd *xhci)
{
	unsigned long flags;
	struct xhci_dbc *dbc = xhci->dbc;
	struct dbc_port *port = &dbc->port;

	WARN_ON(!dbc);

	cancel_delayed_work_sync(&dbc->event_work);

	if (port->registered)
		xhci_dbc_tty_unregister_device(xhci);

	spin_lock_irqsave(&dbc->lock, flags);
	xhci_do_dbc_stop(xhci);
	spin_unlock_irqrestore(&dbc->lock, flags);

	pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
}

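/*
 * Log DbC port status change events. The reset-change bit is preserved
 * here and cleared by the state machine when it handles the port reset.
 */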
static void
dbc_handle_port_status(struct xhci_hcd *xhci, union xhci_trb *event)
{
	u32 portsc;
	struct xhci_dbc *dbc = xhci->dbc;

	portsc = readl(&dbc->regs->portsc);
	if (portsc & DBC_PORTSC_CONN_CHANGE)
		xhci_info(xhci, "DbC port connect change\n");

	if (portsc & DBC_PORTSC_RESET_CHANGE)
		xhci_info(xhci, "DbC port reset change\n");

	if (portsc & DBC_PORTSC_LINK_CHANGE)
		xhci_info(xhci, "DbC port link status change\n");

	if (portsc & DBC_PORTSC_CONFIG_CHANGE)
		xhci_info(xhci, "DbC config error change\n");

	/* Port reset change bit will be cleared in other place: */
	writel(portsc & ~DBC_PORTSC_RESET_CHANGE, &dbc->regs->portsc);
}

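/*
 * Handle a transfer event: decode the completion code, look up the
 * pending request whose TRB address matches the event, and give it
 * back with the computed status and actual transferred length.
 */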
static void dbc_handle_xfer_event(struct xhci_hcd *xhci, union xhci_trb *event)
{
	struct dbc_ep *dep;
	struct xhci_ring *ring;
	int ep_id;
	int status;
	u32 comp_code;
	size_t remain_length;
	struct dbc_request *req = NULL, *r;

	comp_code	= GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
	remain_length	= EVENT_TRB_LEN(le32_to_cpu(event->generic.field[2]));
	ep_id		= TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
	dep		= (ep_id == EPID_OUT) ?
				get_out_ep(xhci) : get_in_ep(xhci);
	ring		= dep->ring;

	switch (comp_code) {
	case COMP_SUCCESS:
		remain_length = 0;
		/* FALLTHROUGH */
	case COMP_SHORT_PACKET:
		status = 0;
		break;
	case COMP_TRB_ERROR:
	case COMP_BABBLE_DETECTED_ERROR:
	case COMP_USB_TRANSACTION_ERROR:
	case COMP_STALL_ERROR:
		xhci_warn(xhci, "tx error %d detected\n", comp_code);
		status = -comp_code;
		break;
	default:
		xhci_err(xhci, "unknown tx error %d\n", comp_code);
		status = -comp_code;
		break;
	}

	/* Match the pending request: */
	list_for_each_entry(r, &dep->list_pending, list_pending) {
		if (r->trb_dma == event->trans_event.buffer) {
			req = r;
			break;
		}
	}

	if (!req) {
		xhci_warn(xhci, "no matched request\n");
		return;
	}

	trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);

	ring->num_trbs_free++;
	req->actual = req->length - remain_length;
	xhci_dbc_giveback(req, status);
}

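/*
 * Run the DbC state machine (connect, configure, unplug, port reset
 * and endpoint stall transitions), then consume any pending TRBs on
 * the event ring and update the event ring dequeue pointer.
 */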
static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
{
	dma_addr_t deq;
	struct dbc_ep *dep;
	union xhci_trb *evt;
	u32 ctrl, portsc;
	struct xhci_hcd *xhci = dbc->xhci;
	bool update_erdp = false;

	/* DbC state machine: */
	switch (dbc->state) {
	case DS_DISABLED:
	case DS_INITIALIZED:
		return EVT_ERR;
	case DS_ENABLED:
		portsc = readl(&dbc->regs->portsc);
		if (portsc & DBC_PORTSC_CONN_STATUS) {
			dbc->state = DS_CONNECTED;
			xhci_info(xhci, "DbC connected\n");
		}

		return EVT_DONE;
	case DS_CONNECTED:
		ctrl = readl(&dbc->regs->control);
		if (ctrl & DBC_CTRL_DBC_RUN) {
			dbc->state = DS_CONFIGURED;
			xhci_info(xhci, "DbC configured\n");
			portsc = readl(&dbc->regs->portsc);
			writel(portsc, &dbc->regs->portsc);
			return EVT_GSER;
		}

		return EVT_DONE;
	case DS_CONFIGURED:
		/* Handle cable unplug event: */
		portsc = readl(&dbc->regs->portsc);
		if (!(portsc & DBC_PORTSC_PORT_ENABLED) &&
		    !(portsc & DBC_PORTSC_CONN_STATUS)) {
			xhci_info(xhci, "DbC cable unplugged\n");
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);

			return EVT_DISC;
		}

		/* Handle debug port reset event: */
		if (portsc & DBC_PORTSC_RESET_CHANGE) {
			xhci_info(xhci, "DbC port reset\n");
			writel(portsc, &dbc->regs->portsc);
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);

			return EVT_DISC;
		}

		/* Handle endpoint stall event: */
		ctrl = readl(&dbc->regs->control);
		if ((ctrl & DBC_CTRL_HALT_IN_TR) ||
		    (ctrl & DBC_CTRL_HALT_OUT_TR)) {
			xhci_info(xhci, "DbC Endpoint stall\n");
			dbc->state = DS_STALLED;

			if (ctrl & DBC_CTRL_HALT_IN_TR) {
				dep = get_in_ep(xhci);
				xhci_dbc_flush_endpoint_requests(dep);
			}

			if (ctrl & DBC_CTRL_HALT_OUT_TR) {
				dep = get_out_ep(xhci);
				xhci_dbc_flush_endpoint_requests(dep);
			}

			return EVT_DONE;
		}

		/* Clear DbC run change bit: */
		if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) {
			writel(ctrl, &dbc->regs->control);
			ctrl = readl(&dbc->regs->control);
		}

		break;
	case DS_STALLED:
		ctrl = readl(&dbc->regs->control);
		if (!(ctrl & DBC_CTRL_HALT_IN_TR) &&
		    !(ctrl & DBC_CTRL_HALT_OUT_TR) &&
		    (ctrl & DBC_CTRL_DBC_RUN)) {
			dbc->state = DS_CONFIGURED;
			break;
		}

		return EVT_DONE;
	default:
		xhci_err(xhci, "Unknown DbC state %d\n", dbc->state);
		break;
	}

	/* Handle the events in the event ring: */
	evt = dbc->ring_evt->dequeue;
	while ((le32_to_cpu(evt->event_cmd.flags) & TRB_CYCLE) ==
			dbc->ring_evt->cycle_state) {
		/*
		 * Add a barrier between reading the cycle flag and any
		 * reads of the event's flags/data below:
		 */
		rmb();

		trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic);

		switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) {
		case TRB_TYPE(TRB_PORT_STATUS):
			dbc_handle_port_status(xhci, evt);
			break;
		case TRB_TYPE(TRB_TRANSFER):
			dbc_handle_xfer_event(xhci, evt);
			break;
		default:
			break;
		}

		inc_deq(xhci, dbc->ring_evt);
		evt = dbc->ring_evt->dequeue;
		update_erdp = true;
	}

	/* Update event ring dequeue pointer: */
	if (update_erdp) {
		deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
					   dbc->ring_evt->dequeue);
		xhci_write_64(xhci, deq, &dbc->regs->erdp);
	}

	return EVT_DONE;
}

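/*
 * Delayed-work handler: runs the state machine under dbc->lock, then
 * registers or unregisters the tty device as the DbC session comes and
 * goes, and re-arms itself unless an error state was reported.
 */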
static void xhci_dbc_handle_events(struct work_struct *work)
{
	int ret;
	enum evtreturn evtr;
	struct xhci_dbc *dbc;
	unsigned long flags;
	struct xhci_hcd *xhci;

	dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);
	xhci = dbc->xhci;

	spin_lock_irqsave(&dbc->lock, flags);
	evtr = xhci_dbc_do_handle_events(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	switch (evtr) {
	case EVT_GSER:
		ret = xhci_dbc_tty_register_device(xhci);
		if (ret) {
			xhci_err(xhci, "failed to alloc tty device\n");
			break;
		}

		xhci_info(xhci, "DbC now attached to /dev/ttyDBC0\n");
		break;
	case EVT_DISC:
		xhci_dbc_tty_unregister_device(xhci);
		break;
	case EVT_DONE:
		break;
	default:
		xhci_info(xhci, "stop handling dbc events\n");
		return;
	}

	mod_delayed_work(system_wq, &dbc->event_work, 1);
}

static void xhci_do_dbc_exit(struct xhci_hcd *xhci)
{
	unsigned long flags;

	spin_lock_irqsave(&xhci->lock, flags);
	kfree(xhci->dbc);
	xhci->dbc = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
}

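/*
 * Locate the xHCI debug capability in extended capability space and
 * allocate the per-controller DbC structure. Fails with -EBUSY if the
 * capability is already enabled by another user or already claimed.
 */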
static int xhci_do_dbc_init(struct xhci_hcd *xhci)
{
	u32 reg;
	struct xhci_dbc *dbc;
	unsigned long flags;
	void __iomem *base;
	int dbc_cap_offs;

	base = &xhci->cap_regs->hc_capbase;
	dbc_cap_offs = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_DEBUG);
	if (!dbc_cap_offs)
		return -ENODEV;

	dbc = kzalloc(sizeof(*dbc), GFP_KERNEL);
	if (!dbc)
		return -ENOMEM;

	dbc->regs = base + dbc_cap_offs;

	/* We will avoid using DbC in xhci driver if it's in use. */
	reg = readl(&dbc->regs->control);
	if (reg & DBC_CTRL_DBC_ENABLE) {
		kfree(dbc);
		return -EBUSY;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	if (xhci->dbc) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		kfree(dbc);
		return -EBUSY;
	}
	xhci->dbc = dbc;
	spin_unlock_irqrestore(&xhci->lock, flags);

	dbc->xhci = xhci;
	INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
	spin_lock_init(&dbc->lock);

	return 0;
}

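/*
 * The "dbc" sysfs attribute: reading reports the current DbC state,
 * writing "enable" or "disable" starts or stops the debug capability.
 */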
static ssize_t dbc_show(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	const char *p;
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	switch (dbc->state) {
	case DS_DISABLED:	p = "disabled";		break;
	case DS_INITIALIZED:	p = "initialized";	break;
	case DS_ENABLED:	p = "enabled";		break;
	case DS_CONNECTED:	p = "connected";	break;
	case DS_CONFIGURED:	p = "configured";	break;
	case DS_STALLED:	p = "stalled";		break;
	default:		p = "unknown";		break;
	}

	return sprintf(buf, "%s\n", p);
}

static ssize_t dbc_store(struct device *dev,
			 struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));

	if (!strncmp(buf, "enable", 6))
		xhci_dbc_start(xhci);
	else if (!strncmp(buf, "disable", 7))
		xhci_dbc_stop(xhci);
	else
		return -EINVAL;

	return count;
}

static DEVICE_ATTR_RW(dbc);

int xhci_dbc_init(struct xhci_hcd *xhci)
{
	int ret;
	struct device *dev = xhci_to_hcd(xhci)->self.controller;

	ret = xhci_do_dbc_init(xhci);
	if (ret)
		goto init_err3;

	ret = xhci_dbc_tty_register_driver(xhci);
	if (ret)
		goto init_err2;

	ret = device_create_file(dev, &dev_attr_dbc);
	if (ret)
		goto init_err1;

	return 0;

init_err1:
	xhci_dbc_tty_unregister_driver();
init_err2:
	xhci_do_dbc_exit(xhci);
init_err3:
	return ret;
}

void xhci_dbc_exit(struct xhci_hcd *xhci)
{
	struct device *dev = xhci_to_hcd(xhci)->self.controller;

	if (!xhci->dbc)
		return;

	device_remove_file(dev, &dev_attr_dbc);
	xhci_dbc_tty_unregister_driver();
	xhci_dbc_stop(xhci);
	xhci_do_dbc_exit(xhci);
}

#ifdef CONFIG_PM
int xhci_dbc_suspend(struct xhci_hcd *xhci)
{
	struct xhci_dbc *dbc = xhci->dbc;

	if (!dbc)
		return 0;

	if (dbc->state == DS_CONFIGURED)
		dbc->resume_required = 1;

	xhci_dbc_stop(xhci);

	return 0;
}

int xhci_dbc_resume(struct xhci_hcd *xhci)
{
	int ret = 0;
	struct xhci_dbc *dbc = xhci->dbc;

	if (!dbc)
		return 0;

	if (dbc->resume_required) {
		dbc->resume_required = 0;
		xhci_dbc_start(xhci);
	}

	return ret;
}
#endif /* CONFIG_PM */