/*
 * xhci-dbgcap.c - xHCI debug capability support
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/nls.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-dbgcap.h"

static inline void *
dbc_dma_alloc_coherent(struct xhci_hcd *xhci, size_t size,
                       dma_addr_t *dma_handle, gfp_t flags)
{
        void            *vaddr;

        vaddr = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
                                   size, dma_handle, flags);
        if (!vaddr)
                return NULL;

        memset(vaddr, 0, size);
        return vaddr;
}

static inline void
dbc_dma_free_coherent(struct xhci_hcd *xhci, size_t size,
                      void *cpu_addr, dma_addr_t dma_handle)
{
        if (cpu_addr)
                dma_free_coherent(xhci_to_hcd(xhci)->self.sysdev,
                                  size, cpu_addr, dma_handle);
}

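/*
 * Build the four USB string descriptors (string0, manufacturer, product
 * and serial) in the DbC string table.  The return value packs the four
 * descriptor lengths into one u32, one byte per string: serial in bits
 * 31:24, product in 23:16, manufacturer in 15:8 and string0 in 7:0,
 * matching the string-length layout of the DbC Info Context.
 */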
static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
{
        struct usb_string_descriptor    *s_desc;
        u32                             string_length;

        /* Serial string: */
        s_desc = (struct usb_string_descriptor *)strings->serial;
        utf8s_to_utf16s(DBC_STRING_SERIAL, strlen(DBC_STRING_SERIAL),
                        UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
                        DBC_MAX_STRING_LENGTH);

        s_desc->bLength         = (strlen(DBC_STRING_SERIAL) + 1) * 2;
        s_desc->bDescriptorType = USB_DT_STRING;
        string_length           = s_desc->bLength;
        string_length           <<= 8;

        /* Product string: */
        s_desc = (struct usb_string_descriptor *)strings->product;
        utf8s_to_utf16s(DBC_STRING_PRODUCT, strlen(DBC_STRING_PRODUCT),
                        UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
                        DBC_MAX_STRING_LENGTH);

        s_desc->bLength         = (strlen(DBC_STRING_PRODUCT) + 1) * 2;
        s_desc->bDescriptorType = USB_DT_STRING;
        string_length           += s_desc->bLength;
        string_length           <<= 8;

        /* Manufacturer string: */
        s_desc = (struct usb_string_descriptor *)strings->manufacturer;
        utf8s_to_utf16s(DBC_STRING_MANUFACTURER,
                        strlen(DBC_STRING_MANUFACTURER),
                        UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
                        DBC_MAX_STRING_LENGTH);

        s_desc->bLength         = (strlen(DBC_STRING_MANUFACTURER) + 1) * 2;
        s_desc->bDescriptorType = USB_DT_STRING;
        string_length           += s_desc->bLength;
        string_length           <<= 8;

        /* String0: US English language id (0x0409): */
        strings->string0[0]     = 4;
        strings->string0[1]     = USB_DT_STRING;
        strings->string0[2]     = 0x09;
        strings->string0[3]     = 0x04;
        string_length           += 4;

        return string_length;
}

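/*
 * Populate the DbC Info Context with the string table addresses and
 * lengths, set up both bulk endpoint contexts, and program the context
 * pointer (DCCP) and device descriptor info (DCDDI1/DCDDI2) registers.
 */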
static void xhci_dbc_init_contexts(struct xhci_hcd *xhci, u32 string_length)
{
        struct xhci_dbc         *dbc;
        struct dbc_info_context *info;
        struct xhci_ep_ctx      *ep_ctx;
        u32                     dev_info;
        dma_addr_t              deq, dma;
        unsigned int            max_burst;

        dbc = xhci->dbc;
        if (!dbc)
                return;

        /* Populate info Context: */
        info                    = (struct dbc_info_context *)dbc->ctx->bytes;
        dma                     = dbc->string_dma;
        info->string0           = cpu_to_le64(dma);
        info->manufacturer      = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH);
        info->product           = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 2);
        info->serial            = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
        info->length            = cpu_to_le32(string_length);

        /* Populate bulk out endpoint context: */
        ep_ctx                  = dbc_bulkout_ctx(dbc);
        max_burst               = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
        deq                     = dbc_bulkout_enq(dbc);
        ep_ctx->ep_info         = 0;
        ep_ctx->ep_info2        = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
        ep_ctx->deq             = cpu_to_le64(deq | dbc->ring_out->cycle_state);

        /* Populate bulk in endpoint context: */
        ep_ctx                  = dbc_bulkin_ctx(dbc);
        deq                     = dbc_bulkin_enq(dbc);
        ep_ctx->ep_info         = 0;
        ep_ctx->ep_info2        = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
        ep_ctx->deq             = cpu_to_le64(deq | dbc->ring_in->cycle_state);

        /* Set DbC context and info registers: */
        xhci_write_64(xhci, dbc->ctx->dma, &dbc->regs->dccp);

        /* writel() byteswaps as needed; no cpu_to_le32() here. */
        dev_info = (DBC_VENDOR_ID << 16) | DBC_PROTOCOL;
        writel(dev_info, &dbc->regs->devinfo1);

        dev_info = (DBC_DEVICE_REV << 16) | DBC_PRODUCT_ID;
        writel(dev_info, &dbc->regs->devinfo2);
}

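/*
 * Complete a request: unlink it, unmap its DMA buffer and call its
 * completion handler.  dbc->lock is dropped around the callback so the
 * handler may requeue; the sparse annotations document this.
 */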
static void xhci_dbc_giveback(struct dbc_request *req, int status)
        __releases(&dbc->lock)
        __acquires(&dbc->lock)
{
        struct dbc_ep           *dep = req->dep;
        struct xhci_dbc         *dbc = dep->dbc;
        struct xhci_hcd         *xhci = dbc->xhci;
        struct device           *dev = xhci_to_hcd(dbc->xhci)->self.sysdev;

        list_del_init(&req->list_pending);
        req->trb_dma = 0;
        req->trb = NULL;

        if (req->status == -EINPROGRESS)
                req->status = status;

        trace_xhci_dbc_giveback_request(req);

        dma_unmap_single(dev,
                         req->dma,
                         req->length,
                         dbc_ep_dma_direction(dep));

        /* Give back the transfer request: */
        spin_unlock(&dbc->lock);
        req->complete(xhci, req);
        spin_lock(&dbc->lock);
}

static void xhci_dbc_flush_single_request(struct dbc_request *req)
{
        union xhci_trb  *trb = req->trb;

        /* Turn the pending TRB into a no-op, keeping its cycle bit: */
        trb->generic.field[0]   = 0;
        trb->generic.field[1]   = 0;
        trb->generic.field[2]   = 0;
        trb->generic.field[3]   &= cpu_to_le32(TRB_CYCLE);
        trb->generic.field[3]   |= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));

        xhci_dbc_giveback(req, -ESHUTDOWN);
}

static void xhci_dbc_flush_endpoint_requests(struct dbc_ep *dep)
{
        struct dbc_request      *req, *tmp;

        list_for_each_entry_safe(req, tmp, &dep->list_pending, list_pending)
                xhci_dbc_flush_single_request(req);
}

static void xhci_dbc_flush_requests(struct xhci_dbc *dbc)
{
        xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_OUT]);
        xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_IN]);
}

struct dbc_request *
dbc_alloc_request(struct dbc_ep *dep, gfp_t gfp_flags)
{
        struct dbc_request      *req;

        req = kzalloc(sizeof(*req), gfp_flags);
        if (!req)
                return NULL;

        req->dep = dep;
        INIT_LIST_HEAD(&req->list_pending);
        INIT_LIST_HEAD(&req->list_pool);
        req->direction = dep->direction;

        trace_xhci_dbc_alloc_request(req);

        return req;
}

void
dbc_free_request(struct dbc_ep *dep, struct dbc_request *req)
{
        trace_xhci_dbc_free_request(req);

        kfree(req);
}

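/*
 * Write one TRB at the enqueue pointer and advance it.  When the new
 * enqueue position is the link TRB at the end of the segment, toggle
 * the link TRB's cycle bit, wrap back to the start of the segment and
 * flip the ring's cycle state (the DbC rings are single-segment).
 */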
static void
xhci_dbc_queue_trb(struct xhci_ring *ring, u32 field1,
                   u32 field2, u32 field3, u32 field4)
{
        union xhci_trb          *trb, *next;

        trb = ring->enqueue;
        trb->generic.field[0]   = cpu_to_le32(field1);
        trb->generic.field[1]   = cpu_to_le32(field2);
        trb->generic.field[2]   = cpu_to_le32(field3);
        trb->generic.field[3]   = cpu_to_le32(field4);

        trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic);

        ring->num_trbs_free--;
        next = ++(ring->enqueue);
        if (TRB_TYPE_LINK_LE32(next->link.control)) {
                next->link.control ^= cpu_to_le32(TRB_CYCLE);
                ring->enqueue = ring->enq_seg->trbs;
                ring->cycle_state ^= 1;
        }
}

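/*
 * Queue a single Normal TRB for a bulk transfer.  The TRB is first
 * written with its cycle bit in the state the controller ignores; only
 * after a write barrier is the cycle bit flipped to hand it over, so
 * the controller can never see a half-written TRB.
 */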
static int xhci_dbc_queue_bulk_tx(struct dbc_ep *dep,
                                  struct dbc_request *req)
{
        u64                     addr;
        union xhci_trb          *trb;
        unsigned int            num_trbs;
        struct xhci_dbc         *dbc = dep->dbc;
        struct xhci_ring        *ring = dep->ring;
        u32                     length, control, cycle;

        num_trbs = count_trbs(req->dma, req->length);
        WARN_ON(num_trbs != 1);
        if (ring->num_trbs_free < num_trbs)
                return -EBUSY;

        addr    = req->dma;
        trb     = ring->enqueue;
        cycle   = ring->cycle_state;
        length  = TRB_LEN(req->length);
        control = TRB_TYPE(TRB_NORMAL) | TRB_IOC;

        /* control is host-order here; xhci_dbc_queue_trb() byteswaps it. */
        if (cycle)
                control &= ~TRB_CYCLE;
        else
                control |= TRB_CYCLE;

        req->trb = ring->enqueue;
        req->trb_dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
        xhci_dbc_queue_trb(ring,
                           lower_32_bits(addr),
                           upper_32_bits(addr),
                           length, control);

        /*
         * Add a barrier between writes of trb fields and flipping
         * the cycle bit:
         */
        wmb();

        if (cycle)
                trb->generic.field[3] |= cpu_to_le32(TRB_CYCLE);
        else
                trb->generic.field[3] &= cpu_to_le32(~TRB_CYCLE);

        writel(DBC_DOOR_BELL_TARGET(dep->direction), &dbc->regs->doorbell);

        return 0;
}

static int
dbc_ep_do_queue(struct dbc_ep *dep, struct dbc_request *req)
{
        int                     ret;
        struct device           *dev;
        struct xhci_dbc         *dbc = dep->dbc;
        struct xhci_hcd         *xhci = dbc->xhci;

        dev = xhci_to_hcd(xhci)->self.sysdev;

        if (!req->length || !req->buf)
                return -EINVAL;

        req->actual             = 0;
        req->status             = -EINPROGRESS;

        req->dma = dma_map_single(dev,
                                  req->buf,
                                  req->length,
                                  dbc_ep_dma_direction(dep));
        if (dma_mapping_error(dev, req->dma)) {
                xhci_err(xhci, "failed to map buffer\n");
                return -EFAULT;
        }

        ret = xhci_dbc_queue_bulk_tx(dep, req);
        if (ret) {
                xhci_err(xhci, "failed to queue trbs\n");
                dma_unmap_single(dev,
                                 req->dma,
                                 req->length,
                                 dbc_ep_dma_direction(dep));
                return -EFAULT;
        }

        list_add_tail(&req->list_pending, &dep->list_pending);

        return 0;
}

int dbc_ep_queue(struct dbc_ep *dep, struct dbc_request *req,
                 gfp_t gfp_flags)
{
        struct xhci_dbc         *dbc = dep->dbc;
        int                     ret = -ESHUTDOWN;

        spin_lock(&dbc->lock);
        if (dbc->state == DS_CONFIGURED)
                ret = dbc_ep_do_queue(dep, req);
        spin_unlock(&dbc->lock);

        mod_delayed_work(system_wq, &dbc->event_work, 0);

        trace_xhci_dbc_queue_request(req);

        return ret;
}

static inline void xhci_dbc_do_eps_init(struct xhci_hcd *xhci, bool direction)
{
        struct dbc_ep           *dep;
        struct xhci_dbc         *dbc = xhci->dbc;

        dep                     = &dbc->eps[direction];
        dep->dbc                = dbc;
        dep->direction          = direction;
        dep->ring               = direction ? dbc->ring_in : dbc->ring_out;

        INIT_LIST_HEAD(&dep->list_pending);
}

static void xhci_dbc_eps_init(struct xhci_hcd *xhci)
{
        xhci_dbc_do_eps_init(xhci, BULK_OUT);
        xhci_dbc_do_eps_init(xhci, BULK_IN);
}

static void xhci_dbc_eps_exit(struct xhci_hcd *xhci)
{
        struct xhci_dbc         *dbc = xhci->dbc;

        memset(dbc->eps, 0, sizeof(struct dbc_ep) * ARRAY_SIZE(dbc->eps));
}

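/*
 * Allocate everything the DbC needs: the event and transfer rings, the
 * ERST, the context data structure and the string table; then program
 * the event ring registers and initialize contexts, strings and
 * endpoints.  The error labels unwind the allocations in reverse order.
 */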
static int xhci_dbc_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
        int                     ret;
        dma_addr_t              deq;
        u32                     string_length;
        struct xhci_dbc         *dbc = xhci->dbc;

        /* Allocate various rings for events and transfers: */
        dbc->ring_evt = xhci_ring_alloc(xhci, 1, 1, TYPE_EVENT, 0, flags);
        if (!dbc->ring_evt)
                goto evt_fail;

        dbc->ring_in = xhci_ring_alloc(xhci, 1, 1, TYPE_BULK, 0, flags);
        if (!dbc->ring_in)
                goto in_fail;

        dbc->ring_out = xhci_ring_alloc(xhci, 1, 1, TYPE_BULK, 0, flags);
        if (!dbc->ring_out)
                goto out_fail;

        /* Allocate and populate ERST: */
        ret = xhci_alloc_erst(xhci, dbc->ring_evt, &dbc->erst, flags);
        if (ret)
                goto erst_fail;

        /* Allocate context data structure: */
        dbc->ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
        if (!dbc->ctx)
                goto ctx_fail;

        /* Allocate the string table: */
        dbc->string_size = sizeof(struct dbc_str_descs);
        dbc->string = dbc_dma_alloc_coherent(xhci,
                                             dbc->string_size,
                                             &dbc->string_dma,
                                             flags);
        if (!dbc->string)
                goto string_fail;

        /* Setup ERST register: */
        writel(dbc->erst.erst_size, &dbc->regs->ersts);
        xhci_write_64(xhci, dbc->erst.erst_dma_addr, &dbc->regs->erstba);
        deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
                                   dbc->ring_evt->dequeue);
        xhci_write_64(xhci, deq, &dbc->regs->erdp);

        /* Setup strings and contexts: */
        string_length = xhci_dbc_populate_strings(dbc->string);
        xhci_dbc_init_contexts(xhci, string_length);

        mmiowb();

        xhci_dbc_eps_init(xhci);
        dbc->state = DS_INITIALIZED;

        return 0;

string_fail:
        xhci_free_container_ctx(xhci, dbc->ctx);
        dbc->ctx = NULL;
ctx_fail:
        xhci_free_erst(xhci, &dbc->erst);
erst_fail:
        xhci_ring_free(xhci, dbc->ring_out);
        dbc->ring_out = NULL;
out_fail:
        xhci_ring_free(xhci, dbc->ring_in);
        dbc->ring_in = NULL;
in_fail:
        xhci_ring_free(xhci, dbc->ring_evt);
        dbc->ring_evt = NULL;
evt_fail:
        return -ENOMEM;
}

static void xhci_dbc_mem_cleanup(struct xhci_hcd *xhci)
{
        struct xhci_dbc         *dbc = xhci->dbc;

        if (!dbc)
                return;

        xhci_dbc_eps_exit(xhci);

        if (dbc->string) {
                dbc_dma_free_coherent(xhci,
                                      dbc->string_size,
                                      dbc->string, dbc->string_dma);
                dbc->string = NULL;
        }

        xhci_free_container_ctx(xhci, dbc->ctx);
        dbc->ctx = NULL;

        xhci_free_erst(xhci, &dbc->erst);
        xhci_ring_free(xhci, dbc->ring_out);
        xhci_ring_free(xhci, dbc->ring_in);
        xhci_ring_free(xhci, dbc->ring_evt);
        dbc->ring_in = NULL;
        dbc->ring_out = NULL;
        dbc->ring_evt = NULL;
}

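/*
 * Enable the DbC: clear the control register and wait for the enable
 * bit to read back as zero, set up DbC memory, then set the enable bits
 * and wait for the controller to latch them.  Called under dbc->lock,
 * hence the GFP_ATOMIC allocations.
 */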
static int xhci_do_dbc_start(struct xhci_hcd *xhci)
{
        int                     ret;
        u32                     ctrl;
        struct xhci_dbc         *dbc = xhci->dbc;

        if (dbc->state != DS_DISABLED)
                return -EINVAL;

        writel(0, &dbc->regs->control);
        ret = xhci_handshake(&dbc->regs->control,
                             DBC_CTRL_DBC_ENABLE,
                             0, 1000);
        if (ret)
                return ret;

        ret = xhci_dbc_mem_init(xhci, GFP_ATOMIC);
        if (ret)
                return ret;

        ctrl = readl(&dbc->regs->control);
        writel(ctrl | DBC_CTRL_DBC_ENABLE | DBC_CTRL_PORT_ENABLE,
               &dbc->regs->control);
        ret = xhci_handshake(&dbc->regs->control,
                             DBC_CTRL_DBC_ENABLE,
                             DBC_CTRL_DBC_ENABLE, 1000);
        if (ret)
                return ret;

        dbc->state = DS_ENABLED;

        return 0;
}

static void xhci_do_dbc_stop(struct xhci_hcd *xhci)
{
        struct xhci_dbc         *dbc = xhci->dbc;

        if (dbc->state == DS_DISABLED)
                return;

        writel(0, &dbc->regs->control);
        xhci_dbc_mem_cleanup(xhci);
        dbc->state = DS_DISABLED;
}

static int xhci_dbc_start(struct xhci_hcd *xhci)
{
        int                     ret;
        struct xhci_dbc         *dbc = xhci->dbc;

        WARN_ON(!dbc);

        pm_runtime_get_sync(xhci_to_hcd(xhci)->self.controller);

        spin_lock(&dbc->lock);
        ret = xhci_do_dbc_start(xhci);
        spin_unlock(&dbc->lock);

        if (ret) {
                pm_runtime_put(xhci_to_hcd(xhci)->self.controller);
                return ret;
        }

        return mod_delayed_work(system_wq, &dbc->event_work, 1);
}

static void xhci_dbc_stop(struct xhci_hcd *xhci)
{
        struct xhci_dbc         *dbc = xhci->dbc;
        struct dbc_port         *port = &dbc->port;

        WARN_ON(!dbc);

        cancel_delayed_work_sync(&dbc->event_work);

        if (port->registered)
                xhci_dbc_tty_unregister_device(xhci);

        spin_lock(&dbc->lock);
        xhci_do_dbc_stop(xhci);
        spin_unlock(&dbc->lock);

        pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
}

static void
dbc_handle_port_status(struct xhci_hcd *xhci, union xhci_trb *event)
{
        u32                     portsc;
        struct xhci_dbc         *dbc = xhci->dbc;

        portsc = readl(&dbc->regs->portsc);
        if (portsc & DBC_PORTSC_CONN_CHANGE)
                xhci_info(xhci, "DbC port connect change\n");

        if (portsc & DBC_PORTSC_RESET_CHANGE)
                xhci_info(xhci, "DbC port reset change\n");

        if (portsc & DBC_PORTSC_LINK_CHANGE)
                xhci_info(xhci, "DbC port link status change\n");

        if (portsc & DBC_PORTSC_CONFIG_CHANGE)
                xhci_info(xhci, "DbC config error change\n");

        /* The port reset change bit is cleared elsewhere: */
        writel(portsc & ~DBC_PORTSC_RESET_CHANGE, &dbc->regs->portsc);
}

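/*
 * Handle a transfer event: decode the completion code, find the pending
 * request whose TRB DMA address matches the event's buffer field, then
 * update its actual length from the residue and give it back.
 */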
static void dbc_handle_xfer_event(struct xhci_hcd *xhci, union xhci_trb *event)
{
        struct dbc_ep           *dep;
        struct xhci_ring        *ring;
        int                     ep_id;
        int                     status;
        u32                     comp_code;
        size_t                  remain_length;
        struct dbc_request      *req = NULL, *r;

        comp_code       = GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
        remain_length   = EVENT_TRB_LEN(le32_to_cpu(event->generic.field[2]));
        ep_id           = TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
        dep             = (ep_id == EPID_OUT) ?
                                get_out_ep(xhci) : get_in_ep(xhci);
        ring            = dep->ring;

        switch (comp_code) {
        case COMP_SUCCESS:
                remain_length = 0;
        /* FALLTHROUGH */
        case COMP_SHORT_PACKET:
                status = 0;
                break;
        case COMP_TRB_ERROR:
        case COMP_BABBLE_DETECTED_ERROR:
        case COMP_USB_TRANSACTION_ERROR:
        case COMP_STALL_ERROR:
                xhci_warn(xhci, "tx error %d detected\n", comp_code);
                status = -comp_code;
                break;
        default:
                xhci_err(xhci, "unknown tx error %d\n", comp_code);
                status = -comp_code;
                break;
        }

        /* Match the pending request: */
        list_for_each_entry(r, &dep->list_pending, list_pending) {
                if (r->trb_dma == le64_to_cpu(event->trans_event.buffer)) {
                        req = r;
                        break;
                }
        }

        if (!req) {
                xhci_warn(xhci, "no matching request\n");
                return;
        }

        trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);

        ring->num_trbs_free++;
        req->actual = req->length - remain_length;
        xhci_dbc_giveback(req, status);
}

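/*
 * The DbC state machine, run from the event work item with dbc->lock
 * held.  It tracks connect, configure, cable-unplug, port-reset and
 * endpoint-stall transitions via the port status and control registers
 * and, once configured, drains the event ring.  The return value tells
 * the caller whether to register or unregister the tty device, keep
 * polling, or stop.
 */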
static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
{
        dma_addr_t              deq;
        struct dbc_ep           *dep;
        union xhci_trb          *evt;
        u32                     ctrl, portsc;
        struct xhci_hcd         *xhci = dbc->xhci;
        bool                    update_erdp = false;

        /* DbC state machine: */
        switch (dbc->state) {
        case DS_DISABLED:
        case DS_INITIALIZED:
                return EVT_ERR;
        case DS_ENABLED:
                portsc = readl(&dbc->regs->portsc);
                if (portsc & DBC_PORTSC_CONN_STATUS) {
                        dbc->state = DS_CONNECTED;
                        xhci_info(xhci, "DbC connected\n");
                }

                return EVT_DONE;
        case DS_CONNECTED:
                ctrl = readl(&dbc->regs->control);
                if (ctrl & DBC_CTRL_DBC_RUN) {
                        dbc->state = DS_CONFIGURED;
                        xhci_info(xhci, "DbC configured\n");
                        portsc = readl(&dbc->regs->portsc);
                        writel(portsc, &dbc->regs->portsc);
                        return EVT_GSER;
                }

                return EVT_DONE;
        case DS_CONFIGURED:
                /* Handle cable unplug event: */
                portsc = readl(&dbc->regs->portsc);
                if (!(portsc & DBC_PORTSC_PORT_ENABLED) &&
                    !(portsc & DBC_PORTSC_CONN_STATUS)) {
                        xhci_info(xhci, "DbC cable unplugged\n");
                        dbc->state = DS_ENABLED;
                        xhci_dbc_flush_requests(dbc);

                        return EVT_DISC;
                }

                /* Handle debug port reset event: */
                if (portsc & DBC_PORTSC_RESET_CHANGE) {
                        xhci_info(xhci, "DbC port reset\n");
                        writel(portsc, &dbc->regs->portsc);
                        dbc->state = DS_ENABLED;
                        xhci_dbc_flush_requests(dbc);

                        return EVT_DISC;
                }

                /* Handle endpoint stall event: */
                ctrl = readl(&dbc->regs->control);
                if ((ctrl & DBC_CTRL_HALT_IN_TR) ||
                    (ctrl & DBC_CTRL_HALT_OUT_TR)) {
                        xhci_info(xhci, "DbC Endpoint stall\n");
                        dbc->state = DS_STALLED;

                        if (ctrl & DBC_CTRL_HALT_IN_TR) {
                                dep = get_in_ep(xhci);
                                xhci_dbc_flush_endpoint_requests(dep);
                        }

                        if (ctrl & DBC_CTRL_HALT_OUT_TR) {
                                dep = get_out_ep(xhci);
                                xhci_dbc_flush_endpoint_requests(dep);
                        }

                        return EVT_DONE;
                }

                /* Clear DbC run change bit: */
                if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) {
                        writel(ctrl, &dbc->regs->control);
                        ctrl = readl(&dbc->regs->control);
                }

                break;
        case DS_STALLED:
                ctrl = readl(&dbc->regs->control);
                if (!(ctrl & DBC_CTRL_HALT_IN_TR) &&
                    !(ctrl & DBC_CTRL_HALT_OUT_TR) &&
                    (ctrl & DBC_CTRL_DBC_RUN)) {
                        dbc->state = DS_CONFIGURED;
                        break;
                }

                return EVT_DONE;
        default:
                xhci_err(xhci, "Unknown DbC state %d\n", dbc->state);
                break;
        }

        /* Handle the events in the event ring: */
        evt = dbc->ring_evt->dequeue;
        while ((le32_to_cpu(evt->event_cmd.flags) & TRB_CYCLE) ==
                        dbc->ring_evt->cycle_state) {
                /*
                 * Add a barrier between reading the cycle flag and any
                 * reads of the event's flags/data below:
                 */
                rmb();

                trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic);

                switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) {
                case TRB_TYPE(TRB_PORT_STATUS):
                        dbc_handle_port_status(xhci, evt);
                        break;
                case TRB_TYPE(TRB_TRANSFER):
                        dbc_handle_xfer_event(xhci, evt);
                        break;
                default:
                        break;
                }

                inc_deq(xhci, dbc->ring_evt);
                evt = dbc->ring_evt->dequeue;
                update_erdp = true;
        }

        /* Update event ring dequeue pointer: */
        if (update_erdp) {
                deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
                                           dbc->ring_evt->dequeue);
                xhci_write_64(xhci, deq, &dbc->regs->erdp);
        }

        return EVT_DONE;
}

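/*
 * Event work item: run the state machine and act on its verdict.  The
 * work re-arms itself every jiffy; an unknown verdict stops the polling
 * until the DbC is started again.
 */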
static void xhci_dbc_handle_events(struct work_struct *work)
{
        int                     ret;
        enum evtreturn          evtr;
        struct xhci_dbc         *dbc;
        struct xhci_hcd         *xhci;

        dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);
        xhci = dbc->xhci;

        spin_lock(&dbc->lock);
        evtr = xhci_dbc_do_handle_events(dbc);
        spin_unlock(&dbc->lock);

        switch (evtr) {
        case EVT_GSER:
                ret = xhci_dbc_tty_register_device(xhci);
                if (ret) {
                        xhci_err(xhci, "failed to alloc tty device\n");
                        break;
                }

                xhci_info(xhci, "DbC now attached to /dev/ttyDBC0\n");
                break;
        case EVT_DISC:
                xhci_dbc_tty_unregister_device(xhci);
                break;
        case EVT_DONE:
                break;
        default:
                xhci_info(xhci, "stop handling dbc events\n");
                return;
        }

        mod_delayed_work(system_wq, &dbc->event_work, 1);
}

static void xhci_do_dbc_exit(struct xhci_hcd *xhci)
{
        unsigned long           flags;

        spin_lock_irqsave(&xhci->lock, flags);
        kfree(xhci->dbc);
        xhci->dbc = NULL;
        spin_unlock_irqrestore(&xhci->lock, flags);
}

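/*
 * Locate the DbC extended capability and set up the software state.
 * Bail out if the capability is absent or the DbC is already enabled,
 * e.g. by an early debug user of the port.
 */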
static int xhci_do_dbc_init(struct xhci_hcd *xhci)
{
        u32                     reg;
        struct xhci_dbc         *dbc;
        unsigned long           flags;
        void __iomem            *base;
        int                     dbc_cap_offs;

        base = &xhci->cap_regs->hc_capbase;
        dbc_cap_offs = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_DEBUG);
        if (!dbc_cap_offs)
                return -ENODEV;

        dbc = kzalloc(sizeof(*dbc), GFP_KERNEL);
        if (!dbc)
                return -ENOMEM;

        dbc->regs = base + dbc_cap_offs;

        /* Leave the DbC alone if it is already in use: */
        reg = readl(&dbc->regs->control);
        if (reg & DBC_CTRL_DBC_ENABLE) {
                kfree(dbc);
                return -EBUSY;
        }

        spin_lock_irqsave(&xhci->lock, flags);
        if (xhci->dbc) {
                spin_unlock_irqrestore(&xhci->lock, flags);
                kfree(dbc);
                return -EBUSY;
        }
        xhci->dbc = dbc;
        spin_unlock_irqrestore(&xhci->lock, flags);

        dbc->xhci = xhci;
        INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
        spin_lock_init(&dbc->lock);

        return 0;
}

static ssize_t dbc_show(struct device *dev,
                        struct device_attribute *attr,
                        char *buf)
{
        const char              *p;
        struct xhci_dbc         *dbc;
        struct xhci_hcd         *xhci;

        xhci = hcd_to_xhci(dev_get_drvdata(dev));
        dbc = xhci->dbc;

        switch (dbc->state) {
        case DS_DISABLED:
                p = "disabled";
                break;
        case DS_INITIALIZED:
                p = "initialized";
                break;
        case DS_ENABLED:
                p = "enabled";
                break;
        case DS_CONNECTED:
                p = "connected";
                break;
        case DS_CONFIGURED:
                p = "configured";
                break;
        case DS_STALLED:
                p = "stalled";
                break;
        default:
                p = "unknown";
        }

        return sprintf(buf, "%s\n", p);
}

static ssize_t dbc_store(struct device *dev,
                         struct device_attribute *attr,
                         const char *buf, size_t count)
{
        struct xhci_dbc         *dbc;
        struct xhci_hcd         *xhci;

        xhci = hcd_to_xhci(dev_get_drvdata(dev));
        dbc = xhci->dbc;

        if (!strncmp(buf, "enable", 6))
                xhci_dbc_start(xhci);
        else if (!strncmp(buf, "disable", 7))
                xhci_dbc_stop(xhci);
        else
                return -EINVAL;

        return count;
}

static DEVICE_ATTR_RW(dbc);

int xhci_dbc_init(struct xhci_hcd *xhci)
{
        int                     ret;
        struct device           *dev = xhci_to_hcd(xhci)->self.controller;

        ret = xhci_do_dbc_init(xhci);
        if (ret)
                goto init_err3;

        ret = xhci_dbc_tty_register_driver(xhci);
        if (ret)
                goto init_err2;

        ret = device_create_file(dev, &dev_attr_dbc);
        if (ret)
                goto init_err1;

        return 0;

init_err1:
        xhci_dbc_tty_unregister_driver();
init_err2:
        xhci_do_dbc_exit(xhci);
init_err3:
        return ret;
}

void xhci_dbc_exit(struct xhci_hcd *xhci)
{
        struct device           *dev = xhci_to_hcd(xhci)->self.controller;

        if (!xhci->dbc)
                return;

        device_remove_file(dev, &dev_attr_dbc);
        xhci_dbc_tty_unregister_driver();
        xhci_dbc_stop(xhci);
        xhci_do_dbc_exit(xhci);
}

#ifdef CONFIG_PM
int xhci_dbc_suspend(struct xhci_hcd *xhci)
{
        struct xhci_dbc         *dbc = xhci->dbc;

        if (!dbc)
                return 0;

        if (dbc->state == DS_CONFIGURED)
                dbc->resume_required = 1;

        xhci_dbc_stop(xhci);

        return 0;
}

int xhci_dbc_resume(struct xhci_hcd *xhci)
{
        int                     ret = 0;
        struct xhci_dbc         *dbc = xhci->dbc;

        if (!dbc)
                return 0;

        if (dbc->resume_required) {
                dbc->resume_required = 0;
                xhci_dbc_start(xhci);
        }

        return ret;
}
#endif /* CONFIG_PM */