drivers/usb/dwc3/gadget.c
1 /**
2  * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
3  *
4  * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
5  *
6  * Authors: Felipe Balbi <balbi@ti.com>,
7  *          Sebastian Andrzej Siewior <bigeasy@linutronix.de>
8  *
9  * This program is free software: you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2  of
11  * the License as published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  */
18
19 #include <linux/kernel.h>
20 #include <linux/delay.h>
21 #include <linux/slab.h>
22 #include <linux/spinlock.h>
23 #include <linux/platform_device.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/interrupt.h>
26 #include <linux/io.h>
27 #include <linux/list.h>
28 #include <linux/dma-mapping.h>
29
30 #include <linux/usb/ch9.h>
31 #include <linux/usb/gadget.h>
32
33 #include "debug.h"
34 #include "core.h"
35 #include "gadget.h"
36 #include "io.h"
37
38 /**
39  * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
40  * @dwc: pointer to our context structure
41  * @mode: the mode to set (J, K, SE0 NAK, Packet, Force Enable)
42  *
43  * Caller should take care of locking. This function will
44  * return 0 on success or -EINVAL if a wrong Test Selector
45  * is passed.
46  */
47 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
48 {
49         u32             reg;
50
51         reg = dwc3_readl(dwc->regs, DWC3_DCTL);
52         reg &= ~DWC3_DCTL_TSTCTRL_MASK;
53
54         switch (mode) {
55         case TEST_J:
56         case TEST_K:
57         case TEST_SE0_NAK:
58         case TEST_PACKET:
59         case TEST_FORCE_EN:
60                 reg |= mode << 1;
61                 break;
62         default:
63                 return -EINVAL;
64         }
65
66         dwc3_writel(dwc->regs, DWC3_DCTL, reg);
67
68         return 0;
69 }
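/*
 * Usage sketch (illustrative, not part of this driver): the ep0 code selects
 * a test mode with dwc->lock held after the host's SET_FEATURE(TEST_MODE)
 * request completes, passing one of the TEST_* selectors from
 * <linux/usb/ch9.h>:
 *
 *	spin_lock_irqsave(&dwc->lock, flags);
 *	ret = dwc3_gadget_set_test_mode(dwc, TEST_PACKET);
 *	spin_unlock_irqrestore(&dwc->lock, flags);
 */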
70
71 /**
72  * dwc3_gadget_get_link_state - Gets current state of USB Link
73  * @dwc: pointer to our context structure
74  *
75  * Caller should take care of locking. This function will
76  * return the link state on success (>= 0) or -ETIMEDOUT.
77  */
78 int dwc3_gadget_get_link_state(struct dwc3 *dwc)
79 {
80         u32             reg;
81
82         reg = dwc3_readl(dwc->regs, DWC3_DSTS);
83
84         return DWC3_DSTS_USBLNKST(reg);
85 }
86
87 /**
88  * dwc3_gadget_set_link_state - Sets USB Link to a particular State
89  * @dwc: pointer to our context structure
90  * @state: the state to put link into
91  *
92  * Caller should take care of locking. This function will
93  * return 0 on success or -ETIMEDOUT.
94  */
95 int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
96 {
97         int             retries = 10000;
98         u32             reg;
99
100         /*
101          * Wait until device controller is ready. Only applies to 1.94a and
102          * later RTL.
103          */
104         if (dwc->revision >= DWC3_REVISION_194A) {
105                 while (--retries) {
106                         reg = dwc3_readl(dwc->regs, DWC3_DSTS);
107                         if (reg & DWC3_DSTS_DCNRD)
108                                 udelay(5);
109                         else
110                                 break;
111                 }
112
113                 if (retries <= 0)
114                         return -ETIMEDOUT;
115         }
116
117         reg = dwc3_readl(dwc->regs, DWC3_DCTL);
118         reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
119
120         /* set requested state */
121         reg |= DWC3_DCTL_ULSTCHNGREQ(state);
122         dwc3_writel(dwc->regs, DWC3_DCTL, reg);
123
124         /*
125          * The following code is racy when called from dwc3_gadget_wakeup,
126          * and is not needed, at least on newer versions.
127          */
128         if (dwc->revision >= DWC3_REVISION_194A)
129                 return 0;
130
131         /* wait for a change in DSTS */
132         retries = 10000;
133         while (--retries) {
134                 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
135
136                 if (DWC3_DSTS_USBLNKST(reg) == state)
137                         return 0;
138
139                 udelay(5);
140         }
141
142         return -ETIMEDOUT;
143 }
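/*
 * Usage sketch (illustrative): remote wakeup is performed by moving the link
 * from U3 into Recovery; with dwc->lock held a caller would do e.g.:
 *
 *	ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
 *	if (ret < 0)
 *		dev_err(dwc->dev, "failed to put link in Recovery\n");
 */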
144
145 /**
146  * dwc3_ep_inc_trb() - Increment a TRB index.
147  * @index - Pointer to the TRB index to increment.
148  *
149  * The index should never point to the link TRB. After incrementing,
150  * if it points to the link TRB, wrap around to the beginning. The
151  * link TRB is always at the last TRB entry.
152  */
153 static void dwc3_ep_inc_trb(u8 *index)
154 {
155         (*index)++;
156         if (*index == (DWC3_TRB_NUM - 1))
157                 *index = 0;
158 }
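/*
 * With DWC3_TRB_NUM slots in the pool, slot DWC3_TRB_NUM - 1 always holds the
 * link TRB, so incrementing an index of DWC3_TRB_NUM - 2 lands on the link
 * TRB slot and immediately wraps back to 0.
 */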
159
160 static void dwc3_ep_inc_enq(struct dwc3_ep *dep)
161 {
162         dwc3_ep_inc_trb(&dep->trb_enqueue);
163 }
164
165 static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
166 {
167         dwc3_ep_inc_trb(&dep->trb_dequeue);
168 }
169
170 void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
171                 int status)
172 {
173         struct dwc3                     *dwc = dep->dwc;
174         unsigned int                    unmap_after_complete = false;
175
176         req->started = false;
177         list_del(&req->list);
178         req->trb = NULL;
179         req->remaining = 0;
180
181         if (req->request.status == -EINPROGRESS)
182                 req->request.status = status;
183
184         /*
185          * NOTICE we don't want to unmap before calling ->complete() if we're
186          * dealing with a bounced ep0 request. If we unmap it here, we would end
187          * up overwriting the contents of req->buf and this could confuse the
188          * gadget driver.
189          */
190         if (dwc->ep0_bounced && dep->number <= 1) {
191                 dwc->ep0_bounced = false;
192                 unmap_after_complete = true;
193         } else {
194                 usb_gadget_unmap_request_by_dev(dwc->sysdev,
195                                 &req->request, req->direction);
196         }
197
198         trace_dwc3_gadget_giveback(req);
199
200         spin_unlock(&dwc->lock);
201         usb_gadget_giveback_request(&dep->endpoint, &req->request);
202         spin_lock(&dwc->lock);
203
204         if (unmap_after_complete)
205                 usb_gadget_unmap_request_by_dev(dwc->sysdev,
206                                 &req->request, req->direction);
207
208         if (dep->number > 1)
209                 pm_runtime_put(dwc->dev);
210 }
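/*
 * Note that dwc->lock is dropped around usb_gadget_giveback_request() above:
 * the gadget driver's ->complete() callback may queue new requests on the
 * same endpoint, which would otherwise deadlock on dwc->lock.
 */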
211
212 int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
213 {
214         u32             timeout = 500;
215         int             status = 0;
216         int             ret = 0;
217         u32             reg;
218
219         dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
220         dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);
221
222         do {
223                 reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
224                 if (!(reg & DWC3_DGCMD_CMDACT)) {
225                         status = DWC3_DGCMD_STATUS(reg);
226                         if (status)
227                                 ret = -EINVAL;
228                         break;
229                 }
230         } while (--timeout);
231
232         if (!timeout) {
233                 ret = -ETIMEDOUT;
234                 status = -ETIMEDOUT;
235         }
236
237         trace_dwc3_gadget_generic_cmd(cmd, param, status);
238
239         return ret;
240 }
241
242 static int __dwc3_gadget_wakeup(struct dwc3 *dwc);
243
244 int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
245                 struct dwc3_gadget_ep_cmd_params *params)
246 {
247         const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
248         struct dwc3             *dwc = dep->dwc;
249         u32                     timeout = 500;
250         u32                     reg;
251
252         int                     cmd_status = 0;
253         int                     susphy = false;
254         int                     ret = -EINVAL;
255
256         /*
257          * Synopsys Databook 2.60a states, in section 6.3.2.5.[1-8], that if
258          * we're issuing an endpoint command, we must check if
259          * GUSB2PHYCFG.SUSPHY bit is set. If it is, then we need to clear it.
260          *
261          * We will also set the SUSPHY bit to what it was before returning, as
262          * stated by the same section of the Synopsys databook.
263          */
264         if (dwc->gadget.speed <= USB_SPEED_HIGH) {
265                 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
266                 if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
267                         susphy = true;
268                         reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
269                         dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
270                 }
271         }
272
273         if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
274                 int             needs_wakeup;
275
276                 needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 ||
277                                 dwc->link_state == DWC3_LINK_STATE_U2 ||
278                                 dwc->link_state == DWC3_LINK_STATE_U3);
279
280                 if (unlikely(needs_wakeup)) {
281                         ret = __dwc3_gadget_wakeup(dwc);
282                         dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
283                                         ret);
284                 }
285         }
286
287         dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0);
288         dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1);
289         dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2);
290
291         /*
292          * Synopsys Databook 2.60a states, in section 6.3.2.5.6, that if we're
293          * not relying on XferNotReady, we can make use of a special "No
294          * Response Update Transfer" command where we should clear both CmdAct
295          * and CmdIOC bits.
296          *
297          * With this, we don't need to wait for command completion and can
298          * straight away issue further commands to the endpoint.
299          *
300          * NOTICE: We're making an assumption that control endpoints will never
301          * make use of Update Transfer command. This is a safe assumption
302          * because we can never have more than one request at a time with
303          * Control Endpoints. If anybody changes that assumption, this chunk
304          * needs to be updated accordingly.
305          */
306         if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_UPDATETRANSFER &&
307                         !usb_endpoint_xfer_isoc(desc))
308                 cmd &= ~(DWC3_DEPCMD_CMDIOC | DWC3_DEPCMD_CMDACT);
309         else
310                 cmd |= DWC3_DEPCMD_CMDACT;
311
312         dwc3_writel(dep->regs, DWC3_DEPCMD, cmd);
313         do {
314                 reg = dwc3_readl(dep->regs, DWC3_DEPCMD);
315                 if (!(reg & DWC3_DEPCMD_CMDACT)) {
316                         cmd_status = DWC3_DEPCMD_STATUS(reg);
317
318                         switch (cmd_status) {
319                         case 0:
320                                 ret = 0;
321                                 break;
322                         case DEPEVT_TRANSFER_NO_RESOURCE:
323                                 ret = -EINVAL;
324                                 break;
325                         case DEPEVT_TRANSFER_BUS_EXPIRY:
326                                 /*
327                                  * SW issues START TRANSFER command to
328                                  * isochronous ep with future frame interval. If
329                                  * future interval time has already passed when
330                                  * core receives the command, it will respond
331                                  * with an error status of 'Bus Expiry'.
332                                  *
333                                  * Instead of always returning -EINVAL, let's
334                                  * give a hint to the gadget driver that this is
335                                  * the case by returning -EAGAIN.
336                                  */
337                                 ret = -EAGAIN;
338                                 break;
339                         default:
340                                 dev_WARN(dwc->dev, "UNKNOWN cmd status\n");
341                         }
342
343                         break;
344                 }
345         } while (--timeout);
346
347         if (timeout == 0) {
348                 ret = -ETIMEDOUT;
349                 cmd_status = -ETIMEDOUT;
350         }
351
352         trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status);
353
354         if (ret == 0) {
355                 switch (DWC3_DEPCMD_CMD(cmd)) {
356                 case DWC3_DEPCMD_STARTTRANSFER:
357                         dep->flags |= DWC3_EP_TRANSFER_STARTED;
358                         break;
359                 case DWC3_DEPCMD_ENDTRANSFER:
360                         dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
361                         break;
362                 default:
363                         /* nothing */
364                         break;
365                 }
366         }
367
368         if (unlikely(susphy)) {
369                 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
370                 reg |= DWC3_GUSB2PHYCFG_SUSPHY;
371                 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
372         }
373
374         return ret;
375 }
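/*
 * Summary of the return values above: 0 on success, -EAGAIN when an
 * isochronous Start Transfer hit Bus Expiry, -EINVAL on No Resource or an
 * unknown command status, and -ETIMEDOUT when CmdAct never cleared.
 */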
376
377 static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
378 {
379         struct dwc3 *dwc = dep->dwc;
380         struct dwc3_gadget_ep_cmd_params params;
381         u32 cmd = DWC3_DEPCMD_CLEARSTALL;
382
383         /*
384          * As of core revision 2.60a the recommended programming model
385          * is to set the ClearPendIN bit when issuing a Clear Stall EP
386          * command for IN endpoints. This is to prevent an issue where
387          * some (non-compliant) hosts may not send ACK TPs for pending
388          * IN transfers due to a mishandled error condition. Synopsys
389          * STAR 9000614252.
390          */
391         if (dep->direction && (dwc->revision >= DWC3_REVISION_260A) &&
392             (dwc->gadget.speed >= USB_SPEED_SUPER))
393                 cmd |= DWC3_DEPCMD_CLEARPENDIN;
394
395         memset(&params, 0, sizeof(params));
396
397         return dwc3_send_gadget_ep_cmd(dep, cmd, &params);
398 }
399
400 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
401                 struct dwc3_trb *trb)
402 {
403         u32             offset = (char *) trb - (char *) dep->trb_pool;
404
405         return dep->trb_pool_dma + offset;
406 }
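/*
 * Worked example: the TRB at pool index n lives at byte offset
 * n * sizeof(struct dwc3_trb) from trb_pool, so its bus address is simply
 * trb_pool_dma + n * sizeof(struct dwc3_trb).
 */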
407
408 static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
409 {
410         struct dwc3             *dwc = dep->dwc;
411
412         if (dep->trb_pool)
413                 return 0;
414
415         dep->trb_pool = dma_alloc_coherent(dwc->sysdev,
416                         sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
417                         &dep->trb_pool_dma, GFP_KERNEL);
418         if (!dep->trb_pool) {
419                 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
420                                 dep->name);
421                 return -ENOMEM;
422         }
423
424         return 0;
425 }
426
427 static void dwc3_free_trb_pool(struct dwc3_ep *dep)
428 {
429         struct dwc3             *dwc = dep->dwc;
430
431         dma_free_coherent(dwc->sysdev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
432                         dep->trb_pool, dep->trb_pool_dma);
433
434         dep->trb_pool = NULL;
435         dep->trb_pool_dma = 0;
436 }
437
438 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep);
439
440 /**
441  * dwc3_gadget_start_config - Configure EP resources
442  * @dwc: pointer to our controller context structure
443  * @dep: endpoint that is being enabled
444  *
445  * The assignment of transfer resources cannot perfectly follow the
446  * data book due to the fact that the controller driver does not have
447  * all knowledge of the configuration in advance. It is given this
448  * information piecemeal by the composite gadget framework after every
449  * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook
450  * programming model in this scenario can cause errors. For two
451  * reasons:
452  *
453  * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION
454  * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of
455  * multiple interfaces.
456  *
457  * 2) The databook does not mention doing more DEPXFERCFG for new
458  * endpoint on alt setting (8.1.6).
459  *
460  * The following simplified method is used instead:
461  *
462  * All hardware endpoints can be assigned a transfer resource and this
463  * setting will stay persistent until either a core reset or
464  * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and
465  * do DEPXFERCFG for every hardware endpoint as well. We are
466  * guaranteed that there are as many transfer resources as endpoints.
467  *
468  * This function is called for each endpoint when it is being enabled
469  * but is triggered only when called for EP0-out, which always happens
470  * first, and which should only happen in one of the above conditions.
471  */
472 static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
473 {
474         struct dwc3_gadget_ep_cmd_params params;
475         u32                     cmd;
476         int                     i;
477         int                     ret;
478
479         if (dep->number)
480                 return 0;
481
482         memset(&params, 0x00, sizeof(params));
483         cmd = DWC3_DEPCMD_DEPSTARTCFG;
484
485         ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
486         if (ret)
487                 return ret;
488
489         for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
490                 struct dwc3_ep *dep = dwc->eps[i];
491
492                 if (!dep)
493                         continue;
494
495                 ret = dwc3_gadget_set_xfer_resource(dwc, dep);
496                 if (ret)
497                         return ret;
498         }
499
500         return 0;
501 }
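/*
 * In practice, enabling ep0-out therefore issues a single DEPSTARTCFG
 * followed by one SETTRANSFRESOURCE command (with a resource count of 1) for
 * every allocated hardware endpoint; enabling any other endpoint later issues
 * nothing here.
 */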
502
503 static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
504                 bool modify, bool restore)
505 {
506         const struct usb_ss_ep_comp_descriptor *comp_desc;
507         const struct usb_endpoint_descriptor *desc;
508         struct dwc3_gadget_ep_cmd_params params;
509
510         if (dev_WARN_ONCE(dwc->dev, modify && restore,
511                                         "Can't modify and restore\n"))
512                 return -EINVAL;
513
514         comp_desc = dep->endpoint.comp_desc;
515         desc = dep->endpoint.desc;
516
517         memset(&params, 0x00, sizeof(params));
518
519         params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
520                 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
521
522         /* Burst size is only needed in SuperSpeed mode */
523         if (dwc->gadget.speed >= USB_SPEED_SUPER) {
524                 u32 burst = dep->endpoint.maxburst;
525                 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst - 1);
526         }
527
528         if (modify) {
529                 params.param0 |= DWC3_DEPCFG_ACTION_MODIFY;
530         } else if (restore) {
531                 params.param0 |= DWC3_DEPCFG_ACTION_RESTORE;
532                 params.param2 |= dep->saved_state;
533         } else {
534                 params.param0 |= DWC3_DEPCFG_ACTION_INIT;
535         }
536
537         if (usb_endpoint_xfer_control(desc))
538                 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN;
539
540         if (dep->number <= 1 || usb_endpoint_xfer_isoc(desc))
541                 params.param1 |= DWC3_DEPCFG_XFER_NOT_READY_EN;
542
543         if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
544                 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
545                         | DWC3_DEPCFG_STREAM_EVENT_EN;
546                 dep->stream_capable = true;
547         }
548
549         if (!usb_endpoint_xfer_control(desc))
550                 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
551
552         /*
553          * We are doing 1:1 mapping for endpoints, meaning
554          * Physical Endpoint 2 maps to Logical Endpoint 2 and
555          * so on. We consider the direction bit as part of the physical
556          * endpoint number. So USB endpoint 0x81 is 0x03.
557          */
558         params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
559
560         /*
561          * We must use the lower 16 TX FIFOs even though
562          * HW might have more
563          */
564         if (dep->direction)
565                 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
566
567         if (desc->bInterval) {
568                 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
569                 dep->interval = 1 << (desc->bInterval - 1);
570         }
571
572         return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
573 }
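/*
 * Worked example of the numbering above: USB endpoint 0x81 (ep1in) is
 * physical endpoint 3 (endpoint number 1 shifted left by one, plus the
 * direction bit), so it is programmed with EP_NUMBER(3) and, being an IN
 * endpoint, with FIFO number 3 >> 1 == 1.
 */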
574
575 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
576 {
577         struct dwc3_gadget_ep_cmd_params params;
578
579         memset(&params, 0x00, sizeof(params));
580
581         params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
582
583         return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE,
584                         &params);
585 }
586
587 /**
588  * __dwc3_gadget_ep_enable - Initializes a HW endpoint
589  * @dep: endpoint to be initialized
590  * @modify: if true, modify the existing endpoint configuration
591  * @restore: if true, restore the endpoint configuration from dep->saved_state
 *
592  * Caller should take care of locking
593  */
594 static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
595                 bool modify, bool restore)
596 {
597         const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
598         struct dwc3             *dwc = dep->dwc;
599
600         u32                     reg;
601         int                     ret;
602
603         if (!(dep->flags & DWC3_EP_ENABLED)) {
604                 ret = dwc3_gadget_start_config(dwc, dep);
605                 if (ret)
606                         return ret;
607         }
608
609         ret = dwc3_gadget_set_ep_config(dwc, dep, modify, restore);
610         if (ret)
611                 return ret;
612
613         if (!(dep->flags & DWC3_EP_ENABLED)) {
614                 struct dwc3_trb *trb_st_hw;
615                 struct dwc3_trb *trb_link;
616
617                 dep->type = usb_endpoint_type(desc);
618                 dep->flags |= DWC3_EP_ENABLED;
619                 dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
620
621                 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
622                 reg |= DWC3_DALEPENA_EP(dep->number);
623                 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
624
625                 init_waitqueue_head(&dep->wait_end_transfer);
626
627                 if (usb_endpoint_xfer_control(desc))
628                         goto out;
629
630                 /* Initialize the TRB ring */
631                 dep->trb_dequeue = 0;
632                 dep->trb_enqueue = 0;
633                 memset(dep->trb_pool, 0,
634                        sizeof(struct dwc3_trb) * DWC3_TRB_NUM);
635
636                 /* Link TRB. The HWO bit is never reset */
637                 trb_st_hw = &dep->trb_pool[0];
638
639                 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
640                 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
641                 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
642                 trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
643                 trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
644         }
645
646         /*
647          * Issue StartTransfer here with a no-op TRB so we can always rely on the
648          * No Response Update Transfer command.
649          */
650         if (usb_endpoint_xfer_bulk(desc)) {
651                 struct dwc3_gadget_ep_cmd_params params;
652                 struct dwc3_trb *trb;
653                 dma_addr_t trb_dma;
654                 u32 cmd;
655
656                 memset(&params, 0, sizeof(params));
657                 trb = &dep->trb_pool[0];
658                 trb_dma = dwc3_trb_dma_offset(dep, trb);
659
660                 params.param0 = upper_32_bits(trb_dma);
661                 params.param1 = lower_32_bits(trb_dma);
662
663                 cmd = DWC3_DEPCMD_STARTTRANSFER;
664
665                 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
666                 if (ret < 0)
667                         return ret;
668
669                 dep->flags |= DWC3_EP_BUSY;
670
671                 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
672                 WARN_ON_ONCE(!dep->resource_index);
673         }
674
675
676 out:
677         trace_dwc3_gadget_ep_enable(dep);
678
679         return 0;
680 }
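/*
 * After a successful enable, the TRB pool forms a ring: slots 0 through
 * DWC3_TRB_NUM - 2 carry transfers, while the last slot holds a link TRB
 * whose buffer pointer wraps back to slot 0 and whose HWO bit stays set so
 * the controller follows it automatically.
 */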
681
682 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);
683 static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
684 {
685         struct dwc3_request             *req;
686
687         dwc3_stop_active_transfer(dwc, dep->number, true);
688
689         /* give back all requests to the gadget driver */
690         while (!list_empty(&dep->started_list)) {
691                 req = next_request(&dep->started_list);
692
693                 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
694         }
695
696         while (!list_empty(&dep->pending_list)) {
697                 req = next_request(&dep->pending_list);
698
699                 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
700         }
701 }
702
703 /**
704  * __dwc3_gadget_ep_disable - Disables a HW endpoint
705  * @dep: the endpoint to disable
706  *
707  * This function also removes requests which are currently processed by the
708  * hardware and those which are not yet scheduled.
709  * Caller should take care of locking.
710  */
711 static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
712 {
713         struct dwc3             *dwc = dep->dwc;
714         u32                     reg;
715
716         trace_dwc3_gadget_ep_disable(dep);
717
718         dwc3_remove_requests(dwc, dep);
719
720         /* make sure HW endpoint isn't stalled */
721         if (dep->flags & DWC3_EP_STALL)
722                 __dwc3_gadget_ep_set_halt(dep, 0, false);
723
724         reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
725         reg &= ~DWC3_DALEPENA_EP(dep->number);
726         dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
727
728         dep->stream_capable = false;
729         dep->type = 0;
730         dep->flags &= DWC3_EP_END_TRANSFER_PENDING;
731
732         /* Clear out the ep descriptors for non-ep0 */
733         if (dep->number > 1) {
734                 dep->endpoint.comp_desc = NULL;
735                 dep->endpoint.desc = NULL;
736         }
737
738         return 0;
739 }
740
741 /* -------------------------------------------------------------------------- */
742
743 static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
744                 const struct usb_endpoint_descriptor *desc)
745 {
746         return -EINVAL;
747 }
748
749 static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
750 {
751         return -EINVAL;
752 }
753
754 /* -------------------------------------------------------------------------- */
755
756 static int dwc3_gadget_ep_enable(struct usb_ep *ep,
757                 const struct usb_endpoint_descriptor *desc)
758 {
759         struct dwc3_ep                  *dep;
760         struct dwc3                     *dwc;
761         unsigned long                   flags;
762         int                             ret;
763
764         if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
765                 pr_debug("dwc3: invalid parameters\n");
766                 return -EINVAL;
767         }
768
769         if (!desc->wMaxPacketSize) {
770                 pr_debug("dwc3: missing wMaxPacketSize\n");
771                 return -EINVAL;
772         }
773
774         dep = to_dwc3_ep(ep);
775         dwc = dep->dwc;
776
777         if (dev_WARN_ONCE(dwc->dev, dep->flags & DWC3_EP_ENABLED,
778                                         "%s is already enabled\n",
779                                         dep->name))
780                 return 0;
781
782         spin_lock_irqsave(&dwc->lock, flags);
783         ret = __dwc3_gadget_ep_enable(dep, false, false);
784         spin_unlock_irqrestore(&dwc->lock, flags);
785
786         return ret;
787 }
788
789 static int dwc3_gadget_ep_disable(struct usb_ep *ep)
790 {
791         struct dwc3_ep                  *dep;
792         struct dwc3                     *dwc;
793         unsigned long                   flags;
794         int                             ret;
795
796         if (!ep) {
797                 pr_debug("dwc3: invalid parameters\n");
798                 return -EINVAL;
799         }
800
801         dep = to_dwc3_ep(ep);
802         dwc = dep->dwc;
803
804         if (dev_WARN_ONCE(dwc->dev, !(dep->flags & DWC3_EP_ENABLED),
805                                         "%s is already disabled\n",
806                                         dep->name))
807                 return 0;
808
809         spin_lock_irqsave(&dwc->lock, flags);
810         ret = __dwc3_gadget_ep_disable(dep);
811         spin_unlock_irqrestore(&dwc->lock, flags);
812
813         return ret;
814 }
815
816 static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
817         gfp_t gfp_flags)
818 {
819         struct dwc3_request             *req;
820         struct dwc3_ep                  *dep = to_dwc3_ep(ep);
821
822         req = kzalloc(sizeof(*req), gfp_flags);
823         if (!req)
824                 return NULL;
825
826         req->epnum      = dep->number;
827         req->dep        = dep;
828
829         dep->allocated_requests++;
830
831         trace_dwc3_alloc_request(req);
832
833         return &req->request;
834 }
835
836 static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
837                 struct usb_request *request)
838 {
839         struct dwc3_request             *req = to_dwc3_request(request);
840         struct dwc3_ep                  *dep = to_dwc3_ep(ep);
841
842         dep->allocated_requests--;
843         trace_dwc3_free_request(req);
844         kfree(req);
845 }
846
847 static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep);
848
849 static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
850                 dma_addr_t dma, unsigned length, unsigned chain, unsigned node,
851                 unsigned stream_id, unsigned short_not_ok, unsigned no_interrupt)
852 {
853         struct dwc3             *dwc = dep->dwc;
854         struct usb_gadget       *gadget = &dwc->gadget;
855         enum usb_device_speed   speed = gadget->speed;
856
857         dwc3_ep_inc_enq(dep);
858
859         trb->size = DWC3_TRB_SIZE_LENGTH(length);
860         trb->bpl = lower_32_bits(dma);
861         trb->bph = upper_32_bits(dma);
862
863         switch (usb_endpoint_type(dep->endpoint.desc)) {
864         case USB_ENDPOINT_XFER_CONTROL:
865                 trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
866                 break;
867
868         case USB_ENDPOINT_XFER_ISOC:
869                 if (!node) {
870                         trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
871
872                         if (speed == USB_SPEED_HIGH) {
873                                 struct usb_ep *ep = &dep->endpoint;
874                                 trb->size |= DWC3_TRB_SIZE_PCM1(ep->mult - 1);
875                         }
876                 } else {
877                         trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
878                 }
879
880                 /* always enable Interrupt on Missed ISOC */
881                 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
882                 break;
883
884         case USB_ENDPOINT_XFER_BULK:
885         case USB_ENDPOINT_XFER_INT:
886                 trb->ctrl = DWC3_TRBCTL_NORMAL;
887                 break;
888         default:
889                 /*
890                  * This is only possible with faulty memory because we
891                  * checked it already :)
892                  */
893                 dev_WARN(dwc->dev, "Unknown endpoint type %d\n",
894                                 usb_endpoint_type(dep->endpoint.desc));
895         }
896
897         /* always enable Continue on Short Packet */
898         if (usb_endpoint_dir_out(dep->endpoint.desc)) {
899                 trb->ctrl |= DWC3_TRB_CTRL_CSP;
900
901                 if (short_not_ok)
902                         trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
903         }
904
905         if ((!no_interrupt && !chain) ||
906                         (dwc3_calc_trbs_left(dep) == 0))
907                 trb->ctrl |= DWC3_TRB_CTRL_IOC;
908
909         if (chain)
910                 trb->ctrl |= DWC3_TRB_CTRL_CHN;
911
912         if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
913                 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(stream_id);
914
915         trb->ctrl |= DWC3_TRB_CTRL_HWO;
916
917         trace_dwc3_prepare_trb(dep, trb);
918 }
919
920 /**
921  * dwc3_prepare_one_trb - setup one TRB from one request
922  * @dep: endpoint for which this request is prepared
923  * @req: dwc3_request pointer
924  * @chain: should this TRB be chained to the next?
925  * @node: only for isochronous endpoints. First TRB needs different type.
926  */
927 static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
928                 struct dwc3_request *req, unsigned chain, unsigned node)
929 {
930         struct dwc3_trb         *trb;
931         unsigned                length = req->request.length;
932         unsigned                stream_id = req->request.stream_id;
933         unsigned                short_not_ok = req->request.short_not_ok;
934         unsigned                no_interrupt = req->request.no_interrupt;
935         dma_addr_t              dma = req->request.dma;
936
937         trb = &dep->trb_pool[dep->trb_enqueue];
938
939         if (!req->trb) {
940                 dwc3_gadget_move_started_request(req);
941                 req->trb = trb;
942                 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
943                 dep->queued_requests++;
944         }
945
946         __dwc3_prepare_one_trb(dep, trb, dma, length, chain, node,
947                         stream_id, short_not_ok, no_interrupt);
948 }
949
950 /**
951  * dwc3_ep_prev_trb() - Returns the previous TRB in the ring
952  * @dep: The endpoint with the TRB ring
953  * @index: The index of the current TRB in the ring
954  *
955  * Returns the TRB prior to the one pointed to by the index. If the
956  * index is 0, we will wrap backwards, skip the link TRB, and return
957  * the one just before that.
958  */
959 static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
960 {
961         u8 tmp = index;
962
963         if (!tmp)
964                 tmp = DWC3_TRB_NUM - 1;
965
966         return &dep->trb_pool[tmp - 1];
967 }
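/*
 * For example, an index of 0 wraps backwards past the link TRB in slot
 * DWC3_TRB_NUM - 1 and returns the TRB in slot DWC3_TRB_NUM - 2.
 */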
968
969 static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
970 {
971         struct dwc3_trb         *tmp;
972         struct dwc3             *dwc = dep->dwc;
973         u8                      trbs_left;
974
975         /*
976          * If enqueue & dequeue are equal, then the ring is either full or empty.
977          *
978          * One way to know for sure is if the TRB right before us has HWO bit
979          * set or not. If it has, then we're definitely full and can't fit any
980          * more transfers in our ring.
981          */
982         if (dep->trb_enqueue == dep->trb_dequeue) {
983                 tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
984                 if (dev_WARN_ONCE(dwc->dev, tmp->ctrl & DWC3_TRB_CTRL_HWO,
985                                   "%s No TRBS left\n", dep->name))
986                         return 0;
987
988                 return DWC3_TRB_NUM - 1;
989         }
990
991         trbs_left = dep->trb_dequeue - dep->trb_enqueue;
992         trbs_left &= (DWC3_TRB_NUM - 1);
993
994         if (dep->trb_dequeue < dep->trb_enqueue)
995                 trbs_left--;
996
997         return trbs_left;
998 }
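/*
 * Worked example, assuming DWC3_TRB_NUM == 256: with trb_enqueue == 5 and
 * trb_dequeue == 2, the subtraction gives (2 - 5) & 255 == 253, and one more
 * TRB is subtracted because the free span wraps past the link TRB slot,
 * leaving 252 usable TRBs.
 */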
999
1000 static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
1001                 struct dwc3_request *req)
1002 {
1003         struct scatterlist *sg = req->sg;
1004         struct scatterlist *s;
1005         int             i;
1006
1007         for_each_sg(sg, s, req->num_pending_sgs, i) {
1008                 unsigned int length = req->request.length;
1009                 unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
1010                 unsigned int rem = length % maxp;
1011                 unsigned chain = true;
1012
1013                 if (sg_is_last(s))
1014                         chain = false;
1015
1016                 if (rem && usb_endpoint_dir_out(dep->endpoint.desc) && !chain) {
1017                         struct dwc3     *dwc = dep->dwc;
1018                         struct dwc3_trb *trb;
1019
1020                         req->unaligned = true;
1021
1022                         /* prepare normal TRB */
1023                         dwc3_prepare_one_trb(dep, req, true, i);
1024
1025                         /* Now prepare one extra TRB to align transfer size */
1026                         trb = &dep->trb_pool[dep->trb_enqueue];
1027                         __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr,
1028                                         maxp - rem, false, 0,
1029                                         req->request.stream_id,
1030                                         req->request.short_not_ok,
1031                                         req->request.no_interrupt);
1032                 } else {
1033                         dwc3_prepare_one_trb(dep, req, chain, i);
1034                 }
1035
1036                 if (!dwc3_calc_trbs_left(dep))
1037                         break;
1038         }
1039 }
1040
1041 static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
1042                 struct dwc3_request *req)
1043 {
1044         unsigned int length = req->request.length;
1045         unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
1046         unsigned int rem = length % maxp;
1047
1048         if (rem && usb_endpoint_dir_out(dep->endpoint.desc)) {
1049                 struct dwc3     *dwc = dep->dwc;
1050                 struct dwc3_trb *trb;
1051
1052                 req->unaligned = true;
1053
1054                 /* prepare normal TRB */
1055                 dwc3_prepare_one_trb(dep, req, true, 0);
1056
1057                 /* Now prepare one extra TRB to align transfer size */
1058                 trb = &dep->trb_pool[dep->trb_enqueue];
1059                 __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp - rem,
1060                                 false, 0, req->request.stream_id,
1061                                 req->request.short_not_ok,
1062                                 req->request.no_interrupt);
1063         } else {
1064                 dwc3_prepare_one_trb(dep, req, false, 0);
1065         }
1066 }
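/*
 * Worked example of the alignment above: for an OUT endpoint with
 * wMaxPacketSize 512 and a 600 byte request, rem == 88, so a second TRB of
 * 512 - 88 == 424 bytes pointing at dwc->bounce_addr is chained after the
 * first one, rounding the total transfer size up to a multiple of 512.
 */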
1067
1068 /*
1069  * dwc3_prepare_trbs - setup TRBs from requests
1070  * @dep: endpoint for which requests are being prepared
1071  *
1072  * The function goes through the requests list and sets up TRBs for the
1073  * transfers. The function returns once there are no more TRBs available or
1074  * it runs out of requests.
1075  */
1076 static void dwc3_prepare_trbs(struct dwc3_ep *dep)
1077 {
1078         struct dwc3_request     *req, *n;
1079
1080         BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
1081
1082         if (!dwc3_calc_trbs_left(dep))
1083                 return;
1084
1085         /*
1086          * We can get in a situation where there's a request in the started list
1087          * but there weren't enough TRBs to fully kick it in the first time
1088          * around, so it has been waiting for more TRBs to be freed up.
1089          *
1090          * In that case, we should check if we have a request with pending_sgs
1091          * in the started list and prepare TRBs for that request first,
1092          * otherwise we will prepare TRBs completely out of order and that will
1093          * break things.
1094          */
1095         list_for_each_entry(req, &dep->started_list, list) {
1096                 if (req->num_pending_sgs > 0)
1097                         dwc3_prepare_one_trb_sg(dep, req);
1098
1099                 if (!dwc3_calc_trbs_left(dep))
1100                         return;
1101         }
1102
1103         list_for_each_entry_safe(req, n, &dep->pending_list, list) {
1104                 if (req->num_pending_sgs > 0)
1105                         dwc3_prepare_one_trb_sg(dep, req);
1106                 else
1107                         dwc3_prepare_one_trb_linear(dep, req);
1108
1109                 if (!dwc3_calc_trbs_left(dep))
1110                         return;
1111         }
1112 }
1113
1114 static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param)
1115 {
1116         struct dwc3_gadget_ep_cmd_params params;
1117         struct dwc3_request             *req;
1118         int                             starting;
1119         int                             ret;
1120         u32                             cmd;
1121
1122         starting = !(dep->flags & DWC3_EP_BUSY);
1123
1124         dwc3_prepare_trbs(dep);
1125         req = next_request(&dep->started_list);
1126         if (!req) {
1127                 dep->flags |= DWC3_EP_PENDING_REQUEST;
1128                 return 0;
1129         }
1130
1131         memset(&params, 0, sizeof(params));
1132
1133         if (starting) {
1134                 params.param0 = upper_32_bits(req->trb_dma);
1135                 params.param1 = lower_32_bits(req->trb_dma);
1136                 cmd = DWC3_DEPCMD_STARTTRANSFER |
1137                         DWC3_DEPCMD_PARAM(cmd_param);
1138         } else {
1139                 cmd = DWC3_DEPCMD_UPDATETRANSFER |
1140                         DWC3_DEPCMD_PARAM(dep->resource_index);
1141         }
1142
1143         ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
1144         if (ret < 0) {
1145                 /*
1146                  * FIXME we need to iterate over the list of requests
1147                  * here and stop, unmap, free and del each of the linked
1148                  * requests instead of what we do now.
1149                  */
1150                 if (req->trb)
1151                         memset(req->trb, 0, sizeof(struct dwc3_trb));
1152                 dep->queued_requests--;
1153                 dwc3_gadget_giveback(dep, req, ret);
1154                 return ret;
1155         }
1156
1157         dep->flags |= DWC3_EP_BUSY;
1158
1159         if (starting) {
1160                 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
1161                 WARN_ON_ONCE(!dep->resource_index);
1162         }
1163
1164         return 0;
1165 }
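/*
 * To recap: the first kick on an idle endpoint issues Start Transfer with the
 * address of the first TRB and caches the returned transfer resource index;
 * subsequent kicks while DWC3_EP_BUSY is set issue Update Transfer against
 * that cached resource index instead.
 */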
1166
1167 static int __dwc3_gadget_get_frame(struct dwc3 *dwc)
1168 {
1169         u32                     reg;
1170
1171         reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1172         return DWC3_DSTS_SOFFN(reg);
1173 }
1174
1175 static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
1176                 struct dwc3_ep *dep, u32 cur_uf)
1177 {
1178         u32 uf;
1179
1180         if (list_empty(&dep->pending_list)) {
1181                 dev_info(dwc->dev, "%s: ran out of requests\n",
1182                                 dep->name);
1183                 dep->flags |= DWC3_EP_PENDING_REQUEST;
1184                 return;
1185         }
1186
1187         /* 4 micro frames in the future */
1188         uf = cur_uf + dep->interval * 4;
1189
1190         __dwc3_gadget_kick_transfer(dep, uf);
1191 }
1192
1193 static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
1194                 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1195 {
1196         u32 cur_uf, mask;
1197
1198         mask = ~(dep->interval - 1);
1199         cur_uf = event->parameters & mask;
1200
1201         __dwc3_gadget_start_isoc(dwc, dep, cur_uf);
1202 }
1203
1204 static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
1205 {
1206         struct dwc3             *dwc = dep->dwc;
1207         int                     ret;
1208
1209         if (!dep->endpoint.desc) {
1210                 dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
1211                                 dep->name);
1212                 return -ESHUTDOWN;
1213         }
1214
1215         if (WARN(req->dep != dep, "request %p belongs to '%s'\n",
1216                                 &req->request, req->dep->name)) {
1217                 dev_err(dwc->dev, "%s: request %p belongs to '%s'\n",
1218                                 dep->name, &req->request, req->dep->name);
1219                 return -EINVAL;
1220         }
1221
1222         pm_runtime_get(dwc->dev);
1223
1224         req->request.actual     = 0;
1225         req->request.status     = -EINPROGRESS;
1226         req->direction          = dep->direction;
1227         req->epnum              = dep->number;
1228
1229         trace_dwc3_ep_queue(req);
1230
1231         ret = usb_gadget_map_request_by_dev(dwc->sysdev, &req->request,
1232                                             dep->direction);
1233         if (ret)
1234                 return ret;
1235
1236         req->sg                 = req->request.sg;
1237         req->num_pending_sgs    = req->request.num_mapped_sgs;
1238
1239         list_add_tail(&req->list, &dep->pending_list);
1240
1241         /*
1242          * NOTICE: Isochronous endpoints should NEVER be prestarted. We must
1243          * wait for an XferNotReady event so we know what the current
1244          * (micro-)frame number is.
1245          *
1246          * Without this trick, we are very, very likely gonna get Bus Expiry
1247          * errors which will force us issue EndTransfer command.
1248          */
1249         if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1250                 if ((dep->flags & DWC3_EP_PENDING_REQUEST)) {
1251                         if (dep->flags & DWC3_EP_TRANSFER_STARTED) {
1252                                 dwc3_stop_active_transfer(dwc, dep->number, true);
1253                                 dep->flags = DWC3_EP_ENABLED;
1254                         } else {
1255                                 u32 cur_uf;
1256
1257                                 cur_uf = __dwc3_gadget_get_frame(dwc);
1258                                 __dwc3_gadget_start_isoc(dwc, dep, cur_uf);
1259                                 dep->flags &= ~DWC3_EP_PENDING_REQUEST;
1260                         }
1261                 }
1262                 return 0;
1263         }
1264
1265         if (!dwc3_calc_trbs_left(dep))
1266                 return 0;
1267
1268         ret = __dwc3_gadget_kick_transfer(dep, 0);
1269         if (ret == -EBUSY)
1270                 ret = 0;
1271
1272         return ret;
1273 }
1274
1275 static void __dwc3_gadget_ep_zlp_complete(struct usb_ep *ep,
1276                 struct usb_request *request)
1277 {
1278         dwc3_gadget_ep_free_request(ep, request);
1279 }
1280
1281 static int __dwc3_gadget_ep_queue_zlp(struct dwc3 *dwc, struct dwc3_ep *dep)
1282 {
1283         struct dwc3_request             *req;
1284         struct usb_request              *request;
1285         struct usb_ep                   *ep = &dep->endpoint;
1286
1287         request = dwc3_gadget_ep_alloc_request(ep, GFP_ATOMIC);
1288         if (!request)
1289                 return -ENOMEM;
1290
1291         request->length = 0;
1292         request->buf = dwc->zlp_buf;
1293         request->complete = __dwc3_gadget_ep_zlp_complete;
1294
1295         req = to_dwc3_request(request);
1296
1297         return __dwc3_gadget_ep_queue(dep, req);
1298 }
1299
1300 static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
1301         gfp_t gfp_flags)
1302 {
1303         struct dwc3_request             *req = to_dwc3_request(request);
1304         struct dwc3_ep                  *dep = to_dwc3_ep(ep);
1305         struct dwc3                     *dwc = dep->dwc;
1306
1307         unsigned long                   flags;
1308
1309         int                             ret;
1310
1311         spin_lock_irqsave(&dwc->lock, flags);
1312         ret = __dwc3_gadget_ep_queue(dep, req);
1313
1314         /*
1315          * Okay, here's the thing: if the gadget driver has requested a ZLP by
1316          * setting request->zero, instead of doing magic, we will just queue an
1317          * extra usb_request ourselves so that it gets handled the same way as
1318          * any other request.
1319          */
1320         if (ret == 0 && request->zero && request->length &&
1321             (request->length % ep->maxpacket == 0))
1322                 ret = __dwc3_gadget_ep_queue_zlp(dwc, dep);
1323
1324         spin_unlock_irqrestore(&dwc->lock, flags);
1325
1326         return ret;
1327 }
1328
1329 static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
1330                 struct usb_request *request)
1331 {
1332         struct dwc3_request             *req = to_dwc3_request(request);
1333         struct dwc3_request             *r = NULL;
1334
1335         struct dwc3_ep                  *dep = to_dwc3_ep(ep);
1336         struct dwc3                     *dwc = dep->dwc;
1337
1338         unsigned long                   flags;
1339         int                             ret = 0;
1340
1341         trace_dwc3_ep_dequeue(req);
1342
1343         spin_lock_irqsave(&dwc->lock, flags);
1344
1345         list_for_each_entry(r, &dep->pending_list, list) {
1346                 if (r == req)
1347                         break;
1348         }
1349
1350         if (r != req) {
1351                 list_for_each_entry(r, &dep->started_list, list) {
1352                         if (r == req)
1353                                 break;
1354                 }
1355                 if (r == req) {
1356                         /* wait until it is processed */
1357                         dwc3_stop_active_transfer(dwc, dep->number, true);
1358
1359                         /*
1360                          * If request was already started, this means we had to
1361                          * stop the transfer. With that we also need to ignore
1362                          * all TRBs used by the request, however TRBs can only
1363                          * be modified after completion of END_TRANSFER
1364                          * command. So what we do here is that we wait for
1365                          * END_TRANSFER completion and only after that, we jump
1366                          * over TRBs by clearing HWO and incrementing dequeue
1367                          * pointer.
1368                          *
1369                          * Note that we have 2 possible types of transfers here:
1370                          *
1371                          * i) Linear buffer request
1372                          * ii) SG-list based request
1373                          *
1374                          * SG-list based requests will have r->num_pending_sgs
1375                          * set to a valid number (> 0). Linear requests
1376                          * normally use a single TRB.
1377                          *
1378                          * For each of these two cases, if r->unaligned flag is
1379                          * set, one extra TRB has been used to align transfer
1380                          * size to wMaxPacketSize.
1381                          *
1382                          * All of these cases need to be taken into
1383                          * consideration so we don't mess up our TRB ring
1384                          * pointers.
1385                          */
1386                         wait_event_lock_irq(dep->wait_end_transfer,
1387                                         !(dep->flags & DWC3_EP_END_TRANSFER_PENDING),
1388                                         dwc->lock);
1389
1390                         if (!r->trb)
1391                                 goto out1;
1392
1393                         if (r->num_pending_sgs) {
1394                                 struct dwc3_trb *trb;
1395                                 int i = 0;
1396
1397                                 for (i = 0; i < r->num_pending_sgs; i++) {
1398                                         trb = r->trb + i;
1399                                         trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
1400                                         dwc3_ep_inc_deq(dep);
1401                                 }
1402
1403                                 if (r->unaligned) {
1404                                         trb = r->trb + r->num_pending_sgs + 1;
1405                                         trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
1406                                         dwc3_ep_inc_deq(dep);
1407                                 }
1408                         } else {
1409                                 struct dwc3_trb *trb = r->trb;
1410
1411                                 trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
1412                                 dwc3_ep_inc_deq(dep);
1413
1414                                 if (r->unaligned) {
1415                                         trb = r->trb + 1;
1416                                         trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
1417                                         dwc3_ep_inc_deq(dep);
1418                                 }
1419                         }
1420                         goto out1;
1421                 }
1422                 dev_err(dwc->dev, "request %p was not queued to %s\n",
1423                                 request, ep->name);
1424                 ret = -EINVAL;
1425                 goto out0;
1426         }
1427
1428 out1:
1429         /* giveback the request */
1430         dep->queued_requests--;
1431         dwc3_gadget_giveback(dep, req, -ECONNRESET);
1432
1433 out0:
1434         spin_unlock_irqrestore(&dwc->lock, flags);
1435
1436         return ret;
1437 }
1438
1439 int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
1440 {
1441         struct dwc3_gadget_ep_cmd_params        params;
1442         struct dwc3                             *dwc = dep->dwc;
1443         int                                     ret;
1444
1445         if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1446                 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
1447                 return -EINVAL;
1448         }
1449
1450         memset(&params, 0x00, sizeof(params));
1451
1452         if (value) {
1453                 struct dwc3_trb *trb;
1454
1455                 unsigned transfer_in_flight;
1456                 unsigned started;
1457
1458                 if (dep->flags & DWC3_EP_STALL)
1459                         return 0;
1460
1461                 if (dep->number > 1)
1462                         trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
1463                 else
1464                         trb = &dwc->ep0_trb[dep->trb_enqueue];
1465
1466                 transfer_in_flight = trb->ctrl & DWC3_TRB_CTRL_HWO;
1467                 started = !list_empty(&dep->started_list);
1468
1469                 if (!protocol && ((dep->direction && transfer_in_flight) ||
1470                                 (!dep->direction && started))) {
1471                         return -EAGAIN;
1472                 }
1473
1474                 ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETSTALL,
1475                                 &params);
1476                 if (ret)
1477                         dev_err(dwc->dev, "failed to set STALL on %s\n",
1478                                         dep->name);
1479                 else
1480                         dep->flags |= DWC3_EP_STALL;
1481         } else {
1482                 if (!(dep->flags & DWC3_EP_STALL))
1483                         return 0;
1484
1485                 ret = dwc3_send_clear_stall_ep_cmd(dep);
1486                 if (ret)
1487                         dev_err(dwc->dev, "failed to clear STALL on %s\n",
1488                                         dep->name);
1489                 else
1490                         dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
1491         }
1492
1493         return ret;
1494 }
1495
1496 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
1497 {
1498         struct dwc3_ep                  *dep = to_dwc3_ep(ep);
1499         struct dwc3                     *dwc = dep->dwc;
1500
1501         unsigned long                   flags;
1502
1503         int                             ret;
1504
1505         spin_lock_irqsave(&dwc->lock, flags);
1506         ret = __dwc3_gadget_ep_set_halt(dep, value, false);
1507         spin_unlock_irqrestore(&dwc->lock, flags);
1508
1509         return ret;
1510 }
1511
1512 static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
1513 {
1514         struct dwc3_ep                  *dep = to_dwc3_ep(ep);
1515         struct dwc3                     *dwc = dep->dwc;
1516         unsigned long                   flags;
1517         int                             ret;
1518
1519         spin_lock_irqsave(&dwc->lock, flags);
1520         dep->flags |= DWC3_EP_WEDGE;
1521
1522         if (dep->number == 0 || dep->number == 1)
1523                 ret = __dwc3_gadget_ep0_set_halt(ep, 1);
1524         else
1525                 ret = __dwc3_gadget_ep_set_halt(dep, 1, false);
1526         spin_unlock_irqrestore(&dwc->lock, flags);
1527
1528         return ret;
1529 }
1530
1531 /* -------------------------------------------------------------------------- */
1532
1533 static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
1534         .bLength        = USB_DT_ENDPOINT_SIZE,
1535         .bDescriptorType = USB_DT_ENDPOINT,
1536         .bmAttributes   = USB_ENDPOINT_XFER_CONTROL,
1537 };
1538
1539 static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
1540         .enable         = dwc3_gadget_ep0_enable,
1541         .disable        = dwc3_gadget_ep0_disable,
1542         .alloc_request  = dwc3_gadget_ep_alloc_request,
1543         .free_request   = dwc3_gadget_ep_free_request,
1544         .queue          = dwc3_gadget_ep0_queue,
1545         .dequeue        = dwc3_gadget_ep_dequeue,
1546         .set_halt       = dwc3_gadget_ep0_set_halt,
1547         .set_wedge      = dwc3_gadget_ep_set_wedge,
1548 };
1549
1550 static const struct usb_ep_ops dwc3_gadget_ep_ops = {
1551         .enable         = dwc3_gadget_ep_enable,
1552         .disable        = dwc3_gadget_ep_disable,
1553         .alloc_request  = dwc3_gadget_ep_alloc_request,
1554         .free_request   = dwc3_gadget_ep_free_request,
1555         .queue          = dwc3_gadget_ep_queue,
1556         .dequeue        = dwc3_gadget_ep_dequeue,
1557         .set_halt       = dwc3_gadget_ep_set_halt,
1558         .set_wedge      = dwc3_gadget_ep_set_wedge,
1559 };
1560
1561 /* -------------------------------------------------------------------------- */
1562
1563 static int dwc3_gadget_get_frame(struct usb_gadget *g)
1564 {
1565         struct dwc3             *dwc = gadget_to_dwc(g);
1566
1567         return __dwc3_gadget_get_frame(dwc);
1568 }
1569
1570 static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
1571 {
1572         int                     retries;
1573
1574         int                     ret;
1575         u32                     reg;
1576
1577         u8                      link_state;
1578         u8                      speed;
1579
1580         /*
1581          * According to the Databook, a Remote Wakeup request should
1582          * be issued only when the device is in the Early Suspend state.
1583          *
1584          * We can check that via USB Link State bits in DSTS register.
1585          */
1586         reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1587
1588         speed = reg & DWC3_DSTS_CONNECTSPD;
1589         if ((speed == DWC3_DSTS_SUPERSPEED) ||
1590             (speed == DWC3_DSTS_SUPERSPEED_PLUS))
1591                 return 0;
1592
1593         link_state = DWC3_DSTS_USBLNKST(reg);
1594
1595         switch (link_state) {
1596         case DWC3_LINK_STATE_RX_DET:    /* in HS, means Early Suspend */
1597         case DWC3_LINK_STATE_U3:        /* in HS, means SUSPEND */
1598                 break;
1599         default:
1600                 return -EINVAL;
1601         }
1602
1603         ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
1604         if (ret < 0) {
1605                 dev_err(dwc->dev, "failed to put link in Recovery\n");
1606                 return ret;
1607         }
1608
1609         /* Recent versions do this automatically */
1610         if (dwc->revision < DWC3_REVISION_194A) {
1611                 /* write zeroes to Link Change Request */
1612                 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1613                 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
1614                 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1615         }
1616
1617         /* poll until Link State changes to ON */
1618         retries = 20000;
1619
1620         while (retries--) {
1621                 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1622
1623                 /* in HS, means ON */
1624                 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
1625                         break;
1626         }
1627
1628         if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
1629                 dev_err(dwc->dev, "failed to send remote wakeup\n");
1630                 return -EINVAL;
1631         }
1632
1633         return 0;
1634 }
1635
1636 static int dwc3_gadget_wakeup(struct usb_gadget *g)
1637 {
1638         struct dwc3             *dwc = gadget_to_dwc(g);
1639         unsigned long           flags;
1640         int                     ret;
1641
1642         spin_lock_irqsave(&dwc->lock, flags);
1643         ret = __dwc3_gadget_wakeup(dwc);
1644         spin_unlock_irqrestore(&dwc->lock, flags);
1645
1646         return ret;
1647 }
1648
1649 static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
1650                 int is_selfpowered)
1651 {
1652         struct dwc3             *dwc = gadget_to_dwc(g);
1653         unsigned long           flags;
1654
1655         spin_lock_irqsave(&dwc->lock, flags);
1656         g->is_selfpowered = !!is_selfpowered;
1657         spin_unlock_irqrestore(&dwc->lock, flags);
1658
1659         return 0;
1660 }
1661
1662 static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
1663 {
1664         u32                     reg;
1665         u32                     timeout = 500;
1666
1667         if (pm_runtime_suspended(dwc->dev))
1668                 return 0;
1669
1670         reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1671         if (is_on) {
1672                 if (dwc->revision <= DWC3_REVISION_187A) {
1673                         reg &= ~DWC3_DCTL_TRGTULST_MASK;
1674                         reg |= DWC3_DCTL_TRGTULST_RX_DET;
1675                 }
1676
1677                 if (dwc->revision >= DWC3_REVISION_194A)
1678                         reg &= ~DWC3_DCTL_KEEP_CONNECT;
1679                 reg |= DWC3_DCTL_RUN_STOP;
1680
1681                 if (dwc->has_hibernation)
1682                         reg |= DWC3_DCTL_KEEP_CONNECT;
1683
1684                 dwc->pullups_connected = true;
1685         } else {
1686                 reg &= ~DWC3_DCTL_RUN_STOP;
1687
1688                 if (dwc->has_hibernation && !suspend)
1689                         reg &= ~DWC3_DCTL_KEEP_CONNECT;
1690
1691                 dwc->pullups_connected = false;
1692         }
1693
1694         dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1695
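             /*
              * Poll DSTS.DEVCTRLHLT until it reflects the requested state:
              * the controller-halted bit should be set once the core has
              * stopped and cleared once it is running again.
              */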
1696         do {
1697                 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1698                 reg &= DWC3_DSTS_DEVCTRLHLT;
1699         } while (--timeout && !(!is_on ^ !reg));
1700
1701         if (!timeout)
1702                 return -ETIMEDOUT;
1703
1704         return 0;
1705 }
1706
1707 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
1708 {
1709         struct dwc3             *dwc = gadget_to_dwc(g);
1710         unsigned long           flags;
1711         int                     ret;
1712
1713         is_on = !!is_on;
1714
1715         /*
1716          * Per databook, when we want to stop the gadget, if a control transfer
1717          * is still in progress, complete it and get the core into the setup phase.
1718          */
1719         if (!is_on && dwc->ep0state != EP0_SETUP_PHASE) {
1720                 reinit_completion(&dwc->ep0_in_setup);
1721
1722                 ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
1723                                 msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
1724                 if (ret == 0) {
1725                         dev_err(dwc->dev, "timed out waiting for SETUP phase\n");
1726                         return -ETIMEDOUT;
1727                 }
1728         }
1729
1730         spin_lock_irqsave(&dwc->lock, flags);
1731         ret = dwc3_gadget_run_stop(dwc, is_on, false);
1732         spin_unlock_irqrestore(&dwc->lock, flags);
1733
1734         return ret;
1735 }
1736
1737 static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
1738 {
1739         u32                     reg;
1740
1741         /* Enable all but Start and End of Frame IRQs */
1742         reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
1743                         DWC3_DEVTEN_EVNTOVERFLOWEN |
1744                         DWC3_DEVTEN_CMDCMPLTEN |
1745                         DWC3_DEVTEN_ERRTICERREN |
1746                         DWC3_DEVTEN_WKUPEVTEN |
1747                         DWC3_DEVTEN_CONNECTDONEEN |
1748                         DWC3_DEVTEN_USBRSTEN |
1749                         DWC3_DEVTEN_DISCONNEVTEN);
1750
1751         if (dwc->revision < DWC3_REVISION_250A)
1752                 reg |= DWC3_DEVTEN_ULSTCNGEN;
1753
1754         dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
1755 }
1756
1757 static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
1758 {
1759         /* mask all interrupts */
1760         dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
1761 }
1762
1763 static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
1764 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
1765
1766 /**
1767  * dwc3_gadget_setup_nump - Calculate and initialize NUMP field of DCFG
1768  * @dwc: pointer to our context structure
1769  *
1770  * The following looks complex, but it's actually very simple. In order to
1771  * calculate the number of packets we can burst at once on OUT transfers, we
1772  * use the RxFIFO size.
1773  *
1774  * To calculate RxFIFO size we need two numbers:
1775  * MDWIDTH = size, in bits, of the internal memory bus
1776  * RAM2_DEPTH = depth, in MDWIDTH, of internal RAM2 (where RxFIFO sits)
1777  *
1778  * Given these two numbers, the formula is simple:
1779  *
1780  * RxFIFO Size = (RAM2_DEPTH * MDWIDTH / 8) - 24 - 16;
1781  *
1782  * 24 bytes is for 3x SETUP packets
1783  * 16 bytes is a clock domain crossing tolerance
1784  *
1785  * Given RxFIFO Size, NUMP = RxFIFOSize / 1024;
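      *
      * As a purely illustrative example (the real values come from GHWPARAMS0
      * and GHWPARAMS7 at runtime; these numbers are assumed only for the sake
      * of the arithmetic), take MDWIDTH = 64 bits and RAM2_DEPTH = 2048:
      *
      * RxFIFO Size = (2048 * 64 / 8) - 24 - 16 = 16344 bytes
      * NUMP = 16344 / 1024 = 15, which is below the cap of 16 applied below.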
1786  */
1787 static void dwc3_gadget_setup_nump(struct dwc3 *dwc)
1788 {
1789         u32 ram2_depth;
1790         u32 mdwidth;
1791         u32 nump;
1792         u32 reg;
1793
1794         ram2_depth = DWC3_GHWPARAMS7_RAM2_DEPTH(dwc->hwparams.hwparams7);
1795         mdwidth = DWC3_GHWPARAMS0_MDWIDTH(dwc->hwparams.hwparams0);
1796
1797         nump = ((ram2_depth * mdwidth / 8) - 24 - 16) / 1024;
1798         nump = min_t(u32, nump, 16);
1799
1800         /* update NumP */
1801         reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1802         reg &= ~DWC3_DCFG_NUMP_MASK;
1803         reg |= nump << DWC3_DCFG_NUMP_SHIFT;
1804         dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1805 }
1806
1807 static int __dwc3_gadget_start(struct dwc3 *dwc)
1808 {
1809         struct dwc3_ep          *dep;
1810         int                     ret = 0;
1811         u32                     reg;
1812
1813         /*
1814          * Use IMOD if enabled via dwc->imod_interval. Otherwise, if
1815          * the core supports IMOD, disable it.
1816          */
1817         if (dwc->imod_interval) {
1818                 dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
1819                 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
1820         } else if (dwc3_has_imod(dwc)) {
1821                 dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), 0);
1822         }
1823
1824         reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1825         reg &= ~(DWC3_DCFG_SPEED_MASK);
1826
1827         /**
1828          * WORKAROUND: DWC3 revisions < 2.20a have an issue
1829          * which would cause a metastability state on the Run/Stop
1830          * bit if we try to force the IP into USB2-only mode.
1831          *
1832          * Because of that, we cannot configure the IP to any
1833          * speed other than SuperSpeed.
1834          *
1835          * Refers to:
1836          *
1837          * STAR#9000525659: Clock Domain Crossing on DCTL in
1838          * USB 2.0 Mode
1839          */
1840         if (dwc->revision < DWC3_REVISION_220A) {
1841                 reg |= DWC3_DCFG_SUPERSPEED;
1842         } else {
1843                 switch (dwc->maximum_speed) {
1844                 case USB_SPEED_LOW:
1845                         reg |= DWC3_DCFG_LOWSPEED;
1846                         break;
1847                 case USB_SPEED_FULL:
1848                         reg |= DWC3_DCFG_FULLSPEED;
1849                         break;
1850                 case USB_SPEED_HIGH:
1851                         reg |= DWC3_DCFG_HIGHSPEED;
1852                         break;
1853                 case USB_SPEED_SUPER_PLUS:
1854                         reg |= DWC3_DCFG_SUPERSPEED_PLUS;
1855                         break;
1856                 default:
1857                         dev_err(dwc->dev, "invalid dwc->maximum_speed (%d)\n",
1858                                 dwc->maximum_speed);
1859                         /* fall through */
1860                 case USB_SPEED_SUPER:
1861                         reg |= DWC3_DCFG_SUPERSPEED;
1862                         break;
1863                 }
1864         }
1865         dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1866
1867         /*
1868          * We are telling dwc3 that we want to use DCFG.NUMP as ACK TP's NUMP
1869          * field instead of letting dwc3 itself calculate that automatically.
1870          *
1871          * This way, we maximize the chances that we'll be able to get several
1872          * bursts of data without going through any sort of endpoint throttling.
1873          */
1874         reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
1875         reg &= ~DWC3_GRXTHRCFG_PKTCNTSEL;
1876         dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
1877
1878         dwc3_gadget_setup_nump(dwc);
1879
1880         /* Start with SuperSpeed Default */
1881         dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1882
1883         dep = dwc->eps[0];
1884         ret = __dwc3_gadget_ep_enable(dep, false, false);
1885         if (ret) {
1886                 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1887                 goto err0;
1888         }
1889
1890         dep = dwc->eps[1];
1891         ret = __dwc3_gadget_ep_enable(dep, false, false);
1892         if (ret) {
1893                 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1894                 goto err1;
1895         }
1896
1897         /* begin to receive SETUP packets */
1898         dwc->ep0state = EP0_SETUP_PHASE;
1899         dwc3_ep0_out_start(dwc);
1900
1901         dwc3_gadget_enable_irq(dwc);
1902
1903         return 0;
1904
1905 err1:
1906         __dwc3_gadget_ep_disable(dwc->eps[0]);
1907
1908 err0:
1909         return ret;
1910 }
1911
1912 static int dwc3_gadget_start(struct usb_gadget *g,
1913                 struct usb_gadget_driver *driver)
1914 {
1915         struct dwc3             *dwc = gadget_to_dwc(g);
1916         unsigned long           flags;
1917         int                     ret = 0;
1918         int                     irq;
1919
1920         irq = dwc->irq_gadget;
1921         ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
1922                         IRQF_SHARED, "dwc3", dwc->ev_buf);
1923         if (ret) {
1924                 dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
1925                                 irq, ret);
1926                 goto err0;
1927         }
1928
1929         spin_lock_irqsave(&dwc->lock, flags);
1930         if (dwc->gadget_driver) {
1931                 dev_err(dwc->dev, "%s is already bound to %s\n",
1932                                 dwc->gadget.name,
1933                                 dwc->gadget_driver->driver.name);
1934                 ret = -EBUSY;
1935                 goto err1;
1936         }
1937
1938         dwc->gadget_driver      = driver;
1939
1940         if (pm_runtime_active(dwc->dev))
1941                 __dwc3_gadget_start(dwc);
1942
1943         spin_unlock_irqrestore(&dwc->lock, flags);
1944
1945         return 0;
1946
1947 err1:
1948         spin_unlock_irqrestore(&dwc->lock, flags);
1949         free_irq(irq, dwc);
1950
1951 err0:
1952         return ret;
1953 }
1954
1955 static void __dwc3_gadget_stop(struct dwc3 *dwc)
1956 {
1957         dwc3_gadget_disable_irq(dwc);
1958         __dwc3_gadget_ep_disable(dwc->eps[0]);
1959         __dwc3_gadget_ep_disable(dwc->eps[1]);
1960 }
1961
1962 static int dwc3_gadget_stop(struct usb_gadget *g)
1963 {
1964         struct dwc3             *dwc = gadget_to_dwc(g);
1965         unsigned long           flags;
1966         int                     epnum;
1967
1968         spin_lock_irqsave(&dwc->lock, flags);
1969
1970         if (pm_runtime_suspended(dwc->dev))
1971                 goto out;
1972
1973         __dwc3_gadget_stop(dwc);
1974
1975         for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1976                 struct dwc3_ep  *dep = dwc->eps[epnum];
1977
1978                 if (!dep)
1979                         continue;
1980
1981                 if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
1982                         continue;
1983
1984                 wait_event_lock_irq(dep->wait_end_transfer,
1985                                     !(dep->flags & DWC3_EP_END_TRANSFER_PENDING),
1986                                     dwc->lock);
1987         }
1988
1989 out:
1990         dwc->gadget_driver      = NULL;
1991         spin_unlock_irqrestore(&dwc->lock, flags);
1992
1993         free_irq(dwc->irq_gadget, dwc->ev_buf);
1994
1995         return 0;
1996 }
1997
1998 static const struct usb_gadget_ops dwc3_gadget_ops = {
1999         .get_frame              = dwc3_gadget_get_frame,
2000         .wakeup                 = dwc3_gadget_wakeup,
2001         .set_selfpowered        = dwc3_gadget_set_selfpowered,
2002         .pullup                 = dwc3_gadget_pullup,
2003         .udc_start              = dwc3_gadget_start,
2004         .udc_stop               = dwc3_gadget_stop,
2005 };
2006
2007 /* -------------------------------------------------------------------------- */
2008
2009 static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
2010                 u8 num, u32 direction)
2011 {
2012         struct dwc3_ep                  *dep;
2013         u8                              i;
2014
2015         for (i = 0; i < num; i++) {
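                     /*
                      * dwc3 physical endpoint numbers interleave the two
                      * directions: even numbers are OUT endpoints, odd
                      * numbers are IN endpoints.
                      */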
2016                 u8 epnum = (i << 1) | (direction ? 1 : 0);
2017
2018                 dep = kzalloc(sizeof(*dep), GFP_KERNEL);
2019                 if (!dep)
2020                         return -ENOMEM;
2021
2022                 dep->dwc = dwc;
2023                 dep->number = epnum;
2024                 dep->direction = !!direction;
2025                 dep->regs = dwc->regs + DWC3_DEP_BASE(epnum);
2026                 dwc->eps[epnum] = dep;
2027
2028                 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
2029                                 (epnum & 1) ? "in" : "out");
2030
2031                 dep->endpoint.name = dep->name;
2032
2033                 if (!(dep->number > 1)) {
2034                         dep->endpoint.desc = &dwc3_gadget_ep0_desc;
2035                         dep->endpoint.comp_desc = NULL;
2036                 }
2037
2038                 spin_lock_init(&dep->lock);
2039
2040                 if (epnum == 0 || epnum == 1) {
2041                         usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
2042                         dep->endpoint.maxburst = 1;
2043                         dep->endpoint.ops = &dwc3_gadget_ep0_ops;
2044                         if (!epnum)
2045                                 dwc->gadget.ep0 = &dep->endpoint;
2046                 } else if (direction) {
2047                         int mdwidth;
2048                         int size;
2049                         int ret;
2050                         int num;
2051
2052                         mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
2053                         /* MDWIDTH is represented in bits, we need it in bytes */
2054                         mdwidth /= 8;
2055
2056                         size = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(i));
2057                         size = DWC3_GTXFIFOSIZ_TXFDEF(size);
2058
2059                         /* FIFO Depth is in MDWIDTH bytes. Multiply */
2060                         size *= mdwidth;
2061
2062                         num = size / 1024;
2063                         if (num == 0)
2064                                 num = 1;
2065
2066                         /*
2067                          * FIFO sizes account for an extra MDWIDTH * (num + 1) bytes of
2068                          * internal overhead. We don't really know how these bytes are
2069                          * used, but the documentation says they exist.
2070                          */
2071                         size -= mdwidth * (num + 1);
2072                         size /= num;
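
                             /*
                              * Purely illustrative numbers (the real values
                              * depend on how GTXFIFOSIZ was configured at
                              * synthesis time): with an 8-byte mdwidth and a
                              * 2048-byte TxFIFO, num = 2, the overhead is
                              * 8 * (2 + 1) = 24 bytes, and the per-packet
                              * limit becomes (2048 - 24) / 2 = 1012 bytes.
                              */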
2073
2074                         usb_ep_set_maxpacket_limit(&dep->endpoint, size);
2075
2076                         dep->endpoint.max_streams = 15;
2077                         dep->endpoint.ops = &dwc3_gadget_ep_ops;
2078                         list_add_tail(&dep->endpoint.ep_list,
2079                                         &dwc->gadget.ep_list);
2080
2081                         ret = dwc3_alloc_trb_pool(dep);
2082                         if (ret)
2083                                 return ret;
2084                 } else {
2085                         int             ret;
2086
2087                         usb_ep_set_maxpacket_limit(&dep->endpoint, 1024);
2088                         dep->endpoint.max_streams = 15;
2089                         dep->endpoint.ops = &dwc3_gadget_ep_ops;
2090                         list_add_tail(&dep->endpoint.ep_list,
2091                                         &dwc->gadget.ep_list);
2092
2093                         ret = dwc3_alloc_trb_pool(dep);
2094                         if (ret)
2095                                 return ret;
2096                 }
2097
2098                 if (epnum == 0 || epnum == 1) {
2099                         dep->endpoint.caps.type_control = true;
2100                 } else {
2101                         dep->endpoint.caps.type_iso = true;
2102                         dep->endpoint.caps.type_bulk = true;
2103                         dep->endpoint.caps.type_int = true;
2104                 }
2105
2106                 dep->endpoint.caps.dir_in = !!direction;
2107                 dep->endpoint.caps.dir_out = !direction;
2108
2109                 INIT_LIST_HEAD(&dep->pending_list);
2110                 INIT_LIST_HEAD(&dep->started_list);
2111         }
2112
2113         return 0;
2114 }
2115
2116 static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
2117 {
2118         int                             ret;
2119
2120         INIT_LIST_HEAD(&dwc->gadget.ep_list);
2121
2122         ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0);
2123         if (ret < 0) {
2124                 dev_err(dwc->dev, "failed to initialize OUT endpoints\n");
2125                 return ret;
2126         }
2127
2128         ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1);
2129         if (ret < 0) {
2130                 dev_err(dwc->dev, "failed to initialize IN endpoints\n");
2131                 return ret;
2132         }
2133
2134         return 0;
2135 }
2136
2137 static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
2138 {
2139         struct dwc3_ep                  *dep;
2140         u8                              epnum;
2141
2142         for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2143                 dep = dwc->eps[epnum];
2144                 if (!dep)
2145                         continue;
2146                 /*
2147                  * Physical endpoints 0 and 1 are special; they form the
2148                  * bi-directional USB endpoint 0.
2149                  *
2150                  * For those two physical endpoints, we don't allocate a TRB
2151                  * pool nor do we add them to the gadget's endpoint list. Because of
2152                  * that, we shouldn't perform those two operations here either, or we
2153                  * would end up with all sorts of bugs when removing dwc3.ko.
2154                  */
2155                 if (epnum != 0 && epnum != 1) {
2156                         dwc3_free_trb_pool(dep);
2157                         list_del(&dep->endpoint.ep_list);
2158                 }
2159
2160                 kfree(dep);
2161         }
2162 }
2163
2164 /* -------------------------------------------------------------------------- */
2165
2166 static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
2167                 struct dwc3_request *req, struct dwc3_trb *trb,
2168                 const struct dwc3_event_depevt *event, int status,
2169                 int chain)
2170 {
2171         unsigned int            count;
2172         unsigned int            s_pkt = 0;
2173         unsigned int            trb_status;
2174
2175         dwc3_ep_inc_deq(dep);
2176
2177         if (req->trb == trb)
2178                 dep->queued_requests--;
2179
2180         trace_dwc3_complete_trb(dep, trb);
2181
2182         /*
2183          * If we're in the middle of a series of chained TRBs and we
2184          * receive a short transfer along the way, DWC3 will skip
2185          * through all TRBs including the last TRB in the chain (the
2186          * one where the CHN bit is zero). DWC3 will also avoid clearing
2187          * the HWO bit, so SW has to do it manually.
2188          *
2189          * We're going to do that here to avoid problems of HW trying
2190          * to use bogus TRBs for transfers.
2191          */
2192         if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO))
2193                 trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
2194
2195         /*
2196          * If we're dealing with unaligned size OUT transfer, we will be left
2197          * with one TRB pending in the ring. We need to manually clear HWO bit
2198          * from that TRB.
2199          */
2200         if (req->unaligned && (trb->ctrl & DWC3_TRB_CTRL_HWO)) {
2201                 trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
2202                 return 1;
2203         }
2204
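             /*
              * After completion, the TRB's BUFSIZ field holds the number of
              * bytes that were left untransferred for this TRB.
              */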
2205         count = trb->size & DWC3_TRB_SIZE_MASK;
2206         req->remaining += count;
2207
2208         if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
2209                 return 1;
2210
2211         if (dep->direction) {
2212                 if (count) {
2213                         trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
2214                         if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
2215                                 /*
2216                                  * If a missed isoc occurred and there is
2217                                  * no request queued, then issue END
2218                                  * TRANSFER, so that the core generates the
2219                                  * next xfernotready and we will issue
2220                                  * a fresh START TRANSFER.
2221                                  * If there are still queued requests,
2222                                  * then wait; do not issue either END
2223                                  * or UPDATE TRANSFER, just attach the next
2224                                  * request in pending_list during
2225                                  * giveback. If any future queued request
2226                                  * is successfully transferred, then we
2227                                  * will issue UPDATE TRANSFER for all
2228                                  * requests in the pending_list.
2229                                  */
2230                                 dep->flags |= DWC3_EP_MISSED_ISOC;
2231                         } else {
2232                                 dev_err(dwc->dev, "incomplete IN transfer %s\n",
2233                                                 dep->name);
2234                                 status = -ECONNRESET;
2235                         }
2236                 } else {
2237                         dep->flags &= ~DWC3_EP_MISSED_ISOC;
2238                 }
2239         } else {
2240                 if (count && (event->status & DEPEVT_STATUS_SHORT))
2241                         s_pkt = 1;
2242         }
2243
2244         if (s_pkt && !chain)
2245                 return 1;
2246
2247         if ((event->status & DEPEVT_STATUS_IOC) &&
2248                         (trb->ctrl & DWC3_TRB_CTRL_IOC))
2249                 return 1;
2250
2251         return 0;
2252 }
2253
2254 static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
2255                 const struct dwc3_event_depevt *event, int status)
2256 {
2257         struct dwc3_request     *req, *n;
2258         struct dwc3_trb         *trb;
2259         bool                    ioc = false;
2260         int                     ret = 0;
2261
2262         list_for_each_entry_safe(req, n, &dep->started_list, list) {
2263                 unsigned length;
2264                 int chain;
2265
2266                 length = req->request.length;
2267                 chain = req->num_pending_sgs > 0;
2268                 if (chain) {
2269                         struct scatterlist *sg = req->sg;
2270                         struct scatterlist *s;
2271                         unsigned int pending = req->num_pending_sgs;
2272                         unsigned int i;
2273
2274                         for_each_sg(sg, s, pending, i) {
2275                                 trb = &dep->trb_pool[dep->trb_dequeue];
2276
2277                                 if (trb->ctrl & DWC3_TRB_CTRL_HWO)
2278                                         break;
2279
2280                                 req->sg = sg_next(s);
2281                                 req->num_pending_sgs--;
2282
2283                                 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
2284                                                 event, status, chain);
2285                                 if (ret)
2286                                         break;
2287                         }
2288                 } else {
2289                         trb = &dep->trb_pool[dep->trb_dequeue];
2290                         ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
2291                                         event, status, chain);
2292                 }
2293
2294                 if (req->unaligned) {
2295                         trb = &dep->trb_pool[dep->trb_dequeue];
2296                         ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
2297                                         event, status, false);
2298                         req->unaligned = false;
2299                 }
2300
2301                 req->request.actual = length - req->remaining;
2302
2303                 if ((req->request.actual < length) && req->num_pending_sgs)
2304                         return __dwc3_gadget_kick_transfer(dep, 0);
2305
2306                 dwc3_gadget_giveback(dep, req, status);
2307
2308                 if (ret) {
2309                         if ((event->status & DEPEVT_STATUS_IOC) &&
2310                             (trb->ctrl & DWC3_TRB_CTRL_IOC))
2311                                 ioc = true;
2312                         break;
2313                 }
2314         }
2315
2316         /*
2317          * Our endpoint might get disabled by another thread during
2318          * dwc3_gadget_giveback(). If that happens, we just return 1 early
2319          * so that the DWC3_EP_BUSY flag gets cleared.
2320          */
2321         if (!dep->endpoint.desc)
2322                 return 1;
2323
2324         if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
2325                         list_empty(&dep->started_list)) {
2326                 if (list_empty(&dep->pending_list)) {
2327                         /*
2328                          * If there is no entry in the request list, do
2329                          * not issue END TRANSFER now. Just set the PENDING
2330                          * flag so that END TRANSFER is issued when an
2331                          * entry is added to the request list.
2332                          */
2333                         dep->flags = DWC3_EP_PENDING_REQUEST;
2334                 } else {
2335                         dwc3_stop_active_transfer(dwc, dep->number, true);
2336                         dep->flags = DWC3_EP_ENABLED;
2337                 }
2338                 return 1;
2339         }
2340
2341         if (usb_endpoint_xfer_isoc(dep->endpoint.desc) && ioc)
2342                 return 0;
2343
2344         return 1;
2345 }
2346
2347 static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
2348                 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
2349 {
2350         unsigned                status = 0;
2351         int                     clean_busy;
2352         u32                     is_xfer_complete;
2353
2354         is_xfer_complete = (event->endpoint_event == DWC3_DEPEVT_XFERCOMPLETE);
2355
2356         if (event->status & DEPEVT_STATUS_BUSERR)
2357                 status = -ECONNRESET;
2358
2359         clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
2360         if (clean_busy && (!dep->endpoint.desc || is_xfer_complete ||
2361                                 usb_endpoint_xfer_isoc(dep->endpoint.desc)))
2362                 dep->flags &= ~DWC3_EP_BUSY;
2363
2364         /*
2365          * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
2366          * See dwc3_gadget_linksts_change_interrupt() for 1st half.
2367          */
2368         if (dwc->revision < DWC3_REVISION_183A) {
2369                 u32             reg;
2370                 int             i;
2371
2372                 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
2373                         dep = dwc->eps[i];
2374
2375                         if (!(dep->flags & DWC3_EP_ENABLED))
2376                                 continue;
2377
2378                         if (!list_empty(&dep->started_list))
2379                                 return;
2380                 }
2381
2382                 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2383                 reg |= dwc->u1u2;
2384                 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2385
2386                 dwc->u1u2 = 0;
2387         }
2388
2389         /*
2390          * Our endpoint might get disabled by another thread during
2391          * dwc3_gadget_giveback(). If that happens, we just return early so
2392          * that the DWC3_EP_BUSY flag gets cleared.
2393          */
2394         if (!dep->endpoint.desc)
2395                 return;
2396
2397         if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
2398                 int ret;
2399
2400                 ret = __dwc3_gadget_kick_transfer(dep, 0);
2401                 if (!ret || ret == -EBUSY)
2402                         return;
2403         }
2404 }
2405
2406 static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
2407                 const struct dwc3_event_depevt *event)
2408 {
2409         struct dwc3_ep          *dep;
2410         u8                      epnum = event->endpoint_number;
2411         u8                      cmd;
2412
2413         dep = dwc->eps[epnum];
2414
2415         if (!(dep->flags & DWC3_EP_ENABLED)) {
2416                 if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
2417                         return;
2418
2419                 /* Handle only EPCMDCMPLT when EP disabled */
2420                 if (event->endpoint_event != DWC3_DEPEVT_EPCMDCMPLT)
2421                         return;
2422         }
2423
2424         if (epnum == 0 || epnum == 1) {
2425                 dwc3_ep0_interrupt(dwc, event);
2426                 return;
2427         }
2428
2429         switch (event->endpoint_event) {
2430         case DWC3_DEPEVT_XFERCOMPLETE:
2431                 dep->resource_index = 0;
2432
2433                 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
2434                         dev_err(dwc->dev, "XferComplete for Isochronous endpoint\n");
2435                         return;
2436                 }
2437
2438                 dwc3_endpoint_transfer_complete(dwc, dep, event);
2439                 break;
2440         case DWC3_DEPEVT_XFERINPROGRESS:
2441                 dwc3_endpoint_transfer_complete(dwc, dep, event);
2442                 break;
2443         case DWC3_DEPEVT_XFERNOTREADY:
2444                 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
2445                         dwc3_gadget_start_isoc(dwc, dep, event);
2446                 } else {
2447                         int ret;
2448
2449                         ret = __dwc3_gadget_kick_transfer(dep, 0);
2450                         if (!ret || ret == -EBUSY)
2451                                 return;
2452                 }
2453
2454                 break;
2455         case DWC3_DEPEVT_STREAMEVT:
2456                 if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
2457                         dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
2458                                         dep->name);
2459                         return;
2460                 }
2461                 break;
2462         case DWC3_DEPEVT_EPCMDCMPLT:
2463                 cmd = DEPEVT_PARAMETER_CMD(event->parameters);
2464
2465                 if (cmd == DWC3_DEPCMD_ENDTRANSFER) {
2466                         dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
2467                         wake_up(&dep->wait_end_transfer);
2468                 }
2469                 break;
2470         case DWC3_DEPEVT_RXTXFIFOEVT:
2471                 break;
2472         }
2473 }
2474
2475 static void dwc3_disconnect_gadget(struct dwc3 *dwc)
2476 {
2477         if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
2478                 spin_unlock(&dwc->lock);
2479                 dwc->gadget_driver->disconnect(&dwc->gadget);
2480                 spin_lock(&dwc->lock);
2481         }
2482 }
2483
2484 static void dwc3_suspend_gadget(struct dwc3 *dwc)
2485 {
2486         if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
2487                 spin_unlock(&dwc->lock);
2488                 dwc->gadget_driver->suspend(&dwc->gadget);
2489                 spin_lock(&dwc->lock);
2490         }
2491 }
2492
2493 static void dwc3_resume_gadget(struct dwc3 *dwc)
2494 {
2495         if (dwc->gadget_driver && dwc->gadget_driver->resume) {
2496                 spin_unlock(&dwc->lock);
2497                 dwc->gadget_driver->resume(&dwc->gadget);
2498                 spin_lock(&dwc->lock);
2499         }
2500 }
2501
2502 static void dwc3_reset_gadget(struct dwc3 *dwc)
2503 {
2504         if (!dwc->gadget_driver)
2505                 return;
2506
2507         if (dwc->gadget.speed != USB_SPEED_UNKNOWN) {
2508                 spin_unlock(&dwc->lock);
2509                 usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver);
2510                 spin_lock(&dwc->lock);
2511         }
2512 }
2513
2514 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
2515 {
2516         struct dwc3_ep *dep;
2517         struct dwc3_gadget_ep_cmd_params params;
2518         u32 cmd;
2519         int ret;
2520
2521         dep = dwc->eps[epnum];
2522
2523         if ((dep->flags & DWC3_EP_END_TRANSFER_PENDING) ||
2524             !dep->resource_index)
2525                 return;
2526
2527         /*
2528          * NOTICE: We are violating what the Databook says about the
2529          * EndTransfer command. Ideally we would _always_ wait for the
2530          * EndTransfer Command Completion IRQ, but that's causing too
2531          * much trouble synchronizing between us and gadget driver.
2532          *
2533          * We have discussed this with the IP Provider and it was
2534          * suggested to giveback all requests here, but give HW some
2535          * extra time to synchronize with the interconnect. We're using
2536          * an arbitrary 100us delay for that.
2537          *
2538          * Note also that a similar handling was tested by Synopsys
2539          * (thanks a lot Paul) and nothing bad has come out of it.
2540          * In short, what we're doing is:
2541          *
2542          * - Issue EndTransfer WITH CMDIOC bit set
2543          * - Wait 100us
2544          *
2545          * As of IP version 3.10a of the DWC_usb3 IP, the controller
2546          * supports a mode to work around the above limitation. The
2547          * software can poll the CMDACT bit in the DEPCMD register
2548          * after issuing an EndTransfer command. This mode is enabled
2549          * by writing GUCTL2[14]. This polling is already done in the
2550          * dwc3_send_gadget_ep_cmd() function so if the mode is
2551          * enabled, the EndTransfer command will have completed upon
2552          * returning from this function and we don't need to delay for
2553          * 100us.
2554          *
2555          * This mode is NOT available on the DWC_usb31 IP.
2556          */
2557
2558         cmd = DWC3_DEPCMD_ENDTRANSFER;
2559         cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
2560         cmd |= DWC3_DEPCMD_CMDIOC;
2561         cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
2562         memset(&params, 0, sizeof(params));
2563         ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
2564         WARN_ON_ONCE(ret);
2565         dep->resource_index = 0;
2566         dep->flags &= ~DWC3_EP_BUSY;
2567
2568         if (dwc3_is_usb31(dwc) || dwc->revision < DWC3_REVISION_310A) {
2569                 dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
2570                 udelay(100);
2571         }
2572 }
2573
2574 static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
2575 {
2576         u32 epnum;
2577
2578         for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2579                 struct dwc3_ep *dep;
2580                 int ret;
2581
2582                 dep = dwc->eps[epnum];
2583                 if (!dep)
2584                         continue;
2585
2586                 if (!(dep->flags & DWC3_EP_STALL))
2587                         continue;
2588
2589                 dep->flags &= ~DWC3_EP_STALL;
2590
2591                 ret = dwc3_send_clear_stall_ep_cmd(dep);
2592                 WARN_ON_ONCE(ret);
2593         }
2594 }
2595
2596 static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
2597 {
2598         int                     reg;
2599
2600         reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2601         reg &= ~DWC3_DCTL_INITU1ENA;
2602         dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2603
2604         reg &= ~DWC3_DCTL_INITU2ENA;
2605         dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2606
2607         dwc3_disconnect_gadget(dwc);
2608
2609         dwc->gadget.speed = USB_SPEED_UNKNOWN;
2610         dwc->setup_packet_pending = false;
2611         usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);
2612
2613         dwc->connected = false;
2614 }
2615
2616 static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
2617 {
2618         u32                     reg;
2619
2620         dwc->connected = true;
2621
2622         /*
2623          * WORKAROUND: DWC3 revisions <1.88a have an issue which
2624          * would cause a missing Disconnect Event if there's a
2625          * pending Setup Packet in the FIFO.
2626          *
2627          * There's no suggested workaround on the official Bug
2628          * report, which states that "unless the driver/application
2629          * is doing any special handling of a disconnect event,
2630          * there is no functional issue".
2631          *
2632          * Unfortunately, it turns out that we _do_ some special
2633          * handling of a disconnect event, namely complete all
2634          * pending transfers, notify gadget driver of the
2635          * disconnection, and so on.
2636          *
2637          * Our suggested workaround is to follow the Disconnect
2638          * Event steps here, instead, based on a setup_packet_pending
2639          * flag. That flag gets set whenever we have a SETUP_PENDING
2640          * status for EP0 TRBs and gets cleared on XferComplete for the
2641          * same endpoint.
2642          *
2643          * Refers to:
2644          *
2645          * STAR#9000466709: RTL: Device : Disconnect event not
2646          * generated if setup packet pending in FIFO
2647          */
2648         if (dwc->revision < DWC3_REVISION_188A) {
2649                 if (dwc->setup_packet_pending)
2650                         dwc3_gadget_disconnect_interrupt(dwc);
2651         }
2652
2653         dwc3_reset_gadget(dwc);
2654
2655         reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2656         reg &= ~DWC3_DCTL_TSTCTRL_MASK;
2657         dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2658         dwc->test_mode = false;
2659         dwc3_clear_stall_all_ep(dwc);
2660
2661         /* Reset device address to zero */
2662         reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2663         reg &= ~(DWC3_DCFG_DEVADDR_MASK);
2664         dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2665 }
2666
2667 static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2668 {
2669         struct dwc3_ep          *dep;
2670         int                     ret;
2671         u32                     reg;
2672         u8                      speed;
2673
2674         reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2675         speed = reg & DWC3_DSTS_CONNECTSPD;
2676         dwc->speed = speed;
2677
2678         /*
2679          * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
2680          * each time on Connect Done.
2681          *
2682          * Currently we always use the reset value. If any platform
2683          * wants to set this to a different value, we need to add a
2684          * setting and update GCTL.RAMCLKSEL here.
2685          */
2686
2687         switch (speed) {
2688         case DWC3_DSTS_SUPERSPEED_PLUS:
2689                 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2690                 dwc->gadget.ep0->maxpacket = 512;
2691                 dwc->gadget.speed = USB_SPEED_SUPER_PLUS;
2692                 break;
2693         case DWC3_DSTS_SUPERSPEED:
2694                 /*
2695                  * WORKAROUND: DWC3 revisions <1.90a have an issue which
2696                  * would cause a missing USB3 Reset event.
2697                  *
2698                  * In such situations, we should force a USB3 Reset
2699                  * event by calling our dwc3_gadget_reset_interrupt()
2700                  * routine.
2701                  *
2702                  * Refers to:
2703                  *
2704                  * STAR#9000483510: RTL: SS : USB3 reset event may
2705                  * not be generated always when the link enters poll
2706                  */
2707                 if (dwc->revision < DWC3_REVISION_190A)
2708                         dwc3_gadget_reset_interrupt(dwc);
2709
2710                 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2711                 dwc->gadget.ep0->maxpacket = 512;
2712                 dwc->gadget.speed = USB_SPEED_SUPER;
2713                 break;
2714         case DWC3_DSTS_HIGHSPEED:
2715                 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2716                 dwc->gadget.ep0->maxpacket = 64;
2717                 dwc->gadget.speed = USB_SPEED_HIGH;
2718                 break;
2719         case DWC3_DSTS_FULLSPEED:
2720                 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2721                 dwc->gadget.ep0->maxpacket = 64;
2722                 dwc->gadget.speed = USB_SPEED_FULL;
2723                 break;
2724         case DWC3_DSTS_LOWSPEED:
2725                 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
2726                 dwc->gadget.ep0->maxpacket = 8;
2727                 dwc->gadget.speed = USB_SPEED_LOW;
2728                 break;
2729         }
2730
2731         /* Enable USB2 LPM Capability */
2732
2733         if ((dwc->revision > DWC3_REVISION_194A) &&
2734             (speed != DWC3_DSTS_SUPERSPEED) &&
2735             (speed != DWC3_DSTS_SUPERSPEED_PLUS)) {
2736                 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2737                 reg |= DWC3_DCFG_LPM_CAP;
2738                 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2739
2740                 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2741                 reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
2742
2743                 reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold);
2744
2745                 /*
2746          * When dwc3 revision >= 2.40a, the LPM Erratum is enabled, and
2747          * DCFG.LPMCap is set, the core responds with an ACK if the
2748          * BESL value in the LPM token is less than or equal to the LPM
2749          * NYET threshold.
2750                  */
2751                 WARN_ONCE(dwc->revision < DWC3_REVISION_240A
2752                                 && dwc->has_lpm_erratum,
2753                                 "LPM Erratum not available on dwc3 revisions < 2.40a\n");
2754
2755                 if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
2756                         reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold);
2757
2758                 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2759         } else {
2760                 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2761                 reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
2762                 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2763         }
2764
2765         dep = dwc->eps[0];
2766         ret = __dwc3_gadget_ep_enable(dep, true, false);
2767         if (ret) {
2768                 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2769                 return;
2770         }
2771
2772         dep = dwc->eps[1];
2773         ret = __dwc3_gadget_ep_enable(dep, true, false);
2774         if (ret) {
2775                 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2776                 return;
2777         }
2778
2779         /*
2780          * Configure PHY via GUSB3PIPECTLn if required.
2781          *
2782          * Update GTXFIFOSIZn
2783          *
2784          * In both cases reset values should be sufficient.
2785          */
2786 }
2787
2788 static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2789 {
2790         /*
2791          * TODO take core out of low power mode when that's
2792          * implemented.
2793          */
2794
2795         if (dwc->gadget_driver && dwc->gadget_driver->resume) {
2796                 spin_unlock(&dwc->lock);
2797                 dwc->gadget_driver->resume(&dwc->gadget);
2798                 spin_lock(&dwc->lock);
2799         }
2800 }
2801
2802 static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
2803                 unsigned int evtinfo)
2804 {
2805         enum dwc3_link_state    next = evtinfo & DWC3_LINK_STATE_MASK;
2806         unsigned int            pwropt;
2807
2808         /*
2809          * WORKAROUND: DWC3 < 2.50a has an issue when configured without
2810          * Hibernation mode enabled which shows up when the device detects a
2811          * host-initiated U3 exit.
2812          *
2813          * In that case, the device will generate a Link State Change Interrupt
2814          * from U3 to RESUME which is only necessary if Hibernation is
2815          * configured in.
2816          *
2817          * There is no functional change due to such a spurious event and we
2818          * just need to ignore it.
2819          *
2820          * Refers to:
2821          *
2822          * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
2823          * operational mode
2824          */
2825         pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
2826         if ((dwc->revision < DWC3_REVISION_250A) &&
2827                         (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
2828                 if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
2829                                 (next == DWC3_LINK_STATE_RESUME)) {
2830                         return;
2831                 }
2832         }
2833
2834         /*
2835          * WORKAROUND: DWC3 Revisions <1.83a have an issue where, depending
2836          * on the link partner, the USB session might go through multiple
2837          * entries/exits of low power states before a transfer takes place.
2838          *
2839          * Due to this problem, we might experience lower throughput. The
2840          * suggested workaround is to disable DCTL[12:9] bits if we're
2841          * transitioning from U1/U2 to U0 and enable those bits again
2842          * after a transfer completes and there are no pending transfers
2843          * on any of the enabled endpoints.
2844          *
2845          * This is the first half of that workaround.
2846          *
2847          * Refers to:
2848          *
2849          * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
2850          * core send LGO_Ux entering U0
2851          */
2852         if (dwc->revision < DWC3_REVISION_183A) {
2853                 if (next == DWC3_LINK_STATE_U0) {
2854                         u32     u1u2;
2855                         u32     reg;
2856
2857                         switch (dwc->link_state) {
2858                         case DWC3_LINK_STATE_U1:
2859                         case DWC3_LINK_STATE_U2:
2860                                 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2861                                 u1u2 = reg & (DWC3_DCTL_INITU2ENA
2862                                                 | DWC3_DCTL_ACCEPTU2ENA
2863                                                 | DWC3_DCTL_INITU1ENA
2864                                                 | DWC3_DCTL_ACCEPTU1ENA);
2865
2866                                 if (!dwc->u1u2)
2867                                         dwc->u1u2 = reg & u1u2;
2868
2869                                 reg &= ~u1u2;
2870
2871                                 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2872                                 break;
2873                         default:
2874                                 /* do nothing */
2875                                 break;
2876                         }
2877                 }
2878         }
2879
2880         switch (next) {
2881         case DWC3_LINK_STATE_U1:
2882                 if (dwc->speed == USB_SPEED_SUPER)
2883                         dwc3_suspend_gadget(dwc);
2884                 break;
2885         case DWC3_LINK_STATE_U2:
2886         case DWC3_LINK_STATE_U3:
2887                 dwc3_suspend_gadget(dwc);
2888                 break;
2889         case DWC3_LINK_STATE_RESUME:
2890                 dwc3_resume_gadget(dwc);
2891                 break;
2892         default:
2893                 /* do nothing */
2894                 break;
2895         }
2896
2897         dwc->link_state = next;
2898 }
2899
2900 static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc,
2901                                           unsigned int evtinfo)
2902 {
2903         enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
2904
2905         if (dwc->link_state != next && next == DWC3_LINK_STATE_U3)
2906                 dwc3_suspend_gadget(dwc);
2907
2908         dwc->link_state = next;
2909 }
2910
2911 static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
2912                 unsigned int evtinfo)
2913 {
2914         unsigned int is_ss = evtinfo & BIT(4);
2915
2916         /**
2917          * WORKAROUND: DWC3 revision 2.20a with hibernation support
2918          * has a known issue which can cause USB CV TD.9.23 to fail
2919          * randomly.
2920          *
2921          * Because of this issue, core could generate bogus hibernation
2922          * events which SW needs to ignore.
2923          *
2924          * Refers to:
2925          *
2926          * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
2927          * Device Fallback from SuperSpeed
2928          */
2929         if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
2930                 return;
2931
2932         /* enter hibernation here */
2933 }
2934
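/*
 * Dispatch a device-specific event (disconnect, reset, connection done,
 * wakeup, hibernation request, link status change, suspend, ...) to its
 * handler.
 */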
2935 static void dwc3_gadget_interrupt(struct dwc3 *dwc,
2936                 const struct dwc3_event_devt *event)
2937 {
2938         switch (event->type) {
2939         case DWC3_DEVICE_EVENT_DISCONNECT:
2940                 dwc3_gadget_disconnect_interrupt(dwc);
2941                 break;
2942         case DWC3_DEVICE_EVENT_RESET:
2943                 dwc3_gadget_reset_interrupt(dwc);
2944                 break;
2945         case DWC3_DEVICE_EVENT_CONNECT_DONE:
2946                 dwc3_gadget_conndone_interrupt(dwc);
2947                 break;
2948         case DWC3_DEVICE_EVENT_WAKEUP:
2949                 dwc3_gadget_wakeup_interrupt(dwc);
2950                 break;
2951         case DWC3_DEVICE_EVENT_HIBER_REQ:
2952                 if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
2953                                         "unexpected hibernation event\n"))
2954                         break;
2955
2956                 dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
2957                 break;
2958         case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
2959                 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
2960                 break;
2961         case DWC3_DEVICE_EVENT_EOPF:
2962                 /* For version 2.30a and above, this event is reported as a suspend event */
2963                 if (dwc->revision >= DWC3_REVISION_230A) {
2964                         /*
2965                          * Ignore suspend event until the gadget enters into
2966                          * USB_STATE_CONFIGURED state.
2967                          */
2968                         if (dwc->gadget.state >= USB_STATE_CONFIGURED)
2969                                 dwc3_gadget_suspend_interrupt(dwc,
2970                                                 event->event_info);
2971                 }
2972                 break;
2973         case DWC3_DEVICE_EVENT_SOF:
2974         case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
2975         case DWC3_DEVICE_EVENT_CMD_CMPL:
2976         case DWC3_DEVICE_EVENT_OVERFLOW:
2977                 break;
2978         default:
2979                 dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
2980         }
2981 }
2982
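/*
 * Trace one raw event entry and route it: endpoint events go straight to
 * dwc3_endpoint_interrupt(), device events to dwc3_gadget_interrupt(), and
 * anything else is reported as an unknown IRQ type.
 */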
2983 static void dwc3_process_event_entry(struct dwc3 *dwc,
2984                 const union dwc3_event *event)
2985 {
2986         trace_dwc3_event(event->raw, dwc);
2987
2988         /* Endpoint IRQ, handle it and return early */
2989         if (event->type.is_devspec == 0) {
2990                 /* depevt */
2991                 return dwc3_endpoint_interrupt(dwc, &event->depevt);
2992         }
2993
2994         switch (event->type.type) {
2995         case DWC3_EVENT_TYPE_DEV:
2996                 dwc3_gadget_interrupt(dwc, &event->devt);
2997                 break;
2998         /* REVISIT what to do with Carkit and I2C events? */
2999         default:
3000                 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
3001         }
3002 }
3003
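/*
 * Drain the cached event buffer from thread (bottom half) context: walk the
 * events 4 bytes at a time, handle each entry, clear the PENDING flag,
 * unmask the event interrupt again and, if configured, re-arm interrupt
 * moderation.
 */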
3004 static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
3005 {
3006         struct dwc3 *dwc = evt->dwc;
3007         irqreturn_t ret = IRQ_NONE;
3008         int left;
3009         u32 reg;
3010
3011         left = evt->count;
3012
3013         if (!(evt->flags & DWC3_EVENT_PENDING))
3014                 return IRQ_NONE;
3015
3016         while (left > 0) {
3017                 union dwc3_event event;
3018
3019                 event.raw = *(u32 *) (evt->cache + evt->lpos);
3020
3021                 dwc3_process_event_entry(dwc, &event);
3022
3023                 /*
3024                  * FIXME we wrap around correctly to the next entry as
3025                  * almost all entries are 4 bytes in size. There is one
3026                  * entry which has 12 bytes: a regular entry followed by
3027                  * 8 bytes of data. It is not yet clear how such an entry
3028                  * is laid out when it lands next to the buffer boundary,
3029                  * so that case will be handled once we actually need to
3030                  * support it.
3031                  */
3032                 evt->lpos = (evt->lpos + 4) % evt->length;
3033                 left -= 4;
3034         }
3035
3036         evt->count = 0;
3037         evt->flags &= ~DWC3_EVENT_PENDING;
3038         ret = IRQ_HANDLED;
3039
3040         /* Unmask interrupt */
3041         reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
3042         reg &= ~DWC3_GEVNTSIZ_INTMASK;
3043         dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
3044
3045         if (dwc->imod_interval) {
3046                 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
3047                 dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
3048         }
3049
3050         return ret;
3051 }
3052
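/* Threaded IRQ handler: process the event buffer under the device lock. */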
3053 static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
3054 {
3055         struct dwc3_event_buffer *evt = _evt;
3056         struct dwc3 *dwc = evt->dwc;
3057         unsigned long flags;
3058         irqreturn_t ret = IRQ_NONE;
3059
3060         spin_lock_irqsave(&dwc->lock, flags);
3061         ret = dwc3_process_event_buf(evt);
3062         spin_unlock_irqrestore(&dwc->lock, flags);
3063
3064         return ret;
3065 }
3066
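/*
 * Top-half helper: when runtime suspended, mark the events as pending and
 * defer handling until resume; otherwise latch the event count, mask the
 * event interrupt, copy the hardware event buffer into the local cache
 * (handling wrap-around), acknowledge the events, and wake the handler
 * thread.
 */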
3067 static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
3068 {
3069         struct dwc3 *dwc = evt->dwc;
3070         u32 amount;
3071         u32 count;
3072         u32 reg;
3073
3074         if (pm_runtime_suspended(dwc->dev)) {
3075                 pm_runtime_get(dwc->dev);
3076                 disable_irq_nosync(dwc->irq_gadget);
3077                 dwc->pending_events = true;
3078                 return IRQ_HANDLED;
3079         }
3080
3081         count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
3082         count &= DWC3_GEVNTCOUNT_MASK;
3083         if (!count)
3084                 return IRQ_NONE;
3085
3086         evt->count = count;
3087         evt->flags |= DWC3_EVENT_PENDING;
3088
3089         /* Mask interrupt */
3090         reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
3091         reg |= DWC3_GEVNTSIZ_INTMASK;
3092         dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
3093
3094         amount = min(count, evt->length - evt->lpos);
3095         memcpy(evt->cache + evt->lpos, evt->buf + evt->lpos, amount);
3096
3097         if (amount < count)
3098                 memcpy(evt->cache, evt->buf, count - amount);
3099
3100         dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count);
3101
3102         return IRQ_WAKE_THREAD;
3103 }
3104
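/*
 * Hard IRQ handler: check the event buffer and decide whether the threaded
 * handler needs to run.
 */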
3105 static irqreturn_t dwc3_interrupt(int irq, void *_evt)
3106 {
3107         struct dwc3_event_buffer        *evt = _evt;
3108
3109         return dwc3_check_event_buf(evt);
3110 }
3111
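/*
 * Look up the gadget IRQ: try the "peripheral" and "dwc_usb3" named
 * resources before falling back to the platform device's first IRQ.
 * -EPROBE_DEFER is passed through so probing can be retried later.
 */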
3112 static int dwc3_gadget_get_irq(struct dwc3 *dwc)
3113 {
3114         struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
3115         int irq;
3116
3117         irq = platform_get_irq_byname(dwc3_pdev, "peripheral");
3118         if (irq > 0)
3119                 goto out;
3120
3121         if (irq == -EPROBE_DEFER)
3122                 goto out;
3123
3124         irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
3125         if (irq > 0)
3126                 goto out;
3127
3128         if (irq == -EPROBE_DEFER)
3129                 goto out;
3130
3131         irq = platform_get_irq(dwc3_pdev, 0);
3132         if (irq > 0)
3133                 goto out;
3134
3135         if (irq != -EPROBE_DEFER)
3136                 dev_err(dwc->dev, "missing peripheral IRQ\n");
3137
3138         if (!irq)
3139                 irq = -EINVAL;
3140
3141 out:
3142         return irq;
3143 }
3144
3145 /**
3146  * dwc3_gadget_init - Initializes gadget related registers
3147  * @dwc: pointer to our controller context structure
3148  *
3149  * Returns 0 on success, otherwise a negative errno.
3150  */
3151 int dwc3_gadget_init(struct dwc3 *dwc)
3152 {
3153         int ret;
3154         int irq;
3155
3156         irq = dwc3_gadget_get_irq(dwc);
3157         if (irq < 0) {
3158                 ret = irq;
3159                 goto err0;
3160         }
3161
3162         dwc->irq_gadget = irq;
3163
3164         dwc->ctrl_req = dma_alloc_coherent(dwc->sysdev, sizeof(*dwc->ctrl_req),
3165                         &dwc->ctrl_req_addr, GFP_KERNEL);
3166         if (!dwc->ctrl_req) {
3167                 dev_err(dwc->dev, "failed to allocate ctrl request\n");
3168                 ret = -ENOMEM;
3169                 goto err0;
3170         }
3171
3172         dwc->ep0_trb = dma_alloc_coherent(dwc->sysdev,
3173                                           sizeof(*dwc->ep0_trb) * 2,
3174                                           &dwc->ep0_trb_addr, GFP_KERNEL);
3175         if (!dwc->ep0_trb) {
3176                 dev_err(dwc->dev, "failed to allocate ep0 trb\n");
3177                 ret = -ENOMEM;
3178                 goto err1;
3179         }
3180
3181         dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL);
3182         if (!dwc->setup_buf) {
3183                 ret = -ENOMEM;
3184                 goto err2;
3185         }
3186
3187         dwc->ep0_bounce = dma_alloc_coherent(dwc->sysdev,
3188                         DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
3189                         GFP_KERNEL);
3190         if (!dwc->ep0_bounce) {
3191                 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
3192                 ret = -ENOMEM;
3193                 goto err3;
3194         }
3195
3196         dwc->zlp_buf = kzalloc(DWC3_ZLP_BUF_SIZE, GFP_KERNEL);
3197         if (!dwc->zlp_buf) {
3198                 ret = -ENOMEM;
3199                 goto err4;
3200         }
3201
3202         dwc->bounce = dma_alloc_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE,
3203                         &dwc->bounce_addr, GFP_KERNEL);
3204         if (!dwc->bounce) {
3205                 ret = -ENOMEM;
3206                 goto err5;
3207         }
3208
3209         init_completion(&dwc->ep0_in_setup);
3210
3211         dwc->gadget.ops                 = &dwc3_gadget_ops;
3212         dwc->gadget.speed               = USB_SPEED_UNKNOWN;
3213         dwc->gadget.sg_supported        = true;
3214         dwc->gadget.name                = "dwc3-gadget";
3215         dwc->gadget.is_otg              = dwc->dr_mode == USB_DR_MODE_OTG;
3216
3217         /*
3218          * FIXME We might be setting max_speed to <SUPER, even though versions
3219          * <2.20a of dwc3 have an issue with metastability (documented
3220          * elsewhere in this driver) which means we can't set max speed to
3221          * anything lower than SUPER.
3222          *
3223          * Because gadget.max_speed is only used by composite.c and function
3224          * drivers (i.e. it won't go into dwc3's registers), we allow this
3225          * to happen so we avoid sending a SuperSpeed Capability descriptor
3226          * together with our BOS descriptor, as that could confuse the host
3227          * into thinking we can handle SuperSpeed.
3228          *
3229          * Note that, in fact, we won't even support GetBOS requests when the
3230          * speed is less than SuperSpeed, because we don't yet have a means to
3231          * tell composite.c that we are USB 2.0 + LPM ECN.
3232          */
3233         if (dwc->revision < DWC3_REVISION_220A)
3234                 dev_info(dwc->dev, "changing max_speed on rev %08x\n",
3235                                 dwc->revision);
3236
3237         dwc->gadget.max_speed           = dwc->maximum_speed;
3238
3239         /*
3240          * REVISIT: Here we should clear all pending IRQs to be
3241          * sure we're starting from a well known location.
3242          */
3243
3244         ret = dwc3_gadget_init_endpoints(dwc);
3245         if (ret)
3246                 goto err6;
3247
3248         ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
3249         if (ret) {
3250                 dev_err(dwc->dev, "failed to register udc\n");
3251                 goto err6;
3252         }
3253
3254         return 0;
3255 err6:
3256         dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
3257                         dwc->bounce_addr);
3258
3259 err5:
3260         kfree(dwc->zlp_buf);
3261
3262 err4:
3263         dwc3_gadget_free_endpoints(dwc);
3264         dma_free_coherent(dwc->sysdev, DWC3_EP0_BOUNCE_SIZE,
3265                         dwc->ep0_bounce, dwc->ep0_bounce_addr);
3266
3267 err3:
3268         kfree(dwc->setup_buf);
3269
3270 err2:
3271         dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
3272                         dwc->ep0_trb, dwc->ep0_trb_addr);
3273
3274 err1:
3275         dma_free_coherent(dwc->sysdev, sizeof(*dwc->ctrl_req),
3276                         dwc->ctrl_req, dwc->ctrl_req_addr);
3277
3278 err0:
3279         return ret;
3280 }
3281
3282 /* -------------------------------------------------------------------------- */
3283
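/**
 * dwc3_gadget_exit - Undoes dwc3_gadget_init()
 * @dwc: pointer to our controller context structure
 *
 * Unregisters the UDC and frees the endpoints and the DMA and bounce
 * buffers allocated by dwc3_gadget_init().
 */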
3284 void dwc3_gadget_exit(struct dwc3 *dwc)
3285 {
3286         usb_del_gadget_udc(&dwc->gadget);
3287
3288         dwc3_gadget_free_endpoints(dwc);
3289
3290         dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
3291                         dwc->bounce_addr);
3292         dma_free_coherent(dwc->sysdev, DWC3_EP0_BOUNCE_SIZE,
3293                         dwc->ep0_bounce, dwc->ep0_bounce_addr);
3294
3295         kfree(dwc->setup_buf);
3296         kfree(dwc->zlp_buf);
3297
3298         dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
3299                         dwc->ep0_trb, dwc->ep0_trb_addr);
3300
3301         dma_free_coherent(dwc->sysdev, sizeof(*dwc->ctrl_req),
3302                         dwc->ctrl_req, dwc->ctrl_req_addr);
3303 }
3304
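/**
 * dwc3_gadget_suspend - stop the gadget for a controller suspend
 * @dwc: pointer to our controller context structure
 *
 * Does nothing if no gadget driver is bound; otherwise clears the run/stop
 * bit, reports a disconnect to the gadget driver and stops the gadget.
 */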
3305 int dwc3_gadget_suspend(struct dwc3 *dwc)
3306 {
3307         if (!dwc->gadget_driver)
3308                 return 0;
3309
3310         dwc3_gadget_run_stop(dwc, false, false);
3311         dwc3_disconnect_gadget(dwc);
3312         __dwc3_gadget_stop(dwc);
3313
3314         return 0;
3315 }
3316
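/**
 * dwc3_gadget_resume - restart the gadget after a controller resume
 * @dwc: pointer to our controller context structure
 *
 * Does nothing if no gadget driver is bound; otherwise restarts the gadget
 * and sets the run/stop bit again, undoing dwc3_gadget_suspend().
 */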
3317 int dwc3_gadget_resume(struct dwc3 *dwc)
3318 {
3319         int                     ret;
3320
3321         if (!dwc->gadget_driver)
3322                 return 0;
3323
3324         ret = __dwc3_gadget_start(dwc);
3325         if (ret < 0)
3326                 goto err0;
3327
3328         ret = dwc3_gadget_run_stop(dwc, true, false);
3329         if (ret < 0)
3330                 goto err1;
3331
3332         return 0;
3333
3334 err1:
3335         __dwc3_gadget_stop(dwc);
3336
3337 err0:
3338         return ret;
3339 }
3340
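/*
 * Replay events which arrived while the device was runtime suspended: the
 * hard IRQ handler only marked them pending, so run it again now that the
 * device is awake and re-enable the gadget IRQ.
 */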
3341 void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
3342 {
3343         if (dwc->pending_events) {
3344                 dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf);
3345                 dwc->pending_events = false;
3346                 enable_irq(dwc->irq_gadget);
3347         }
3348 }