// SPDX-License-Identifier: GPL-2.0+
/*
 * aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget
 *
 * epn.c - Generic endpoints management
 *
 * Copyright 2017 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/clk.h>
#include <linux/usb/gadget.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/regmap.h>
#include <linux/dma-mapping.h>

#include "vhub.h"

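/* Define to enable the extra runtime sanity checks done by CHECK() below */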
#define EXTRA_CHECKS

#ifdef EXTRA_CHECKS
#define CHECK(ep, expr, fmt...)                                 \
        do {                                                    \
                if (!(expr))                                    \
                        EPDBG(ep, "CHECK:" fmt);                \
        } while (0)
#else
#define CHECK(ep, expr, fmt...) do { } while (0)
#endif

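/*
 * Single-stage (no descriptors) mode: the request is fed to the HW one
 * chunk of at most maxpacket bytes at a time. Note that req->last_desc
 * is used here as a simple flag (-1 = more to come, >= 0 = final chunk
 * queued), while in descriptor mode it holds the index of the last
 * descriptor.
 */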
static void ast_vhub_epn_kick(struct ast_vhub_ep *ep, struct ast_vhub_req *req)
{
        unsigned int act = req->req.actual;
        unsigned int len = req->req.length;
        unsigned int chunk;

        /* There should be no DMA ongoing */
        WARN_ON(req->active);

        /* Calculate next chunk size */
        chunk = len - act;
        if (chunk > ep->ep.maxpacket)
                chunk = ep->ep.maxpacket;
        else if ((chunk < ep->ep.maxpacket) || !req->req.zero)
                req->last_desc = 1;

        EPVDBG(ep, "kick req %p act=%d/%d chunk=%d last=%d\n",
               req, act, len, chunk, req->last_desc);

        /* If DMA is unavailable, use the staging EP buffer */
        if (!req->req.dma) {

                /* For IN transfers, copy the data over first */
                if (ep->epn.is_in)
                        memcpy(ep->buf, req->req.buf + act, chunk);
                writel(ep->buf_dma, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
        } else {
                writel(req->req.dma + act, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
        }

        /* Start DMA */
        req->active = true;
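        /*
         * The transfer size is written once on its own and then again
         * with the kick bit set; presumably this lets the size settle
         * before the kick is latched (an assumption, not confirmed by
         * the code itself).
         */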
        writel(VHUB_EP_DMA_SET_TX_SIZE(chunk),
               ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
        writel(VHUB_EP_DMA_SET_TX_SIZE(chunk) | VHUB_EP_DMA_SINGLE_KICK,
               ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
}

static void ast_vhub_epn_handle_ack(struct ast_vhub_ep *ep)
{
        struct ast_vhub_req *req;
        unsigned int len;
        u32 stat;

        /* Read EP status */
        stat = readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS);

        /* Grab current request if any */
        req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

        EPVDBG(ep, "ACK status=%08x is_in=%d, req=%p (active=%d)\n",
               stat, ep->epn.is_in, req, req ? req->active : 0);

        /* In the absence of a request, bail out; it must have been dequeued */
        if (!req)
                return;

        /*
         * If the request isn't active, the active one was probably
         * dequeued; move on to processing the queue
         */
        if (!req->active)
                goto next_chunk;

        /* Check if HW has moved on */
        if (VHUB_EP_DMA_RPTR(stat) != 0) {
                EPDBG(ep, "DMA read pointer not 0 !\n");
                return;
        }

        /* No current DMA ongoing */
        req->active = false;

        /* Grab length out of HW */
        len = VHUB_EP_DMA_TX_SIZE(stat);

        /* If not using DMA, copy data out if needed */
        if (!req->req.dma && !ep->epn.is_in && len)
                memcpy(req->req.buf + req->req.actual, ep->buf, len);

        /* Adjust size */
        req->req.actual += len;

        /* Check for short packet */
        if (len < ep->ep.maxpacket)
                req->last_desc = 1;

        /* That's it? Complete the request and pick a new one */
        if (req->last_desc >= 0) {
                ast_vhub_done(ep, req, 0);
                req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req,
                                               queue);

                /*
                 * Due to lock dropping inside "done" the next request could
                 * already be active, so check for that and bail if needed.
                 */
                if (!req || req->active)
                        return;
        }

 next_chunk:
        ast_vhub_epn_kick(ep, req);
}

static inline unsigned int ast_vhub_count_free_descs(struct ast_vhub_ep *ep)
{
        /*
         * d_next == d_last means descriptor list empty to HW,
         * thus we can only have AST_VHUB_DESCS_COUNT-1 descriptors
         * in the list
         */
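        /*
         * Worked example, with AST_VHUB_DESCS_COUNT == 256: if
         * d_next == 10 and d_last == 2, then 8 descriptors are in
         * flight and (2 + 256 - 10 - 1) & 255 == 247 are still free.
         * The mask arithmetic relies on AST_VHUB_DESCS_COUNT being a
         * power of two.
         */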
        return (ep->epn.d_last + AST_VHUB_DESCS_COUNT - ep->epn.d_next - 1) &
                (AST_VHUB_DESCS_COUNT - 1);
}

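/*
 * Descriptor mode: carve the request into up to AST_VHUB_DESCS_COUNT-1
 * descriptors of up to chunk_max bytes (i.e. up to 8 packets) each,
 * then kick the HW by advancing the CPU write pointer.
 */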
static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
                                   struct ast_vhub_req *req)
{
        unsigned int act = req->act_count;
        unsigned int len = req->req.length;
        unsigned int chunk;

        /* Mark request active if not already */
        req->active = true;

        /* If the request was already completely written, do nothing */
        if (req->last_desc >= 0)
                return;

        EPVDBG(ep, "kick act=%d/%d chunk_max=%d free_descs=%d\n",
               act, len, ep->epn.chunk_max, ast_vhub_count_free_descs(ep));

        /* While we can create descriptors */
        while (ast_vhub_count_free_descs(ep) && req->last_desc < 0) {
                struct ast_vhub_desc *desc;
                unsigned int d_num;

                /* Grab next free descriptor */
                d_num = ep->epn.d_next;
                desc = &ep->epn.descs[d_num];
                ep->epn.d_next = (d_num + 1) & (AST_VHUB_DESCS_COUNT - 1);

                /* Calculate next chunk size */
                chunk = len - act;
                if (chunk <= ep->epn.chunk_max) {
                        /*
                         * Is this the last packet ? Because of having up to 8
                         * packets in a descriptor we can't just compare "chunk"
                         * with ep.maxpacket. We have to see if it's a multiple
                         * of it to know if we have to send a zero packet.
                         * Sadly that involves a modulo which is a bit expensive
                         * but probably still better than not doing it.
                         */
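                        /*
                         * For example, with maxpacket == 512 and req.zero
                         * set, a final chunk of 1024 is still a multiple of
                         * the packet size, so it is not marked last and a
                         * zero-length descriptor will follow it.
                         */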
                        if (!chunk || !req->req.zero || (chunk % ep->ep.maxpacket) != 0)
                                req->last_desc = d_num;
                } else {
                        chunk = ep->epn.chunk_max;
                }

                EPVDBG(ep, " chunk: act=%d/%d chunk=%d last=%d desc=%d free=%d\n",
                       act, len, chunk, req->last_desc, d_num,
                       ast_vhub_count_free_descs(ep));

                /* Populate descriptor */
                desc->w0 = cpu_to_le32(req->req.dma + act);

                /* Interrupt if end of request or no more descriptors */

                /*
                 * TODO: Be smarter about it, if we don't have enough
                 * descriptors request an interrupt before queue empty
                 * or so in order to be able to populate more before
                 * the HW runs out. This isn't a problem at the moment
                 * as we use 256 descriptors and only put at most one
                 * request in the ring.
                 */
                desc->w1 = cpu_to_le32(VHUB_DSC1_IN_SET_LEN(chunk));
                if (req->last_desc >= 0 || !ast_vhub_count_free_descs(ep))
                        desc->w1 |= cpu_to_le32(VHUB_DSC1_IN_INTERRUPT);

                /* Account packet */
                req->act_count = act = act + chunk;
        }

        /* Tell HW about new descriptors */
        writel(VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next),
               ep->epn.regs + AST_VHUB_EP_DESC_STATUS);

        EPVDBG(ep, "HW kicked, d_next=%d dstat=%08x\n",
               ep->epn.d_next, readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS));
}

static void ast_vhub_epn_handle_ack_desc(struct ast_vhub_ep *ep)
{
        struct ast_vhub_req *req;
        unsigned int len, d_last;
        u32 stat, stat1;

        /* Read EP status, workaround HW race */
        do {
                stat = readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
                stat1 = readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
        } while (stat != stat1);

        /* Extract RPTR */
        d_last = VHUB_EP_DMA_RPTR(stat);

        /* Grab current request if any */
        req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

        EPVDBG(ep, "ACK status=%08x is_in=%d ep->d_last=%d..%d\n",
               stat, ep->epn.is_in, ep->epn.d_last, d_last);

        /* Check all completed descriptors */
        while (ep->epn.d_last != d_last) {
                struct ast_vhub_desc *desc;
                unsigned int d_num;
                bool is_last_desc;

                /* Grab next completed descriptor */
                d_num = ep->epn.d_last;
                desc = &ep->epn.descs[d_num];
                ep->epn.d_last = (d_num + 1) & (AST_VHUB_DESCS_COUNT - 1);

                /* Grab len out of descriptor */
                len = VHUB_DSC1_IN_LEN(le32_to_cpu(desc->w1));

                EPVDBG(ep, " desc %d len=%d req=%p (act=%d)\n",
                       d_num, len, req, req ? req->active : 0);

                /* If no active request pending, move on */
                if (!req || !req->active)
                        continue;

                /* Adjust size */
                req->req.actual += len;

                /* Is that the last chunk ? */
                is_last_desc = req->last_desc == d_num;
                CHECK(ep, is_last_desc == (len < ep->ep.maxpacket ||
                                           (req->req.actual >= req->req.length &&
                                            !req->req.zero)),
                      "Last packet discrepancy: last_desc=%d len=%d r.act=%d "
                      "r.len=%d r.zero=%d mp=%d\n",
                      is_last_desc, len, req->req.actual, req->req.length,
                      req->req.zero, ep->ep.maxpacket);

                if (is_last_desc) {
                        /*
                         * Because we can only have one request at a time
                         * in our descriptor list in this implementation,
                         * d_last and ep->d_last should now be equal
                         */
                        CHECK(ep, d_last == ep->epn.d_last,
                              "DMA read ptr mismatch %d vs %d\n",
                              d_last, ep->epn.d_last);

                        /* Note: done will drop and re-acquire the lock */
                        ast_vhub_done(ep, req, 0);
                        req = list_first_entry_or_null(&ep->queue,
                                                       struct ast_vhub_req,
                                                       queue);
                        break;
                }
        }

        /* More work ? */
        if (req)
                ast_vhub_epn_kick_desc(ep, req);
}

void ast_vhub_epn_ack_irq(struct ast_vhub_ep *ep)
{
        if (ep->epn.desc_mode)
                ast_vhub_epn_handle_ack_desc(ep);
        else
                ast_vhub_epn_handle_ack(ep);
}

static int ast_vhub_epn_queue(struct usb_ep *u_ep, struct usb_request *u_req,
                              gfp_t gfp_flags)
{
        struct ast_vhub_req *req = to_ast_req(u_req);
        struct ast_vhub_ep *ep = to_ast_ep(u_ep);
        struct ast_vhub *vhub = ep->vhub;
        unsigned long flags;
        bool empty;
        int rc;

        /* Paranoid checks */
        if (!u_req || !u_req->complete || !u_req->buf) {
                dev_warn(&vhub->pdev->dev, "Bogus EPn request ! u_req=%p\n", u_req);
                if (u_req) {
                        dev_warn(&vhub->pdev->dev, "complete=%p internal=%d\n",
                                 u_req->complete, req->internal);
                }
                return -EINVAL;
        }

        /* Endpoint enabled ? */
        if (!ep->epn.enabled || !u_ep->desc || !ep->dev || !ep->d_idx ||
            !ep->dev->enabled || ep->dev->suspended) {
                EPDBG(ep, "Enqueuing request on wrong or disabled EP\n");
                return -ESHUTDOWN;
        }

        /*
         * Map request for DMA if possible. For now, the rule for DMA is
         * that:
         *
         *  * For single stage mode (no descriptors):
         *
         *   - The buffer is aligned to an 8-byte boundary (HW requirement)
         *   - For an OUT endpoint, the request size is a multiple of the EP
         *     packet size (otherwise the controller will DMA past the end
         *     of the buffer if the host is sending a too long packet).
         *
         *  * For descriptor mode (tx only for now), always.
         *
         * We could relax the latter by making the decision to use the bounce
         * buffer based on the size of a given *segment* of the request rather
         * than the whole request.
         */
        if (ep->epn.desc_mode ||
            ((((unsigned long)u_req->buf & 7) == 0) &&
             (ep->epn.is_in || !(u_req->length & (u_ep->maxpacket - 1))))) {
                rc = usb_gadget_map_request(&ep->dev->gadget, u_req,
                                            ep->epn.is_in);
                if (rc) {
                        dev_warn(&vhub->pdev->dev,
                                 "Request mapping failure %d\n", rc);
                        return rc;
                }
        } else {
                u_req->dma = 0;
        }

        EPVDBG(ep, "enqueue req @%p\n", req);
        EPVDBG(ep, " l=%d dma=0x%x zero=%d noshort=%d noirq=%d is_in=%d\n",
               u_req->length, (u32)u_req->dma, u_req->zero,
               u_req->short_not_ok, u_req->no_interrupt,
               ep->epn.is_in);

        /* Initialize request progress fields */
        u_req->status = -EINPROGRESS;
        u_req->actual = 0;
        req->act_count = 0;
        req->active = false;
        req->last_desc = -1;
        spin_lock_irqsave(&vhub->lock, flags);
        empty = list_empty(&ep->queue);

        /* Add request to list and kick processing if empty */
        list_add_tail(&req->queue, &ep->queue);
        if (empty) {
                if (ep->epn.desc_mode)
                        ast_vhub_epn_kick_desc(ep, req);
                else
                        ast_vhub_epn_kick(ep, req);
        }
        spin_unlock_irqrestore(&vhub->lock, flags);

        return 0;
}

static void ast_vhub_stop_active_req(struct ast_vhub_ep *ep,
                                     bool restart_ep)
{
        u32 state, reg, loops;

        /* Stop DMA activity */
        writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);

        /* Wait for it to complete */
        for (loops = 0; loops < 1000; loops++) {
                state = readl(ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
                state = VHUB_EP_DMA_PROC_STATUS(state);
                if (state == EP_DMA_PROC_RX_IDLE ||
                    state == EP_DMA_PROC_TX_IDLE)
                        break;
                udelay(1);
        }
        if (loops >= 1000)
                dev_warn(&ep->vhub->pdev->dev, "Timeout waiting for DMA\n");

        /* If we don't have to restart the endpoint, that's it */
        if (!restart_ep)
                return;

        /* Restart the endpoint */
        if (ep->epn.desc_mode) {
                /*
                 * Take out descriptors by resetting the DMA read
                 * pointer to be equal to the CPU write pointer.
                 *
                 * Note: If we ever support creating descriptors for
                 * requests that aren't the head of the queue, we
                 * may have to do something more complex here,
                 * especially if the request being taken out is
                 * not the current head of the descriptor list.
                 */
                reg = VHUB_EP_DMA_SET_RPTR(ep->epn.d_next) |
                        VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next);
                writel(reg, ep->epn.regs + AST_VHUB_EP_DESC_STATUS);

                /* Then turn it back on */
                writel(ep->epn.dma_conf,
                       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
        } else {
                /* Single mode: just turn it back on */
                writel(ep->epn.dma_conf,
                       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
        }
}

static int ast_vhub_epn_dequeue(struct usb_ep *u_ep, struct usb_request *u_req)
{
        struct ast_vhub_ep *ep = to_ast_ep(u_ep);
        struct ast_vhub *vhub = ep->vhub;
        struct ast_vhub_req *req;
        unsigned long flags;
        int rc = -EINVAL;

        spin_lock_irqsave(&vhub->lock, flags);

        /* Make sure it's actually queued on this endpoint */
        list_for_each_entry(req, &ep->queue, queue) {
                if (&req->req == u_req)
                        break;
        }

        if (&req->req == u_req) {
                EPVDBG(ep, "dequeue req @%p active=%d\n",
                       req, req->active);
                if (req->active)
                        ast_vhub_stop_active_req(ep, true);
                ast_vhub_done(ep, req, -ECONNRESET);
                rc = 0;
        }

        spin_unlock_irqrestore(&vhub->lock, flags);
        return rc;
}

void ast_vhub_update_epn_stall(struct ast_vhub_ep *ep)
{
        u32 reg;

        if (WARN_ON(ep->d_idx == 0))
                return;
        reg = readl(ep->epn.regs + AST_VHUB_EP_CONFIG);
        if (ep->epn.stalled || ep->epn.wedged)
                reg |= VHUB_EP_CFG_STALL_CTRL;
        else
                reg &= ~VHUB_EP_CFG_STALL_CTRL;
        writel(reg, ep->epn.regs + AST_VHUB_EP_CONFIG);

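        /* When un-stalling, also reset the EP's data toggle */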
        if (!ep->epn.stalled && !ep->epn.wedged)
                writel(VHUB_EP_TOGGLE_SET_EPNUM(ep->epn.g_idx),
                       ep->vhub->regs + AST_VHUB_EP_TOGGLE);
}

static int ast_vhub_set_halt_and_wedge(struct usb_ep *u_ep, bool halt,
                                       bool wedge)
{
        struct ast_vhub_ep *ep;
        struct ast_vhub *vhub;
        unsigned long flags;

        /* Validate the argument before dereferencing it */
        if (!u_ep || !u_ep->desc)
                return -EINVAL;

        ep = to_ast_ep(u_ep);
        vhub = ep->vhub;

        EPDBG(ep, "Set halt (%d) & wedge (%d)\n", halt, wedge);

        if (ep->d_idx == 0)
                return 0;
        if (ep->epn.is_iso)
                return -EOPNOTSUPP;

        spin_lock_irqsave(&vhub->lock, flags);

        /* Fail with still-busy IN endpoints */
        if (halt && ep->epn.is_in && !list_empty(&ep->queue)) {
                spin_unlock_irqrestore(&vhub->lock, flags);
                return -EAGAIN;
        }
        ep->epn.stalled = halt;
        ep->epn.wedged = wedge;
        ast_vhub_update_epn_stall(ep);

        spin_unlock_irqrestore(&vhub->lock, flags);

        return 0;
}

static int ast_vhub_epn_set_halt(struct usb_ep *u_ep, int value)
{
        return ast_vhub_set_halt_and_wedge(u_ep, value != 0, false);
}

static int ast_vhub_epn_set_wedge(struct usb_ep *u_ep)
{
        return ast_vhub_set_halt_and_wedge(u_ep, true, true);
}

static int ast_vhub_epn_disable(struct usb_ep *u_ep)
{
        struct ast_vhub_ep *ep = to_ast_ep(u_ep);
        struct ast_vhub *vhub = ep->vhub;
        unsigned long flags;
        u32 imask, ep_ier;

        EPDBG(ep, "Disabling !\n");

        spin_lock_irqsave(&vhub->lock, flags);

        ep->epn.enabled = false;

        /* Stop active DMA if any */
        ast_vhub_stop_active_req(ep, false);

        /* Disable endpoint */
        writel(0, ep->epn.regs + AST_VHUB_EP_CONFIG);

        /* Disable ACK interrupt */
        imask = VHUB_EP_IRQ(ep->epn.g_idx);
        ep_ier = readl(vhub->regs + AST_VHUB_EP_ACK_IER);
        ep_ier &= ~imask;
        writel(ep_ier, vhub->regs + AST_VHUB_EP_ACK_IER);
        writel(imask, vhub->regs + AST_VHUB_EP_ACK_ISR);

        /* Nuke all pending requests */
        ast_vhub_nuke(ep, -ESHUTDOWN);

        /* No more descriptor associated with request */
        ep->ep.desc = NULL;

        spin_unlock_irqrestore(&vhub->lock, flags);

        return 0;
}

static int ast_vhub_epn_enable(struct usb_ep *u_ep,
                               const struct usb_endpoint_descriptor *desc)
{
        static const char *ep_type_string[] __maybe_unused = { "ctrl",
                                                               "isoc",
                                                               "bulk",
                                                               "intr" };
        struct ast_vhub_ep *ep = to_ast_ep(u_ep);
        struct ast_vhub_dev *dev;
        struct ast_vhub *vhub;
        u16 maxpacket, type;
        unsigned long flags;
        u32 ep_conf, ep_ier, imask;

        /* Check arguments */
        if (!u_ep || !desc)
                return -EINVAL;

        maxpacket = usb_endpoint_maxp(desc);
        if (!ep->d_idx || !ep->dev ||
            desc->bDescriptorType != USB_DT_ENDPOINT ||
            maxpacket == 0 || maxpacket > ep->ep.maxpacket) {
                EPDBG(ep, "Invalid EP enable,d_idx=%d,dev=%p,type=%d,mp=%d/%d\n",
                      ep->d_idx, ep->dev, desc->bDescriptorType,
                      maxpacket, ep->ep.maxpacket);
                return -EINVAL;
        }
        if (ep->d_idx != usb_endpoint_num(desc)) {
                EPDBG(ep, "EP number mismatch !\n");
                return -EINVAL;
        }

        if (ep->epn.enabled) {
                EPDBG(ep, "Already enabled\n");
                return -EBUSY;
        }
        dev = ep->dev;
        vhub = ep->vhub;

        /* Check device state */
        if (!dev->driver) {
                EPDBG(ep, "Bogus device state: driver=%p speed=%d\n",
                      dev->driver, dev->gadget.speed);
                return -ESHUTDOWN;
        }

        /* Grab some info from the descriptor */
        ep->epn.is_in = usb_endpoint_dir_in(desc);
        ep->ep.maxpacket = maxpacket;
        type = usb_endpoint_type(desc);
        ep->epn.d_next = ep->epn.d_last = 0;
        ep->epn.is_iso = false;
        ep->epn.stalled = false;
        ep->epn.wedged = false;

        EPDBG(ep, "Enabling [%s] %s num %d maxpacket=%d\n",
              ep->epn.is_in ? "in" : "out", ep_type_string[type],
              usb_endpoint_num(desc), maxpacket);

        /* Can we use DMA descriptor mode ? */
        ep->epn.desc_mode = ep->epn.descs && ep->epn.is_in;
        if (ep->epn.desc_mode)
                memset(ep->epn.descs, 0, 8 * AST_VHUB_DESCS_COUNT);

        /*
         * Large send function can send up to 8 packets from
         * one descriptor with a limit of 4095 bytes.
         */
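        /*
         * e.g. with a 512-byte maxpacket: 512 << 3 == 4096 exceeds the
         * 4095-byte limit, so one packet is trimmed off and chunk_max
         * ends up at 3584 bytes (7 packets per descriptor).
         */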
        ep->epn.chunk_max = ep->ep.maxpacket;
        if (ep->epn.is_in) {
                ep->epn.chunk_max <<= 3;
                while (ep->epn.chunk_max > 4095)
                        ep->epn.chunk_max -= ep->ep.maxpacket;
        }

        switch (type) {
        case USB_ENDPOINT_XFER_CONTROL:
                EPDBG(ep, "Only one control endpoint\n");
                return -EINVAL;
        case USB_ENDPOINT_XFER_INT:
                ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_INT);
                break;
        case USB_ENDPOINT_XFER_BULK:
                ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_BULK);
                break;
        case USB_ENDPOINT_XFER_ISOC:
                ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_ISO);
                ep->epn.is_iso = true;
                break;
        default:
                return -EINVAL;
        }

        /* Encode the rest of the EP config register */
        if (maxpacket < 1024)
                ep_conf |= VHUB_EP_CFG_SET_MAX_PKT(maxpacket);
        if (!ep->epn.is_in)
                ep_conf |= VHUB_EP_CFG_DIR_OUT;
        ep_conf |= VHUB_EP_CFG_SET_EP_NUM(usb_endpoint_num(desc));
        ep_conf |= VHUB_EP_CFG_ENABLE;
        ep_conf |= VHUB_EP_CFG_SET_DEV(dev->index + 1);
        EPVDBG(ep, "config=%08x\n", ep_conf);

        spin_lock_irqsave(&vhub->lock, flags);

        /* Disable HW and reset DMA */
        writel(0, ep->epn.regs + AST_VHUB_EP_CONFIG);
        writel(VHUB_EP_DMA_CTRL_RESET,
               ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);

        /* Configure and enable */
        writel(ep_conf, ep->epn.regs + AST_VHUB_EP_CONFIG);

        if (ep->epn.desc_mode) {
                /* Clear DMA status, including the DMA read ptr */
                writel(0, ep->epn.regs + AST_VHUB_EP_DESC_STATUS);

                /* Set descriptor base */
                writel(ep->epn.descs_dma,
                       ep->epn.regs + AST_VHUB_EP_DESC_BASE);

                /* Set base DMA config value */
                ep->epn.dma_conf = VHUB_EP_DMA_DESC_MODE;
                if (ep->epn.is_in)
                        ep->epn.dma_conf |= VHUB_EP_DMA_IN_LONG_MODE;

                /* First reset and disable all operations */
                writel(ep->epn.dma_conf | VHUB_EP_DMA_CTRL_RESET,
                       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);

                /* Enable descriptor mode */
                writel(ep->epn.dma_conf,
                       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
        } else {
                /* Set base DMA config value */
                ep->epn.dma_conf = VHUB_EP_DMA_SINGLE_STAGE;

                /* Reset and switch to single stage mode */
                writel(ep->epn.dma_conf | VHUB_EP_DMA_CTRL_RESET,
                       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
                writel(ep->epn.dma_conf,
                       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
                writel(0, ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
        }

        /* Cleanup data toggle just in case */
        writel(VHUB_EP_TOGGLE_SET_EPNUM(ep->epn.g_idx),
               vhub->regs + AST_VHUB_EP_TOGGLE);

        /* Cleanup and enable ACK interrupt */
        imask = VHUB_EP_IRQ(ep->epn.g_idx);
        writel(imask, vhub->regs + AST_VHUB_EP_ACK_ISR);
        ep_ier = readl(vhub->regs + AST_VHUB_EP_ACK_IER);
        ep_ier |= imask;
        writel(ep_ier, vhub->regs + AST_VHUB_EP_ACK_IER);

        /* Woot, we are online ! */
        ep->epn.enabled = true;

        spin_unlock_irqrestore(&vhub->lock, flags);

        return 0;
}

static void ast_vhub_epn_dispose(struct usb_ep *u_ep)
{
        struct ast_vhub_ep *ep = to_ast_ep(u_ep);

        if (WARN_ON(!ep->dev || !ep->d_idx))
                return;

        EPDBG(ep, "Releasing endpoint\n");

        /* Take it out of the EP list */
        list_del_init(&ep->ep.ep_list);

        /* Mark the address free in the device */
        ep->dev->epns[ep->d_idx - 1] = NULL;

        /* Free name & DMA buffers */
        kfree(ep->ep.name);
        ep->ep.name = NULL;
        dma_free_coherent(&ep->vhub->pdev->dev,
                          AST_VHUB_EPn_MAX_PACKET +
                          8 * AST_VHUB_DESCS_COUNT,
                          ep->buf, ep->buf_dma);
        ep->buf = NULL;
        ep->epn.descs = NULL;

        /* Mark free */
        ep->dev = NULL;
}

static const struct usb_ep_ops ast_vhub_epn_ops = {
        .enable         = ast_vhub_epn_enable,
        .disable        = ast_vhub_epn_disable,
        .dispose        = ast_vhub_epn_dispose,
        .queue          = ast_vhub_epn_queue,
        .dequeue        = ast_vhub_epn_dequeue,
        .set_halt       = ast_vhub_epn_set_halt,
        .set_wedge      = ast_vhub_epn_set_wedge,
        .alloc_request  = ast_vhub_alloc_request,
        .free_request   = ast_vhub_free_request,
};

struct ast_vhub_ep *ast_vhub_alloc_epn(struct ast_vhub_dev *d, u8 addr)
{
        struct ast_vhub *vhub = d->vhub;
        struct ast_vhub_ep *ep;
        unsigned long flags;
        int i;

        /* Find a free one (no device) */
        spin_lock_irqsave(&vhub->lock, flags);
        for (i = 0; i < AST_VHUB_NUM_GEN_EPs; i++)
                if (vhub->epns[i].dev == NULL)
                        break;
        if (i >= AST_VHUB_NUM_GEN_EPs) {
                spin_unlock_irqrestore(&vhub->lock, flags);
                return NULL;
        }

        /* Set it up */
        ep = &vhub->epns[i];
        ep->dev = d;
        spin_unlock_irqrestore(&vhub->lock, flags);

        DDBG(d, "Allocating gen EP %d for addr %d\n", i, addr);
        INIT_LIST_HEAD(&ep->queue);
        ep->d_idx = addr;
        ep->vhub = vhub;
        ep->ep.ops = &ast_vhub_epn_ops;
        ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", addr);
        d->epns[addr - 1] = ep;
        ep->epn.g_idx = i;
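        /* Each generic EP has a 16-byte register bank starting at 0x200 */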
        ep->epn.regs = vhub->regs + 0x200 + (i * 0x10);

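        /*
         * A single coherent allocation holds both the EPn bounce buffer
         * and, right after it, the descriptor ring (8 bytes per
         * descriptor: w0 + w1).
         */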
        ep->buf = dma_alloc_coherent(&vhub->pdev->dev,
                                     AST_VHUB_EPn_MAX_PACKET +
                                     8 * AST_VHUB_DESCS_COUNT,
                                     &ep->buf_dma, GFP_KERNEL);
        if (!ep->buf) {
                kfree(ep->ep.name);
                ep->ep.name = NULL;
                return NULL;
        }
        ep->epn.descs = ep->buf + AST_VHUB_EPn_MAX_PACKET;
        ep->epn.descs_dma = ep->buf_dma + AST_VHUB_EPn_MAX_PACKET;

        usb_ep_set_maxpacket_limit(&ep->ep, AST_VHUB_EPn_MAX_PACKET);
        list_add_tail(&ep->ep.ep_list, &d->gadget.ep_list);
        ep->ep.caps.type_iso = true;
        ep->ep.caps.type_bulk = true;
        ep->ep.caps.type_int = true;
        ep->ep.caps.dir_in = true;
        ep->ep.caps.dir_out = true;

        return ep;
}