// SPDX-License-Identifier: GPL-2.0+
/*
 * aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget
 *
 * ep0.c - Endpoint 0 handling
 *
 * Copyright 2017 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/clk.h>
#include <linux/usb/gadget.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/regmap.h>
#include <linux/dma-mapping.h>

#include "vhub.h"

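/*
 * EP0 is driven as a small state machine (ep->ep0.state):
 * ep0_state_token while waiting for a SETUP packet, ep0_state_data
 * for the optional data phase and ep0_state_status for the final
 * handshake. ast_vhub_ep0_handle_setup() and ast_vhub_ep0_handle_ack()
 * below move between those states; the reply helpers here queue
 * responses on the internal EP0 request.
 */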
int ast_vhub_reply(struct ast_vhub_ep *ep, char *ptr, int len)
{
	struct usb_request *req = &ep->ep0.req.req;
	int rc;

	if (WARN_ON(ep->d_idx != 0))
		return std_req_stall;
	if (WARN_ON(!ep->ep0.dir_in))
		return std_req_stall;
	if (WARN_ON(len > AST_VHUB_EP0_MAX_PACKET))
		return std_req_stall;
	if (WARN_ON(req->status == -EINPROGRESS))
		return std_req_stall;

	req->buf = ptr;
	req->length = len;
	req->complete = NULL;
	req->zero = true;

	/*
	 * Call internal queue directly after dropping the lock. This is
	 * safe to do as the reply is always the last thing done when
	 * processing a SETUP packet, usually as a tail call.
	 */
	spin_unlock(&ep->vhub->lock);
	if (ep->ep.ops->queue(&ep->ep, req, GFP_ATOMIC))
		rc = std_req_stall;
	else
		rc = std_req_data;
	spin_lock(&ep->vhub->lock);
	return rc;
}

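/*
 * Build a small reply from the variadic byte arguments directly in
 * the EP0 buffer and send it. Passing a NULL buffer to
 * ast_vhub_reply() indicates the data is already in place.
 */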
int __ast_vhub_simple_reply(struct ast_vhub_ep *ep, int len, ...)
{
	u8 *buffer = ep->buf;
	unsigned int i;
	va_list args;

	va_start(args, len);

	/* Copy data directly into EP buffer */
	for (i = 0; i < len; i++)
		buffer[i] = va_arg(args, int);
	va_end(args);

	/* req->buf NULL means data is already there */
	return ast_vhub_reply(ep, NULL, len);
}

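/*
 * SETUP packet dispatch: requests aimed at the vHub itself
 * (ep->dev == NULL) go to the standard/class hub request handlers,
 * standard requests for a downstream device are handled locally,
 * and everything else is forwarded to the gadget driver.
 */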
void ast_vhub_ep0_handle_setup(struct ast_vhub_ep *ep)
{
	struct usb_ctrlrequest crq;
	enum std_req_rc std_req_rc;
	int rc = -ENODEV;

	if (WARN_ON(ep->d_idx != 0))
		return;

	/*
	 * Grab the setup packet from the chip and byteswap
	 * interesting fields
	 */
	memcpy_fromio(&crq, ep->ep0.setup, sizeof(crq));

	EPDBG(ep, "SETUP packet %02x/%02x/%04x/%04x/%04x [%s] st=%d\n",
	      crq.bRequestType, crq.bRequest,
	      le16_to_cpu(crq.wValue),
	      le16_to_cpu(crq.wIndex),
	      le16_to_cpu(crq.wLength),
	      (crq.bRequestType & USB_DIR_IN) ? "in" : "out",
	      ep->ep0.state);

	/* Check our state, cancel pending requests if needed */
	if (ep->ep0.state != ep0_state_token) {
		EPDBG(ep, "wrong state\n");
		ast_vhub_nuke(ep, -EIO);

		/*
		 * Accept the packet regardless, this seems to happen
		 * when stalling a SETUP packet that has an OUT data
		 * phase.
		 */
		ast_vhub_nuke(ep, 0);
		goto stall;
	}

	/* Calculate next state for EP0 */
	ep->ep0.state = ep0_state_data;
	ep->ep0.dir_in = !!(crq.bRequestType & USB_DIR_IN);

	/* If this is the vHub, we handle requests differently */
	std_req_rc = std_req_driver;
	if (ep->dev == NULL) {
		if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
			std_req_rc = ast_vhub_std_hub_request(ep, &crq);
		else if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS)
			std_req_rc = ast_vhub_class_hub_request(ep, &crq);
		else
			std_req_rc = std_req_stall;
	} else if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
		std_req_rc = ast_vhub_std_dev_request(ep, &crq);

	/* Act upon result */
	switch (std_req_rc) {
	case std_req_complete:
		goto complete;
	case std_req_stall:
		goto stall;
	case std_req_driver:
		break;
	case std_req_data:
		return;
	}

	/* Pass request up to the gadget driver */
	if (WARN_ON(!ep->dev))
		goto stall;
	if (ep->dev->driver) {
		EPDBG(ep, "forwarding to gadget...\n");
		spin_unlock(&ep->vhub->lock);
		rc = ep->dev->driver->setup(&ep->dev->gadget, &crq);
		spin_lock(&ep->vhub->lock);
		EPDBG(ep, "driver returned %d\n", rc);
	} else {
		EPDBG(ep, "no gadget for request !\n");
	}
	if (rc >= 0)
		return;

 stall:
	EPDBG(ep, "stalling\n");
	writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
	ep->ep0.state = ep0_state_status;
	ep->ep0.dir_in = false;
	return;

 complete:
	EPVDBG(ep, "sending [in] status with no data\n");
	writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
	ep->ep0.state = ep0_state_status;
	ep->ep0.dir_in = false;
}

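/*
 * Push the next chunk of an IN data phase out through the EP0 buffer.
 * Called both when a request is first queued and from the ACK handler
 * for each subsequent packet; once the last chunk has been sent the
 * request is completed and we move to the status phase.
 */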
static void ast_vhub_ep0_do_send(struct ast_vhub_ep *ep,
				 struct ast_vhub_req *req)
{
	unsigned int chunk;
	u32 reg;

	/* If this is a 0-length request, it's the gadget trying to
	 * send a status on our behalf. We take it from here.
	 */
	if (req->req.length == 0)
		req->last_desc = 1;

	/* Are we done ? Complete request, otherwise wait for next interrupt */
	if (req->last_desc >= 0) {
		EPVDBG(ep, "complete send %d/%d\n",
		       req->req.actual, req->req.length);
		ep->ep0.state = ep0_state_status;
		writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat);
		ast_vhub_done(ep, req, 0);
		return;
	}

	/*
	 * Next chunk cropped to max packet size. Also check if this
	 * is the last packet
	 */
	chunk = req->req.length - req->req.actual;
	if (chunk > ep->ep.maxpacket)
		chunk = ep->ep.maxpacket;
	else if ((chunk < ep->ep.maxpacket) || !req->req.zero)
		req->last_desc = 1;

	EPVDBG(ep, "send chunk=%d last=%d, req->act=%d mp=%d\n",
	       chunk, req->last_desc, req->req.actual, ep->ep.maxpacket);

	/*
	 * Copy data if any (internal requests already have data
	 * in the EP buffer)
	 */
	if (chunk && req->req.buf)
		memcpy(ep->buf, req->req.buf + req->req.actual, chunk);

	vhub_dma_workaround(ep->buf);

	/* Remember chunk size and trigger send */
	reg = VHUB_EP0_SET_TX_LEN(chunk);
	writel(reg, ep->ep0.ctlstat);
	writel(reg | VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
	req->req.actual += chunk;
}

static void ast_vhub_ep0_rx_prime(struct ast_vhub_ep *ep)
{
	EPVDBG(ep, "rx prime\n");

	/* Prime endpoint for receiving data */
	writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat);
}

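/*
 * Copy one received packet from the EP0 buffer into the request.
 * The transfer completes on a short packet or when the expected
 * length has been received; otherwise the endpoint is re-primed
 * for the next OUT packet.
 */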
static void ast_vhub_ep0_do_receive(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
				    unsigned int len)
{
	unsigned int remain;
	int rc = 0;

	/* We are receiving... grab request */
	remain = req->req.length - req->req.actual;

	EPVDBG(ep, "receive got=%d remain=%d\n", len, remain);

	/* Are we getting more than asked ? */
	if (len > remain) {
		EPDBG(ep, "receiving too much (ovf: %d) !\n",
		      len - remain);
		len = remain;
		rc = -EOVERFLOW;
	}
	if (len && req->req.buf)
		memcpy(req->req.buf + req->req.actual, ep->buf, len);
	req->req.actual += len;

	/* Done ? */
	if (len < ep->ep.maxpacket || len == remain) {
		ep->ep0.state = ep0_state_status;
		writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
		ast_vhub_done(ep, req, rc);
	} else
		ast_vhub_ep0_rx_prime(ep);
}

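/*
 * Interrupt-time handling of an EP0 ACK: depending on the current
 * state this either continues the data phase (send or receive the
 * next packet) or terminates the status phase, falling back to a
 * stall when the hardware status and our state machine disagree.
 */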
void ast_vhub_ep0_handle_ack(struct ast_vhub_ep *ep, bool in_ack)
{
	struct ast_vhub_req *req;
	struct ast_vhub *vhub = ep->vhub;
	struct device *dev = &vhub->pdev->dev;
	bool stall = false;
	u32 stat;

	/* Read EP0 status */
	stat = readl(ep->ep0.ctlstat);

	/* Grab current request if any */
	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

	EPVDBG(ep, "ACK status=%08x,state=%d is_in=%d in_ack=%d req=%p\n",
	       stat, ep->ep0.state, ep->ep0.dir_in, in_ack, req);

	switch (ep->ep0.state) {
	case ep0_state_token:
		/* There should be no request queued in that state... */
		if (req) {
			dev_warn(dev, "request present while in TOKEN state\n");
			ast_vhub_nuke(ep, -EINVAL);
		}
		dev_warn(dev, "ack while in TOKEN state\n");
		stall = true;
		break;
	case ep0_state_data:
		/* Check the state bits corresponding to our direction */
		if ((ep->ep0.dir_in && (stat & VHUB_EP0_TX_BUFF_RDY)) ||
		    (!ep->ep0.dir_in && (stat & VHUB_EP0_RX_BUFF_RDY)) ||
		    (ep->ep0.dir_in != in_ack)) {
			dev_warn(dev, "irq state mismatch\n");
			stall = true;
			break;
		}
		/*
		 * We are in data phase and there's no request, something is
		 * wrong, stall
		 */
		if (!req) {
			dev_warn(dev, "data phase, no request\n");
			stall = true;
			break;
		}

		/* We have a request, handle data transfers */
		if (ep->ep0.dir_in)
			ast_vhub_ep0_do_send(ep, req);
		else
			ast_vhub_ep0_do_receive(ep, req, VHUB_EP0_RX_LEN(stat));
		return;
	case ep0_state_status:
		/* Nuke stale requests */
		if (req) {
			dev_warn(dev, "request present while in STATUS state\n");
			ast_vhub_nuke(ep, -EINVAL);
		}

		/*
		 * If the status phase completes with the wrong ack, stall
		 * the endpoint just in case, to abort whatever the host
		 * was doing.
		 */
		if (ep->ep0.dir_in == in_ack) {
			dev_warn(dev, "status direction mismatch\n");
			stall = true;
		}
	}

	/* Reset to token state */
	ep->ep0.state = ep0_state_token;
	if (stall)
		writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
}

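/*
 * Gadget-facing queue operation for EP0. Only a single request can
 * be outstanding at a time, and it may only be queued once a SETUP
 * packet has moved us out of the token state.
 */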
static int ast_vhub_ep0_queue(struct usb_ep *u_ep, struct usb_request *u_req,
			      gfp_t gfp_flags)
{
	struct ast_vhub_req *req = to_ast_req(u_req);
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	struct device *dev = &vhub->pdev->dev;
	unsigned long flags;

	/* Paranoid checks */
	if (!u_req || (!u_req->complete && !req->internal)) {
		dev_warn(dev, "Bogus EP0 request ! u_req=%p\n", u_req);
		if (u_req) {
			dev_warn(dev, "complete=%p internal=%d\n",
				 u_req->complete, req->internal);
		}
		return -EINVAL;
	}

	/* Not endpoint 0 ? */
	if (WARN_ON(ep->d_idx != 0))
		return -EINVAL;

	/* Disabled device */
	if (ep->dev && (!ep->dev->enabled || ep->dev->suspended))
		return -ESHUTDOWN;

	/* Data, no buffer and not internal ? */
	if (u_req->length && !u_req->buf && !req->internal) {
		dev_warn(dev, "Request with no buffer !\n");
		return -EINVAL;
	}

	EPVDBG(ep, "enqueue req @%p\n", req);
	EPVDBG(ep, "  l=%d zero=%d noshort=%d is_in=%d\n",
	       u_req->length, u_req->zero,
	       u_req->short_not_ok, ep->ep0.dir_in);

	/* Initialize request progress fields */
	u_req->status = -EINPROGRESS;
	u_req->actual = 0;
	req->last_desc = -1;
	req->active = false;

	spin_lock_irqsave(&vhub->lock, flags);

	/* EP0 can only support a single request at a time */
	if (!list_empty(&ep->queue) || ep->ep0.state == ep0_state_token) {
		dev_warn(dev, "EP0: Request in wrong state\n");
		spin_unlock_irqrestore(&vhub->lock, flags);
		return -EBUSY;
	}

	/* Add request to list and kick processing if empty */
	list_add_tail(&req->queue, &ep->queue);

	if (ep->ep0.dir_in) {
		/* IN request, send data */
		ast_vhub_ep0_do_send(ep, req);
	} else if (u_req->length == 0) {
		/* 0-len request, send completion as rx */
		EPVDBG(ep, "0-length rx completion\n");
		ep->ep0.state = ep0_state_status;
		writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
		ast_vhub_done(ep, req, 0);
	} else {
		/* OUT request, start receiver */
		ast_vhub_ep0_rx_prime(ep);
	}

	spin_unlock_irqrestore(&vhub->lock, flags);

	return 0;
}

static int ast_vhub_ep0_dequeue(struct usb_ep *u_ep, struct usb_request *u_req)
{
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	struct ast_vhub_req *req;
	unsigned long flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&vhub->lock, flags);

	/* Only one request can be in the queue */
	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

	/* Is it ours ? */
	if (req && u_req == &req->req) {
		EPVDBG(ep, "dequeue req @%p\n", req);

		/*
		 * We don't have to deal with "active" as all
		 * DMAs go to the EP buffers, not the request.
		 */
		ast_vhub_done(ep, req, -ECONNRESET);

		/* We do stall the EP to clean things up in HW */
		writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
		ep->ep0.state = ep0_state_status;
		ep->ep0.dir_in = false;
		rc = 0;
	}
	spin_unlock_irqrestore(&vhub->lock, flags);
	return rc;
}

static const struct usb_ep_ops ast_vhub_ep0_ops = {
	.queue		= ast_vhub_ep0_queue,
	.dequeue	= ast_vhub_ep0_dequeue,
	.alloc_request	= ast_vhub_alloc_request,
	.free_request	= ast_vhub_free_request,
};

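/*
 * Initialize EP0 for either the vHub itself (dev == NULL) or one of
 * the downstream devices; they differ only in which control/status
 * register, SETUP packet area and buffer slot they use.
 */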
void ast_vhub_init_ep0(struct ast_vhub *vhub, struct ast_vhub_ep *ep,
		       struct ast_vhub_dev *dev)
{
	memset(ep, 0, sizeof(*ep));

	INIT_LIST_HEAD(&ep->ep.ep_list);
	INIT_LIST_HEAD(&ep->queue);
	ep->ep.ops = &ast_vhub_ep0_ops;
	ep->ep.name = "ep0";
	ep->ep.caps.type_control = true;
	usb_ep_set_maxpacket_limit(&ep->ep, AST_VHUB_EP0_MAX_PACKET);
	ep->d_idx = 0;
	ep->dev = dev;
	ep->vhub = vhub;
	ep->ep0.state = ep0_state_token;
	INIT_LIST_HEAD(&ep->ep0.req.queue);
	ep->ep0.req.internal = true;

	/* Small difference between vHub and devices */
	if (dev) {
		ep->ep0.ctlstat = dev->regs + AST_VHUB_DEV_EP0_CTRL;
		ep->ep0.setup = vhub->regs +
			AST_VHUB_SETUP0 + 8 * (dev->index + 1);
		ep->buf = vhub->ep0_bufs +
			AST_VHUB_EP0_MAX_PACKET * (dev->index + 1);
		ep->buf_dma = vhub->ep0_bufs_dma +
			AST_VHUB_EP0_MAX_PACKET * (dev->index + 1);
	} else {
		ep->ep0.ctlstat = vhub->regs + AST_VHUB_EP0_CTRL;
		ep->ep0.setup = vhub->regs + AST_VHUB_SETUP0;
		ep->buf = vhub->ep0_bufs;
		ep->buf_dma = vhub->ep0_bufs_dma;
	}
}