// SPDX-License-Identifier: GPL-2.0+
/*
 * aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget
 *
 * epn.c - Generic endpoints management
 *
 * Copyright 2017 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/clk.h>
#include <linux/usb/gadget.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/regmap.h>
#include <linux/dma-mapping.h>

#include "vhub.h"

#define EXTRA_CHECKS

#ifdef EXTRA_CHECKS
#define CHECK(ep, expr, fmt...)	do {			\
	if (!(expr)) EPDBG(ep, "CHECK:" fmt);		\
	} while(0)
#else
#define CHECK(ep, expr, fmt...)	do { } while(0)
#endif
static void ast_vhub_epn_kick(struct ast_vhub_ep *ep, struct ast_vhub_req *req)
{
	unsigned int act = req->req.actual;
	unsigned int len = req->req.length;
	unsigned int chunk;

	/* There should be no DMA ongoing */
	WARN_ON(req->active);
	/* Calculate next chunk size */
	chunk = len - act;
	if (chunk > ep->ep.maxpacket)
		chunk = ep->ep.maxpacket;
	else if ((chunk < ep->ep.maxpacket) || !req->req.zero)
		req->last_desc = 1;
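	/*
	 * Worked example (illustration only, not from the original
	 * sources): a 100-byte request on a 64-byte EP goes out as
	 * 64 + 36. The full first packet leaves last_desc untouched;
	 * the short 36-byte chunk marks this as the final transfer.
	 */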
	EPVDBG(ep, "kick req %p act=%d/%d chunk=%d last=%d\n",
	       req, act, len, chunk, req->last_desc);
	/* If DMA unavailable, use the staging EP buffer */
	if (!req->req.dma) {

		/* For IN transfers, copy data over first */
		if (ep->epn.is_in)
			memcpy(ep->buf, req->req.buf + act, chunk);
		writel(ep->buf_dma, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
	} else
		writel(req->req.dma + act, ep->epn.regs + AST_VHUB_EP_DESC_BASE);

	/* Start DMA */
	req->active = true;
	writel(VHUB_EP_DMA_SET_TX_SIZE(chunk),
	       ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
	writel(VHUB_EP_DMA_SET_TX_SIZE(chunk) | VHUB_EP_DMA_SINGLE_KICK,
	       ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
}
static void ast_vhub_epn_handle_ack(struct ast_vhub_ep *ep)
{
	struct ast_vhub_req *req;
	unsigned int len;
	u32 stat;

	/* Read EP status */
	stat = readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS);

	/* Grab current request if any */
	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

	EPVDBG(ep, "ACK status=%08x is_in=%d, req=%p (active=%d)\n",
	       stat, ep->epn.is_in, req, req ? req->active : 0);
	/* In the absence of a request, bail out, it must have been dequeued */
	if (!req)
		return;

	/*
	 * Request not active, move on to processing the queue; the active
	 * request was probably dequeued.
	 */
	if (!req->active)
		goto next_chunk;
	/* Check if HW has moved on */
	if (VHUB_EP_DMA_RPTR(stat) != 0) {
		EPDBG(ep, "DMA read pointer not 0 !\n");
		return;
	}
	/* No current DMA ongoing */
	req->active = false;

	/* Grab length out of HW */
	len = VHUB_EP_DMA_TX_SIZE(stat);
	/* If not using DMA, copy data out if needed */
	if (!req->req.dma && !ep->epn.is_in && len)
		memcpy(req->req.buf + req->req.actual, ep->buf, len);

	/* Adjust size */
	req->req.actual += len;
	/* Check for short packet */
	if (len < ep->ep.maxpacket)
		req->last_desc = 1;
	/* That's it ? complete the request and pick a new one */
	if (req->last_desc >= 0) {
		ast_vhub_done(ep, req, 0);
		req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req,
					       queue);

		/*
		 * Due to lock dropping inside "done" the next request could
		 * already be active, so check for that and bail if needed.
		 */
		if (!req || req->active)
			return;
	}

 next_chunk:
	ast_vhub_epn_kick(ep, req);
}
static inline unsigned int ast_vhub_count_free_descs(struct ast_vhub_ep *ep)
{
	/*
	 * d_next == d_last means descriptor list empty to HW,
	 * thus we can only have AST_VHUB_DESCS_COUNT-1 descriptors
	 * in the list
	 */
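	/*
	 * Worked example (illustration only): with AST_VHUB_DESCS_COUNT
	 * at 256, an empty ring (d_next == d_last == 0) yields
	 * (0 + 256 - 0 - 1) & 255 = 255 free slots, and d_next == 5 with
	 * d_last == 10 yields (10 + 256 - 5 - 1) & 255 = 4: one slot is
	 * always sacrificed so that full and empty are distinguishable.
	 */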
	return (ep->epn.d_last + AST_VHUB_DESCS_COUNT - ep->epn.d_next - 1) &
		(AST_VHUB_DESCS_COUNT - 1);
}
static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
				   struct ast_vhub_req *req)
{
	unsigned int act = req->act_count;
	unsigned int len = req->req.length;
	unsigned int chunk;

	/* Mark request active if not already */
	req->active = true;

	/* If the request was already completely written, do nothing */
	if (req->last_desc >= 0)
		return;

	EPVDBG(ep, "kick act=%d/%d chunk_max=%d free_descs=%d\n",
	       act, len, ep->epn.chunk_max, ast_vhub_count_free_descs(ep));
	/* While we can create descriptors */
	while (ast_vhub_count_free_descs(ep) && req->last_desc < 0) {
		struct ast_vhub_desc *desc;
		unsigned int d_num;

		/* Grab next free descriptor */
		d_num = ep->epn.d_next;
		desc = &ep->epn.descs[d_num];
		ep->epn.d_next = (d_num + 1) & (AST_VHUB_DESCS_COUNT - 1);
		/* Calculate next chunk size */
		chunk = len - act;
		if (chunk <= ep->epn.chunk_max) {
			/*
			 * Is this the last packet ? Because of having up to 8
			 * packets in a descriptor we can't just compare "chunk"
			 * with ep.maxpacket. We have to see if it's a multiple
			 * of it to know if we have to send a zero packet.
			 * Sadly that involves a modulo which is a bit expensive
			 * but probably still better than not doing it.
			 */
			if (!chunk || !req->req.zero || (chunk % ep->ep.maxpacket) != 0)
				req->last_desc = d_num;
		} else
			chunk = ep->epn.chunk_max;
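		/*
		 * Worked example (illustration only): a 1024-byte request
		 * with req.zero set on a 512-byte EP first builds a
		 * 1024-byte descriptor (1024 % 512 == 0, so not last),
		 * then a zero-length one (!chunk) which becomes the last
		 * descriptor and emits the terminating ZLP.
		 */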
		EPVDBG(ep, " chunk: act=%d/%d chunk=%d last=%d desc=%d free=%d\n",
		       act, len, chunk, req->last_desc, d_num,
		       ast_vhub_count_free_descs(ep));
		/* Populate descriptor */
		desc->w0 = cpu_to_le32(req->req.dma + act);

		/* Interrupt if end of request or no more descriptors */

		/*
		 * TODO: Be smarter about it, if we don't have enough
		 * descriptors request an interrupt before queue empty
		 * or so in order to be able to populate more before
		 * the HW runs out. This isn't a problem at the moment
		 * as we use 256 descriptors and only put at most one
		 * request in the ring.
		 */
		desc->w1 = cpu_to_le32(VHUB_DSC1_IN_SET_LEN(chunk));
		if (req->last_desc >= 0 || !ast_vhub_count_free_descs(ep))
			desc->w1 |= cpu_to_le32(VHUB_DSC1_IN_INTERRUPT);

		/* Account packet */
		req->act_count = act = act + chunk;
	}
	/* Tell HW about new descriptors */
	writel(VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next),
	       ep->epn.regs + AST_VHUB_EP_DESC_STATUS);

	EPVDBG(ep, "HW kicked, d_next=%d dstat=%08x\n",
	       ep->epn.d_next, readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS));
}
static void ast_vhub_epn_handle_ack_desc(struct ast_vhub_ep *ep)
{
	struct ast_vhub_req *req;
	unsigned int len, d_last;
	u32 stat, stat1;

	/* Read EP status, workaround HW race */
	do {
		stat = readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
		stat1 = readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
	} while (stat != stat1);
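	/*
	 * Reading the register until two consecutive reads agree guards
	 * against the controller updating the status word mid-read (a
	 * sketch of the rationale, inferred from the workaround above
	 * rather than documented here).
	 */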
	/* Extract RPTR */
	d_last = VHUB_EP_DMA_RPTR(stat);

	/* Grab current request if any */
	req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

	EPVDBG(ep, "ACK status=%08x is_in=%d ep->d_last=%d..%d\n",
	       stat, ep->epn.is_in, ep->epn.d_last, d_last);
	/* Check all completed descriptors */
	while (ep->epn.d_last != d_last) {
		struct ast_vhub_desc *desc;
		unsigned int d_num;
		bool is_last_desc;

		/* Grab next completed descriptor */
		d_num = ep->epn.d_last;
		desc = &ep->epn.descs[d_num];
		ep->epn.d_last = (d_num + 1) & (AST_VHUB_DESCS_COUNT - 1);
		/* Grab len out of descriptor */
		len = VHUB_DSC1_IN_LEN(le32_to_cpu(desc->w1));

		EPVDBG(ep, " desc %d len=%d req=%p (act=%d)\n",
		       d_num, len, req, req ? req->active : 0);

		/* If no active request pending, move on */
		if (!req || !req->active)
			continue;

		/* Adjust size */
		req->req.actual += len;
		/* Is that the last chunk ? */
		is_last_desc = req->last_desc == d_num;
		CHECK(ep, is_last_desc == (len < ep->ep.maxpacket ||
					   (req->req.actual >= req->req.length &&
					    !req->req.zero)),
		      "Last packet discrepancy: last_desc=%d len=%d r.act=%d "
		      "r.len=%d r.zero=%d mp=%d\n",
		      is_last_desc, len, req->req.actual, req->req.length,
		      req->req.zero, ep->ep.maxpacket);
		if (is_last_desc) {
			/*
			 * Because we can only have one request at a time
			 * in our descriptor list in this implementation,
			 * d_last and ep->d_last should now be equal
			 */
			CHECK(ep, d_last == ep->epn.d_last,
			      "DMA read ptr mismatch %d vs %d\n",
			      d_last, ep->epn.d_last);

			/* Note: done will drop and re-acquire the lock */
			ast_vhub_done(ep, req, 0);
			req = list_first_entry_or_null(&ep->queue,
						       struct ast_vhub_req,
						       queue);
			break;
		}
	}

	/* More work ? */
	if (req)
		ast_vhub_epn_kick_desc(ep, req);
}
void ast_vhub_epn_ack_irq(struct ast_vhub_ep *ep)
{
	if (ep->epn.desc_mode)
		ast_vhub_epn_handle_ack_desc(ep);
	else
		ast_vhub_epn_handle_ack(ep);
}
static int ast_vhub_epn_queue(struct usb_ep* u_ep, struct usb_request *u_req,
			      gfp_t gfp_flags)
{
	struct ast_vhub_req *req = to_ast_req(u_req);
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	unsigned long flags;
	bool empty;
	int rc;

	/* Paranoid checks */
	if (!u_req || !u_req->complete || !u_req->buf) {
		dev_warn(&vhub->pdev->dev, "Bogus EPn request ! u_req=%p\n", u_req);
		if (u_req) {
			dev_warn(&vhub->pdev->dev, "complete=%p internal=%d\n",
				 u_req->complete, req->internal);
		}
		return -EINVAL;
	}
	/* Endpoint enabled ? */
	if (!ep->epn.enabled || !u_ep->desc || !ep->dev || !ep->d_idx ||
	    !ep->dev->enabled || ep->dev->suspended) {
		EPDBG(ep, "Enqueuing request on wrong or disabled EP\n");
		return -ESHUTDOWN;
	}
	/*
	 * Map request for DMA if possible. For now, the rule for DMA is
	 * that:
	 *
	 *  * For single stage mode (no descriptors):
	 *
	 *   - The buffer is aligned to a 8 bytes boundary (HW requirement)
	 *   - For an OUT endpoint, the request size is a multiple of the EP
	 *     packet size (otherwise the controller will DMA past the end
	 *     of the buffer if the host is sending a too long packet).
	 *
	 *  * For descriptor mode (tx only for now), always.
	 *
	 * We could relax the latter by making the decision to use the bounce
	 * buffer based on the size of a given *segment* of the request rather
	 * than the whole request.
	 */
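	/*
	 * Worked example (illustration only): a 100-byte OUT request on a
	 * 64-byte EP fails the size test below (100 & 63 != 0) and goes
	 * through the staging buffer, while any IN request whose buffer
	 * is 8-byte aligned is mapped for DMA directly.
	 */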
	if (ep->epn.desc_mode ||
	    ((((unsigned long)u_req->buf & 7) == 0) &&
	     (ep->epn.is_in || !(u_req->length & (u_ep->maxpacket - 1))))) {
		rc = usb_gadget_map_request(&ep->dev->gadget, u_req,
					    ep->epn.is_in);
		if (rc) {
			dev_warn(&vhub->pdev->dev,
				 "Request mapping failure %d\n", rc);
			return rc;
		}
	} else
		u_req->dma = 0;
	EPVDBG(ep, "enqueue req @%p\n", req);
	EPVDBG(ep, " l=%d dma=0x%x zero=%d noshort=%d noirq=%d is_in=%d\n",
	       u_req->length, (u32)u_req->dma, u_req->zero,
	       u_req->short_not_ok, u_req->no_interrupt,
	       ep->epn.is_in);
	/* Initialize request progress fields */
	u_req->status = -EINPROGRESS;
	u_req->actual = 0;
	req->act_count = 0;
	req->active = false;
	req->last_desc = -1;
	spin_lock_irqsave(&vhub->lock, flags);
	empty = list_empty(&ep->queue);

	/* Add request to list and kick processing if empty */
	list_add_tail(&req->queue, &ep->queue);
	if (empty) {
		if (ep->epn.desc_mode)
			ast_vhub_epn_kick_desc(ep, req);
		else
			ast_vhub_epn_kick(ep, req);
	}
	spin_unlock_irqrestore(&vhub->lock, flags);

	return 0;
}
static void ast_vhub_stop_active_req(struct ast_vhub_ep *ep,
				     bool restart_ep)
{
	u32 state, reg, loops;

	/* Stop DMA activity */
	writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);

	/* Wait for it to complete */
	for (loops = 0; loops < 1000; loops++) {
		state = readl(ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
		state = VHUB_EP_DMA_PROC_STATUS(state);
		if (state == EP_DMA_PROC_RX_IDLE ||
		    state == EP_DMA_PROC_TX_IDLE)
			break;
		udelay(1);
	}
	if (loops >= 1000)
		dev_warn(&ep->vhub->pdev->dev, "Timeout waiting for DMA\n");
	/* If we don't have to restart the endpoint, that's it */
	if (!restart_ep)
		return;

	/* Restart the endpoint */
	if (ep->epn.desc_mode) {
		/*
		 * Take out descriptors by resetting the DMA read
		 * pointer to be equal to the CPU write pointer.
		 *
		 * Note: If we ever support creating descriptors for
		 * requests that aren't the head of the queue, we
		 * may have to do something more complex here,
		 * especially if the request being taken out is
		 * not the current head descriptors.
		 */
		reg = VHUB_EP_DMA_SET_RPTR(ep->epn.d_next) |
			VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next);
		writel(reg, ep->epn.regs + AST_VHUB_EP_DESC_STATUS);

		/* Then turn it back on */
		writel(ep->epn.dma_conf,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
	} else {
		/* Single mode: just turn it back on */
		writel(ep->epn.dma_conf,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
	}
}
static int ast_vhub_epn_dequeue(struct usb_ep* u_ep, struct usb_request *u_req)
{
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	struct ast_vhub_req *req;
	unsigned long flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&vhub->lock, flags);

	/* Make sure it's actually queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == u_req)
			break;
	}

	if (&req->req == u_req) {
		EPVDBG(ep, "dequeue req @%p active=%d\n",
		       req, req->active);
		if (req->active)
			ast_vhub_stop_active_req(ep, true);
		ast_vhub_done(ep, req, -ECONNRESET);
		rc = 0;
	}

	spin_unlock_irqrestore(&vhub->lock, flags);
	return rc;
}
void ast_vhub_update_epn_stall(struct ast_vhub_ep *ep)
{
	u32 reg;

	if (WARN_ON(ep->d_idx == 0))
		return;
	reg = readl(ep->epn.regs + AST_VHUB_EP_CONFIG);
	if (ep->epn.stalled || ep->epn.wedged)
		reg |= VHUB_EP_CFG_STALL_CTRL;
	else
		reg &= ~VHUB_EP_CFG_STALL_CTRL;
	writel(reg, ep->epn.regs + AST_VHUB_EP_CONFIG);
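	/*
	 * When the halt condition is lifted, also reset the data toggle
	 * to DATA0, as USB 2.0 requires after a ClearFeature(ENDPOINT_HALT)
	 * (explanatory note for the write below).
	 */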
	if (!ep->epn.stalled && !ep->epn.wedged)
		writel(VHUB_EP_TOGGLE_SET_EPNUM(ep->epn.g_idx),
		       ep->vhub->regs + AST_VHUB_EP_TOGGLE);
}
static int ast_vhub_set_halt_and_wedge(struct usb_ep* u_ep, bool halt,
				       bool wedge)
{
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	unsigned long flags;

	EPDBG(ep, "Set halt (%d) & wedge (%d)\n", halt, wedge);
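	/*
	 * Note on semantics: a halted endpoint can be un-stalled by the
	 * host via ClearFeature(ENDPOINT_HALT), while a wedged one stays
	 * stalled until the gadget driver itself clears the condition
	 * (standard usb_ep_set_wedge() behaviour, noted here for clarity).
	 */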
	if (!u_ep || !u_ep->desc)
		return -EINVAL;
	spin_lock_irqsave(&vhub->lock, flags);

	/* Fail with still-busy IN endpoints */
	if (halt && ep->epn.is_in && !list_empty(&ep->queue)) {
		spin_unlock_irqrestore(&vhub->lock, flags);
		return -EAGAIN;
	}
	ep->epn.stalled = halt;
	ep->epn.wedged = wedge;
	ast_vhub_update_epn_stall(ep);

	spin_unlock_irqrestore(&vhub->lock, flags);

	return 0;
}
static int ast_vhub_epn_set_halt(struct usb_ep *u_ep, int value)
{
	return ast_vhub_set_halt_and_wedge(u_ep, value != 0, false);
}

static int ast_vhub_epn_set_wedge(struct usb_ep *u_ep)
{
	return ast_vhub_set_halt_and_wedge(u_ep, true, true);
}
static int ast_vhub_epn_disable(struct usb_ep* u_ep)
{
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub *vhub = ep->vhub;
	unsigned long flags;
	u32 imask, ep_ier;

	EPDBG(ep, "Disabling !\n");

	spin_lock_irqsave(&vhub->lock, flags);

	ep->epn.enabled = false;

	/* Stop active DMA if any */
	ast_vhub_stop_active_req(ep, false);

	/* Disable endpoint */
	writel(0, ep->epn.regs + AST_VHUB_EP_CONFIG);

	/* Disable ACK interrupt */
	imask = VHUB_EP_IRQ(ep->epn.g_idx);
	ep_ier = readl(vhub->regs + AST_VHUB_EP_ACK_IER);
	ep_ier &= ~imask;
	writel(ep_ier, vhub->regs + AST_VHUB_EP_ACK_IER);
	writel(imask, vhub->regs + AST_VHUB_EP_ACK_ISR);

	/* Nuke all pending requests */
	ast_vhub_nuke(ep, -ESHUTDOWN);

	/* No more descriptor associated with request */
	u_ep->desc = NULL;

	spin_unlock_irqrestore(&vhub->lock, flags);

	return 0;
}
static int ast_vhub_epn_enable(struct usb_ep* u_ep,
			       const struct usb_endpoint_descriptor *desc)
{
	static const char *ep_type_string[] __maybe_unused = { "ctrl",
							       "isoc",
							       "bulk",
							       "intr" };
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);
	struct ast_vhub_dev *dev;
	struct ast_vhub *vhub;
	u16 maxpacket, type;
	unsigned long flags;
	u32 ep_conf, ep_ier, imask;

	/* Check arguments */
	if (!u_ep || !desc)
		return -EINVAL;

	maxpacket = usb_endpoint_maxp(desc);
	if (!ep->d_idx || !ep->dev ||
	    desc->bDescriptorType != USB_DT_ENDPOINT ||
	    maxpacket == 0 || maxpacket > ep->ep.maxpacket) {
		EPDBG(ep, "Invalid EP enable,d_idx=%d,dev=%p,type=%d,mp=%d/%d\n",
		      ep->d_idx, ep->dev, desc->bDescriptorType,
		      maxpacket, ep->ep.maxpacket);
		return -EINVAL;
	}
	if (ep->d_idx != usb_endpoint_num(desc)) {
		EPDBG(ep, "EP number mismatch !\n");
		return -EINVAL;
	}

	if (ep->epn.enabled) {
		EPDBG(ep, "Already enabled\n");
		return -EBUSY;
	}
	dev = ep->dev;
	vhub = ep->vhub;

	/* Check device state */
	if (!dev->driver) {
		EPDBG(ep, "Bogus device state: driver=%p speed=%d\n",
		      dev->driver, dev->gadget.speed);
		return -ESHUTDOWN;
	}
	/* Grab some info from the descriptor */
	ep->epn.is_in = usb_endpoint_dir_in(desc);
	ep->ep.maxpacket = maxpacket;
	type = usb_endpoint_type(desc);
	ep->epn.d_next = ep->epn.d_last = 0;
	ep->epn.is_iso = false;
	ep->epn.stalled = false;
	ep->epn.wedged = false;

	EPDBG(ep, "Enabling [%s] %s num %d maxpacket=%d\n",
	      ep->epn.is_in ? "in" : "out", ep_type_string[type],
	      usb_endpoint_num(desc), maxpacket);
	/* Can we use DMA descriptor mode ? */
	ep->epn.desc_mode = ep->epn.descs && ep->epn.is_in;
	if (ep->epn.desc_mode)
		memset(ep->epn.descs, 0, 8 * AST_VHUB_DESCS_COUNT);
	/*
	 * Large send function can send up to 8 packets from
	 * one descriptor with a limit of 4095 bytes.
	 */
	ep->epn.chunk_max = ep->ep.maxpacket;
	if (ep->epn.is_in) {
		ep->epn.chunk_max <<= 3;
		while (ep->epn.chunk_max > 4095)
			ep->epn.chunk_max -= ep->ep.maxpacket;
	}
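	/*
	 * Worked example (illustration only): for a 512-byte bulk IN EP,
	 * 512 << 3 = 4096 exceeds 4095, so chunk_max drops to 3584
	 * (7 packets); for a 64-byte EP, 64 << 3 = 512 fits and is kept.
	 */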
	switch(type) {
	case USB_ENDPOINT_XFER_CONTROL:
		EPDBG(ep, "Only one control endpoint\n");
		return -EINVAL;
	case USB_ENDPOINT_XFER_INT:
		ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_INT);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_BULK);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_ISO);
		ep->epn.is_iso = true;
		break;
	default:
		return -EINVAL;
	}
	/* Encode the rest of the EP config register */
	if (maxpacket < 1024)
		ep_conf |= VHUB_EP_CFG_SET_MAX_PKT(maxpacket);
	if (!ep->epn.is_in)
		ep_conf |= VHUB_EP_CFG_DIR_OUT;
	ep_conf |= VHUB_EP_CFG_SET_EP_NUM(usb_endpoint_num(desc));
	ep_conf |= VHUB_EP_CFG_ENABLE;
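	/*
	 * The vHub routes each generic EP to one of its downstream
	 * devices; dev->index is zero-based while the device field in
	 * the config register appears to start at 1, hence the "+ 1"
	 * below (an explanatory note inferred from the code).
	 */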
	ep_conf |= VHUB_EP_CFG_SET_DEV(dev->index + 1);
	EPVDBG(ep, "config=%08x\n", ep_conf);
	spin_lock_irqsave(&vhub->lock, flags);

	/* Disable HW and reset DMA */
	writel(0, ep->epn.regs + AST_VHUB_EP_CONFIG);
	writel(VHUB_EP_DMA_CTRL_RESET,
	       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);

	/* Configure and enable */
	writel(ep_conf, ep->epn.regs + AST_VHUB_EP_CONFIG);
	if (ep->epn.desc_mode) {
		/* Clear DMA status, including the DMA read ptr */
		writel(0, ep->epn.regs + AST_VHUB_EP_DESC_STATUS);

		/* Set descriptor base */
		writel(ep->epn.descs_dma,
		       ep->epn.regs + AST_VHUB_EP_DESC_BASE);

		/* Set base DMA config value */
		ep->epn.dma_conf = VHUB_EP_DMA_DESC_MODE;
		if (ep->epn.is_in)
			ep->epn.dma_conf |= VHUB_EP_DMA_IN_LONG_MODE;

		/* First reset and disable all operations */
		writel(ep->epn.dma_conf | VHUB_EP_DMA_CTRL_RESET,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);

		/* Enable descriptor mode */
		writel(ep->epn.dma_conf,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
	} else {
		/* Set base DMA config value */
		ep->epn.dma_conf = VHUB_EP_DMA_SINGLE_STAGE;

		/* Reset and switch to single stage mode */
		writel(ep->epn.dma_conf | VHUB_EP_DMA_CTRL_RESET,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
		writel(ep->epn.dma_conf,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
		writel(0, ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
	}
	/* Cleanup data toggle just in case */
	writel(VHUB_EP_TOGGLE_SET_EPNUM(ep->epn.g_idx),
	       vhub->regs + AST_VHUB_EP_TOGGLE);

	/* Cleanup and enable ACK interrupt */
	imask = VHUB_EP_IRQ(ep->epn.g_idx);
	writel(imask, vhub->regs + AST_VHUB_EP_ACK_ISR);
	ep_ier = readl(vhub->regs + AST_VHUB_EP_ACK_IER);
	ep_ier |= imask;
	writel(ep_ier, vhub->regs + AST_VHUB_EP_ACK_IER);

	/* Woot, we are online ! */
	ep->epn.enabled = true;

	spin_unlock_irqrestore(&vhub->lock, flags);

	return 0;
}
static void ast_vhub_epn_dispose(struct usb_ep *u_ep)
{
	struct ast_vhub_ep *ep = to_ast_ep(u_ep);

	if (WARN_ON(!ep->dev || !ep->d_idx))
		return;

	EPDBG(ep, "Releasing endpoint\n");

	/* Take it out of the EP list */
	list_del_init(&ep->ep.ep_list);

	/* Mark the address free in the device */
	ep->dev->epns[ep->d_idx - 1] = NULL;

	/* Free name & DMA buffers */
	kfree(ep->ep.name);
	ep->ep.name = NULL;
	dma_free_coherent(&ep->vhub->pdev->dev,
			  AST_VHUB_EPn_MAX_PACKET +
			  8 * AST_VHUB_DESCS_COUNT,
			  ep->buf, ep->buf_dma);
	ep->buf = NULL;
	ep->epn.descs = NULL;

	/* Mark the endpoint slot free again */
	ep->dev = NULL;
}
static const struct usb_ep_ops ast_vhub_epn_ops = {
	.enable		= ast_vhub_epn_enable,
	.disable	= ast_vhub_epn_disable,
	.dispose	= ast_vhub_epn_dispose,
	.queue		= ast_vhub_epn_queue,
	.dequeue	= ast_vhub_epn_dequeue,
	.set_halt	= ast_vhub_epn_set_halt,
	.set_wedge	= ast_vhub_epn_set_wedge,
	.alloc_request	= ast_vhub_alloc_request,
	.free_request	= ast_vhub_free_request,
};
struct ast_vhub_ep *ast_vhub_alloc_epn(struct ast_vhub_dev *d, u8 addr)
{
	struct ast_vhub *vhub = d->vhub;
	struct ast_vhub_ep *ep;
	unsigned long flags;
	int i;

	/* Find a free one (no device) */
	spin_lock_irqsave(&vhub->lock, flags);
	for (i = 0; i < AST_VHUB_NUM_GEN_EPs; i++)
		if (vhub->epns[i].dev == NULL)
			break;
	if (i >= AST_VHUB_NUM_GEN_EPs) {
		spin_unlock_irqrestore(&vhub->lock, flags);
		return NULL;
	}

	/* Grab it and mark it in use before dropping the lock */
	ep = &vhub->epns[i];
	ep->dev = d;
	spin_unlock_irqrestore(&vhub->lock, flags);
	DDBG(d, "Allocating gen EP %d for addr %d\n", i, addr);
	INIT_LIST_HEAD(&ep->queue);
	ep->d_idx = addr;
	ep->vhub = vhub;
	ep->ep.ops = &ast_vhub_epn_ops;
	ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", addr);
	d->epns[addr - 1] = ep;
	ep->epn.g_idx = i;
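	/*
	 * Each generic endpoint owns a 0x10-byte register window; the
	 * windows are packed back to back starting at offset 0x200 of
	 * the vHub register space (layout as implied by the computation
	 * below).
	 */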
	ep->epn.regs = vhub->regs + 0x200 + (i * 0x10);
	ep->buf = dma_alloc_coherent(&vhub->pdev->dev,
				     AST_VHUB_EPn_MAX_PACKET +
				     8 * AST_VHUB_DESCS_COUNT,
				     &ep->buf_dma, GFP_KERNEL);
	if (!ep->buf) {
		kfree(ep->ep.name);
		ep->ep.name = NULL;
		return NULL;
	}
	ep->epn.descs = ep->buf + AST_VHUB_EPn_MAX_PACKET;
	ep->epn.descs_dma = ep->buf_dma + AST_VHUB_EPn_MAX_PACKET;
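	/*
	 * Note on the layout: a single coherent allocation serves both
	 * purposes, the first AST_VHUB_EPn_MAX_PACKET bytes are the
	 * bounce/staging buffer and the remainder holds the descriptor
	 * ring (AST_VHUB_DESCS_COUNT entries of 8 bytes each, i.e. the
	 * w0/w1 pair used above).
	 */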
	usb_ep_set_maxpacket_limit(&ep->ep, AST_VHUB_EPn_MAX_PACKET);
	list_add_tail(&ep->ep.ep_list, &d->gadget.ep_list);
	ep->ep.caps.type_iso = true;
	ep->ep.caps.type_bulk = true;
	ep->ep.caps.type_int = true;
	ep->ep.caps.dir_in = true;
	ep->ep.caps.dir_out = true;

	return ep;
}