drivers/usb/host/isp1362-hcd.c
1 /*
2  * ISP1362 HCD (Host Controller Driver) for USB.
3  *
4  * Copyright (C) 2005 Lothar Wassmann <LW@KARO-electronics.de>
5  *
6  * Derived from the SL811 HCD, rewritten for ISP116x.
7  * Copyright (C) 2005 Olav Kongas <ok@artecdesign.ee>
8  *
9  * Portions:
10  * Copyright (C) 2004 Psion Teklogix (for NetBook PRO)
11  * Copyright (C) 2004 David Brownell
12  */
13
14 /*
15  * The ISP1362 chip requires large delays (300ns and 462ns) between
16  * accesses to the address and data registers.
17  * The following timing options exist:
18  *
19  * 1. Configure your memory controller to add such delays if it can (the best)
20  * 2. Implement a platform-specific delay function, possibly
21  *    combined with configuring the memory controller; see
22  *    include/linux/usb/isp1362.h for more info.
23  * 3. Use ndelay (easiest, poorest).
24  *
25  * Use the corresponding macros USE_PLATFORM_DELAY and USE_NDELAY in the
26  * platform-specific section of isp1362.h to select the appropriate variant.
27  *
28  * Also note that according to the Philips "ISP1362 Errata" document
29  * Rev 1.00 from 27 May, data corruption may occur when the #WR signal
30  * is reasserted (even with #CS deasserted) within 132ns after a
31  * write cycle to any controller register. If the hardware doesn't
32  * implement the recommended fix (gating the #WR with #CS) software
33  * must ensure that no further write cycle (not necessarily to the chip!)
34  * is issued by the CPU within this interval.
35  *
36  * For PXA25x this can be ensured by using VLIO with the maximum
37  * recovery time (MSCx = 0x7f8c) with a memory clock of 99.53 MHz.
38  */
39
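/*
 * Illustrative sketch of option 2 above (platform-specific delay), not part
 * of this driver: board code passes its hooks through the platform data
 * declared in include/linux/usb/isp1362.h.  The exact field names and the
 * delay callback signature should be checked against that header; the
 * board-specific identifiers below are made up for this example:
 *
 *	static void my_board_isp1362_delay(struct device *dev, unsigned int ns)
 *	{
 *		ndelay(ns);	// or reprogram the memory controller instead
 *	}
 *
 *	static struct isp1362_platform_data my_board_isp1362_data = {
 *		.delay	= my_board_isp1362_delay,
 *	};
 *
 * With USE_PLATFORM_DELAY selected, chip accesses are routed through this hook.
 */
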
40 #ifdef CONFIG_USB_DEBUG
41 # define ISP1362_DEBUG
42 #else
43 # undef ISP1362_DEBUG
44 #endif
45
46 /*
47  * The PXA255 UDC apparently doesn't handle GET_STATUS, GET_CONFIG and
48  * GET_INTERFACE requests correctly when the SETUP and DATA stages of the
49  * requests are carried out in separate frames. Defining BUGGY_PXA2XX_UDC_USBTEST
50  * below delays any SETUP packets until the start of the next frame so that
51  * this situation is unlikely to occur (and makes usbtest happy when running
52  * with a PXA255 target device).
53  */
54 #undef BUGGY_PXA2XX_UDC_USBTEST
55
56 #undef PTD_TRACE
57 #undef URB_TRACE
58 #undef VERBOSE
59 #undef REGISTERS
60
61 /* This enables a memory test on the ISP1362 chip memory to make sure the
62  * chip access timing is correct.
63  */
64 #undef CHIP_BUFFER_TEST
65
66 #include <linux/module.h>
67 #include <linux/moduleparam.h>
68 #include <linux/kernel.h>
69 #include <linux/delay.h>
70 #include <linux/ioport.h>
71 #include <linux/sched.h>
72 #include <linux/slab.h>
73 #include <linux/smp_lock.h>
74 #include <linux/errno.h>
75 #include <linux/init.h>
76 #include <linux/list.h>
77 #include <linux/interrupt.h>
78 #include <linux/usb.h>
79 #include <linux/usb/isp1362.h>
80 #include <linux/usb/hcd.h>
81 #include <linux/platform_device.h>
82 #include <linux/pm.h>
83 #include <linux/io.h>
84 #include <linux/bitmap.h>
85
86 #include <asm/irq.h>
87 #include <asm/system.h>
88 #include <asm/byteorder.h>
89 #include <asm/unaligned.h>
90
91 static int dbg_level;
92 #ifdef ISP1362_DEBUG
93 module_param(dbg_level, int, 0644);
94 #else
95 module_param(dbg_level, int, 0);
96 #define STUB_DEBUG_FILE
97 #endif
98
99 #include "../core/usb.h"
100 #include "isp1362.h"
101
102
103 #define DRIVER_VERSION  "2005-04-04"
104 #define DRIVER_DESC     "ISP1362 USB Host Controller Driver"
105
106 MODULE_DESCRIPTION(DRIVER_DESC);
107 MODULE_LICENSE("GPL");
108
109 static const char hcd_name[] = "isp1362-hcd";
110
111 static void isp1362_hc_stop(struct usb_hcd *hcd);
112 static int isp1362_hc_start(struct usb_hcd *hcd);
113
114 /*-------------------------------------------------------------------------*/
115
116 /*
117  * When called from the interrupt handler only isp1362_hcd->irqenb is modified,
118  * since the interrupt handler will write isp1362_hcd->irqenb to HCuPINTENB upon
119  * completion.
120  * We don't need a 'disable' counterpart, since interrupts will be disabled
121  * only by the interrupt handler.
122  */
123 static inline void isp1362_enable_int(struct isp1362_hcd *isp1362_hcd, u16 mask)
124 {
125         if ((isp1362_hcd->irqenb | mask) == isp1362_hcd->irqenb)
126                 return;
127         if (mask & ~isp1362_hcd->irqenb)
128                 isp1362_write_reg16(isp1362_hcd, HCuPINT, mask & ~isp1362_hcd->irqenb);
129         isp1362_hcd->irqenb |= mask;
130         if (isp1362_hcd->irq_active)
131                 return;
132         isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
133 }
134
135 /*-------------------------------------------------------------------------*/
136
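/*
 * Map a PTD buffer offset in chip memory back to the queue (ISTL0/1, INTL
 * or ATL) that owns it, by comparing against the per-queue buffer windows.
 */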
137 static inline struct isp1362_ep_queue *get_ptd_queue(struct isp1362_hcd *isp1362_hcd,
138                                                      u16 offset)
139 {
140         struct isp1362_ep_queue *epq = NULL;
141
142         if (offset < isp1362_hcd->istl_queue[1].buf_start)
143                 epq = &isp1362_hcd->istl_queue[0];
144         else if (offset < isp1362_hcd->intl_queue.buf_start)
145                 epq = &isp1362_hcd->istl_queue[1];
146         else if (offset < isp1362_hcd->atl_queue.buf_start)
147                 epq = &isp1362_hcd->intl_queue;
148         else if (offset < isp1362_hcd->atl_queue.buf_start +
149                    isp1362_hcd->atl_queue.buf_size)
150                 epq = &isp1362_hcd->atl_queue;
151
152         if (epq)
153                 DBG(1, "%s: PTD $%04x is on %s queue\n", __func__, offset, epq->name);
154         else
155                 pr_warning("%s: invalid PTD $%04x\n", __func__, offset);
156
157         return epq;
158 }
159
160 static inline int get_ptd_offset(struct isp1362_ep_queue *epq, u8 index)
161 {
162         int offset;
163
164         if (index * epq->blk_size > epq->buf_size) {
165                 pr_warning("%s: Bad %s index %d(%d)\n", __func__, epq->name, index,
166                      epq->buf_size / epq->blk_size);
167                 return -EINVAL;
168         }
169         offset = epq->buf_start + index * epq->blk_size;
170         DBG(3, "%s: %s PTD[%02x] # %04x\n", __func__, epq->name, index, offset);
171
172         return offset;
173 }
174
175 /*-------------------------------------------------------------------------*/
176
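/*
 * Limit a transfer to what fits into the currently free part of the queue
 * buffer (minus the PTD header); partial transfers are trimmed down to a
 * multiple of the endpoint's max packet size.
 */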
177 static inline u16 max_transfer_size(struct isp1362_ep_queue *epq, size_t size,
178                                     int mps)
179 {
180         u16 xfer_size = min_t(size_t, MAX_XFER_SIZE, size);
181
182         xfer_size = min_t(size_t, xfer_size, epq->buf_avail * epq->blk_size - PTD_HEADER_SIZE);
183         if (xfer_size < size && xfer_size % mps)
184                 xfer_size -= xfer_size % mps;
185
186         return xfer_size;
187 }
188
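/*
 * Claim a contiguous run of PTD buffer blocks large enough for 'len' payload
 * bytes plus the PTD header.  Returns the first block index, -ENOMEM if the
 * queue is exhausted, or -EOVERFLOW if no sufficiently large run is free.
 */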
189 static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
190                              struct isp1362_ep *ep, u16 len)
191 {
192         int ptd_offset = -EINVAL;
193         int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1;
194         int found;
195
196         BUG_ON(len > epq->buf_size);
197
198         if (!epq->buf_avail)
199                 return -ENOMEM;
200
201         if (ep->num_ptds)
202                 pr_err("%s: %s len %d/%d num_ptds %d buf_map %08lx skip_map %08lx\n", __func__,
203                     epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map);
204         BUG_ON(ep->num_ptds != 0);
205
206         found = bitmap_find_next_zero_area(&epq->buf_map, epq->buf_count, 0,
207                                                 num_ptds, 0);
208         if (found >= epq->buf_count)
209                 return -EOVERFLOW;
210
211         DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__,
212             num_ptds, found, len, (int)(epq->blk_size - PTD_HEADER_SIZE));
213         ptd_offset = get_ptd_offset(epq, found);
214         WARN_ON(ptd_offset < 0);
215         ep->ptd_offset = ptd_offset;
216         ep->num_ptds += num_ptds;
217         epq->buf_avail -= num_ptds;
218         BUG_ON(epq->buf_avail > epq->buf_count);
219         ep->ptd_index = found;
220         bitmap_set(&epq->buf_map, found, num_ptds);
221         DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n",
222             __func__, epq->name, ep->ptd_index, ep->ptd_offset,
223             epq->buf_avail, epq->buf_count, num_ptds, epq->buf_map, epq->skip_map);
224
225         return found;
226 }
227
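/*
 * Return an endpoint's PTD blocks to the queue: clear them in the allocation
 * bitmap, mark them in the skip map so the chip ignores them, and reset the
 * endpoint's PTD bookkeeping.
 */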
228 static inline void release_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
229 {
230         int index = ep->ptd_index;
231         int last = ep->ptd_index + ep->num_ptds;
232
233         if (last > epq->buf_count)
234                 pr_err("%s: ep %p req %d len %d %s PTD[%d] $%04x num_ptds %d buf_count %d buf_avail %d buf_map %08lx skip_map %08lx\n",
235                     __func__, ep, ep->num_req, ep->length, epq->name, ep->ptd_index,
236                     ep->ptd_offset, ep->num_ptds, epq->buf_count, epq->buf_avail,
237                     epq->buf_map, epq->skip_map);
238         BUG_ON(last > epq->buf_count);
239
240         for (; index < last; index++) {
241                 __clear_bit(index, &epq->buf_map);
242                 __set_bit(index, &epq->skip_map);
243         }
244         epq->buf_avail += ep->num_ptds;
245         epq->ptd_count--;
246
247         BUG_ON(epq->buf_avail > epq->buf_count);
248         BUG_ON(epq->ptd_count > epq->buf_count);
249
250         DBG(1, "%s: Done %s PTDs $%04x released %d avail %d count %d\n",
251             __func__, epq->name,
252             ep->ptd_offset, ep->num_ptds, epq->buf_avail, epq->buf_count);
253         DBG(1, "%s: buf_map %08lx skip_map %08lx\n", __func__,
254             epq->buf_map, epq->skip_map);
255
256         ep->num_ptds = 0;
257         ep->ptd_offset = -EINVAL;
258         ep->ptd_index = -EINVAL;
259 }
260
261 /*-------------------------------------------------------------------------*/
262
263 /*
264   Set up the PTD for this endpoint's next transaction (direction, toggle, length).
265 */
266 static void prepare_ptd(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
267                         struct isp1362_ep *ep, struct isp1362_ep_queue *epq,
268                         u16 fno)
269 {
270         struct ptd *ptd;
271         int toggle;
272         int dir;
273         u16 len;
274         size_t buf_len = urb->transfer_buffer_length - urb->actual_length;
275
276         DBG(3, "%s: %s ep %p\n", __func__, epq->name, ep);
277
278         ptd = &ep->ptd;
279
280         ep->data = (unsigned char *)urb->transfer_buffer + urb->actual_length;
281
282         switch (ep->nextpid) {
283         case USB_PID_IN:
284                 toggle = usb_gettoggle(urb->dev, ep->epnum, 0);
285                 dir = PTD_DIR_IN;
286                 if (usb_pipecontrol(urb->pipe)) {
287                         len = min_t(size_t, ep->maxpacket, buf_len);
288                 } else if (usb_pipeisoc(urb->pipe)) {
289                         len = min_t(size_t, urb->iso_frame_desc[fno].length, MAX_XFER_SIZE);
290                         ep->data = urb->transfer_buffer + urb->iso_frame_desc[fno].offset;
291                 } else
292                         len = max_transfer_size(epq, buf_len, ep->maxpacket);
293                 DBG(1, "%s: IN    len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
294                     (int)buf_len);
295                 break;
296         case USB_PID_OUT:
297                 toggle = usb_gettoggle(urb->dev, ep->epnum, 1);
298                 dir = PTD_DIR_OUT;
299                 if (usb_pipecontrol(urb->pipe))
300                         len = min_t(size_t, ep->maxpacket, buf_len);
301                 else if (usb_pipeisoc(urb->pipe))
302                         len = min_t(size_t, urb->iso_frame_desc[0].length, MAX_XFER_SIZE);
303                 else
304                         len = max_transfer_size(epq, buf_len, ep->maxpacket);
305                 if (len == 0)
306                         pr_info("%s: Sending ZERO packet: %d\n", __func__,
307                              urb->transfer_flags & URB_ZERO_PACKET);
308                 DBG(1, "%s: OUT   len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
309                     (int)buf_len);
310                 break;
311         case USB_PID_SETUP:
312                 toggle = 0;
313                 dir = PTD_DIR_SETUP;
314                 len = sizeof(struct usb_ctrlrequest);
315                 DBG(1, "%s: SETUP len %d\n", __func__, len);
316                 ep->data = urb->setup_packet;
317                 break;
318         case USB_PID_ACK:
319                 toggle = 1;
320                 len = 0;
321                 dir = (urb->transfer_buffer_length && usb_pipein(urb->pipe)) ?
322                         PTD_DIR_OUT : PTD_DIR_IN;
323                 DBG(1, "%s: ACK   len %d\n", __func__, len);
324                 break;
325         default:
326                 toggle = dir = len = 0;
327                 pr_err("%s@%d: ep->nextpid %02x\n", __func__, __LINE__, ep->nextpid);
328                 BUG_ON(1);
329         }
330
331         ep->length = len;
332         if (!len)
333                 ep->data = NULL;
334
335         ptd->count = PTD_CC_MSK | PTD_ACTIVE_MSK | PTD_TOGGLE(toggle);
336         ptd->mps = PTD_MPS(ep->maxpacket) | PTD_SPD(urb->dev->speed == USB_SPEED_LOW) |
337                 PTD_EP(ep->epnum);
338         ptd->len = PTD_LEN(len) | PTD_DIR(dir);
339         ptd->faddr = PTD_FA(usb_pipedevice(urb->pipe));
340
341         if (usb_pipeint(urb->pipe)) {
342                 ptd->faddr |= PTD_SF_INT(ep->branch);
343                 ptd->faddr |= PTD_PR(ep->interval ? __ffs(ep->interval) : 0);
344         }
345         if (usb_pipeisoc(urb->pipe))
346                 ptd->faddr |= PTD_SF_ISO(fno);
347
348         DBG(1, "%s: Finished\n", __func__);
349 }
350
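/* Copy the prepared PTD header (and, for OUT transfers, the payload) into
 * the chip's buffer memory at the endpoint's claimed offset.
 */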
351 static void isp1362_write_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
352                               struct isp1362_ep_queue *epq)
353 {
354         struct ptd *ptd = &ep->ptd;
355         int len = PTD_GET_DIR(ptd) == PTD_DIR_IN ? 0 : ep->length;
356
357         _BUG_ON(ep->ptd_offset < 0);
358
359         prefetch(ptd);
360         isp1362_write_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
361         if (len)
362                 isp1362_write_buffer(isp1362_hcd, ep->data,
363                                      ep->ptd_offset + PTD_HEADER_SIZE, len);
364
365         dump_ptd(ptd);
366         dump_ptd_out_data(ptd, ep->data);
367 }
368
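/* Read back the PTD header from chip memory and, for IN transfers, copy only
 * the bytes the chip actually received into the caller's buffer.
 */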
369 static void isp1362_read_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
370                              struct isp1362_ep_queue *epq)
371 {
372         struct ptd *ptd = &ep->ptd;
373         int act_len;
374
375         WARN_ON(list_empty(&ep->active));
376         BUG_ON(ep->ptd_offset < 0);
377
378         list_del_init(&ep->active);
379         DBG(1, "%s: ep %p removed from active list %p\n", __func__, ep, &epq->active);
380
381         prefetchw(ptd);
382         isp1362_read_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
383         dump_ptd(ptd);
384         act_len = PTD_GET_COUNT(ptd);
385         if (PTD_GET_DIR(ptd) != PTD_DIR_IN || act_len == 0)
386                 return;
387         if (act_len > ep->length)
388                 pr_err("%s: ep %p PTD $%04x act_len %d ep->length %d\n", __func__, ep,
389                          ep->ptd_offset, act_len, ep->length);
390         BUG_ON(act_len > ep->length);
391         /* Only transfer the amount of data that has actually been overwritten
392          * in the chip buffer. We don't want any data that doesn't belong to the
393          * transfer to leak out of the chip to the caller's transfer buffer!
394          */
395         prefetchw(ep->data);
396         isp1362_read_buffer(isp1362_hcd, ep->data,
397                             ep->ptd_offset + PTD_HEADER_SIZE, act_len);
398         dump_ptd_in_data(ptd, ep->data);
399 }
400
401 /*
402  * INT PTDs will stay in the chip until data is available.
403  * This function will remove a PTD from the chip when the URB is dequeued.
404  * Must be called with the spinlock held and IRQs disabled
405  */
406 static void remove_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
407
408 {
409         int index;
410         struct isp1362_ep_queue *epq;
411
412         DBG(1, "%s: ep %p PTD[%d] $%04x\n", __func__, ep, ep->ptd_index, ep->ptd_offset);
413         BUG_ON(ep->ptd_offset < 0);
414
415         epq = get_ptd_queue(isp1362_hcd, ep->ptd_offset);
416         BUG_ON(!epq);
417
418         /* put ep in remove_list for cleanup */
419         WARN_ON(!list_empty(&ep->remove_list));
420         list_add_tail(&ep->remove_list, &isp1362_hcd->remove_list);
421         /* let SOF interrupt handle the cleanup */
422         isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
423
424         index = ep->ptd_index;
425         if (index < 0)
426                 /* ISO queues don't have SKIP registers */
427                 return;
428
429         DBG(1, "%s: Disabling PTD[%02x] $%04x %08lx|%08x\n", __func__,
430             index, ep->ptd_offset, epq->skip_map, 1 << index);
431
432         /* prevent further processing of PTD (will be effective after next SOF) */
433         epq->skip_map |= 1 << index;
434         if (epq == &isp1362_hcd->atl_queue) {
435                 DBG(2, "%s: ATLSKIP = %08x -> %08lx\n", __func__,
436                     isp1362_read_reg32(isp1362_hcd, HCATLSKIP), epq->skip_map);
437                 isp1362_write_reg32(isp1362_hcd, HCATLSKIP, epq->skip_map);
438                 if (~epq->skip_map == 0)
439                         isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
440         } else if (epq == &isp1362_hcd->intl_queue) {
441                 DBG(2, "%s: INTLSKIP = %08x -> %08lx\n", __func__,
442                     isp1362_read_reg32(isp1362_hcd, HCINTLSKIP), epq->skip_map);
443                 isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, epq->skip_map);
444                 if (~epq->skip_map == 0)
445                         isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
446         }
447 }
448
449 /*
450   Take done or failed requests out of the schedule. Give back
451   processed URBs.
452 */
453 static void finish_request(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
454                            struct urb *urb, int status)
455      __releases(isp1362_hcd->lock)
456      __acquires(isp1362_hcd->lock)
457 {
458         urb->hcpriv = NULL;
459         ep->error_count = 0;
460
461         if (usb_pipecontrol(urb->pipe))
462                 ep->nextpid = USB_PID_SETUP;
463
464         URB_DBG("%s: req %d FA %d ep%d%s %s: len %d/%d %s stat %d\n", __func__,
465                 ep->num_req, usb_pipedevice(urb->pipe),
466                 usb_pipeendpoint(urb->pipe),
467                 !usb_pipein(urb->pipe) ? "out" : "in",
468                 usb_pipecontrol(urb->pipe) ? "ctrl" :
469                         usb_pipeint(urb->pipe) ? "int" :
470                         usb_pipebulk(urb->pipe) ? "bulk" :
471                         "iso",
472                 urb->actual_length, urb->transfer_buffer_length,
473                 !(urb->transfer_flags & URB_SHORT_NOT_OK) ?
474                 "short_ok" : "", urb->status);
475
476
477         usb_hcd_unlink_urb_from_ep(isp1362_hcd_to_hcd(isp1362_hcd), urb);
478         spin_unlock(&isp1362_hcd->lock);
479         usb_hcd_giveback_urb(isp1362_hcd_to_hcd(isp1362_hcd), urb, status);
480         spin_lock(&isp1362_hcd->lock);
481
482         /* take idle endpoints out of the schedule right away */
483         if (!list_empty(&ep->hep->urb_list))
484                 return;
485
486         /* async deschedule */
487         if (!list_empty(&ep->schedule)) {
488                 list_del_init(&ep->schedule);
489                 return;
490         }
491
492
493         if (ep->interval) {
494                 /* periodic deschedule */
495                 DBG(1, "deschedule qh%d/%p branch %d load %d bandwidth %d -> %d\n", ep->interval,
496                     ep, ep->branch, ep->load,
497                     isp1362_hcd->load[ep->branch],
498                     isp1362_hcd->load[ep->branch] - ep->load);
499                 isp1362_hcd->load[ep->branch] -= ep->load;
500                 ep->branch = PERIODIC_SIZE;
501         }
502 }
503
504 /*
505  * Analyze transfer results, handle partial transfers and errors
506 */
507 static void postproc_ep(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
508 {
509         struct urb *urb = get_urb(ep);
510         struct usb_device *udev;
511         struct ptd *ptd;
512         int short_ok;
513         u16 len;
514         int urbstat = -EINPROGRESS;
515         u8 cc;
516
517         DBG(2, "%s: ep %p req %d\n", __func__, ep, ep->num_req);
518
519         udev = urb->dev;
520         ptd = &ep->ptd;
521         cc = PTD_GET_CC(ptd);
522         if (cc == PTD_NOTACCESSED) {
523                 pr_err("%s: req %d PTD %p Untouched by ISP1362\n", __func__,
524                     ep->num_req, ptd);
525                 cc = PTD_DEVNOTRESP;
526         }
527
528         short_ok = !(urb->transfer_flags & URB_SHORT_NOT_OK);
529         len = urb->transfer_buffer_length - urb->actual_length;
530
531         /* Data underrun is special. For an allowed underrun
532            we clear the error and continue as normal. For a
533            forbidden underrun we finish the DATA stage
534            immediately, except for control transfers, where
535            we proceed with the STATUS stage instead.
536         */
537         if (cc == PTD_DATAUNDERRUN) {
538                 if (short_ok) {
539                         DBG(1, "%s: req %d Allowed data underrun short_%sok %d/%d/%d byte\n",
540                             __func__, ep->num_req, short_ok ? "" : "not_",
541                             PTD_GET_COUNT(ptd), ep->maxpacket, len);
542                         cc = PTD_CC_NOERROR;
543                         urbstat = 0;
544                 } else {
545                         DBG(1, "%s: req %d Data Underrun %s nextpid %02x short_%sok %d/%d/%d byte\n",
546                             __func__, ep->num_req,
547                             usb_pipein(urb->pipe) ? "IN" : "OUT", ep->nextpid,
548                             short_ok ? "" : "not_",
549                             PTD_GET_COUNT(ptd), ep->maxpacket, len);
550                         if (usb_pipecontrol(urb->pipe)) {
551                                 ep->nextpid = USB_PID_ACK;
552                                 /* save the data underrun error code for later and
553                                  * proceed with the status stage
554                                  */
555                                 urb->actual_length += PTD_GET_COUNT(ptd);
556                                 BUG_ON(urb->actual_length > urb->transfer_buffer_length);
557
558                                 if (urb->status == -EINPROGRESS)
559                                         urb->status = cc_to_error[PTD_DATAUNDERRUN];
560                         } else {
561                                 usb_settoggle(udev, ep->epnum, ep->nextpid == USB_PID_OUT,
562                                               PTD_GET_TOGGLE(ptd));
563                                 urbstat = cc_to_error[PTD_DATAUNDERRUN];
564                         }
565                         goto out;
566                 }
567         }
568
569         if (cc != PTD_CC_NOERROR) {
570                 if (++ep->error_count >= 3 || cc == PTD_CC_STALL || cc == PTD_DATAOVERRUN) {
571                         urbstat = cc_to_error[cc];
572                         DBG(1, "%s: req %d nextpid %02x, status %d, error %d, error_count %d\n",
573                             __func__, ep->num_req, ep->nextpid, urbstat, cc,
574                             ep->error_count);
575                 }
576                 goto out;
577         }
578
579         switch (ep->nextpid) {
580         case USB_PID_OUT:
581                 if (PTD_GET_COUNT(ptd) != ep->length)
582                         pr_err("%s: count=%d len=%d\n", __func__,
583                            PTD_GET_COUNT(ptd), ep->length);
584                 BUG_ON(PTD_GET_COUNT(ptd) != ep->length);
585                 urb->actual_length += ep->length;
586                 BUG_ON(urb->actual_length > urb->transfer_buffer_length);
587                 usb_settoggle(udev, ep->epnum, 1, PTD_GET_TOGGLE(ptd));
588                 if (urb->actual_length == urb->transfer_buffer_length) {
589                         DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
590                             ep->num_req, len, ep->maxpacket, urbstat);
591                         if (usb_pipecontrol(urb->pipe)) {
592                                 DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
593                                     ep->num_req,
594                                     usb_pipein(urb->pipe) ? "IN" : "OUT");
595                                 ep->nextpid = USB_PID_ACK;
596                         } else {
597                                 if (len % ep->maxpacket ||
598                                     !(urb->transfer_flags & URB_ZERO_PACKET)) {
599                                         urbstat = 0;
600                                         DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
601                                             __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
602                                             urbstat, len, ep->maxpacket, urb->actual_length);
603                                 }
604                         }
605                 }
606                 break;
607         case USB_PID_IN:
608                 len = PTD_GET_COUNT(ptd);
609                 BUG_ON(len > ep->length);
610                 urb->actual_length += len;
611                 BUG_ON(urb->actual_length > urb->transfer_buffer_length);
612                 usb_settoggle(udev, ep->epnum, 0, PTD_GET_TOGGLE(ptd));
613                 /* if transfer completed or (allowed) data underrun */
614                 if ((urb->transfer_buffer_length == urb->actual_length) ||
615                     len % ep->maxpacket) {
616                         DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
617                             ep->num_req, len, ep->maxpacket, urbstat);
618                         if (usb_pipecontrol(urb->pipe)) {
619                                 DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
620                                     ep->num_req,
621                                     usb_pipein(urb->pipe) ? "IN" : "OUT");
622                                 ep->nextpid = USB_PID_ACK;
623                         } else {
624                                 urbstat = 0;
625                                 DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
626                                     __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
627                                     urbstat, len, ep->maxpacket, urb->actual_length);
628                         }
629                 }
630                 break;
631         case USB_PID_SETUP:
632                 if (urb->transfer_buffer_length == urb->actual_length) {
633                         ep->nextpid = USB_PID_ACK;
634                 } else if (usb_pipeout(urb->pipe)) {
635                         usb_settoggle(udev, 0, 1, 1);
636                         ep->nextpid = USB_PID_OUT;
637                 } else {
638                         usb_settoggle(udev, 0, 0, 1);
639                         ep->nextpid = USB_PID_IN;
640                 }
641                 break;
642         case USB_PID_ACK:
643                 DBG(3, "%s: req %d got ACK %d -> 0\n", __func__, ep->num_req,
644                     urbstat);
645                 WARN_ON(urbstat != -EINPROGRESS);
646                 urbstat = 0;
647                 ep->nextpid = 0;
648                 break;
649         default:
650                 BUG_ON(1);
651         }
652
653  out:
654         if (urbstat != -EINPROGRESS) {
655                 DBG(2, "%s: Finishing ep %p req %d urb %p status %d\n", __func__,
656                     ep, ep->num_req, urb, urbstat);
657                 finish_request(isp1362_hcd, ep, urb, urbstat);
658         }
659 }
660
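/*
 * Called from the SOF interrupt: release the PTD buffers of endpoints queued
 * on remove_list, give back their pending URB with -ESHUTDOWN and drop them
 * from the active and remove lists.
 */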
661 static void finish_unlinks(struct isp1362_hcd *isp1362_hcd)
662 {
663         struct isp1362_ep *ep;
664         struct isp1362_ep *tmp;
665
666         list_for_each_entry_safe(ep, tmp, &isp1362_hcd->remove_list, remove_list) {
667                 struct isp1362_ep_queue *epq =
668                         get_ptd_queue(isp1362_hcd, ep->ptd_offset);
669                 int index = ep->ptd_index;
670
671                 BUG_ON(epq == NULL);
672                 if (index >= 0) {
673                         DBG(1, "%s: remove PTD[%d] $%04x\n", __func__, index, ep->ptd_offset);
674                         BUG_ON(ep->num_ptds == 0);
675                         release_ptd_buffers(epq, ep);
676                 }
677                 if (!list_empty(&ep->hep->urb_list)) {
678                         struct urb *urb = get_urb(ep);
679
680                         DBG(1, "%s: Finishing req %d ep %p from remove_list\n", __func__,
681                             ep->num_req, ep);
682                         finish_request(isp1362_hcd, ep, urb, -ESHUTDOWN);
683                 }
684                 WARN_ON(list_empty(&ep->active));
685                 if (!list_empty(&ep->active)) {
686                         list_del_init(&ep->active);
687                         DBG(1, "%s: ep %p removed from active list\n", __func__, ep);
688                 }
689                 list_del_init(&ep->remove_list);
690                 DBG(1, "%s: ep %p removed from remove_list\n", __func__, ep);
691         }
692         DBG(1, "%s: Done\n", __func__);
693 }
694
695 static inline void enable_atl_transfers(struct isp1362_hcd *isp1362_hcd, int count)
696 {
697         if (count > 0) {
698                 if (count < isp1362_hcd->atl_queue.ptd_count)
699                         isp1362_write_reg16(isp1362_hcd, HCATLDTC, count);
700                 isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
701                 isp1362_write_reg32(isp1362_hcd, HCATLSKIP, isp1362_hcd->atl_queue.skip_map);
702                 isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
703         } else
704                 isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
705 }
706
707 static inline void enable_intl_transfers(struct isp1362_hcd *isp1362_hcd)
708 {
709         isp1362_enable_int(isp1362_hcd, HCuPINT_INTL);
710         isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
711         isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, isp1362_hcd->intl_queue.skip_map);
712 }
713
714 static inline void enable_istl_transfers(struct isp1362_hcd *isp1362_hcd, int flip)
715 {
716         isp1362_enable_int(isp1362_hcd, flip ? HCuPINT_ISTL1 : HCuPINT_ISTL0);
717         isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, flip ?
718                            HCBUFSTAT_ISTL1_FULL : HCBUFSTAT_ISTL0_FULL);
719 }
720
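/*
 * Prepare a PTD for the given URB, claim buffer space for it, write it to
 * chip memory and clear its skip bit.  Returns 0 on success or the
 * -ENOMEM/-EOVERFLOW result of the buffer allocation.
 */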
721 static int submit_req(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
722                       struct isp1362_ep *ep, struct isp1362_ep_queue *epq)
723 {
724         int index = epq->free_ptd;
725
726         prepare_ptd(isp1362_hcd, urb, ep, epq, 0);
727         index = claim_ptd_buffers(epq, ep, ep->length);
728         if (index == -ENOMEM) {
729                 DBG(1, "%s: req %d No free %s PTD available: %d, %08lx:%08lx\n", __func__,
730                     ep->num_req, epq->name, ep->num_ptds, epq->buf_map, epq->skip_map);
731                 return index;
732         } else if (index == -EOVERFLOW) {
733                 DBG(1, "%s: req %d Not enough space for %d byte %s PTD %d %08lx:%08lx\n",
734                     __func__, ep->num_req, ep->length, epq->name, ep->num_ptds,
735                     epq->buf_map, epq->skip_map);
736                 return index;
737         } else
738                 BUG_ON(index < 0);
739         list_add_tail(&ep->active, &epq->active);
740         DBG(1, "%s: ep %p req %d len %d added to active list %p\n", __func__,
741             ep, ep->num_req, ep->length, &epq->active);
742         DBG(1, "%s: Submitting %s PTD $%04x for ep %p req %d\n", __func__, epq->name,
743             ep->ptd_offset, ep, ep->num_req);
744         isp1362_write_ptd(isp1362_hcd, ep, epq);
745         __clear_bit(ep->ptd_index, &epq->skip_map);
746
747         return 0;
748 }
749
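/*
 * Walk the async schedule and submit a PTD for every control/bulk endpoint
 * that is not already active, then enable ATL processing.  The schedule list
 * is rotated afterwards so that no endpoint is starved.
 */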
750 static void start_atl_transfers(struct isp1362_hcd *isp1362_hcd)
751 {
752         int ptd_count = 0;
753         struct isp1362_ep_queue *epq = &isp1362_hcd->atl_queue;
754         struct isp1362_ep *ep;
755         int defer = 0;
756
757         if (atomic_read(&epq->finishing)) {
758                 DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
759                 return;
760         }
761
762         list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
763                 struct urb *urb = get_urb(ep);
764                 int ret;
765
766                 if (!list_empty(&ep->active)) {
767                         DBG(2, "%s: Skipping active %s ep %p\n", __func__, epq->name, ep);
768                         continue;
769                 }
770
771                 DBG(1, "%s: Processing %s ep %p req %d\n", __func__, epq->name,
772                     ep, ep->num_req);
773
774                 ret = submit_req(isp1362_hcd, urb, ep, epq);
775                 if (ret == -ENOMEM) {
776                         defer = 1;
777                         break;
778                 } else if (ret == -EOVERFLOW) {
779                         defer = 1;
780                         continue;
781                 }
782 #ifdef BUGGY_PXA2XX_UDC_USBTEST
783                 defer = ep->nextpid == USB_PID_SETUP;
784 #endif
785                 ptd_count++;
786         }
787
788         /* Avoid starvation of endpoints */
789         if (isp1362_hcd->async.next != isp1362_hcd->async.prev) {
790                 DBG(2, "%s: Cycling ASYNC schedule %d\n", __func__, ptd_count);
791                 list_move(&isp1362_hcd->async, isp1362_hcd->async.next);
792         }
793         if (ptd_count || defer)
794                 enable_atl_transfers(isp1362_hcd, defer ? 0 : ptd_count);
795
796         epq->ptd_count += ptd_count;
797         if (epq->ptd_count > epq->stat_maxptds) {
798                 epq->stat_maxptds = epq->ptd_count;
799                 DBG(0, "%s: max_ptds: %d\n", __func__, epq->stat_maxptds);
800         }
801 }
802
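/*
 * Walk the periodic schedule and submit a PTD for every interrupt endpoint
 * that is not already active, then enable INTL processing.
 */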
803 static void start_intl_transfers(struct isp1362_hcd *isp1362_hcd)
804 {
805         int ptd_count = 0;
806         struct isp1362_ep_queue *epq = &isp1362_hcd->intl_queue;
807         struct isp1362_ep *ep;
808
809         if (atomic_read(&epq->finishing)) {
810                 DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
811                 return;
812         }
813
814         list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
815                 struct urb *urb = get_urb(ep);
816                 int ret;
817
818                 if (!list_empty(&ep->active)) {
819                         DBG(1, "%s: Skipping active %s ep %p\n", __func__,
820                             epq->name, ep);
821                         continue;
822                 }
823
824                 DBG(1, "%s: Processing %s ep %p req %d\n", __func__,
825                     epq->name, ep, ep->num_req);
826                 ret = submit_req(isp1362_hcd, urb, ep, epq);
827                 if (ret == -ENOMEM)
828                         break;
829                 else if (ret == -EOVERFLOW)
830                         continue;
831                 ptd_count++;
832         }
833
834         if (ptd_count) {
835                 static int last_count;
836
837                 if (ptd_count != last_count) {
838                         DBG(0, "%s: ptd_count: %d\n", __func__, ptd_count);
839                         last_count = ptd_count;
840                 }
841                 enable_intl_transfers(isp1362_hcd);
842         }
843
844         epq->ptd_count += ptd_count;
845         if (epq->ptd_count > epq->stat_maxptds)
846                 epq->stat_maxptds = epq->ptd_count;
847 }
848
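/*
 * Compute the chip memory offset right after the blocks used by this
 * endpoint's PTD, or -ENOMEM if that would fall outside the queue buffer.
 */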
849 static inline int next_ptd(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
850 {
851         u16 ptd_offset = ep->ptd_offset;
852         int num_ptds = (ep->length + PTD_HEADER_SIZE + (epq->blk_size - 1)) / epq->blk_size;
853
854         DBG(2, "%s: PTD offset $%04x + %04x => %d * %04x -> $%04x\n", __func__, ptd_offset,
855             ep->length, num_ptds, epq->blk_size, ptd_offset + num_ptds * epq->blk_size);
856
857         ptd_offset += num_ptds * epq->blk_size;
858         if (ptd_offset < epq->buf_start + epq->buf_size)
859                 return ptd_offset;
860         else
861                 return -ENOMEM;
862 }
863
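/*
 * Fill the currently inactive ISTL buffer with PTDs for isochronous URBs that
 * are due in the next frame, mark the last PTD and flag the buffer full.  If
 * the second ISTL buffer is also free, it is filled for the frame after that.
 */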
864 static void start_iso_transfers(struct isp1362_hcd *isp1362_hcd)
865 {
866         int ptd_count = 0;
867         int flip = isp1362_hcd->istl_flip;
868         struct isp1362_ep_queue *epq;
869         int ptd_offset;
870         struct isp1362_ep *ep;
871         struct isp1362_ep *tmp;
872         u16 fno = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
873
874  fill2:
875         epq = &isp1362_hcd->istl_queue[flip];
876         if (atomic_read(&epq->finishing)) {
877                 DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
878                 return;
879         }
880
881         if (!list_empty(&epq->active))
882                 return;
883
884         ptd_offset = epq->buf_start;
885         list_for_each_entry_safe(ep, tmp, &isp1362_hcd->isoc, schedule) {
886                 struct urb *urb = get_urb(ep);
887                 s16 diff = fno - (u16)urb->start_frame;
888
889                 DBG(1, "%s: Processing %s ep %p\n", __func__, epq->name, ep);
890
891                 if (diff > urb->number_of_packets) {
892                         /* time frame for this URB has elapsed */
893                         finish_request(isp1362_hcd, ep, urb, -EOVERFLOW);
894                         continue;
895                 } else if (diff < -1) {
896                         /* URB is not due in this frame or the next one.
897                          * Comparing with '-1' instead of '0' accounts for double
898                          * buffering in the ISP1362 which enables us to queue the PTD
899                          * one frame ahead of time
900                          */
901                 } else if (diff == -1) {
902                         /* submit PTDs that are due in the next frame */
903                         prepare_ptd(isp1362_hcd, urb, ep, epq, fno);
904                         if (ptd_offset + PTD_HEADER_SIZE + ep->length >
905                             epq->buf_start + epq->buf_size) {
906                                 pr_err("%s: Not enough ISO buffer space for %d byte PTD\n",
907                                     __func__, ep->length);
908                                 continue;
909                         }
910                         ep->ptd_offset = ptd_offset;
911                         list_add_tail(&ep->active, &epq->active);
912
913                         ptd_offset = next_ptd(epq, ep);
914                         if (ptd_offset < 0) {
915                                 pr_warning("%s: req %d No more %s PTD buffers available\n", __func__,
916                                      ep->num_req, epq->name);
917                                 break;
918                         }
919                 }
920         }
921         list_for_each_entry(ep, &epq->active, active) {
922                 if (epq->active.next == &ep->active)
923                         ep->ptd.mps |= PTD_LAST_MSK;
924                 isp1362_write_ptd(isp1362_hcd, ep, epq);
925                 ptd_count++;
926         }
927
928         if (ptd_count)
929                 enable_istl_transfers(isp1362_hcd, flip);
930
931         epq->ptd_count += ptd_count;
932         if (epq->ptd_count > epq->stat_maxptds)
933                 epq->stat_maxptds = epq->ptd_count;
934
935         /* check whether the second ISTL buffer may also be filled */
936         if (!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
937               (flip ? HCBUFSTAT_ISTL0_FULL : HCBUFSTAT_ISTL1_FULL))) {
938                 fno++;
939                 ptd_count = 0;
940                 flip = 1 - flip;
941                 goto fill2;
942         }
943 }
944
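/*
 * Post-process every active endpoint whose bit is set in done_map: read the
 * completed PTD back from the chip, release its buffer blocks and hand the
 * result to postproc_ep().
 */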
945 static void finish_transfers(struct isp1362_hcd *isp1362_hcd, unsigned long done_map,
946                              struct isp1362_ep_queue *epq)
947 {
948         struct isp1362_ep *ep;
949         struct isp1362_ep *tmp;
950
951         if (list_empty(&epq->active)) {
952                 DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
953                 return;
954         }
955
956         DBG(1, "%s: Finishing %s transfers %08lx\n", __func__, epq->name, done_map);
957
958         atomic_inc(&epq->finishing);
959         list_for_each_entry_safe(ep, tmp, &epq->active, active) {
960                 int index = ep->ptd_index;
961
962                 DBG(1, "%s: Checking %s PTD[%02x] $%04x\n", __func__, epq->name,
963                     index, ep->ptd_offset);
964
965                 BUG_ON(index < 0);
966                 if (__test_and_clear_bit(index, &done_map)) {
967                         isp1362_read_ptd(isp1362_hcd, ep, epq);
968                         epq->free_ptd = index;
969                         BUG_ON(ep->num_ptds == 0);
970                         release_ptd_buffers(epq, ep);
971
972                         DBG(1, "%s: ep %p req %d removed from active list\n", __func__,
973                             ep, ep->num_req);
974                         if (!list_empty(&ep->remove_list)) {
975                                 list_del_init(&ep->remove_list);
976                                 DBG(1, "%s: ep %p removed from remove list\n", __func__, ep);
977                         }
978                         DBG(1, "%s: Postprocessing %s ep %p req %d\n", __func__, epq->name,
979                             ep, ep->num_req);
980                         postproc_ep(isp1362_hcd, ep);
981                 }
982                 if (!done_map)
983                         break;
984         }
985         if (done_map)
986                 pr_warning("%s: done_map not clear: %08lx:%08lx\n", __func__, done_map,
987                      epq->skip_map);
988         atomic_dec(&epq->finishing);
989 }
990
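/*
 * Read back and post-process every PTD of the just completed ISTL buffer;
 * ISO PTDs have no done map, so all active endpoints of this queue are taken.
 */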
991 static void finish_iso_transfers(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep_queue *epq)
992 {
993         struct isp1362_ep *ep;
994         struct isp1362_ep *tmp;
995
996         if (list_empty(&epq->active)) {
997                 DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
998                 return;
999         }
1000
1001         DBG(1, "%s: Finishing %s transfers\n", __func__, epq->name);
1002
1003         atomic_inc(&epq->finishing);
1004         list_for_each_entry_safe(ep, tmp, &epq->active, active) {
1005                 DBG(1, "%s: Checking PTD $%04x\n", __func__, ep->ptd_offset);
1006
1007                 isp1362_read_ptd(isp1362_hcd, ep, epq);
1008                 DBG(1, "%s: Postprocessing %s ep %p\n", __func__, epq->name, ep);
1009                 postproc_ep(isp1362_hcd, ep);
1010         }
1011         WARN_ON(epq->blk_size != 0);
1012         atomic_dec(&epq->finishing);
1013 }
1014
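/*
 * Top-level interrupt handler: acknowledges HCuPINT, then dispatches SOF,
 * ISTL0/1, INTL, ATL, OPR (OHCI), SUSP and CLKRDY events.  Interrupts are
 * masked on entry and isp1362_hcd->irqenb is written back on exit.
 */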
1015 static irqreturn_t isp1362_irq(struct usb_hcd *hcd)
1016 {
1017         int handled = 0;
1018         struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1019         u16 irqstat;
1020         u16 svc_mask;
1021
1022         spin_lock(&isp1362_hcd->lock);
1023
1024         BUG_ON(isp1362_hcd->irq_active++);
1025
1026         isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
1027
1028         irqstat = isp1362_read_reg16(isp1362_hcd, HCuPINT);
1029         DBG(3, "%s: got IRQ %04x:%04x\n", __func__, irqstat, isp1362_hcd->irqenb);
1030
1031         /* only handle interrupts that are currently enabled */
1032         irqstat &= isp1362_hcd->irqenb;
1033         isp1362_write_reg16(isp1362_hcd, HCuPINT, irqstat);
1034         svc_mask = irqstat;
1035
1036         if (irqstat & HCuPINT_SOF) {
1037                 isp1362_hcd->irqenb &= ~HCuPINT_SOF;
1038                 isp1362_hcd->irq_stat[ISP1362_INT_SOF]++;
1039                 handled = 1;
1040                 svc_mask &= ~HCuPINT_SOF;
1041                 DBG(3, "%s: SOF\n", __func__);
1042                 isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1043                 if (!list_empty(&isp1362_hcd->remove_list))
1044                         finish_unlinks(isp1362_hcd);
1045                 if (!list_empty(&isp1362_hcd->async) && !(irqstat & HCuPINT_ATL)) {
1046                         if (list_empty(&isp1362_hcd->atl_queue.active)) {
1047                                 start_atl_transfers(isp1362_hcd);
1048                         } else {
1049                                 isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
1050                                 isp1362_write_reg32(isp1362_hcd, HCATLSKIP,
1051                                                     isp1362_hcd->atl_queue.skip_map);
1052                                 isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
1053                         }
1054                 }
1055         }
1056
1057         if (irqstat & HCuPINT_ISTL0) {
1058                 isp1362_hcd->irq_stat[ISP1362_INT_ISTL0]++;
1059                 handled = 1;
1060                 svc_mask &= ~HCuPINT_ISTL0;
1061                 isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL0_FULL);
1062                 DBG(1, "%s: ISTL0\n", __func__);
1063                 WARN_ON((int)!!isp1362_hcd->istl_flip);
1064                 WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1065                         HCBUFSTAT_ISTL0_ACTIVE);
1066                 WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1067                         HCBUFSTAT_ISTL0_DONE));
1068                 isp1362_hcd->irqenb &= ~HCuPINT_ISTL0;
1069         }
1070
1071         if (irqstat & HCuPINT_ISTL1) {
1072                 isp1362_hcd->irq_stat[ISP1362_INT_ISTL1]++;
1073                 handled = 1;
1074                 svc_mask &= ~HCuPINT_ISTL1;
1075                 isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL1_FULL);
1076                 DBG(1, "%s: ISTL1\n", __func__);
1077                 WARN_ON(!(int)isp1362_hcd->istl_flip);
1078                 WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1079                         HCBUFSTAT_ISTL1_ACTIVE);
1080                 WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1081                         HCBUFSTAT_ISTL1_DONE));
1082                 isp1362_hcd->irqenb &= ~HCuPINT_ISTL1;
1083         }
1084
1085         if (irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) {
1086                 WARN_ON((irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) ==
1087                         (HCuPINT_ISTL0 | HCuPINT_ISTL1));
1088                 finish_iso_transfers(isp1362_hcd,
1089                                      &isp1362_hcd->istl_queue[isp1362_hcd->istl_flip]);
1090                 start_iso_transfers(isp1362_hcd);
1091                 isp1362_hcd->istl_flip = 1 - isp1362_hcd->istl_flip;
1092         }
1093
1094         if (irqstat & HCuPINT_INTL) {
1095                 u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
1096                 u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCINTLSKIP);
1097                 isp1362_hcd->irq_stat[ISP1362_INT_INTL]++;
1098
1099                 DBG(2, "%s: INTL\n", __func__);
1100
1101                 svc_mask &= ~HCuPINT_INTL;
1102
1103                 isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, skip_map | done_map);
1104                 if (~(done_map | skip_map) == 0)
1105                         /* All PTDs are finished, disable INTL processing entirely */
1106                         isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
1107
1108                 handled = 1;
1109                 WARN_ON(!done_map);
1110                 if (done_map) {
1111                         DBG(3, "%s: INTL done_map %08x\n", __func__, done_map);
1112                         finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
1113                         start_intl_transfers(isp1362_hcd);
1114                 }
1115         }
1116
1117         if (irqstat & HCuPINT_ATL) {
1118                 u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
1119                 u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCATLSKIP);
1120                 isp1362_hcd->irq_stat[ISP1362_INT_ATL]++;
1121
1122                 DBG(2, "%s: ATL\n", __func__);
1123
1124                 svc_mask &= ~HCuPINT_ATL;
1125
1126                 isp1362_write_reg32(isp1362_hcd, HCATLSKIP, skip_map | done_map);
1127                 if (~(done_map | skip_map) == 0)
1128                         isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
1129                 if (done_map) {
1130                         DBG(3, "%s: ATL done_map %08x\n", __func__, done_map);
1131                         finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
1132                         start_atl_transfers(isp1362_hcd);
1133                 }
1134                 handled = 1;
1135         }
1136
1137         if (irqstat & HCuPINT_OPR) {
1138                 u32 intstat = isp1362_read_reg32(isp1362_hcd, HCINTSTAT);
1139                 isp1362_hcd->irq_stat[ISP1362_INT_OPR]++;
1140
1141                 svc_mask &= ~HCuPINT_OPR;
1142                 DBG(2, "%s: OPR %08x:%08x\n", __func__, intstat, isp1362_hcd->intenb);
1143                 intstat &= isp1362_hcd->intenb;
1144                 if (intstat & OHCI_INTR_UE) {
1145                         pr_err("Unrecoverable error\n");
1146                         /* FIXME: reset or clean up the controller here */
1147                 }
1148                 if (intstat & OHCI_INTR_RHSC) {
1149                         isp1362_hcd->rhstatus = isp1362_read_reg32(isp1362_hcd, HCRHSTATUS);
1150                         isp1362_hcd->rhport[0] = isp1362_read_reg32(isp1362_hcd, HCRHPORT1);
1151                         isp1362_hcd->rhport[1] = isp1362_read_reg32(isp1362_hcd, HCRHPORT2);
1152                 }
1153                 if (intstat & OHCI_INTR_RD) {
1154                         pr_info("%s: RESUME DETECTED\n", __func__);
1155                         isp1362_show_reg(isp1362_hcd, HCCONTROL);
1156                         usb_hcd_resume_root_hub(hcd);
1157                 }
1158                 isp1362_write_reg32(isp1362_hcd, HCINTSTAT, intstat);
1159                 irqstat &= ~HCuPINT_OPR;
1160                 handled = 1;
1161         }
1162
1163         if (irqstat & HCuPINT_SUSP) {
1164                 isp1362_hcd->irq_stat[ISP1362_INT_SUSP]++;
1165                 handled = 1;
1166                 svc_mask &= ~HCuPINT_SUSP;
1167
1168                 pr_info("%s: SUSPEND IRQ\n", __func__);
1169         }
1170
1171         if (irqstat & HCuPINT_CLKRDY) {
1172                 isp1362_hcd->irq_stat[ISP1362_INT_CLKRDY]++;
1173                 handled = 1;
1174                 isp1362_hcd->irqenb &= ~HCuPINT_CLKRDY;
1175                 svc_mask &= ~HCuPINT_CLKRDY;
1176                 pr_info("%s: CLKRDY IRQ\n", __func__);
1177         }
1178
1179         if (svc_mask)
1180                 pr_err("%s: Unserviced interrupt(s) %04x\n", __func__, svc_mask);
1181
1182         isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
1183         isp1362_hcd->irq_active--;
1184         spin_unlock(&isp1362_hcd->lock);
1185
1186         return IRQ_RETVAL(handled);
1187 }
1188
1189 /*-------------------------------------------------------------------------*/
1190
1191 #define MAX_PERIODIC_LOAD       900     /* out of 1000 usec */
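/*
 * Pick a branch (starting slot modulo 'interval') for a new periodic
 * endpoint: the least loaded branch whose every interval-th slot of the
 * PERIODIC_SIZE table still stays under MAX_PERIODIC_LOAD after adding
 * 'load' usec; returns the branch index or -ENOSPC.
 */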
1192 static int balance(struct isp1362_hcd *isp1362_hcd, u16 interval, u16 load)
1193 {
1194         int i, branch = -ENOSPC;
1195
1196         /* search for the least loaded schedule branch of that interval
1197          * which has enough bandwidth left unreserved.
1198          */
1199         for (i = 0; i < interval; i++) {
1200                 if (branch < 0 || isp1362_hcd->load[branch] > isp1362_hcd->load[i]) {
1201                         int j;
1202
1203                         for (j = i; j < PERIODIC_SIZE; j += interval) {
1204                                 if ((isp1362_hcd->load[j] + load) > MAX_PERIODIC_LOAD) {
1205                                         pr_err("%s: new load %d load[%02x] %d max %d\n", __func__,
1206                                             load, j, isp1362_hcd->load[j], MAX_PERIODIC_LOAD);
1207                                         break;
1208                                 }
1209                         }
1210                         if (j < PERIODIC_SIZE)
1211                                 continue;
1212                         branch = i;
1213                 }
1214         }
1215         return branch;
1216 }
1217
1218 /* NB! ALL the code above this point runs with isp1362_hcd->lock
1219    held, irqs off
1220 */
1221
1222 /*-------------------------------------------------------------------------*/
1223
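/*
 * usb_hcd enqueue callback: allocate and initialize the driver-private
 * endpoint state on first use, link the URB, and put the endpoint on the
 * async or periodic schedule (periodic bandwidth is reserved via balance()).
 * Isochronous URBs are rejected up front.
 */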
1224 static int isp1362_urb_enqueue(struct usb_hcd *hcd,
1225                                struct urb *urb,
1226                                gfp_t mem_flags)
1227 {
1228         struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1229         struct usb_device *udev = urb->dev;
1230         unsigned int pipe = urb->pipe;
1231         int is_out = !usb_pipein(pipe);
1232         int type = usb_pipetype(pipe);
1233         int epnum = usb_pipeendpoint(pipe);
1234         struct usb_host_endpoint *hep = urb->ep;
1235         struct isp1362_ep *ep = NULL;
1236         unsigned long flags;
1237         int retval = 0;
1238
1239         DBG(3, "%s: urb %p\n", __func__, urb);
1240
1241         if (type == PIPE_ISOCHRONOUS) {
1242                 pr_err("Isochronous transfers not supported\n");
1243                 return -ENOSPC;
1244         }
1245
1246         URB_DBG("%s: FA %d ep%d%s %s: len %d %s%s\n", __func__,
1247                 usb_pipedevice(pipe), epnum,
1248                 is_out ? "out" : "in",
1249                 usb_pipecontrol(pipe) ? "ctrl" :
1250                         usb_pipeint(pipe) ? "int" :
1251                         usb_pipebulk(pipe) ? "bulk" :
1252                         "iso",
1253                 urb->transfer_buffer_length,
1254                 (urb->transfer_flags & URB_ZERO_PACKET) ? "ZERO_PACKET " : "",
1255                 !(urb->transfer_flags & URB_SHORT_NOT_OK) ?
1256                 "short_ok" : "");
1257
1258         /* avoid all allocations within spinlocks: request or endpoint */
1259         if (!hep->hcpriv) {
1260                 ep = kzalloc(sizeof *ep, mem_flags);
1261                 if (!ep)
1262                         return -ENOMEM;
1263         }
1264         spin_lock_irqsave(&isp1362_hcd->lock, flags);
1265
1266         /* don't submit to a dead or disabled port */
1267         if (!((isp1362_hcd->rhport[0] | isp1362_hcd->rhport[1]) &
1268               USB_PORT_STAT_ENABLE) ||
1269             !HC_IS_RUNNING(hcd->state)) {
1270                 kfree(ep);
1271                 retval = -ENODEV;
1272                 goto fail_not_linked;
1273         }
1274
1275         retval = usb_hcd_link_urb_to_ep(hcd, urb);
1276         if (retval) {
1277                 kfree(ep);
1278                 goto fail_not_linked;
1279         }
1280
1281         if (hep->hcpriv) {
1282                 ep = hep->hcpriv;
1283         } else {
1284                 INIT_LIST_HEAD(&ep->schedule);
1285                 INIT_LIST_HEAD(&ep->active);
1286                 INIT_LIST_HEAD(&ep->remove_list);
1287                 ep->udev = usb_get_dev(udev);
1288                 ep->hep = hep;
1289                 ep->epnum = epnum;
1290                 ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out);
1291                 ep->ptd_offset = -EINVAL;
1292                 ep->ptd_index = -EINVAL;
1293                 usb_settoggle(udev, epnum, is_out, 0);
1294
1295                 if (type == PIPE_CONTROL)
1296                         ep->nextpid = USB_PID_SETUP;
1297                 else if (is_out)
1298                         ep->nextpid = USB_PID_OUT;
1299                 else
1300                         ep->nextpid = USB_PID_IN;
1301
1302                 switch (type) {
1303                 case PIPE_ISOCHRONOUS:
1304                 case PIPE_INTERRUPT:
1305                         if (urb->interval > PERIODIC_SIZE)
1306                                 urb->interval = PERIODIC_SIZE;
1307                         ep->interval = urb->interval;
1308                         ep->branch = PERIODIC_SIZE;
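                        /*
                         * usb_calc_bus_time() returns an approximate per-
                         * transaction bus time in nanoseconds; dividing by
                         * 1000 keeps ep->load in microseconds, the unit used
                         * by balance() and the per-branch load[] accounting.
                         */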
1309                         ep->load = usb_calc_bus_time(udev->speed, !is_out,
1310                                                      (type == PIPE_ISOCHRONOUS),
1311                                                      usb_maxpacket(udev, pipe, is_out)) / 1000;
1312                         break;
1313                 }
1314                 hep->hcpriv = ep;
1315         }
1316         ep->num_req = isp1362_hcd->req_serial++;
1317
1318         /* maybe put endpoint into schedule */
1319         switch (type) {
1320         case PIPE_CONTROL:
1321         case PIPE_BULK:
1322                 if (list_empty(&ep->schedule)) {
1323                         DBG(1, "%s: Adding ep %p req %d to async schedule\n",
1324                                 __func__, ep, ep->num_req);
1325                         list_add_tail(&ep->schedule, &isp1362_hcd->async);
1326                 }
1327                 break;
1328         case PIPE_ISOCHRONOUS:
1329         case PIPE_INTERRUPT:
1330                 urb->interval = ep->interval;
1331
1332                 /* urb submitted for already existing EP */
1333                 if (ep->branch < PERIODIC_SIZE)
1334                         break;
1335
1336                 retval = balance(isp1362_hcd, ep->interval, ep->load);
1337                 if (retval < 0) {
1338                         pr_err("%s: balance returned %d\n", __func__, retval);
1339                         goto fail;
1340                 }
1341                 ep->branch = retval;
1342                 retval = 0;
1343                 isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
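                /*
                 * For the DBG below: (fmindex + PERIODIC_SIZE - 1) &
                 * ~(PERIODIC_SIZE - 1) rounds the current frame number up to
                 * the next PERIODIC_SIZE boundary; adding ep->branch then
                 * gives the first frame in that period which maps onto the
                 * branch slot chosen by balance().
                 */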
1344                 DBG(1, "%s: Current frame %04x branch %02x start_frame %04x(%04x)\n",
1345                     __func__, isp1362_hcd->fmindex, ep->branch,
1346                     ((isp1362_hcd->fmindex + PERIODIC_SIZE - 1) &
1347                      ~(PERIODIC_SIZE - 1)) + ep->branch,
1348                     (isp1362_hcd->fmindex & (PERIODIC_SIZE - 1)) + ep->branch);
1349
1350                 if (list_empty(&ep->schedule)) {
1351                         if (type == PIPE_ISOCHRONOUS) {
1352                                 u16 frame = isp1362_hcd->fmindex;
1353
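                                /*
                                 * Pick a start frame at least 8 frames (or
                                 * one interval) ahead of the current frame,
                                 * align it down to a multiple of the
                                 * interval, add the branch offset, and bump
                                 * it by one more interval if it ended up
                                 * behind fmindex.
                                 */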
1354                                 frame += max_t(u16, 8, ep->interval);
1355                                 frame &= ~(ep->interval - 1);
1356                                 frame |= ep->branch;
1357                                 if (frame_before(frame, isp1362_hcd->fmindex))
1358                                         frame += ep->interval;
1359                                 urb->start_frame = frame;
1360
1361                                 DBG(1, "%s: Adding ep %p to isoc schedule\n", __func__, ep);
1362                                 list_add_tail(&ep->schedule, &isp1362_hcd->isoc);
1363                         } else {
1364                                 DBG(1, "%s: Adding ep %p to periodic schedule\n", __func__, ep);
1365                                 list_add_tail(&ep->schedule, &isp1362_hcd->periodic);
1366                         }
1367                 } else
1368                         DBG(1, "%s: ep %p already scheduled\n", __func__, ep);
1369
1370                 DBG(2, "%s: load %d bandwidth %d -> %d\n", __func__,
1371                     ep->load / ep->interval, isp1362_hcd->load[ep->branch],
1372                     isp1362_hcd->load[ep->branch] + ep->load);
1373                 isp1362_hcd->load[ep->branch] += ep->load;
1374         }
1375
1376         urb->hcpriv = hep;
1377         ALIGNSTAT(isp1362_hcd, urb->transfer_buffer);
1378
1379         switch (type) {
1380         case PIPE_CONTROL:
1381         case PIPE_BULK:
1382                 start_atl_transfers(isp1362_hcd);
1383                 break;
1384         case PIPE_INTERRUPT:
1385                 start_intl_transfers(isp1362_hcd);
1386                 break;
1387         case PIPE_ISOCHRONOUS:
1388                 start_iso_transfers(isp1362_hcd);
1389                 break;
1390         default:
1391                 BUG();
1392         }
1393  fail:
1394         if (retval)
1395                 usb_hcd_unlink_urb_from_ep(hcd, urb);
1396
1397
1398  fail_not_linked:
1399         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1400         if (retval)
1401                 DBG(0, "%s: urb %p failed with %d\n", __func__, urb, retval);
1402         return retval;
1403 }
1404
1405 static int isp1362_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1406 {
1407         struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1408         struct usb_host_endpoint *hep;
1409         unsigned long flags;
1410         struct isp1362_ep *ep;
1411         int retval = 0;
1412
1413         DBG(3, "%s: urb %p\n", __func__, urb);
1414
1415         spin_lock_irqsave(&isp1362_hcd->lock, flags);
1416         retval = usb_hcd_check_unlink_urb(hcd, urb, status);
1417         if (retval)
1418                 goto done;
1419
1420         hep = urb->hcpriv;
1421
1422         if (!hep) {
1423                 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1424                 return -EIDRM;
1425         }
1426
1427         ep = hep->hcpriv;
1428         if (ep) {
1429                 /* In front of queue? */
1430                 if (ep->hep->urb_list.next == &urb->urb_list) {
1431                         if (!list_empty(&ep->active)) {
1432                                 DBG(1, "%s: urb %p ep %p req %d active PTD[%d] $%04x\n", __func__,
1433                                     urb, ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
1434                                 /* disable processing and queue PTD for removal */
1435                                 remove_ptd(isp1362_hcd, ep);
1436                                 urb = NULL;
1437                         }
1438                 }
1439                 if (urb) {
1440                         DBG(1, "%s: Finishing ep %p req %d\n", __func__, ep,
1441                             ep->num_req);
1442                         finish_request(isp1362_hcd, ep, urb, status);
1443                 } else
1444                         DBG(1, "%s: urb %p active; wait4irq\n", __func__, urb);
1445         } else {
1446                 pr_warning("%s: No EP in URB %p\n", __func__, urb);
1447                 retval = -EINVAL;
1448         }
1449 done:
1450         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1451
1452         DBG(3, "%s: exit\n", __func__);
1453
1454         return retval;
1455 }
1456
1457 static void isp1362_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
1458 {
1459         struct isp1362_ep *ep = hep->hcpriv;
1460         struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1461         unsigned long flags;
1462
1463         DBG(1, "%s: ep %p\n", __func__, ep);
1464         if (!ep)
1465                 return;
1466         spin_lock_irqsave(&isp1362_hcd->lock, flags);
1467         if (!list_empty(&hep->urb_list)) {
1468                 if (!list_empty(&ep->active) && list_empty(&ep->remove_list)) {
1469                         DBG(1, "%s: Removing ep %p req %d PTD[%d] $%04x\n", __func__,
1470                             ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
1471                         remove_ptd(isp1362_hcd, ep);
1472                         pr_info("%s: Waiting for Interrupt to clean up\n", __func__);
1473                 }
1474         }
1475         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1476         /* Wait for interrupt to clear out active list */
1477         while (!list_empty(&ep->active))
1478                 msleep(1);
1479
1480         DBG(1, "%s: Freeing EP %p\n", __func__, ep);
1481
1482         usb_put_dev(ep->udev);
1483         kfree(ep);
1484         hep->hcpriv = NULL;
1485 }
1486
1487 static int isp1362_get_frame(struct usb_hcd *hcd)
1488 {
1489         struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1490         u32 fmnum;
1491         unsigned long flags;
1492
1493         spin_lock_irqsave(&isp1362_hcd->lock, flags);
1494         fmnum = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1495         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1496
1497         return (int)fmnum;
1498 }
1499
1500 /*-------------------------------------------------------------------------*/
1501
1502 /* Adapted from ohci-hub.c */
1503 static int isp1362_hub_status_data(struct usb_hcd *hcd, char *buf)
1504 {
1505         struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1506         int ports, i, changed = 0;
1507         unsigned long flags;
1508
1509         if (!HC_IS_RUNNING(hcd->state))
1510                 return -ESHUTDOWN;
1511
1512         /* Report no status change now, if we are scheduled to be
1513            called later */
1514         if (timer_pending(&hcd->rh_timer))
1515                 return 0;
1516
1517         ports = isp1362_hcd->rhdesca & RH_A_NDP;
1518         BUG_ON(ports > 2);
1519
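        /*
         * buf is the hub status-change bitmap returned to usbcore:
         * bit 0 flags hub-level changes, bit N flags a change on port N.
         */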
1520         spin_lock_irqsave(&isp1362_hcd->lock, flags);
1521         /* init status */
1522         if (isp1362_hcd->rhstatus & (RH_HS_LPSC | RH_HS_OCIC))
1523                 buf[0] = changed = 1;
1524         else
1525                 buf[0] = 0;
1526
1527         for (i = 0; i < ports; i++) {
1528                 u32 status = isp1362_hcd->rhport[i];
1529
1530                 if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC |
1531                               RH_PS_OCIC | RH_PS_PRSC)) {
1532                         changed = 1;
1533                         buf[0] |= 1 << (i + 1);
1534                         continue;
1535                 }
1536
1537                 if (!(status & RH_PS_CCS))
1538                         continue;
1539         }
1540         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1541         return changed;
1542 }
1543
1544 static void isp1362_hub_descriptor(struct isp1362_hcd *isp1362_hcd,
1545                                    struct usb_hub_descriptor *desc)
1546 {
1547         u32 reg = isp1362_hcd->rhdesca;
1548
1549         DBG(3, "%s: enter\n", __func__);
1550
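        /*
         * USB_DT_HUB (0x29): a 9 byte hub descriptor, i.e. the 7 fixed
         * bytes plus one byte each for the DeviceRemovable and
         * PortPwrCtrlMask bitmaps (enough for up to 7 ports).
         */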
1551         desc->bDescriptorType = 0x29;
1552         desc->bDescLength = 9;
1553         desc->bHubContrCurrent = 0;
1554         desc->bNbrPorts = reg & 0x3;
1555         /* Power switching, device type, overcurrent. */
1556         desc->wHubCharacteristics = cpu_to_le16((reg >> 8) & 0x1f);
1557         DBG(0, "%s: hubcharacteristics = %02x\n", __func__, cpu_to_le16((reg >> 8) & 0x1f));
1558         desc->bPwrOn2PwrGood = (reg >> 24) & 0xff;
1559         /* two bitmaps:  ports removable, and legacy PortPwrCtrlMask */
1560         desc->bitmap[0] = desc->bNbrPorts == 1 ? 1 << 1 : 3 << 1;
1561         desc->bitmap[1] = ~0;
1562
1563         DBG(3, "%s: exit\n", __func__);
1564 }
1565
1566 /* Adapted from ohci-hub.c */
1567 static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
1568                                u16 wIndex, char *buf, u16 wLength)
1569 {
1570         struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1571         int retval = 0;
1572         unsigned long flags;
1573         unsigned long t1;
1574         int ports = isp1362_hcd->rhdesca & RH_A_NDP;
1575         u32 tmp = 0;
1576
1577         switch (typeReq) {
1578         case ClearHubFeature:
1579                 DBG(0, "ClearHubFeature: ");
1580                 switch (wValue) {
1581                 case C_HUB_OVER_CURRENT:
1582                         _DBG(0, "C_HUB_OVER_CURRENT\n");
1583                         spin_lock_irqsave(&isp1362_hcd->lock, flags);
1584                         isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_OCIC);
1585                         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
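                        /* falls through to C_HUB_LOCAL_POWER, which only logs */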
1586                 case C_HUB_LOCAL_POWER:
1587                         _DBG(0, "C_HUB_LOCAL_POWER\n");
1588                         break;
1589                 default:
1590                         goto error;
1591                 }
1592                 break;
1593         case SetHubFeature:
1594                 DBG(0, "SetHubFeature: ");
1595                 switch (wValue) {
1596                 case C_HUB_OVER_CURRENT:
1597                 case C_HUB_LOCAL_POWER:
1598                         _DBG(0, "C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n");
1599                         break;
1600                 default:
1601                         goto error;
1602                 }
1603                 break;
1604         case GetHubDescriptor:
1605                 DBG(0, "GetHubDescriptor\n");
1606                 isp1362_hub_descriptor(isp1362_hcd, (struct usb_hub_descriptor *)buf);
1607                 break;
1608         case GetHubStatus:
1609                 DBG(0, "GetHubStatus\n");
1610                 put_unaligned(cpu_to_le32(0), (__le32 *) buf);
1611                 break;
1612         case GetPortStatus:
1613 #ifndef VERBOSE
1614                 DBG(0, "GetPortStatus\n");
1615 #endif
1616                 if (!wIndex || wIndex > ports)
1617                         goto error;
1618                 tmp = isp1362_hcd->rhport[--wIndex];
1619                 put_unaligned(cpu_to_le32(tmp), (__le32 *) buf);
1620                 break;
1621         case ClearPortFeature:
1622                 DBG(0, "ClearPortFeature: ");
1623                 if (!wIndex || wIndex > ports)
1624                         goto error;
1625                 wIndex--;
1626
1627                 switch (wValue) {
1628                 case USB_PORT_FEAT_ENABLE:
1629                         _DBG(0, "USB_PORT_FEAT_ENABLE\n");
1630                         tmp = RH_PS_CCS;
1631                         break;
1632                 case USB_PORT_FEAT_C_ENABLE:
1633                         _DBG(0, "USB_PORT_FEAT_C_ENABLE\n");
1634                         tmp = RH_PS_PESC;
1635                         break;
1636                 case USB_PORT_FEAT_SUSPEND:
1637                         _DBG(0, "USB_PORT_FEAT_SUSPEND\n");
1638                         tmp = RH_PS_POCI;
1639                         break;
1640                 case USB_PORT_FEAT_C_SUSPEND:
1641                         _DBG(0, "USB_PORT_FEAT_C_SUSPEND\n");
1642                         tmp = RH_PS_PSSC;
1643                         break;
1644                 case USB_PORT_FEAT_POWER:
1645                         _DBG(0, "USB_PORT_FEAT_POWER\n");
1646                         tmp = RH_PS_LSDA;
1647
1648                         break;
1649                 case USB_PORT_FEAT_C_CONNECTION:
1650                         _DBG(0, "USB_PORT_FEAT_C_CONNECTION\n");
1651                         tmp = RH_PS_CSC;
1652                         break;
1653                 case USB_PORT_FEAT_C_OVER_CURRENT:
1654                         _DBG(0, "USB_PORT_FEAT_C_OVER_CURRENT\n");
1655                         tmp = RH_PS_OCIC;
1656                         break;
1657                 case USB_PORT_FEAT_C_RESET:
1658                         _DBG(0, "USB_PORT_FEAT_C_RESET\n");
1659                         tmp = RH_PS_PRSC;
1660                         break;
1661                 default:
1662                         goto error;
1663                 }
1664
1665                 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1666                 isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, tmp);
1667                 isp1362_hcd->rhport[wIndex] =
1668                         isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1669                 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1670                 break;
1671         case SetPortFeature:
1672                 DBG(0, "SetPortFeature: ");
1673                 if (!wIndex || wIndex > ports)
1674                         goto error;
1675                 wIndex--;
1676                 switch (wValue) {
1677                 case USB_PORT_FEAT_SUSPEND:
1678                         _DBG(0, "USB_PORT_FEAT_SUSPEND\n");
1679                         spin_lock_irqsave(&isp1362_hcd->lock, flags);
1680                         isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PSS);
1681                         isp1362_hcd->rhport[wIndex] =
1682                                 isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1683                         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1684                         break;
1685                 case USB_PORT_FEAT_POWER:
1686                         _DBG(0, "USB_PORT_FEAT_POWER\n");
1687                         spin_lock_irqsave(&isp1362_hcd->lock, flags);
1688                         isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PPS);
1689                         isp1362_hcd->rhport[wIndex] =
1690                                 isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1691                         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1692                         break;
1693                 case USB_PORT_FEAT_RESET:
1694                         _DBG(0, "USB_PORT_FEAT_RESET\n");
1695                         spin_lock_irqsave(&isp1362_hcd->lock, flags);
1696
1697                         t1 = jiffies + msecs_to_jiffies(USB_RESET_WIDTH);
1698                         while (time_before(jiffies, t1)) {
1699                                 /* spin until any current reset finishes */
1700                                 for (;;) {
1701                                         tmp = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1702                                         if (!(tmp & RH_PS_PRS))
1703                                                 break;
1704                                         udelay(500);
1705                                 }
1706                                 if (!(tmp & RH_PS_CCS))
1707                                         break;
1708                                 /* Reset lasts 10 ms according to the datasheet */
1709                                 isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, (RH_PS_PRS));
1710
1711                                 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1712                                 msleep(10);
1713                                 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1714                         }
1715
1716                         isp1362_hcd->rhport[wIndex] = isp1362_read_reg32(isp1362_hcd,
1717                                                                          HCRHPORT1 + wIndex);
1718                         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1719                         break;
1720                 default:
1721                         goto error;
1722                 }
1723                 break;
1724
1725         default:
1726  error:
1727                 /* "protocol stall" on error */
1728                 _DBG(0, "PROTOCOL STALL\n");
1729                 retval = -EPIPE;
1730         }
1731
1732         return retval;
1733 }
1734
1735 #ifdef  CONFIG_PM
1736 static int isp1362_bus_suspend(struct usb_hcd *hcd)
1737 {
1738         int status = 0;
1739         struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1740         unsigned long flags;
1741
1742         if (time_before(jiffies, isp1362_hcd->next_statechange))
1743                 msleep(5);
1744
1745         spin_lock_irqsave(&isp1362_hcd->lock, flags);
1746
1747         isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
1748         switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
1749         case OHCI_USB_RESUME:
1750                 DBG(0, "%s: resume/suspend?\n", __func__);
1751                 isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
1752                 isp1362_hcd->hc_control |= OHCI_USB_RESET;
1753                 isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1754                 /* FALL THROUGH */
1755         case OHCI_USB_RESET:
1756                 status = -EBUSY;
1757                 pr_warning("%s: needs reinit!\n", __func__);
1758                 goto done;
1759         case OHCI_USB_SUSPEND:
1760                 pr_warning("%s: already suspended?\n", __func__);
1761                 goto done;
1762         }
1763         DBG(0, "%s: suspend root hub\n", __func__);
1764
1765         /* First stop any processing */
1766         hcd->state = HC_STATE_QUIESCING;
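        /*
         * Quiesce the controller: mark all ATL/INTL PTDs as skipped, disable
         * buffer processing and chip-level interrupts, then wait for a
         * start-of-frame before reaping any transfers that already completed.
         */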
1767         if (!list_empty(&isp1362_hcd->atl_queue.active) ||
1768             !list_empty(&isp1362_hcd->intl_queue.active) ||
1769             !list_empty(&isp1362_hcd->istl_queue[0].active) ||
1770             !list_empty(&isp1362_hcd->istl_queue[1].active)) {
1771                 int limit;
1772
1773                 isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
1774                 isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
1775                 isp1362_write_reg16(isp1362_hcd, HCBUFSTAT, 0);
1776                 isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
1777                 isp1362_write_reg32(isp1362_hcd, HCINTSTAT, OHCI_INTR_SF);
1778
1779                 DBG(0, "%s: stopping schedules ...\n", __func__);
1780                 limit = 2000;
1781                 while (limit > 0) {
1782                         udelay(250);
1783                         limit -= 250;
1784                         if (isp1362_read_reg32(isp1362_hcd, HCINTSTAT) & OHCI_INTR_SF)
1785                                 break;
1786                 }
1787                 mdelay(7);
1788                 if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ATL) {
1789                         u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
1790                         finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
1791                 }
1792                 if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_INTL) {
1793                         u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
1794                         finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
1795                 }
1796                 if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL0)
1797                         finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[0]);
1798                 if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL1)
1799                         finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[1]);
1800         }
1801         DBG(0, "%s: HCINTSTAT: %08x\n", __func__,
1802                     isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
1803         isp1362_write_reg32(isp1362_hcd, HCINTSTAT,
1804                             isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
1805
1806         /* Suspend hub */
1807         isp1362_hcd->hc_control = OHCI_USB_SUSPEND;
1808         isp1362_show_reg(isp1362_hcd, HCCONTROL);
1809         isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1810         isp1362_show_reg(isp1362_hcd, HCCONTROL);
1811
1812 #if 1
1813         isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
1814         if ((isp1362_hcd->hc_control & OHCI_CTRL_HCFS) != OHCI_USB_SUSPEND) {
1815                 pr_err("%s: controller won't suspend %08x\n", __func__,
1816                     isp1362_hcd->hc_control);
1817                 status = -EBUSY;
1818         } else
1819 #endif
1820         {
1821                 /* no resumes until devices finish suspending */
1822                 isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(5);
1823         }
1824 done:
1825         if (status == 0) {
1826                 hcd->state = HC_STATE_SUSPENDED;
1827                 DBG(0, "%s: HCD suspended: %08x\n", __func__,
1828                     isp1362_read_reg32(isp1362_hcd, HCCONTROL));
1829         }
1830         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1831         return status;
1832 }
1833
1834 static int isp1362_bus_resume(struct usb_hcd *hcd)
1835 {
1836         struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1837         u32 port;
1838         unsigned long flags;
1839         int status = -EINPROGRESS;
1840
1841         if (time_before(jiffies, isp1362_hcd->next_statechange))
1842                 msleep(5);
1843
1844         spin_lock_irqsave(&isp1362_hcd->lock, flags);
1845         isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
1846         pr_info("%s: HCCONTROL: %08x\n", __func__, isp1362_hcd->hc_control);
1847         if (hcd->state == HC_STATE_RESUMING) {
1848                 pr_warning("%s: duplicate resume\n", __func__);
1849                 status = 0;
1850         } else
1851                 switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
1852                 case OHCI_USB_SUSPEND:
1853                         DBG(0, "%s: resume root hub\n", __func__);
1854                         isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
1855                         isp1362_hcd->hc_control |= OHCI_USB_RESUME;
1856                         isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1857                         break;
1858                 case OHCI_USB_RESUME:
1859                         /* HCFS changes sometime after INTR_RD */
1860                         DBG(0, "%s: remote wakeup\n", __func__);
1861                         break;
1862                 case OHCI_USB_OPER:
1863                         DBG(0, "%s: odd resume\n", __func__);
1864                         status = 0;
1865                         hcd->self.root_hub->dev.power.power_state = PMSG_ON;
1866                         break;
1867                 default:                /* RESET, we lost power */
1868                         DBG(0, "%s: root hub hardware reset\n", __func__);
1869                         status = -EBUSY;
1870                 }
1871         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1872         if (status == -EBUSY) {
1873                 DBG(0, "%s: Restarting HC\n", __func__);
1874                 isp1362_hc_stop(hcd);
1875                 return isp1362_hc_start(hcd);
1876         }
1877         if (status != -EINPROGRESS)
1878                 return status;
1879         spin_lock_irqsave(&isp1362_hcd->lock, flags);
1880         port = isp1362_read_reg32(isp1362_hcd, HCRHDESCA) & RH_A_NDP;
1881         while (port--) {
1882                 u32 stat = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + port);
1883
1884                 /* force global, not selective, resume */
1885                 if (!(stat & RH_PS_PSS)) {
1886                         DBG(0, "%s: Not Resuming RH port %d\n", __func__, port);
1887                         continue;
1888                 }
1889                 DBG(0, "%s: Resuming RH port %d\n", __func__, port);
1890                 isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + port, RH_PS_POCI);
1891         }
1892         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1893
1894         /* Some controllers (Lucent) need extra-long delays */
1895         hcd->state = HC_STATE_RESUMING;
1896         mdelay(20 /* usb 11.5.1.10 */ + 15);
1897
1898         isp1362_hcd->hc_control = OHCI_USB_OPER;
1899         spin_lock_irqsave(&isp1362_hcd->lock, flags);
1900         isp1362_show_reg(isp1362_hcd, HCCONTROL);
1901         isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1902         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1903         /* TRSMRCY: 10 ms resume recovery time */
1904         msleep(10);
1905
1906         /* keep it alive for ~5x suspend + resume costs */
1907         isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(250);
1908
1909         hcd->self.root_hub->dev.power.power_state = PMSG_ON;
1910         hcd->state = HC_STATE_RUNNING;
1911         return 0;
1912 }
1913 #else
1914 #define isp1362_bus_suspend     NULL
1915 #define isp1362_bus_resume      NULL
1916 #endif
1917
1918 /*-------------------------------------------------------------------------*/
1919
1920 #ifdef STUB_DEBUG_FILE
1921
1922 static inline void create_debug_file(struct isp1362_hcd *isp1362_hcd)
1923 {
1924 }
1925 static inline void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
1926 {
1927 }
1928
1929 #else
1930
1931 #include <linux/proc_fs.h>
1932 #include <linux/seq_file.h>
1933
1934 static void dump_irq(struct seq_file *s, char *label, u16 mask)
1935 {
1936         seq_printf(s, "%-15s %04x%s%s%s%s%s%s\n", label, mask,
1937                    mask & HCuPINT_CLKRDY ? " clkrdy" : "",
1938                    mask & HCuPINT_SUSP ? " susp" : "",
1939                    mask & HCuPINT_OPR ? " opr" : "",
1940                    mask & HCuPINT_EOT ? " eot" : "",
1941                    mask & HCuPINT_ATL ? " atl" : "",
1942                    mask & HCuPINT_SOF ? " sof" : "");
1943 }
1944
1945 static void dump_int(struct seq_file *s, char *label, u32 mask)
1946 {
1947         seq_printf(s, "%-15s %08x%s%s%s%s%s%s%s\n", label, mask,
1948                    mask & OHCI_INTR_MIE ? " MIE" : "",
1949                    mask & OHCI_INTR_RHSC ? " rhsc" : "",
1950                    mask & OHCI_INTR_FNO ? " fno" : "",
1951                    mask & OHCI_INTR_UE ? " ue" : "",
1952                    mask & OHCI_INTR_RD ? " rd" : "",
1953                    mask & OHCI_INTR_SF ? " sof" : "",
1954                    mask & OHCI_INTR_SO ? " so" : "");
1955 }
1956
1957 static void dump_ctrl(struct seq_file *s, char *label, u32 mask)
1958 {
1959         seq_printf(s, "%-15s %08x%s%s%s\n", label, mask,
1960                    mask & OHCI_CTRL_RWC ? " rwc" : "",
1961                    mask & OHCI_CTRL_RWE ? " rwe" : "",
1962                    ({
1963                            char *hcfs;
1964                            switch (mask & OHCI_CTRL_HCFS) {
1965                            case OHCI_USB_OPER:
1966                                    hcfs = " oper";
1967                                    break;
1968                            case OHCI_USB_RESET:
1969                                    hcfs = " reset";
1970                                    break;
1971                            case OHCI_USB_RESUME:
1972                                    hcfs = " resume";
1973                                    break;
1974                            case OHCI_USB_SUSPEND:
1975                                    hcfs = " suspend";
1976                                    break;
1977                            default:
1978                                    hcfs = " ?";
1979                            }
1980                            hcfs;
1981                    }));
1982 }
1983
1984 static void dump_regs(struct seq_file *s, struct isp1362_hcd *isp1362_hcd)
1985 {
1986         seq_printf(s, "HCREVISION [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCREVISION),
1987                    isp1362_read_reg32(isp1362_hcd, HCREVISION));
1988         seq_printf(s, "HCCONTROL  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCONTROL),
1989                    isp1362_read_reg32(isp1362_hcd, HCCONTROL));
1990         seq_printf(s, "HCCMDSTAT  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCMDSTAT),
1991                    isp1362_read_reg32(isp1362_hcd, HCCMDSTAT));
1992         seq_printf(s, "HCINTSTAT  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTSTAT),
1993                    isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
1994         seq_printf(s, "HCINTENB   [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTENB),
1995                    isp1362_read_reg32(isp1362_hcd, HCINTENB));
1996         seq_printf(s, "HCFMINTVL  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMINTVL),
1997                    isp1362_read_reg32(isp1362_hcd, HCFMINTVL));
1998         seq_printf(s, "HCFMREM    [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMREM),
1999                    isp1362_read_reg32(isp1362_hcd, HCFMREM));
2000         seq_printf(s, "HCFMNUM    [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMNUM),
2001                    isp1362_read_reg32(isp1362_hcd, HCFMNUM));
2002         seq_printf(s, "HCLSTHRESH [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCLSTHRESH),
2003                    isp1362_read_reg32(isp1362_hcd, HCLSTHRESH));
2004         seq_printf(s, "HCRHDESCA  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCA),
2005                    isp1362_read_reg32(isp1362_hcd, HCRHDESCA));
2006         seq_printf(s, "HCRHDESCB  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCB),
2007                    isp1362_read_reg32(isp1362_hcd, HCRHDESCB));
2008         seq_printf(s, "HCRHSTATUS [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHSTATUS),
2009                    isp1362_read_reg32(isp1362_hcd, HCRHSTATUS));
2010         seq_printf(s, "HCRHPORT1  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT1),
2011                    isp1362_read_reg32(isp1362_hcd, HCRHPORT1));
2012         seq_printf(s, "HCRHPORT2  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT2),
2013                    isp1362_read_reg32(isp1362_hcd, HCRHPORT2));
2014         seq_printf(s, "\n");
2015         seq_printf(s, "HCHWCFG    [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCHWCFG),
2016                    isp1362_read_reg16(isp1362_hcd, HCHWCFG));
2017         seq_printf(s, "HCDMACFG   [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCDMACFG),
2018                    isp1362_read_reg16(isp1362_hcd, HCDMACFG));
2019         seq_printf(s, "HCXFERCTR  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCXFERCTR),
2020                    isp1362_read_reg16(isp1362_hcd, HCXFERCTR));
2021         seq_printf(s, "HCuPINT    [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINT),
2022                    isp1362_read_reg16(isp1362_hcd, HCuPINT));
2023         seq_printf(s, "HCuPINTENB [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINTENB),
2024                    isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
2025         seq_printf(s, "HCCHIPID   [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCCHIPID),
2026                    isp1362_read_reg16(isp1362_hcd, HCCHIPID));
2027         seq_printf(s, "HCSCRATCH  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCSCRATCH),
2028                    isp1362_read_reg16(isp1362_hcd, HCSCRATCH));
2029         seq_printf(s, "HCBUFSTAT  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCBUFSTAT),
2030                    isp1362_read_reg16(isp1362_hcd, HCBUFSTAT));
2031         seq_printf(s, "HCDIRADDR  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCDIRADDR),
2032                    isp1362_read_reg32(isp1362_hcd, HCDIRADDR));
2033 #if 0
2034         seq_printf(s, "HCDIRDATA  [%02x]     %04x\n", ISP1362_REG_NO(HCDIRDATA),
2035                    isp1362_read_reg16(isp1362_hcd, HCDIRDATA));
2036 #endif
2037         seq_printf(s, "HCISTLBUFSZ[%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLBUFSZ),
2038                    isp1362_read_reg16(isp1362_hcd, HCISTLBUFSZ));
2039         seq_printf(s, "HCISTLRATE [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLRATE),
2040                    isp1362_read_reg16(isp1362_hcd, HCISTLRATE));
2041         seq_printf(s, "\n");
2042         seq_printf(s, "HCINTLBUFSZ[%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBUFSZ),
2043                    isp1362_read_reg16(isp1362_hcd, HCINTLBUFSZ));
2044         seq_printf(s, "HCINTLBLKSZ[%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBLKSZ),
2045                    isp1362_read_reg16(isp1362_hcd, HCINTLBLKSZ));
2046         seq_printf(s, "HCINTLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLDONE),
2047                    isp1362_read_reg32(isp1362_hcd, HCINTLDONE));
2048         seq_printf(s, "HCINTLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLSKIP),
2049                    isp1362_read_reg32(isp1362_hcd, HCINTLSKIP));
2050         seq_printf(s, "HCINTLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLLAST),
2051                    isp1362_read_reg32(isp1362_hcd, HCINTLLAST));
2052         seq_printf(s, "HCINTLCURR [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLCURR),
2053                    isp1362_read_reg16(isp1362_hcd, HCINTLCURR));
2054         seq_printf(s, "\n");
2055         seq_printf(s, "HCATLBUFSZ [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBUFSZ),
2056                    isp1362_read_reg16(isp1362_hcd, HCATLBUFSZ));
2057         seq_printf(s, "HCATLBLKSZ [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBLKSZ),
2058                    isp1362_read_reg16(isp1362_hcd, HCATLBLKSZ));
2059 #if 0
2060         seq_printf(s, "HCATLDONE  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDONE),
2061                    isp1362_read_reg32(isp1362_hcd, HCATLDONE));
2062 #endif
2063         seq_printf(s, "HCATLSKIP  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLSKIP),
2064                    isp1362_read_reg32(isp1362_hcd, HCATLSKIP));
2065         seq_printf(s, "HCATLLAST  [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLLAST),
2066                    isp1362_read_reg32(isp1362_hcd, HCATLLAST));
2067         seq_printf(s, "HCATLCURR  [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLCURR),
2068                    isp1362_read_reg16(isp1362_hcd, HCATLCURR));
2069         seq_printf(s, "\n");
2070         seq_printf(s, "HCATLDTC   [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTC),
2071                    isp1362_read_reg16(isp1362_hcd, HCATLDTC));
2072         seq_printf(s, "HCATLDTCTO [%02x]     %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTCTO),
2073                    isp1362_read_reg16(isp1362_hcd, HCATLDTCTO));
2074 }
2075
2076 static int proc_isp1362_show(struct seq_file *s, void *unused)
2077 {
2078         struct isp1362_hcd *isp1362_hcd = s->private;
2079         struct isp1362_ep *ep;
2080         int i;
2081
2082         seq_printf(s, "%s\n%s version %s\n",
2083                    isp1362_hcd_to_hcd(isp1362_hcd)->product_desc, hcd_name, DRIVER_VERSION);
2084
2085         /* collect statistics to help estimate potential win for
2086          * DMA engines that care about alignment (PXA)
2087          */
2088         seq_printf(s, "alignment:  16b/%ld 8b/%ld 4b/%ld 2b/%ld 1b/%ld\n",
2089                    isp1362_hcd->stat16, isp1362_hcd->stat8, isp1362_hcd->stat4,
2090                    isp1362_hcd->stat2, isp1362_hcd->stat1);
2091         seq_printf(s, "max # ptds in ATL  fifo: %d\n", isp1362_hcd->atl_queue.stat_maxptds);
2092         seq_printf(s, "max # ptds in INTL fifo: %d\n", isp1362_hcd->intl_queue.stat_maxptds);
2093         seq_printf(s, "max # ptds in ISTL fifo: %d\n",
2094                    max(isp1362_hcd->istl_queue[0].stat_maxptds,
2095                        isp1362_hcd->istl_queue[1].stat_maxptds));
2096
2097         /* FIXME: don't show the following in suspended state */
2098         spin_lock_irq(&isp1362_hcd->lock);
2099
2100         dump_irq(s, "hc_irq_enable", isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
2101         dump_irq(s, "hc_irq_status", isp1362_read_reg16(isp1362_hcd, HCuPINT));
2102         dump_int(s, "ohci_int_enable", isp1362_read_reg32(isp1362_hcd, HCINTENB));
2103         dump_int(s, "ohci_int_status", isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
2104         dump_ctrl(s, "ohci_control", isp1362_read_reg32(isp1362_hcd, HCCONTROL));
2105
2106         for (i = 0; i < NUM_ISP1362_IRQS; i++)
2107                 if (isp1362_hcd->irq_stat[i])
2108                         seq_printf(s, "%-15s: %d\n",
2109                                    ISP1362_INT_NAME(i), isp1362_hcd->irq_stat[i]);
2110
2111         dump_regs(s, isp1362_hcd);
2112         list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
2113                 struct urb *urb;
2114
2115                 seq_printf(s, "%p, ep%d%s, maxpacket %d:\n", ep, ep->epnum,
2116                            ({
2117                                    char *pid;
2118                                    switch (ep->nextpid) {
2119                                    case USB_PID_IN:
2120                                            pid = "in";
2121                                            break;
2122                                    case USB_PID_OUT:
2123                                            pid = "out";
2124                                            break;
2125                                    case USB_PID_SETUP:
2126                                            pid = "setup";
2127                                            break;
2128                                    case USB_PID_ACK:
2129                                            pid = "status";
2130                                            break;
2131                                    default:
2132                                            pid = "?";
2133                                            break;
2134                                    }
2135                                    pid; }), ep->maxpacket);
2136                 list_for_each_entry(urb, &ep->hep->urb_list, urb_list) {
2137                         seq_printf(s, "  urb%p, %d/%d\n", urb,
2138                                    urb->actual_length,
2139                                    urb->transfer_buffer_length);
2140                 }
2141         }
2142         if (!list_empty(&isp1362_hcd->async))
2143                 seq_printf(s, "\n");
2144         dump_ptd_queue(&isp1362_hcd->atl_queue);
2145
2146         seq_printf(s, "periodic size= %d\n", PERIODIC_SIZE);
2147
2148         list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
2149                 seq_printf(s, "branch:%2d load:%3d PTD[%d] $%04x:\n", ep->branch,
2150                            isp1362_hcd->load[ep->branch], ep->ptd_index, ep->ptd_offset);
2151
2152                 seq_printf(s, "   %d/%p (%sdev%d ep%d%s max %d)\n",
2153                            ep->interval, ep,
2154                            (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
2155                            ep->udev->devnum, ep->epnum,
2156                            (ep->epnum == 0) ? "" :
2157                            ((ep->nextpid == USB_PID_IN) ?
2158                             "in" : "out"), ep->maxpacket);
2159         }
2160         dump_ptd_queue(&isp1362_hcd->intl_queue);
2161
2162         seq_printf(s, "ISO:\n");
2163
2164         list_for_each_entry(ep, &isp1362_hcd->isoc, schedule) {
2165                 seq_printf(s, "   %d/%p (%sdev%d ep%d%s max %d)\n",
2166                            ep->interval, ep,
2167                            (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
2168                            ep->udev->devnum, ep->epnum,
2169                            (ep->epnum == 0) ? "" :
2170                            ((ep->nextpid == USB_PID_IN) ?
2171                             "in" : "out"), ep->maxpacket);
2172         }
2173
2174         spin_unlock_irq(&isp1362_hcd->lock);
2175         seq_printf(s, "\n");
2176
2177         return 0;
2178 }
2179
2180 static int proc_isp1362_open(struct inode *inode, struct file *file)
2181 {
2182         return single_open(file, proc_isp1362_show, PDE(inode)->data);
2183 }
2184
2185 static const struct file_operations proc_ops = {
2186         .open = proc_isp1362_open,
2187         .read = seq_read,
2188         .llseek = seq_lseek,
2189         .release = single_release,
2190 };
2191
2192 /* expect just one isp1362_hcd per system */
2193 static const char proc_filename[] = "driver/isp1362";
2194
2195 static void create_debug_file(struct isp1362_hcd *isp1362_hcd)
2196 {
2197         struct proc_dir_entry *pde;
2198
2199         pde = create_proc_entry(proc_filename, 0, NULL);
2200         if (pde == NULL) {
2201                 pr_warning("%s: Failed to create debug file '%s'\n", __func__, proc_filename);
2202                 return;
2203         }
2204
2205         pde->proc_fops = &proc_ops;
2206         pde->data = isp1362_hcd;
2207         isp1362_hcd->pde = pde;
2208 }
2209
2210 static void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
2211 {
2212         if (isp1362_hcd->pde)
2213                 remove_proc_entry(proc_filename, NULL);
2214 }
2215
2216 #endif
2217
2218 /*-------------------------------------------------------------------------*/
2219
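/*
 * Software reset: write the magic value to HCSWRES, request a host
 * controller reset via OHCI_HCR in HCCMDSTAT, then poll for up to ~20 ms
 * for the HC to clear OHCI_HCR again.
 */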
2220 static void __isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
2221 {
2222         int tmp = 20;
2223
2224         isp1362_write_reg16(isp1362_hcd, HCSWRES, HCSWRES_MAGIC);
2225         isp1362_write_reg32(isp1362_hcd, HCCMDSTAT, OHCI_HCR);
2226         while (--tmp) {
2227                 mdelay(1);
2228                 if (!(isp1362_read_reg32(isp1362_hcd, HCCMDSTAT) & OHCI_HCR))
2229                         break;
2230         }
2231         if (!tmp)
2232                 pr_err("Software reset timeout\n");
2233 }
2234
2235 static void isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
2236 {
2237         unsigned long flags;
2238
2239         spin_lock_irqsave(&isp1362_hcd->lock, flags);
2240         __isp1362_sw_reset(isp1362_hcd);
2241         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2242 }
2243
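/*
 * Partition the ISP1362's on-chip buffer memory: two ISTL halves for
 * isochronous double buffering, ISP1362_INTL_BUFFERS fixed-size blocks for
 * the interrupt (INTL) queue, and as many ATL blocks as still fit for
 * control/bulk transfers. The ATL count is capped at 32, presumably because
 * the HCATLSKIP/HCATLDONE bitmaps are 32 bits wide.
 */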
2244 static int isp1362_mem_config(struct usb_hcd *hcd)
2245 {
2246         struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2247         unsigned long flags;
2248         u32 total;
2249         u16 istl_size = ISP1362_ISTL_BUFSIZE;
2250         u16 intl_blksize = ISP1362_INTL_BLKSIZE + PTD_HEADER_SIZE;
2251         u16 intl_size = ISP1362_INTL_BUFFERS * intl_blksize;
2252         u16 atl_blksize = ISP1362_ATL_BLKSIZE + PTD_HEADER_SIZE;
2253         u16 atl_buffers = (ISP1362_BUF_SIZE - (istl_size + intl_size)) / atl_blksize;
2254         u16 atl_size;
2255         int i;
2256
2257         WARN_ON(istl_size & 3);
2258         WARN_ON(atl_blksize & 3);
2259         WARN_ON(intl_blksize & 3);
2260         WARN_ON(atl_blksize < PTD_HEADER_SIZE);
2261         WARN_ON(intl_blksize < PTD_HEADER_SIZE);
2262
2263         BUG_ON((unsigned)ISP1362_INTL_BUFFERS > 32);
2264         if (atl_buffers > 32)
2265                 atl_buffers = 32;
2266         atl_size = atl_buffers * atl_blksize;
2267         total = atl_size + intl_size + istl_size;
2268         dev_info(hcd->self.controller, "ISP1362 Memory usage:\n");
2269         dev_info(hcd->self.controller, "  ISTL:    2 * %4d:     %4d @ $%04x:$%04x\n",
2270                  istl_size / 2, istl_size, 0, istl_size / 2);
2271         dev_info(hcd->self.controller, "  INTL: %4d * (%3zu+8):  %4d @ $%04x\n",
2272                  ISP1362_INTL_BUFFERS, intl_blksize - PTD_HEADER_SIZE,
2273                  intl_size, istl_size);
2274         dev_info(hcd->self.controller, "  ATL : %4d * (%3zu+8):  %4d @ $%04x\n",
2275                  atl_buffers, atl_blksize - PTD_HEADER_SIZE,
2276                  atl_size, istl_size + intl_size);
2277         dev_info(hcd->self.controller, "  USED/FREE:   %4d      %4d\n", total,
2278                  ISP1362_BUF_SIZE - total);
2279
2280         if (total > ISP1362_BUF_SIZE) {
2281                 dev_err(hcd->self.controller, "%s: Memory requested: %d, available %d\n",
2282                         __func__, total, ISP1362_BUF_SIZE);
2283                 return -ENOMEM;
2284         }
2285
2286         total = istl_size + intl_size + atl_size;
2287         spin_lock_irqsave(&isp1362_hcd->lock, flags);
2288
2289         for (i = 0; i < 2; i++) {
2290                 isp1362_hcd->istl_queue[i].buf_start = i * istl_size / 2;
2291                 isp1362_hcd->istl_queue[i].buf_size = istl_size / 2;
2292                 isp1362_hcd->istl_queue[i].blk_size = 4;
2293                 INIT_LIST_HEAD(&isp1362_hcd->istl_queue[i].active);
2294                 snprintf(isp1362_hcd->istl_queue[i].name,
2295                          sizeof(isp1362_hcd->istl_queue[i].name), "ISTL%d", i);
2296                 DBG(3, "%s: %5s buf $%04x %d\n", __func__,
2297                      isp1362_hcd->istl_queue[i].name,
2298                      isp1362_hcd->istl_queue[i].buf_start,
2299                      isp1362_hcd->istl_queue[i].buf_size);
2300         }
2301         isp1362_write_reg16(isp1362_hcd, HCISTLBUFSZ, istl_size / 2);
2302
2303         isp1362_hcd->intl_queue.buf_start = istl_size;
2304         isp1362_hcd->intl_queue.buf_size = intl_size;
2305         isp1362_hcd->intl_queue.buf_count = ISP1362_INTL_BUFFERS;
2306         isp1362_hcd->intl_queue.blk_size = intl_blksize;
2307         isp1362_hcd->intl_queue.buf_avail = isp1362_hcd->intl_queue.buf_count;
2308         isp1362_hcd->intl_queue.skip_map = ~0;
2309         INIT_LIST_HEAD(&isp1362_hcd->intl_queue.active);
2310
2311         isp1362_write_reg16(isp1362_hcd, HCINTLBUFSZ,
2312                             isp1362_hcd->intl_queue.buf_size);
2313         isp1362_write_reg16(isp1362_hcd, HCINTLBLKSZ,
2314                             isp1362_hcd->intl_queue.blk_size - PTD_HEADER_SIZE);
2315         isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
2316         isp1362_write_reg32(isp1362_hcd, HCINTLLAST,
2317                             1 << (ISP1362_INTL_BUFFERS - 1));
2318
2319         isp1362_hcd->atl_queue.buf_start = istl_size + intl_size;
2320         isp1362_hcd->atl_queue.buf_size = atl_size;
2321         isp1362_hcd->atl_queue.buf_count = atl_buffers;
2322         isp1362_hcd->atl_queue.blk_size = atl_blksize;
2323         isp1362_hcd->atl_queue.buf_avail = isp1362_hcd->atl_queue.buf_count;
2324         isp1362_hcd->atl_queue.skip_map = ~0;
2325         INIT_LIST_HEAD(&isp1362_hcd->atl_queue.active);
2326
2327         isp1362_write_reg16(isp1362_hcd, HCATLBUFSZ,
2328                             isp1362_hcd->atl_queue.buf_size);
2329         isp1362_write_reg16(isp1362_hcd, HCATLBLKSZ,
2330                             isp1362_hcd->atl_queue.blk_size - PTD_HEADER_SIZE);
2331         isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
2332         isp1362_write_reg32(isp1362_hcd, HCATLLAST,
2333                             1 << (atl_buffers - 1));
2334
2335         snprintf(isp1362_hcd->atl_queue.name,
2336                  sizeof(isp1362_hcd->atl_queue.name), "ATL");
2337         snprintf(isp1362_hcd->intl_queue.name,
2338                  sizeof(isp1362_hcd->intl_queue.name), "INTL");
2339         DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
2340              isp1362_hcd->intl_queue.name,
2341              isp1362_hcd->intl_queue.buf_start,
2342              ISP1362_INTL_BUFFERS, isp1362_hcd->intl_queue.blk_size,
2343              isp1362_hcd->intl_queue.buf_size);
2344         DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
2345              isp1362_hcd->atl_queue.name,
2346              isp1362_hcd->atl_queue.buf_start,
2347              atl_buffers, isp1362_hcd->atl_queue.blk_size,
2348              isp1362_hcd->atl_queue.buf_size);
2349
2350         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2351
2352         return 0;
2353 }
2354
2355 static int isp1362_hc_reset(struct usb_hcd *hcd)
2356 {
2357         int ret = 0;
2358         struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2359         unsigned long t;
2360         unsigned long timeout = 100;
2361         unsigned long flags;
2362         int clkrdy = 0;
2363
2364         pr_info("%s:\n", __func__);
2365
2366         if (isp1362_hcd->board && isp1362_hcd->board->reset) {
2367                 isp1362_hcd->board->reset(hcd->self.controller, 1);
2368                 msleep(20);
2369                 if (isp1362_hcd->board->clock)
2370                         isp1362_hcd->board->clock(hcd->self.controller, 1);
2371                 isp1362_hcd->board->reset(hcd->self.controller, 0);
2372         } else
2373                 isp1362_sw_reset(isp1362_hcd);
2374
2375         /* chip has been reset. First we need to see a clock */
2376         t = jiffies + msecs_to_jiffies(timeout);
2377         while (!clkrdy && time_before_eq(jiffies, t)) {
2378                 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2379                 clkrdy = isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_CLKRDY;
2380                 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2381                 if (!clkrdy)
2382                         msleep(4);
2383         }
2384
2385         spin_lock_irqsave(&isp1362_hcd->lock, flags);
2386         isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_CLKRDY);
2387         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2388         if (!clkrdy) {
2389                 pr_err("Clock not ready after %lums\n", timeout);
2390                 ret = -ENODEV;
2391         }
2392         return ret;
2393 }
2394
2395 static void isp1362_hc_stop(struct usb_hcd *hcd)
2396 {
2397         struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2398         unsigned long flags;
2399         u32 tmp;
2400
2401         pr_info("%s:\n", __func__);
2402
2403         del_timer_sync(&hcd->rh_timer);
2404
2405         spin_lock_irqsave(&isp1362_hcd->lock, flags);
2406
2407         isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
2408
2409         /* Switch off power for all ports */
2410         tmp = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);
2411         tmp &= ~(RH_A_NPS | RH_A_PSM);
2412         isp1362_write_reg32(isp1362_hcd, HCRHDESCA, tmp);
2413         isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
2414
2415         /* Reset the chip */
2416         if (isp1362_hcd->board && isp1362_hcd->board->reset)
2417                 isp1362_hcd->board->reset(hcd->self.controller, 1);
2418         else
2419                 __isp1362_sw_reset(isp1362_hcd);
2420
2421         if (isp1362_hcd->board && isp1362_hcd->board->clock)
2422                 isp1362_hcd->board->clock(hcd->self.controller, 0);
2423
2424         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2425 }
2426
2427 #ifdef CHIP_BUFFER_TEST
2428 static int isp1362_chip_test(struct isp1362_hcd *isp1362_hcd)
2429 {
2430         int ret = 0;
2431         u16 *ref;
2432         unsigned long flags;
2433
2434         ref = kmalloc(2 * ISP1362_BUF_SIZE, GFP_KERNEL);
2435         if (ref) {
2436                 int offset;
2437                 u16 *tst = &ref[ISP1362_BUF_SIZE / 2];
2438
2439                 for (offset = 0; offset < ISP1362_BUF_SIZE / 2; offset++) {
2440                         ref[offset] = ~offset;
2441                         tst[offset] = offset;
2442                 }
2443
2444                 for (offset = 0; offset < 4; offset++) {
2445                         int j;
2446
2447                         for (j = 0; j < 8; j++) {
2448                                 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2449                                 isp1362_write_buffer(isp1362_hcd, (u8 *)ref + offset, 0, j);
2450                                 isp1362_read_buffer(isp1362_hcd, (u8 *)tst + offset, 0, j);
2451                                 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2452
2453                                 if (memcmp(ref, tst, j)) {
2454                                         ret = -ENODEV;
2455                                         pr_err("%s: memory check with %d byte offset %d failed\n",
2456                                             __func__, j, offset);
2457                                         dump_data((u8 *)ref + offset, j);
2458                                         dump_data((u8 *)tst + offset, j);
2459                                 }
2460                         }
2461                 }
2462
2463                 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2464                 isp1362_write_buffer(isp1362_hcd, ref, 0, ISP1362_BUF_SIZE);
2465                 isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
2466                 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2467
2468                 if (memcmp(ref, tst, ISP1362_BUF_SIZE)) {
2469                         ret = -ENODEV;
2470                         pr_err("%s: memory check failed\n", __func__);
2471                         dump_data((u8 *)tst, ISP1362_BUF_SIZE / 2);
2472                 }
2473
2474                 for (offset = 0; offset < 256; offset++) {
2475                         int test_size = 0;
2476
2477                         yield();
2478
2479                         memset(tst, 0, ISP1362_BUF_SIZE);
2480                         spin_lock_irqsave(&isp1362_hcd->lock, flags);
2481                         isp1362_write_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
2482                         isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
2483                         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2484                         if (memcmp(tst, tst + (ISP1362_BUF_SIZE / (2 * sizeof(*tst))),
2485                                    ISP1362_BUF_SIZE / 2)) {
2486                                 pr_err("%s: Failed to clear buffer\n", __func__);
2487                                 dump_data((u8 *)tst, ISP1362_BUF_SIZE);
2488                                 break;
2489                         }
2490                         spin_lock_irqsave(&isp1362_hcd->lock, flags);
2491                         isp1362_write_buffer(isp1362_hcd, ref, offset * 2, PTD_HEADER_SIZE);
2492                         isp1362_write_buffer(isp1362_hcd, ref + PTD_HEADER_SIZE / sizeof(*ref),
2493                                              offset * 2 + PTD_HEADER_SIZE, test_size);
2494                         isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
2495                                             PTD_HEADER_SIZE + test_size);
2496                         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2497                         if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
2498                                 dump_data((u8 *)ref, PTD_HEADER_SIZE + test_size);
2499                                 dump_data((u8 *)tst, PTD_HEADER_SIZE + test_size);
2500                                 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2501                                 isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
2502                                                     PTD_HEADER_SIZE + test_size);
2503                                 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2504                                 if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
2505                                         ret = -ENODEV;
2506                                         pr_err("%s: memory check with offset %02x failed\n",
2507                                             __func__, offset);
2508                                         break;
2509                                 }
2510                                 pr_warning("%s: memory check with offset %02x ok after second read\n",
2511                                      __func__, offset);
2512                         }
2513                 }
2514                 kfree(ref);
2515         } else {
                     ret = -ENOMEM;
             }
2516         return ret;
2517 }
2518 #endif
2519
2520 static int isp1362_hc_start(struct usb_hcd *hcd)
2521 {
2522         int ret;
2523         struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2524         struct isp1362_platform_data *board = isp1362_hcd->board;
2525         u16 hwcfg;
2526         u16 chipid;
2527         unsigned long flags;
2528
2529         pr_info("%s:\n", __func__);
2530
2531         spin_lock_irqsave(&isp1362_hcd->lock, flags);
2532         chipid = isp1362_read_reg16(isp1362_hcd, HCCHIPID);
2533         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2534
2535         if ((chipid & HCCHIPID_MASK) != HCCHIPID_MAGIC) {
2536                 pr_err("%s: Invalid chip ID %04x\n", __func__, chipid);
2537                 return -ENODEV;
2538         }
2539
2540 #ifdef CHIP_BUFFER_TEST
2541         ret = isp1362_chip_test(isp1362_hcd);
2542         if (ret)
2543                 return -ENODEV;
2544 #endif
2545         spin_lock_irqsave(&isp1362_hcd->lock, flags);
2546         /* clear interrupt status and disable all interrupt sources */
2547         isp1362_write_reg16(isp1362_hcd, HCuPINT, 0xff);
2548         isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
2549
2550         /* HW conf */
2551         hwcfg = HCHWCFG_INT_ENABLE | HCHWCFG_DBWIDTH(1);
2552         if (board->sel15Kres)
2553                 hwcfg |= HCHWCFG_PULLDOWN_DS2 |
2554                         ((MAX_ROOT_PORTS > 1) ? HCHWCFG_PULLDOWN_DS1 : 0);
2555         if (board->clknotstop)
2556                 hwcfg |= HCHWCFG_CLKNOTSTOP;
2557         if (board->oc_enable)
2558                 hwcfg |= HCHWCFG_ANALOG_OC;
2559         if (board->int_act_high)
2560                 hwcfg |= HCHWCFG_INT_POL;
2561         if (board->int_edge_triggered)
2562                 hwcfg |= HCHWCFG_INT_TRIGGER;
2563         if (board->dreq_act_high)
2564                 hwcfg |= HCHWCFG_DREQ_POL;
2565         if (board->dack_act_high)
2566                 hwcfg |= HCHWCFG_DACK_POL;
2567         isp1362_write_reg16(isp1362_hcd, HCHWCFG, hwcfg);
2568         isp1362_show_reg(isp1362_hcd, HCHWCFG);
2569         isp1362_write_reg16(isp1362_hcd, HCDMACFG, 0);
2570         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2571
2572         ret = isp1362_mem_config(hcd);
2573         if (ret)
2574                 return ret;
2575
2576         spin_lock_irqsave(&isp1362_hcd->lock, flags);
2577
2578         /* Root hub conf */
2579         isp1362_hcd->rhdesca = 0;
2580         if (board->no_power_switching)
2581                 isp1362_hcd->rhdesca |= RH_A_NPS;
2582         if (board->power_switching_mode)
2583                 isp1362_hcd->rhdesca |= RH_A_PSM;
2584         if (board->potpg)
2585                 isp1362_hcd->rhdesca |= (board->potpg << 24) & RH_A_POTPGT;
2586         else
2587                 isp1362_hcd->rhdesca |= (25 << 24) & RH_A_POTPGT;
2588
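             /*
              * Program HCRHDESCA first with the over-current protection mode
              * bit (OCPM) cleared and then set, and read back the value the
              * chip actually accepted.
              */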
2589         isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca & ~RH_A_OCPM);
2590         isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca | RH_A_OCPM);
2591         isp1362_hcd->rhdesca = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);
2592
2593         isp1362_hcd->rhdescb = RH_B_PPCM;
2594         isp1362_write_reg32(isp1362_hcd, HCRHDESCB, isp1362_hcd->rhdescb);
2595         isp1362_hcd->rhdescb = isp1362_read_reg32(isp1362_hcd, HCRHDESCB);
2596
2597         isp1362_read_reg32(isp1362_hcd, HCFMINTVL);
2598         isp1362_write_reg32(isp1362_hcd, HCFMINTVL, (FSMP(FI) << 16) | FI);
2599         isp1362_write_reg32(isp1362_hcd, HCLSTHRESH, LSTHRESH);
2600
2601         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2602
2603         isp1362_hcd->hc_control = OHCI_USB_OPER;
2604         hcd->state = HC_STATE_RUNNING;
2605
2606         spin_lock_irqsave(&isp1362_hcd->lock, flags);
2607         /* Set up interrupts */
2608         isp1362_hcd->intenb = OHCI_INTR_MIE | OHCI_INTR_RHSC | OHCI_INTR_UE;
2609         isp1362_hcd->intenb |= OHCI_INTR_RD;
2610         isp1362_hcd->irqenb = HCuPINT_OPR | HCuPINT_SUSP;
2611         isp1362_write_reg32(isp1362_hcd, HCINTENB, isp1362_hcd->intenb);
2612         isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
2613
2614         /* Go operational */
2615         isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
2616         /* enable global power */
2617         isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC | RH_HS_DRWE);
2618
2619         spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2620
2621         return 0;
2622 }
2623
2624 /*-------------------------------------------------------------------------*/
2625
2626 static struct hc_driver isp1362_hc_driver = {
2627         .description =          hcd_name,
2628         .product_desc =         "ISP1362 Host Controller",
2629         .hcd_priv_size =        sizeof(struct isp1362_hcd),
2630
2631         .irq =                  isp1362_irq,
2632         .flags =                HCD_USB11 | HCD_MEMORY,
2633
2634         .reset =                isp1362_hc_reset,
2635         .start =                isp1362_hc_start,
2636         .stop =                 isp1362_hc_stop,
2637
2638         .urb_enqueue =          isp1362_urb_enqueue,
2639         .urb_dequeue =          isp1362_urb_dequeue,
2640         .endpoint_disable =     isp1362_endpoint_disable,
2641
2642         .get_frame_number =     isp1362_get_frame,
2643
2644         .hub_status_data =      isp1362_hub_status_data,
2645         .hub_control =          isp1362_hub_control,
2646         .bus_suspend =          isp1362_bus_suspend,
2647         .bus_resume =           isp1362_bus_resume,
2648 };
2649
2650 /*-------------------------------------------------------------------------*/
2651
2652 static int __devexit isp1362_remove(struct platform_device *pdev)
2653 {
2654         struct usb_hcd *hcd = platform_get_drvdata(pdev);
2655         struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2656         struct resource *res;
2657
2658         remove_debug_file(isp1362_hcd);
2659         DBG(0, "%s: Removing HCD\n", __func__);
2660         usb_remove_hcd(hcd);
2661
2662         DBG(0, "%s: Unmapping data_reg @ %p\n", __func__,
2663             isp1362_hcd->data_reg);
2664         iounmap(isp1362_hcd->data_reg);
2665
2666         DBG(0, "%s: Unmapping addr_reg @ %p\n", __func__,
2667             isp1362_hcd->addr_reg);
2668         iounmap(isp1362_hcd->addr_reg);
2669
2670         res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2671         if (res) {
2672                 DBG(0, "%s: release mem_region: %08lx\n", __func__, (unsigned long)res->start);
2673                 release_mem_region(res->start, resource_size(res));
             }
2674
2675         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2676         if (res) {
2677                 DBG(0, "%s: release mem_region: %08lx\n", __func__, (unsigned long)res->start);
2678                 release_mem_region(res->start, resource_size(res));
             }
2679
2680         DBG(0, "%s: put_hcd\n", __func__);
2681         usb_put_hcd(hcd);
2682         DBG(0, "%s: Done\n", __func__);
2683
2684         return 0;
2685 }
2686
2687 static int __devinit isp1362_probe(struct platform_device *pdev)
2688 {
2689         struct usb_hcd *hcd;
2690         struct isp1362_hcd *isp1362_hcd;
2691         struct resource *addr, *data;
2692         void __iomem *addr_reg;
2693         void __iomem *data_reg;
2694         int irq;
2695         int retval = 0;
2696         struct resource *irq_res;
2697         unsigned int irq_flags = 0;
2698
2699         /* basic sanity checks first.  board-specific init logic should
2700          * have initialized these three resources and probably board
2701          * specific platform_data.  we don't probe for IRQs, and do only
2702          * minimal sanity checking; see the illustrative sketch below.
2703          */
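             /*
              * Illustrative sketch only, not used by the driver itself: a
              * board file would typically register this controller roughly
              * as below.  The register addresses, the IRQ macro and the
              * platform_data values are hypothetical placeholders that must
              * be replaced by the real board wiring.
              *
              *	static struct resource isp1362_resources[] = {
              *		[0] = {					// data register
              *			.start	= 0x08000000,
              *			.end	= 0x08000001,
              *			.flags	= IORESOURCE_MEM,
              *		},
              *		[1] = {					// address register
              *			.start	= 0x08000002,
              *			.end	= 0x08000003,
              *			.flags	= IORESOURCE_MEM,
              *		},
              *		[2] = {					// interrupt line
              *			.start	= BOARD_ISP1362_IRQ,
              *			.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
              *		},
              *	};
              *
              *	static struct isp1362_platform_data isp1362_priv = {
              *		.sel15Kres		= 1,
              *		.oc_enable		= 1,
              *		.int_act_high		= 0,
              *		.int_edge_triggered	= 0,
              *	};
              *
              *	static struct platform_device isp1362_device = {
              *		.name		= "isp1362-hcd",	// must match hcd_name
              *		.id		= -1,
              *		.num_resources	= ARRAY_SIZE(isp1362_resources),
              *		.resource	= isp1362_resources,
              *		.dev		= {
              *			.platform_data	= &isp1362_priv,
              *		},
              *	};
              */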
2704         if (pdev->num_resources < 3) {
2705                 retval = -ENODEV;
2706                 goto err1;
2707         }
2708
2709         data = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2710         addr = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2711         irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2712         if (!addr || !data || !irq_res) {
2713                 retval = -ENODEV;
2714                 goto err1;
2715         }
2716         irq = irq_res->start;
2717
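             /* this driver does PIO only; refuse devices that request DMA */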
2718         if (pdev->dev.dma_mask) {
2719                 DBG(1, "won't do DMA");
2720                 retval = -ENODEV;
2721                 goto err1;
2722         }
2723
2724         if (!request_mem_region(addr->start, resource_size(addr), hcd_name)) {
2725                 retval = -EBUSY;
2726                 goto err1;
2727         }
2728         addr_reg = ioremap(addr->start, resource_size(addr));
2729         if (addr_reg == NULL) {
2730                 retval = -ENOMEM;
2731                 goto err2;
2732         }
2733
2734         if (!request_mem_region(data->start, resource_size(data), hcd_name)) {
2735                 retval = -EBUSY;
2736                 goto err3;
2737         }
2738         data_reg = ioremap(data->start, resource_size(data));
2739         if (data_reg == NULL) {
2740                 retval = -ENOMEM;
2741                 goto err4;
2742         }
2743
2744         /* allocate and initialize hcd */
2745         hcd = usb_create_hcd(&isp1362_hc_driver, &pdev->dev, dev_name(&pdev->dev));
2746         if (!hcd) {
2747                 retval = -ENOMEM;
2748                 goto err5;
2749         }
2750         hcd->rsrc_start = data->start;
2751         isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2752         isp1362_hcd->data_reg = data_reg;
2753         isp1362_hcd->addr_reg = addr_reg;
2754
2755         isp1362_hcd->next_statechange = jiffies;
2756         spin_lock_init(&isp1362_hcd->lock);
2757         INIT_LIST_HEAD(&isp1362_hcd->async);
2758         INIT_LIST_HEAD(&isp1362_hcd->periodic);
2759         INIT_LIST_HEAD(&isp1362_hcd->isoc);
2760         INIT_LIST_HEAD(&isp1362_hcd->remove_list);
2761         isp1362_hcd->board = pdev->dev.platform_data;
2762 #if USE_PLATFORM_DELAY
2763         if (!isp1362_hcd->board || !isp1362_hcd->board->delay) {
2764                 dev_err(hcd->self.controller, "No platform delay function given\n");
2765                 retval = -ENODEV;
2766                 goto err6;
2767         }
2768 #endif
2769
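             /* translate the IRQ resource's trigger type into request_irq() flags */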
2770         if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
2771                 irq_flags |= IRQF_TRIGGER_RISING;
2772         if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
2773                 irq_flags |= IRQF_TRIGGER_FALLING;
2774         if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
2775                 irq_flags |= IRQF_TRIGGER_HIGH;
2776         if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
2777                 irq_flags |= IRQF_TRIGGER_LOW;
2778
2779         retval = usb_add_hcd(hcd, irq, irq_flags | IRQF_DISABLED | IRQF_SHARED);
2780         if (retval != 0)
2781                 goto err6;
2782         pr_info("%s, irq %d\n", hcd->product_desc, irq);
2783
2784         create_debug_file(isp1362_hcd);
2785
2786         return 0;
2787
2788  err6:
2789         DBG(0, "%s: Freeing dev %p\n", __func__, isp1362_hcd);
2790         usb_put_hcd(hcd);
2791  err5:
2792         DBG(0, "%s: Unmapping data_reg @ %p\n", __func__, data_reg);
2793         iounmap(data_reg);
2794  err4:
2795         DBG(0, "%s: Releasing mem region %08lx\n", __func__, (unsigned long)data->start);
2796         release_mem_region(data->start, resource_size(data));
2797  err3:
2798         DBG(0, "%s: Unmapping addr_reg @ %p\n", __func__, addr_reg);
2799         iounmap(addr_reg);
2800  err2:
2801         DBG(0, "%s: Releasing mem region %08lx\n", __func__, (unsigned long)addr->start);
2802         release_mem_region(addr->start, resource_size(addr));
2803  err1:
2804         pr_err("%s: init error, %d\n", __func__, retval);
2805
2806         return retval;
2807 }
2808
2809 #ifdef  CONFIG_PM
2810 static int isp1362_suspend(struct platform_device *pdev, pm_message_t state)
2811 {
2812         struct usb_hcd *hcd = platform_get_drvdata(pdev);
2813         struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2814         unsigned long flags;
2815         int retval = 0;
2816
2817         DBG(0, "%s: Suspending device\n", __func__);
2818
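             /*
              * On freeze, suspend the root hub; otherwise just switch off
              * global port power.
              */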
2819         if (state.event == PM_EVENT_FREEZE) {
2820                 DBG(0, "%s: Suspending root hub\n", __func__);
2821                 retval = isp1362_bus_suspend(hcd);
2822         } else {
2823                 DBG(0, "%s: Suspending RH ports\n", __func__);
2824                 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2825                 isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
2826                 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2827         }
2828         if (retval == 0)
2829                 pdev->dev.power.power_state = state;
2830         return retval;
2831 }
2832
2833 static int isp1362_resume(struct platform_device *pdev)
2834 {
2835         struct usb_hcd *hcd = platform_get_drvdata(pdev);
2836         struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2837         unsigned long flags;
2838
2839         DBG(0, "%s: Resuming\n", __func__);
2840
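             /*
              * If suspend only switched off port power, switch it back on;
              * otherwise do a full root hub resume.
              */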
2841         if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2842                 DBG(0, "%s: Resume RH ports\n", __func__);
2843                 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2844                 isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC);
2845                 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2846                 return 0;
2847         }
2848
2849         pdev->dev.power.power_state = PMSG_ON;
2850
2851         return isp1362_bus_resume(isp1362_hcd_to_hcd(isp1362_hcd));
2852 }
2853 #else
2854 #define isp1362_suspend NULL
2855 #define isp1362_resume  NULL
2856 #endif
2857
2858 static struct platform_driver isp1362_driver = {
2859         .probe = isp1362_probe,
2860         .remove = __devexit_p(isp1362_remove),
2861
2862         .suspend = isp1362_suspend,
2863         .resume = isp1362_resume,
2864         .driver = {
2865                 .name = (char *)hcd_name,
2866                 .owner = THIS_MODULE,
2867         },
2868 };
2869
2870 /*-------------------------------------------------------------------------*/
2871
2872 static int __init isp1362_init(void)
2873 {
2874         if (usb_disabled())
2875                 return -ENODEV;
2876         pr_info("driver %s, %s\n", hcd_name, DRIVER_VERSION);
2877         return platform_driver_register(&isp1362_driver);
2878 }
2879 module_init(isp1362_init);
2880
2881 static void __exit isp1362_cleanup(void)
2882 {
2883         platform_driver_unregister(&isp1362_driver);
2884 }
2885 module_exit(isp1362_cleanup);