/*

  Broadcom BCM43xx wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <mbuesch@freenet.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "bcm43xx.h"
#include "bcm43xx_dma.h"
#include "bcm43xx_main.h"
#include "bcm43xx_debugfs.h"
#include "bcm43xx_power.h"
#include "bcm43xx_xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>

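/* Number of descriptor slots that are currently not in use. */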
static inline int free_slots(struct bcm43xx_dmaring *ring)
{
        return (ring->nr_slots - ring->used_slots);
}

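/* Advance to the following slot index, wrapping around at the end of the ring. */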
static inline int next_slot(struct bcm43xx_dmaring *ring, int slot)
{
        assert(slot >= -1 && slot <= ring->nr_slots - 1);
        if (slot == ring->nr_slots - 1)
                return 0;
        return slot + 1;
}

static inline int prev_slot(struct bcm43xx_dmaring *ring, int slot)
{
        assert(slot >= 0 && slot <= ring->nr_slots - 1);
        if (slot == 0)
                return ring->nr_slots - 1;
        return slot - 1;
}

/* Request a slot for usage. */
static inline
int request_slot(struct bcm43xx_dmaring *ring)
{
        int slot;

        assert(ring->tx);
        assert(!ring->suspended);
        assert(free_slots(ring) != 0);

        slot = next_slot(ring, ring->current_slot);
        ring->current_slot = slot;
        ring->used_slots++;

        /* Check the number of available slots and suspend TX,
         * if we are running low on free slots.
         */
        if (unlikely(free_slots(ring) < ring->suspend_mark)) {
                netif_stop_queue(ring->bcm->net_dev);
                ring->suspended = 1;
        }
#ifdef CONFIG_BCM43XX_DEBUG
        if (ring->used_slots > ring->max_used_slots)
                ring->max_used_slots = ring->used_slots;
#endif /* CONFIG_BCM43XX_DEBUG */

        return slot;
}

/* Return a slot to the free slots. */
static inline
void return_slot(struct bcm43xx_dmaring *ring, int slot)
{
        assert(ring->tx);

        ring->used_slots--;

        /* Check if TX is suspended and check if we have
         * enough free slots to resume it again.
         */
        if (unlikely(ring->suspended)) {
                if (free_slots(ring) >= ring->resume_mark) {
                        ring->suspended = 0;
                        netif_wake_queue(ring->bcm->net_dev);
                }
        }
}

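/* Return the MMIO register base for the given DMA controller,
 * depending on whether the core uses the 32-bit or 64-bit DMA engine. */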
u16 bcm43xx_dmacontroller_base(int dma64bit, int controller_idx)
{
        static const u16 map64[] = {
                BCM43xx_MMIO_DMA64_BASE0,
                BCM43xx_MMIO_DMA64_BASE1,
                BCM43xx_MMIO_DMA64_BASE2,
                BCM43xx_MMIO_DMA64_BASE3,
                BCM43xx_MMIO_DMA64_BASE4,
                BCM43xx_MMIO_DMA64_BASE5,
        };
        static const u16 map32[] = {
                BCM43xx_MMIO_DMA32_BASE0,
                BCM43xx_MMIO_DMA32_BASE1,
                BCM43xx_MMIO_DMA32_BASE2,
                BCM43xx_MMIO_DMA32_BASE3,
                BCM43xx_MMIO_DMA32_BASE4,
                BCM43xx_MMIO_DMA32_BASE5,
        };

        if (dma64bit) {
                assert(controller_idx >= 0 &&
                       controller_idx < ARRAY_SIZE(map64));
                return map64[controller_idx];
        }
        assert(controller_idx >= 0 &&
               controller_idx < ARRAY_SIZE(map32));
        return map32[controller_idx];
}

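/* DMA-map a descriptor buffer for device access.
 * The mapping direction depends on whether this is a TX or an RX buffer. */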
static inline
dma_addr_t map_descbuffer(struct bcm43xx_dmaring *ring,
                          unsigned char *buf,
                          size_t len,
                          int tx)
{
        dma_addr_t dmaaddr;
        int direction = PCI_DMA_FROMDEVICE;

        if (tx)
                direction = PCI_DMA_TODEVICE;

        dmaaddr = pci_map_single(ring->bcm->pci_dev,
                                 buf, len,
                                 direction);

        return dmaaddr;
}

static inline
void unmap_descbuffer(struct bcm43xx_dmaring *ring,
                      dma_addr_t addr,
                      size_t len,
                      int tx)
{
        if (tx) {
                pci_unmap_single(ring->bcm->pci_dev,
                                 addr, len,
                                 PCI_DMA_TODEVICE);
        } else {
                pci_unmap_single(ring->bcm->pci_dev,
                                 addr, len,
                                 PCI_DMA_FROMDEVICE);
        }
}

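/* Sync an RX descriptor buffer so the CPU (or, in the function below,
 * the device) sees a consistent view of the data. Only used on RX rings. */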
static inline
void sync_descbuffer_for_cpu(struct bcm43xx_dmaring *ring,
                             dma_addr_t addr,
                             size_t len)
{
        assert(!ring->tx);

        pci_dma_sync_single_for_cpu(ring->bcm->pci_dev,
                                    addr, len, PCI_DMA_FROMDEVICE);
}

static inline
void sync_descbuffer_for_device(struct bcm43xx_dmaring *ring,
                                dma_addr_t addr,
                                size_t len)
{
        assert(!ring->tx);

        pci_dma_sync_single_for_device(ring->bcm->pci_dev,
                                       addr, len, PCI_DMA_FROMDEVICE);
}

/* Free a descriptor buffer. The caller must have unmapped it already. */
static inline
void free_descriptor_buffer(struct bcm43xx_dmaring *ring,
                            struct bcm43xx_dmadesc_meta *meta,
                            int irq_context)
{
        assert(meta->skb);
        if (irq_context)
                dev_kfree_skb_irq(meta->skb);
        else
                dev_kfree_skb(meta->skb);
        meta->skb = NULL;
}

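/* Allocate the descriptor ring memory. If pci_alloc_consistent fails
 * (it may insist on GFP_DMA), fall back to kzalloc plus a streaming mapping,
 * retrying once if the mapping lands above the device's DMA address limit. */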
static int alloc_ringmemory(struct bcm43xx_dmaring *ring)
{
        ring->descbase = pci_alloc_consistent(ring->bcm->pci_dev, BCM43xx_DMA_RINGMEMSIZE,
                                              &(ring->dmabase));
        if (!ring->descbase) {
                /* Allocation may have failed due to pci_alloc_consistent
                   insisting on use of GFP_DMA, which is more restrictive
                   than necessary...  */
                struct dma_desc *rx_ring;
                dma_addr_t rx_ring_dma;

                rx_ring = kzalloc(BCM43xx_DMA_RINGMEMSIZE, GFP_KERNEL);
                if (!rx_ring)
                        goto out_err;

                rx_ring_dma = pci_map_single(ring->bcm->pci_dev, rx_ring,
                                             BCM43xx_DMA_RINGMEMSIZE,
                                             PCI_DMA_BIDIRECTIONAL);

                if (pci_dma_mapping_error(rx_ring_dma) ||
                    rx_ring_dma + BCM43xx_DMA_RINGMEMSIZE > ring->bcm->dma_mask) {
                        /* Sigh... */
                        if (!pci_dma_mapping_error(rx_ring_dma))
                                pci_unmap_single(ring->bcm->pci_dev,
                                                 rx_ring_dma, BCM43xx_DMA_RINGMEMSIZE,
                                                 PCI_DMA_BIDIRECTIONAL);
                        rx_ring_dma = pci_map_single(ring->bcm->pci_dev,
                                                     rx_ring, BCM43xx_DMA_RINGMEMSIZE,
                                                     PCI_DMA_BIDIRECTIONAL);
                        if (pci_dma_mapping_error(rx_ring_dma) ||
                            rx_ring_dma + BCM43xx_DMA_RINGMEMSIZE > ring->bcm->dma_mask) {
                                assert(0);
                                if (!pci_dma_mapping_error(rx_ring_dma))
                                        pci_unmap_single(ring->bcm->pci_dev,
                                                         rx_ring_dma, BCM43xx_DMA_RINGMEMSIZE,
                                                         PCI_DMA_BIDIRECTIONAL);
                                goto out_err;
                        }
                }

                ring->descbase = rx_ring;
                ring->dmabase = rx_ring_dma;
        }
        memset(ring->descbase, 0, BCM43xx_DMA_RINGMEMSIZE);

        return 0;
out_err:
        printk(KERN_ERR PFX "DMA ringmemory allocation failed\n");
        return -ENOMEM;
}

static void free_ringmemory(struct bcm43xx_dmaring *ring)
{
        struct device *dev = &(ring->bcm->pci_dev->dev);

        dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
                          ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
                                   u16 mmio_base, int dma64)
{
        int i;
        u32 value;
        u16 offset;

        offset = dma64 ? BCM43xx_DMA64_RXCTL : BCM43xx_DMA32_RXCTL;
        bcm43xx_write32(bcm, mmio_base + offset, 0);
        for (i = 0; i < 1000; i++) {
                offset = dma64 ? BCM43xx_DMA64_RXSTATUS : BCM43xx_DMA32_RXSTATUS;
                value = bcm43xx_read32(bcm, mmio_base + offset);
                if (dma64) {
                        value &= BCM43xx_DMA64_RXSTAT;
                        if (value == BCM43xx_DMA64_RXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                } else {
                        value &= BCM43xx_DMA32_RXSTATE;
                        if (value == BCM43xx_DMA32_RXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                }
                udelay(10);
        }
        if (i != -1) {
                printk(KERN_ERR PFX "Error: Wait on DMA RX status timed out.\n");
                return -ENODEV;
        }

        return 0;
}

/* Reset the TX DMA channel */
int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
                                   u16 mmio_base, int dma64)
{
        int i;
        u32 value;
        u16 offset;

        for (i = 0; i < 1000; i++) {
                offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
                value = bcm43xx_read32(bcm, mmio_base + offset);
                if (dma64) {
                        value &= BCM43xx_DMA64_TXSTAT;
                        if (value == BCM43xx_DMA64_TXSTAT_DISABLED ||
                            value == BCM43xx_DMA64_TXSTAT_IDLEWAIT ||
                            value == BCM43xx_DMA64_TXSTAT_STOPPED)
                                break;
                } else {
                        value &= BCM43xx_DMA32_TXSTATE;
                        if (value == BCM43xx_DMA32_TXSTAT_DISABLED ||
                            value == BCM43xx_DMA32_TXSTAT_IDLEWAIT ||
                            value == BCM43xx_DMA32_TXSTAT_STOPPED)
                                break;
                }
                udelay(10);
        }
        offset = dma64 ? BCM43xx_DMA64_TXCTL : BCM43xx_DMA32_TXCTL;
        bcm43xx_write32(bcm, mmio_base + offset, 0);
        for (i = 0; i < 1000; i++) {
                offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
                value = bcm43xx_read32(bcm, mmio_base + offset);
                if (dma64) {
                        value &= BCM43xx_DMA64_TXSTAT;
                        if (value == BCM43xx_DMA64_TXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                } else {
                        value &= BCM43xx_DMA32_TXSTATE;
                        if (value == BCM43xx_DMA32_TXSTAT_DISABLED) {
                                i = -1;
                                break;
                        }
                }
                udelay(10);
        }
        if (i != -1) {
                printk(KERN_ERR PFX "Error: Wait on DMA TX status timed out.\n");
                return -ENODEV;
        }
        /* ensure the reset is completed. */
        udelay(300);

        return 0;
}

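/* Write one hardware descriptor: DMA address, byte count and control flags,
 * in either the 64-bit or the 32-bit descriptor layout. */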
static void fill_descriptor(struct bcm43xx_dmaring *ring,
                            struct bcm43xx_dmadesc_generic *desc,
                            dma_addr_t dmaaddr,
                            u16 bufsize,
                            int start, int end, int irq)
{
        int slot;

        slot = bcm43xx_dma_desc2idx(ring, desc);
        assert(slot >= 0 && slot < ring->nr_slots);

        if (ring->dma64) {
                u32 ctl0 = 0, ctl1 = 0;
                u32 addrlo, addrhi;
                u32 addrext;

                addrlo = (u32)(dmaaddr & 0xFFFFFFFF);
                addrhi = (((u64)dmaaddr >> 32) & ~BCM43xx_DMA64_ROUTING);
                addrext = (((u64)dmaaddr >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
                addrhi |= ring->routing;
                if (slot == ring->nr_slots - 1)
                        ctl0 |= BCM43xx_DMA64_DCTL0_DTABLEEND;
                if (start)
                        ctl0 |= BCM43xx_DMA64_DCTL0_FRAMESTART;
                if (end)
                        ctl0 |= BCM43xx_DMA64_DCTL0_FRAMEEND;
                if (irq)
                        ctl0 |= BCM43xx_DMA64_DCTL0_IRQ;
                ctl1 |= (bufsize - ring->frameoffset)
                        & BCM43xx_DMA64_DCTL1_BYTECNT;
                ctl1 |= (addrext << BCM43xx_DMA64_DCTL1_ADDREXT_SHIFT)
                        & BCM43xx_DMA64_DCTL1_ADDREXT_MASK;

                desc->dma64.control0 = cpu_to_le32(ctl0);
                desc->dma64.control1 = cpu_to_le32(ctl1);
                desc->dma64.address_low = cpu_to_le32(addrlo);
                desc->dma64.address_high = cpu_to_le32(addrhi);
        } else {
                u32 ctl;
                u32 addr;
                u32 addrext;

                addr = (u32)(dmaaddr & ~BCM43xx_DMA32_ROUTING);
                addrext = (u32)(dmaaddr & BCM43xx_DMA32_ROUTING)
                           >> BCM43xx_DMA32_ROUTING_SHIFT;
                addr |= ring->routing;
                ctl = (bufsize - ring->frameoffset)
                      & BCM43xx_DMA32_DCTL_BYTECNT;
                if (slot == ring->nr_slots - 1)
                        ctl |= BCM43xx_DMA32_DCTL_DTABLEEND;
                if (start)
                        ctl |= BCM43xx_DMA32_DCTL_FRAMESTART;
                if (end)
                        ctl |= BCM43xx_DMA32_DCTL_FRAMEEND;
                if (irq)
                        ctl |= BCM43xx_DMA32_DCTL_IRQ;
                ctl |= (addrext << BCM43xx_DMA32_DCTL_ADDREXT_SHIFT)
                       & BCM43xx_DMA32_DCTL_ADDREXT_MASK;

                desc->dma32.control = cpu_to_le32(ctl);
                desc->dma32.address = cpu_to_le32(addr);
        }
}

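/* Allocate an skb for an RX slot, DMA-map it (retrying with a GFP_DMA skb if
 * the mapping exceeds the device's addressing limit) and fill the descriptor. */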
static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
                               struct bcm43xx_dmadesc_generic *desc,
                               struct bcm43xx_dmadesc_meta *meta,
                               gfp_t gfp_flags)
{
        struct bcm43xx_rxhdr *rxhdr;
        struct bcm43xx_hwxmitstatus *xmitstat;
        dma_addr_t dmaaddr;
        struct sk_buff *skb;

        assert(!ring->tx);

        skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
        if (unlikely(!skb))
                return -ENOMEM;
        dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
        /* This hardware bug work-around adapted from the b44 driver.
           The chip may be unable to do PCI DMA to/from anything above 1GB */
        if (pci_dma_mapping_error(dmaaddr) ||
            dmaaddr + ring->rx_buffersize > ring->bcm->dma_mask) {
                /* This one has 30-bit addressing... */
                if (!pci_dma_mapping_error(dmaaddr))
                        pci_unmap_single(ring->bcm->pci_dev,
                                         dmaaddr, ring->rx_buffersize,
                                         PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(skb);
                skb = __dev_alloc_skb(ring->rx_buffersize, GFP_DMA);
                if (skb == NULL)
                        return -ENOMEM;
                dmaaddr = pci_map_single(ring->bcm->pci_dev,
                                         skb->data, ring->rx_buffersize,
                                         PCI_DMA_FROMDEVICE);
                if (pci_dma_mapping_error(dmaaddr) ||
                    dmaaddr + ring->rx_buffersize > ring->bcm->dma_mask) {
                        assert(0);
                        dev_kfree_skb_any(skb);
                        return -ENOMEM;
                }
        }
        meta->skb = skb;
        meta->dmaaddr = dmaaddr;
        skb->dev = ring->bcm->net_dev;

        fill_descriptor(ring, desc, dmaaddr,
                        ring->rx_buffersize, 0, 0, 0);

        rxhdr = (struct bcm43xx_rxhdr *)(skb->data);
        rxhdr->frame_length = 0;
        rxhdr->flags1 = 0;
        xmitstat = (struct bcm43xx_hwxmitstatus *)(skb->data);
        xmitstat->cookie = 0;

        return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring)
{
        int i, err = -ENOMEM;
        struct bcm43xx_dmadesc_generic *desc;
        struct bcm43xx_dmadesc_meta *meta;

        for (i = 0; i < ring->nr_slots; i++) {
                desc = bcm43xx_dma_idx2desc(ring, i, &meta);

                err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
                if (err)
                        goto err_unwind;
        }
        mb();
        ring->used_slots = ring->nr_slots;
        err = 0;
out:
        return err;

err_unwind:
        for (i--; i >= 0; i--) {
                desc = bcm43xx_dma_idx2desc(ring, i, &meta);

                unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
                dev_kfree_skb(meta->skb);
        }
        goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct bcm43xx_dmaring *ring)
{
        int err = 0;
        u32 value;
        u32 addrext;

        if (ring->tx) {
                if (ring->dma64) {
                        u64 ringbase = (u64)(ring->dmabase);

                        addrext = ((ringbase >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
                        value = BCM43xx_DMA64_TXENABLE;
                        value |= (addrext << BCM43xx_DMA64_TXADDREXT_SHIFT)
                                & BCM43xx_DMA64_TXADDREXT_MASK;
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL, value);
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO,
                                        (ringbase & 0xFFFFFFFF));
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI,
                                        ((ringbase >> 32) & ~BCM43xx_DMA64_ROUTING)
                                        | ring->routing);
                } else {
                        u32 ringbase = (u32)(ring->dmabase);

                        addrext = (ringbase >> BCM43xx_DMA32_ROUTING_SHIFT);
                        value = BCM43xx_DMA32_TXENABLE;
                        value |= (addrext << BCM43xx_DMA32_TXADDREXT_SHIFT)
                                & BCM43xx_DMA32_TXADDREXT_MASK;
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL, value);
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING,
                                        (ringbase & ~BCM43xx_DMA32_ROUTING)
                                        | ring->routing);
                }
        } else {
                err = alloc_initial_descbuffers(ring);
                if (err)
                        goto out;
                if (ring->dma64) {
                        u64 ringbase = (u64)(ring->dmabase);

                        addrext = ((ringbase >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
                        value = (ring->frameoffset << BCM43xx_DMA64_RXFROFF_SHIFT);
                        value |= BCM43xx_DMA64_RXENABLE;
                        value |= (addrext << BCM43xx_DMA64_RXADDREXT_SHIFT)
                                & BCM43xx_DMA64_RXADDREXT_MASK;
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_RXCTL, value);
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO,
                                        (ringbase & 0xFFFFFFFF));
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI,
                                        ((ringbase >> 32) & ~BCM43xx_DMA64_ROUTING)
                                        | ring->routing);
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX, 200);
                } else {
                        u32 ringbase = (u32)(ring->dmabase);

                        addrext = (ringbase >> BCM43xx_DMA32_ROUTING_SHIFT);
                        value = (ring->frameoffset << BCM43xx_DMA32_RXFROFF_SHIFT);
                        value |= BCM43xx_DMA32_RXENABLE;
                        value |= (addrext << BCM43xx_DMA32_RXADDREXT_SHIFT)
                                & BCM43xx_DMA32_RXADDREXT_MASK;
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_RXCTL, value);
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING,
                                        (ringbase & ~BCM43xx_DMA32_ROUTING)
                                        | ring->routing);
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX, 200);
                }
        }

out:
        return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct bcm43xx_dmaring *ring)
{
        if (ring->tx) {
                bcm43xx_dmacontroller_tx_reset(ring->bcm, ring->mmio_base, ring->dma64);
                if (ring->dma64) {
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO, 0);
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI, 0);
                } else
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING, 0);
        } else {
                bcm43xx_dmacontroller_rx_reset(ring->bcm, ring->mmio_base, ring->dma64);
                if (ring->dma64) {
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO, 0);
                        bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI, 0);
                } else
                        bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING, 0);
        }
}

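/* Unmap and free every descriptor buffer still attached to the ring. */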
static void free_all_descbuffers(struct bcm43xx_dmaring *ring)
{
        struct bcm43xx_dmadesc_generic *desc;
        struct bcm43xx_dmadesc_meta *meta;
        int i;

        if (!ring->used_slots)
                return;
        for (i = 0; i < ring->nr_slots; i++) {
                desc = bcm43xx_dma_idx2desc(ring, i, &meta);

                if (!meta->skb) {
                        assert(ring->tx);
                        continue;
                }
                if (ring->tx) {
                        unmap_descbuffer(ring, meta->dmaaddr,
                                         meta->skb->len, 1);
                } else {
                        unmap_descbuffer(ring, meta->dmaaddr,
                                         ring->rx_buffersize, 0);
                }
                free_descriptor_buffer(ring, meta, 0);
        }
}

/* Main initialization function. */
static
struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_private *bcm,
                                               int controller_index,
                                               int for_tx,
                                               int dma64)
{
        struct bcm43xx_dmaring *ring;
        int err;
        int nr_slots;

        ring = kzalloc(sizeof(*ring), GFP_KERNEL);
        if (!ring)
                goto out;

        nr_slots = BCM43xx_RXRING_SLOTS;
        if (for_tx)
                nr_slots = BCM43xx_TXRING_SLOTS;

        ring->meta = kcalloc(nr_slots, sizeof(struct bcm43xx_dmadesc_meta),
                             GFP_KERNEL);
        if (!ring->meta)
                goto err_kfree_ring;

        ring->routing = BCM43xx_DMA32_CLIENTTRANS;
        if (dma64)
                ring->routing = BCM43xx_DMA64_CLIENTTRANS;
#ifdef CONFIG_BCM947XX
        if (bcm->pci_dev->bus->number == 0)
                ring->routing = dma64 ? BCM43xx_DMA64_NOTRANS : BCM43xx_DMA32_NOTRANS;
#endif

        ring->bcm = bcm;
        ring->nr_slots = nr_slots;
        ring->suspend_mark = ring->nr_slots * BCM43xx_TXSUSPEND_PERCENT / 100;
        ring->resume_mark = ring->nr_slots * BCM43xx_TXRESUME_PERCENT / 100;
        assert(ring->suspend_mark < ring->resume_mark);
        ring->mmio_base = bcm43xx_dmacontroller_base(dma64, controller_index);
        ring->index = controller_index;
        ring->dma64 = !!dma64;
        if (for_tx) {
                ring->tx = 1;
                ring->current_slot = -1;
        } else {
                if (ring->index == 0) {
                        ring->rx_buffersize = BCM43xx_DMA0_RX_BUFFERSIZE;
                        ring->frameoffset = BCM43xx_DMA0_RX_FRAMEOFFSET;
                } else if (ring->index == 3) {
                        ring->rx_buffersize = BCM43xx_DMA3_RX_BUFFERSIZE;
                        ring->frameoffset = BCM43xx_DMA3_RX_FRAMEOFFSET;
                } else
                        assert(0);
        }

        err = alloc_ringmemory(ring);
        if (err)
                goto err_kfree_meta;
        err = dmacontroller_setup(ring);
        if (err)
                goto err_free_ringmemory;
        return ring;

out:
        printk(KERN_ERR PFX "Error in bcm43xx_setup_dmaring\n");
        return ring;

err_free_ringmemory:
        free_ringmemory(ring);
err_kfree_meta:
        kfree(ring->meta);
err_kfree_ring:
        kfree(ring);
        ring = NULL;
        goto out;
}

/* Main cleanup function. */
static void bcm43xx_destroy_dmaring(struct bcm43xx_dmaring *ring)
{
        if (!ring)
                return;

        dprintk(KERN_INFO PFX "DMA-%s 0x%04X (%s) max used slots: %d/%d\n",
                (ring->dma64) ? "64" : "32",
                ring->mmio_base,
                (ring->tx) ? "TX" : "RX",
                ring->max_used_slots, ring->nr_slots);
        /* Device IRQs are disabled prior to entering this function,
         * so there is no need to take care of concurrency with the rx handler.
         */
        dmacontroller_cleanup(ring);
        free_all_descbuffers(ring);
        free_ringmemory(ring);

        kfree(ring->meta);
        kfree(ring);
}

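/* Tear down all DMA rings of the current core. */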
void bcm43xx_dma_free(struct bcm43xx_private *bcm)
{
        struct bcm43xx_dma *dma;

        if (bcm43xx_using_pio(bcm))
                return;
        dma = bcm43xx_current_dma(bcm);

        bcm43xx_destroy_dmaring(dma->rx_ring3);
        dma->rx_ring3 = NULL;
        bcm43xx_destroy_dmaring(dma->rx_ring0);
        dma->rx_ring0 = NULL;

        bcm43xx_destroy_dmaring(dma->tx_ring5);
        dma->tx_ring5 = NULL;
        bcm43xx_destroy_dmaring(dma->tx_ring4);
        dma->tx_ring4 = NULL;
        bcm43xx_destroy_dmaring(dma->tx_ring3);
        dma->tx_ring3 = NULL;
        bcm43xx_destroy_dmaring(dma->tx_ring2);
        dma->tx_ring2 = NULL;
        bcm43xx_destroy_dmaring(dma->tx_ring1);
        dma->tx_ring1 = NULL;
        bcm43xx_destroy_dmaring(dma->tx_ring0);
        dma->tx_ring0 = NULL;
}

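/* Set the PCI DMA masks and bring up all TX rings plus the needed RX ring(s).
 * On failure, fall back to PIO if it is compiled in. */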
int bcm43xx_dma_init(struct bcm43xx_private *bcm)
{
        struct bcm43xx_dma *dma = bcm43xx_current_dma(bcm);
        struct bcm43xx_dmaring *ring;
        int err = -ENOMEM;
        int dma64 = 0;

        bcm->dma_mask = bcm43xx_get_supported_dma_mask(bcm);
        if (bcm->dma_mask == DMA_64BIT_MASK)
                dma64 = 1;
        err = pci_set_dma_mask(bcm->pci_dev, bcm->dma_mask);
        if (err)
                goto no_dma;
        err = pci_set_consistent_dma_mask(bcm->pci_dev, bcm->dma_mask);
        if (err)
                goto no_dma;

        /* setup TX DMA channels. */
        ring = bcm43xx_setup_dmaring(bcm, 0, 1, dma64);
        if (!ring)
                goto out;
        dma->tx_ring0 = ring;

        ring = bcm43xx_setup_dmaring(bcm, 1, 1, dma64);
        if (!ring)
                goto err_destroy_tx0;
        dma->tx_ring1 = ring;

        ring = bcm43xx_setup_dmaring(bcm, 2, 1, dma64);
        if (!ring)
                goto err_destroy_tx1;
        dma->tx_ring2 = ring;

        ring = bcm43xx_setup_dmaring(bcm, 3, 1, dma64);
        if (!ring)
                goto err_destroy_tx2;
        dma->tx_ring3 = ring;

        ring = bcm43xx_setup_dmaring(bcm, 4, 1, dma64);
        if (!ring)
                goto err_destroy_tx3;
        dma->tx_ring4 = ring;

        ring = bcm43xx_setup_dmaring(bcm, 5, 1, dma64);
        if (!ring)
                goto err_destroy_tx4;
        dma->tx_ring5 = ring;

        /* setup RX DMA channels. */
        ring = bcm43xx_setup_dmaring(bcm, 0, 0, dma64);
        if (!ring)
                goto err_destroy_tx5;
        dma->rx_ring0 = ring;

        if (bcm->current_core->rev < 5) {
                ring = bcm43xx_setup_dmaring(bcm, 3, 0, dma64);
                if (!ring)
                        goto err_destroy_rx0;
                dma->rx_ring3 = ring;
        }

        dprintk(KERN_INFO PFX "%d-bit DMA initialized\n",
                (bcm->dma_mask == DMA_64BIT_MASK) ? 64 :
                (bcm->dma_mask == DMA_32BIT_MASK) ? 32 : 30);
        err = 0;
out:
        return err;

err_destroy_rx0:
        bcm43xx_destroy_dmaring(dma->rx_ring0);
        dma->rx_ring0 = NULL;
err_destroy_tx5:
        bcm43xx_destroy_dmaring(dma->tx_ring5);
        dma->tx_ring5 = NULL;
err_destroy_tx4:
        bcm43xx_destroy_dmaring(dma->tx_ring4);
        dma->tx_ring4 = NULL;
err_destroy_tx3:
        bcm43xx_destroy_dmaring(dma->tx_ring3);
        dma->tx_ring3 = NULL;
err_destroy_tx2:
        bcm43xx_destroy_dmaring(dma->tx_ring2);
        dma->tx_ring2 = NULL;
err_destroy_tx1:
        bcm43xx_destroy_dmaring(dma->tx_ring1);
        dma->tx_ring1 = NULL;
err_destroy_tx0:
        bcm43xx_destroy_dmaring(dma->tx_ring0);
        dma->tx_ring0 = NULL;
no_dma:
#ifdef CONFIG_BCM43XX_PIO
        printk(KERN_WARNING PFX "DMA not supported on this device."
                                " Falling back to PIO.\n");
        bcm->__using_pio = 1;
        return -ENOSYS;
#else
        printk(KERN_ERR PFX "FATAL: DMA not supported and PIO not configured. "
                            "Please recompile the driver with PIO support.\n");
        return -ENODEV;
#endif /* CONFIG_BCM43XX_PIO */
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct bcm43xx_dmaring *ring,
                           int slot)
{
        u16 cookie = 0x1000;

        /* Use the upper 4 bits of the cookie as
         * DMA controller ID and store the slot number
         * in the lower 12 bits.
         * Note that the cookie must never be 0, as this
         * is a special value used in RX path.
         */
        switch (ring->index) {
        case 0:
                cookie = 0xA000;
                break;
        case 1:
                cookie = 0xB000;
                break;
        case 2:
                cookie = 0xC000;
                break;
        case 3:
                cookie = 0xD000;
                break;
        case 4:
                cookie = 0xE000;
                break;
        case 5:
                cookie = 0xF000;
                break;
        }
        assert(((u16)slot & 0xF000) == 0x0000);
        cookie |= (u16)slot;

        return cookie;
}

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
                                      u16 cookie, int *slot)
{
        struct bcm43xx_dma *dma = bcm43xx_current_dma(bcm);
        struct bcm43xx_dmaring *ring = NULL;

        switch (cookie & 0xF000) {
        case 0xA000:
                ring = dma->tx_ring0;
                break;
        case 0xB000:
                ring = dma->tx_ring1;
                break;
        case 0xC000:
                ring = dma->tx_ring2;
                break;
        case 0xD000:
                ring = dma->tx_ring3;
                break;
        case 0xE000:
                ring = dma->tx_ring4;
                break;
        case 0xF000:
                ring = dma->tx_ring5;
                break;
        default:
                assert(0);
        }
        *slot = (cookie & 0x0FFF);
        assert(*slot >= 0 && *slot < ring->nr_slots);

        return ring;
}

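/* Kick the hardware: advance the TX descriptor index register past the
 * last slot of the new frame so the controller starts transmitting it. */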
static void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
                                  int slot)
{
        u16 offset;
        int descsize;

        /* Everything is ready to start. Buffers are DMA mapped and
         * associated with slots.
         * "slot" is the last slot of the new frame we want to transmit.
         * Close your seat belts now, please.
         */
        wmb();
        slot = next_slot(ring, slot);
        offset = (ring->dma64) ? BCM43xx_DMA64_TXINDEX : BCM43xx_DMA32_TXINDEX;
        descsize = (ring->dma64) ? sizeof(struct bcm43xx_dmadesc64)
                : sizeof(struct bcm43xx_dmadesc32);
        bcm43xx_dma_write(ring, offset,
                        (u32)(slot * descsize));
}

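/* Map one fragment for TX: claim a slot, prepend the device TX header,
 * DMA-map the data (bouncing through a GFP_DMA skb if needed), fill the
 * descriptor and poke the hardware. */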
static void dma_tx_fragment(struct bcm43xx_dmaring *ring,
                            struct sk_buff *skb,
                            u8 cur_frag)
{
        int slot;
        struct bcm43xx_dmadesc_generic *desc;
        struct bcm43xx_dmadesc_meta *meta;
        dma_addr_t dmaaddr;
        struct sk_buff *bounce_skb;

        assert(skb_shinfo(skb)->nr_frags == 0);

        slot = request_slot(ring);
        desc = bcm43xx_dma_idx2desc(ring, slot, &meta);

        /* Add a device specific TX header. */
        assert(skb_headroom(skb) >= sizeof(struct bcm43xx_txhdr));
        /* Reserve enough headroom for the device tx header. */
        __skb_push(skb, sizeof(struct bcm43xx_txhdr));
        /* Now calculate and add the tx header.
         * The tx header includes the PLCP header.
         */
        bcm43xx_generate_txhdr(ring->bcm,
                               (struct bcm43xx_txhdr *)skb->data,
                               skb->data + sizeof(struct bcm43xx_txhdr),
                               skb->len - sizeof(struct bcm43xx_txhdr),
                               (cur_frag == 0),
                               generate_cookie(ring, slot));
        dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
        if (dma_mapping_error(dmaaddr) || dmaaddr + skb->len > ring->bcm->dma_mask) {
                /* chip cannot handle DMA to/from > 1GB, use bounce buffer (copied from b44 driver) */
                if (!dma_mapping_error(dmaaddr))
                        unmap_descbuffer(ring, dmaaddr, skb->len, 1);
                bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC|GFP_DMA);
                if (!bounce_skb)
                        return;
                dmaaddr = map_descbuffer(ring, bounce_skb->data, bounce_skb->len, 1);
                if (dma_mapping_error(dmaaddr) || dmaaddr + skb->len > ring->bcm->dma_mask) {
                        if (!dma_mapping_error(dmaaddr))
                                unmap_descbuffer(ring, dmaaddr, skb->len, 1);
                        dev_kfree_skb_any(bounce_skb);
                        assert(0);
                        return;
                }
                skb_copy_from_linear_data(skb, skb_put(bounce_skb, skb->len),
                                          skb->len);
                dev_kfree_skb_any(skb);
                skb = bounce_skb;
        }

        meta->skb = skb;
        meta->dmaaddr = dmaaddr;

        fill_descriptor(ring, desc, dmaaddr,
                        skb->len, 1, 1, 1);

        /* Now transfer the whole frame. */
        dmacontroller_poke_tx(ring, slot);
}

int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
                   struct ieee80211_txb *txb)
{
        /* We just received a packet from the kernel network subsystem.
         * Add headers and DMA map the memory. Poke
         * the device to send the stuff.
         * Note that this is called from atomic context.
         */
        struct bcm43xx_dmaring *ring = bcm43xx_current_dma(bcm)->tx_ring1;
        u8 i;
        struct sk_buff *skb;

        assert(ring->tx);
        if (unlikely(free_slots(ring) < txb->nr_frags)) {
                /* The queue should be stopped,
                 * if we are low on free slots.
                 * If this ever triggers, we have to lower the suspend_mark.
                 */
                dprintkl(KERN_ERR PFX "Out of DMA descriptor slots!\n");
                return -ENOMEM;
        }

        for (i = 0; i < txb->nr_frags; i++) {
                skb = txb->fragments[i];
                /* Take skb from ieee80211_txb_free */
                txb->fragments[i] = NULL;
                dma_tx_fragment(ring, skb, i);
        }
        ieee80211_txb_free(txb);

        return 0;
}

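/* Handle a TX status report from the hardware: walk all slots belonging to
 * the completed frame, unmap and free their buffers and return the slots. */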
void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
                                   struct bcm43xx_xmitstatus *status)
{
        struct bcm43xx_dmaring *ring;
        struct bcm43xx_dmadesc_generic *desc;
        struct bcm43xx_dmadesc_meta *meta;
        int is_last_fragment;
        int slot;
        u32 tmp;

        ring = parse_cookie(bcm, status->cookie, &slot);
        assert(ring);
        assert(ring->tx);
        while (1) {
                assert(slot >= 0 && slot < ring->nr_slots);
                desc = bcm43xx_dma_idx2desc(ring, slot, &meta);

                if (ring->dma64) {
                        tmp = le32_to_cpu(desc->dma64.control0);
                        is_last_fragment = !!(tmp & BCM43xx_DMA64_DCTL0_FRAMEEND);
                } else {
                        tmp = le32_to_cpu(desc->dma32.control);
                        is_last_fragment = !!(tmp & BCM43xx_DMA32_DCTL_FRAMEEND);
                }
                unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
                free_descriptor_buffer(ring, meta, 1);
                /* Everything belonging to the slot is unmapped
                 * and freed, so we can return it.
                 */
                return_slot(ring, slot);

                if (is_last_fragment)
                        break;
                slot = next_slot(ring, slot);
        }
        bcm->stats.last_tx = jiffies;
}

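/* Process one filled RX slot: either consume an xmit status report (ring 3)
 * or hand a received frame to the 802.11 stack and re-arm the slot. */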
static void dma_rx(struct bcm43xx_dmaring *ring,
                   int *slot)
{
        struct bcm43xx_dmadesc_generic *desc;
        struct bcm43xx_dmadesc_meta *meta;
        struct bcm43xx_rxhdr *rxhdr;
        struct sk_buff *skb;
        u16 len;
        int err;
        dma_addr_t dmaaddr;

        desc = bcm43xx_dma_idx2desc(ring, *slot, &meta);

        sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
        skb = meta->skb;

        if (ring->index == 3) {
                /* We received an xmit status. */
                struct bcm43xx_hwxmitstatus *hw = (struct bcm43xx_hwxmitstatus *)skb->data;
                struct bcm43xx_xmitstatus stat;
                int i = 0;

                stat.cookie = le16_to_cpu(hw->cookie);
                while (stat.cookie == 0) {
                        if (unlikely(++i >= 10000)) {
                                assert(0);
                                break;
                        }
                        udelay(2);
                        barrier();
                        stat.cookie = le16_to_cpu(hw->cookie);
                }
                stat.flags = hw->flags;
                stat.cnt1 = hw->cnt1;
                stat.cnt2 = hw->cnt2;
                stat.seq = le16_to_cpu(hw->seq);
                stat.unknown = le16_to_cpu(hw->unknown);

                bcm43xx_debugfs_log_txstat(ring->bcm, &stat);
                bcm43xx_dma_handle_xmitstatus(ring->bcm, &stat);
                /* recycle the descriptor buffer. */
                sync_descbuffer_for_device(ring, meta->dmaaddr, ring->rx_buffersize);

                return;
        }
        rxhdr = (struct bcm43xx_rxhdr *)skb->data;
        len = le16_to_cpu(rxhdr->frame_length);
        if (len == 0) {
                int i = 0;

                do {
                        udelay(2);
                        barrier();
                        len = le16_to_cpu(rxhdr->frame_length);
                } while (len == 0 && i++ < 5);
                if (unlikely(len == 0)) {
                        /* recycle the descriptor buffer. */
                        sync_descbuffer_for_device(ring, meta->dmaaddr,
                                                   ring->rx_buffersize);
                        goto drop;
                }
        }
        if (unlikely(len > ring->rx_buffersize)) {
                /* The data did not fit into one descriptor buffer
                 * and is split over multiple buffers.
                 * This should never happen, as we try to allocate buffers
                 * big enough. So simply ignore this packet.
                 */
                int cnt = 0;
                s32 tmp = len;

                while (1) {
                        desc = bcm43xx_dma_idx2desc(ring, *slot, &meta);
                        /* recycle the descriptor buffer. */
                        sync_descbuffer_for_device(ring, meta->dmaaddr,
                                                   ring->rx_buffersize);
                        *slot = next_slot(ring, *slot);
                        cnt++;
                        tmp -= ring->rx_buffersize;
                        if (tmp <= 0)
                                break;
                }
                printkl(KERN_ERR PFX "DMA RX buffer too small "
                        "(len: %u, buffer: %u, nr-dropped: %d)\n",
                        len, ring->rx_buffersize, cnt);
                goto drop;
        }
        len -= IEEE80211_FCS_LEN;

        dmaaddr = meta->dmaaddr;
        err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
        if (unlikely(err)) {
                dprintkl(KERN_ERR PFX "DMA RX: setup_rx_descbuffer() failed\n");
                sync_descbuffer_for_device(ring, dmaaddr,
                                           ring->rx_buffersize);
                goto drop;
        }

        unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
        skb_put(skb, len + ring->frameoffset);
        skb_pull(skb, ring->frameoffset);

        err = bcm43xx_rx(ring->bcm, skb, rxhdr);
        if (err) {
                dev_kfree_skb_irq(skb);
                goto drop;
        }

drop:
        return;
}

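/* RX interrupt path: process every slot the hardware has filled since the
 * last run and tell the controller where the software index now stands. */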
void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
{
        u32 status;
        u16 descptr;
        int slot, current_slot;
#ifdef CONFIG_BCM43XX_DEBUG
        int used_slots = 0;
#endif

        assert(!ring->tx);
        if (ring->dma64) {
                status = bcm43xx_dma_read(ring, BCM43xx_DMA64_RXSTATUS);
                descptr = (status & BCM43xx_DMA64_RXSTATDPTR);
                current_slot = descptr / sizeof(struct bcm43xx_dmadesc64);
        } else {
                status = bcm43xx_dma_read(ring, BCM43xx_DMA32_RXSTATUS);
                descptr = (status & BCM43xx_DMA32_RXDPTR);
                current_slot = descptr / sizeof(struct bcm43xx_dmadesc32);
        }
        assert(current_slot >= 0 && current_slot < ring->nr_slots);

        slot = ring->current_slot;
        for ( ; slot != current_slot; slot = next_slot(ring, slot)) {
                dma_rx(ring, &slot);
#ifdef CONFIG_BCM43XX_DEBUG
                if (++used_slots > ring->max_used_slots)
                        ring->max_used_slots = used_slots;
#endif
        }
        if (ring->dma64) {
                bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX,
                                (u32)(slot * sizeof(struct bcm43xx_dmadesc64)));
        } else {
                bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX,
                                (u32)(slot * sizeof(struct bcm43xx_dmadesc32)));
        }
        ring->current_slot = slot;
}

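/* Suspend (and, below, resume) transmission on a TX ring by toggling the
 * controller's TXSUSPEND bit, with matching power-saving control updates. */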
void bcm43xx_dma_tx_suspend(struct bcm43xx_dmaring *ring)
{
        assert(ring->tx);
        bcm43xx_power_saving_ctl_bits(ring->bcm, -1, 1);
        if (ring->dma64) {
                bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
                                bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
                                | BCM43xx_DMA64_TXSUSPEND);
        } else {
                bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
                                bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
                                | BCM43xx_DMA32_TXSUSPEND);
        }
}

void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring)
{
        assert(ring->tx);
        if (ring->dma64) {
                bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
                                bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
                                & ~BCM43xx_DMA64_TXSUSPEND);
        } else {
                bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
                                bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
                                & ~BCM43xx_DMA32_TXSUSPEND);
        }
        bcm43xx_power_saving_ctl_bits(ring->bcm, -1, -1);
}