1 /*
2  * nicstar.c
3  *
4  * Device driver supporting CBR for IDT 77201/77211 "NICStAR" based cards.
5  *
6  * IMPORTANT: The included file nicstarmac.c was NOT WRITTEN BY ME.
7  *            It was taken from the frle-0.22 device driver.
8  *            As the file doesn't have a copyright notice, in the file
9  *            nicstarmac.copyright I put the copyright notice from the
10  *            frle-0.22 device driver.
11  *            Some code is based on the nicstar driver by M. Welsh.
12  *
13  * Author: Rui Prior (rprior@inescn.pt)
14  * PowerPC support by Jay Talbott (jay_talbott@mcg.mot.com) April 1999
15  *
16  *
17  * (C) INESC 1999
18  */
19
20 /*
21  * IMPORTANT INFORMATION
22  *
23  * There are currently three types of spinlocks:
24  *
25  * 1 - Per card interrupt spinlock (to protect structures and such)
26  * 2 - Per SCQ scq spinlock
27  * 3 - Per card resource spinlock (to access registers, etc.)
28  *
29  * These must NEVER be grabbed in reverse order.
30  *
31  */
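/*
 * Illustrative sketch only (not code used by the driver): when more than
 * one of these locks has to be held, they are taken in the order listed
 * above, e.g.
 *
 *      spin_lock_irqsave(&card->int_lock, flags);
 *      spin_lock(&scq->lock);
 *      spin_lock(&card->res_lock);
 *      ...
 *      spin_unlock(&card->res_lock);
 *      spin_unlock(&scq->lock);
 *      spin_unlock_irqrestore(&card->int_lock, flags);
 *
 * The lock names match those initialized in ns_init_card() and get_scq().
 */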
32
33 /* Header files */
34
35 #include <linux/module.h>
36 #include <linux/kernel.h>
37 #include <linux/skbuff.h>
38 #include <linux/atmdev.h>
39 #include <linux/atm.h>
40 #include <linux/pci.h>
41 #include <linux/dma-mapping.h>
42 #include <linux/types.h>
43 #include <linux/string.h>
44 #include <linux/delay.h>
45 #include <linux/init.h>
46 #include <linux/sched.h>
47 #include <linux/timer.h>
48 #include <linux/interrupt.h>
49 #include <linux/bitops.h>
50 #include <linux/slab.h>
51 #include <linux/idr.h>
52 #include <asm/io.h>
53 #include <linux/uaccess.h>
54 #include <linux/atomic.h>
55 #include <linux/etherdevice.h>
56 #include "nicstar.h"
57 #ifdef CONFIG_ATM_NICSTAR_USE_SUNI
58 #include "suni.h"
59 #endif /* CONFIG_ATM_NICSTAR_USE_SUNI */
60 #ifdef CONFIG_ATM_NICSTAR_USE_IDT77105
61 #include "idt77105.h"
62 #endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */
63
64 /* Additional code */
65
66 #include "nicstarmac.c"
67
68 /* Configurable parameters */
69
70 #undef PHY_LOOPBACK
71 #undef TX_DEBUG
72 #undef RX_DEBUG
73 #undef GENERAL_DEBUG
74 #undef EXTRA_DEBUG
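/*
 * To enable one of the debug classes at build time, turn the matching
 * #undef above into a #define, e.g.:
 *
 *      #define TX_DEBUG
 */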
75
76 /* Do not touch these */
77
78 #ifdef TX_DEBUG
79 #define TXPRINTK(args...) printk(args)
80 #else
81 #define TXPRINTK(args...)
82 #endif /* TX_DEBUG */
83
84 #ifdef RX_DEBUG
85 #define RXPRINTK(args...) printk(args)
86 #else
87 #define RXPRINTK(args...)
88 #endif /* RX_DEBUG */
89
90 #ifdef GENERAL_DEBUG
91 #define PRINTK(args...) printk(args)
92 #else
93 #define PRINTK(args...)
94 #endif /* GENERAL_DEBUG */
95
96 #ifdef EXTRA_DEBUG
97 #define XPRINTK(args...) printk(args)
98 #else
99 #define XPRINTK(args...)
100 #endif /* EXTRA_DEBUG */
101
102 /* Macros */
103
104 #define CMD_BUSY(card) (readl((card)->membase + STAT) & NS_STAT_CMDBZ)
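/*
 * Usage sketch: every command written to the CMD register is preceded by a
 * busy-wait on this flag, e.g.
 *
 *      while (CMD_BUSY(card)) ;
 *      writel(NS_CMD_READ_UTILITY | 0x00000200, card->membase + CMD);
 *
 * as done throughout ns_init_card() and the SRAM helpers below.
 */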
105
106 #define NS_DELAY mdelay(1)
107
108 #define PTR_DIFF(a, b)  ((u32)((unsigned long)(a) - (unsigned long)(b)))
109
110 #ifndef ATM_SKB
111 #define ATM_SKB(s) (&(s)->atm)
112 #endif
113
114 #define scq_virt_to_bus(scq, p) \
115                 ((scq)->dma + ((unsigned long)(p) - (unsigned long)(scq)->org))
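/*
 * Usage sketch (illustrative): translating an SCQ entry pointer into the bus
 * address the adapter expects, as ns_init_card() does when it programs the
 * VBR segmentation channel descriptor:
 *
 *      u32 busaddr = scq_virt_to_bus(scq, scq->next);
 */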
116
117 /* Function declarations */
118
119 static u32 ns_read_sram(ns_dev * card, u32 sram_address);
120 static void ns_write_sram(ns_dev * card, u32 sram_address, u32 * value,
121                           int count);
122 static int ns_init_card(int i, struct pci_dev *pcidev);
123 static void ns_init_card_error(ns_dev * card, int error);
124 static scq_info *get_scq(ns_dev *card, int size, u32 scd);
125 static void free_scq(ns_dev *card, scq_info * scq, struct atm_vcc *vcc);
126 static void push_rxbufs(ns_dev *, struct sk_buff *);
127 static irqreturn_t ns_irq_handler(int irq, void *dev_id);
128 static int ns_open(struct atm_vcc *vcc);
129 static void ns_close(struct atm_vcc *vcc);
130 static void fill_tst(ns_dev * card, int n, vc_map * vc);
131 static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb);
132 static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
133                      struct sk_buff *skb);
134 static void process_tsq(ns_dev * card);
135 static void drain_scq(ns_dev * card, scq_info * scq, int pos);
136 static void process_rsq(ns_dev * card);
137 static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe);
138 static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb);
139 static void recycle_iovec_rx_bufs(ns_dev * card, struct iovec *iov, int count);
140 static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb);
141 static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb);
142 static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb);
143 static int ns_proc_read(struct atm_dev *dev, loff_t * pos, char *page);
144 static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg);
145 #ifdef EXTRA_DEBUG
146 static void which_list(ns_dev * card, struct sk_buff *skb);
147 #endif
148 static void ns_poll(struct timer_list *unused);
149 static void ns_phy_put(struct atm_dev *dev, unsigned char value,
150                        unsigned long addr);
151 static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr);
152
153 /* Global variables */
154
155 static struct ns_dev *cards[NS_MAX_CARDS];
156 static unsigned num_cards;
157 static const struct atmdev_ops atm_ops = {
158         .open = ns_open,
159         .close = ns_close,
160         .ioctl = ns_ioctl,
161         .send = ns_send,
162         .phy_put = ns_phy_put,
163         .phy_get = ns_phy_get,
164         .proc_read = ns_proc_read,
165         .owner = THIS_MODULE,
166 };
167
168 static struct timer_list ns_timer;
169 static char *mac[NS_MAX_CARDS];
170 module_param_array(mac, charp, NULL, 0);
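/*
 * "mac" optionally overrides the ESI read from EEPROM, one entry per card,
 * in the colon-separated format accepted by mac_pton(), e.g. (illustrative):
 *
 *      modprobe nicstar mac=00:11:22:33:44:55
 */
MODULE_PARM_DESC(mac, "ESI (MAC address) override, one address per card");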
171 MODULE_LICENSE("GPL");
172
173 /* Functions */
174
175 static int nicstar_init_one(struct pci_dev *pcidev,
176                             const struct pci_device_id *ent)
177 {
178         static int index = -1;
179         unsigned int error;
180
181         index++;
182         cards[index] = NULL;
183
184         error = ns_init_card(index, pcidev);
185         if (error) {
186                 cards[index--] = NULL;  /* step back so this slot is reused by the next probe */
187                 goto err_out;
188         }
189
190         return 0;
191 err_out:
192         return -ENODEV;
193 }
194
195 static void nicstar_remove_one(struct pci_dev *pcidev)
196 {
197         int i, j;
198         ns_dev *card = pci_get_drvdata(pcidev);
199         struct sk_buff *hb;
200         struct sk_buff *iovb;
201         struct sk_buff *lb;
202         struct sk_buff *sb;
203
204         i = card->index;
205
206         if (cards[i] == NULL)
207                 return;
208
209         if (card->atmdev->phy && card->atmdev->phy->stop)
210                 card->atmdev->phy->stop(card->atmdev);
211
212         /* Stop everything */
213         writel(0x00000000, card->membase + CFG);
214
215         /* De-register device */
216         atm_dev_deregister(card->atmdev);
217
218         /* Disable PCI device */
219         pci_disable_device(pcidev);
220
221         /* Free up resources */
222         j = 0;
223         PRINTK("nicstar%d: freeing %d huge buffers.\n", i, card->hbpool.count);
224         while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL) {
225                 dev_kfree_skb_any(hb);
226                 j++;
227         }
228         PRINTK("nicstar%d: %d huge buffers freed.\n", i, j);
229         j = 0;
230         PRINTK("nicstar%d: freeing %d iovec buffers.\n", i,
231                card->iovpool.count);
232         while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL) {
233                 dev_kfree_skb_any(iovb);
234                 j++;
235         }
236         PRINTK("nicstar%d: %d iovec buffers freed.\n", i, j);
237         while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL)
238                 dev_kfree_skb_any(lb);
239         while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL)
240                 dev_kfree_skb_any(sb);
241         free_scq(card, card->scq0, NULL);
242         for (j = 0; j < NS_FRSCD_NUM; j++) {
243                 if (card->scd2vc[j] != NULL)
244                         free_scq(card, card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc);
245         }
246         idr_destroy(&card->idr);
247         dma_free_coherent(&card->pcidev->dev, NS_RSQSIZE + NS_RSQ_ALIGNMENT,
248                           card->rsq.org, card->rsq.dma);
249         dma_free_coherent(&card->pcidev->dev, NS_TSQSIZE + NS_TSQ_ALIGNMENT,
250                           card->tsq.org, card->tsq.dma);
251         free_irq(card->pcidev->irq, card);
252         iounmap(card->membase);
253         kfree(card);
254 }
255
256 static const struct pci_device_id nicstar_pci_tbl[] = {
257         { PCI_VDEVICE(IDT, PCI_DEVICE_ID_IDT_IDT77201), 0 },
258         {0,}                    /* terminate list */
259 };
260
261 MODULE_DEVICE_TABLE(pci, nicstar_pci_tbl);
262
263 static struct pci_driver nicstar_driver = {
264         .name = "nicstar",
265         .id_table = nicstar_pci_tbl,
266         .probe = nicstar_init_one,
267         .remove = nicstar_remove_one,
268 };
269
270 static int __init nicstar_init(void)
271 {
272         unsigned error = 0;     /* Initialized to remove compile warning */
273
274         XPRINTK("nicstar: nicstar_init() called.\n");
275
276         error = pci_register_driver(&nicstar_driver);
277
278         TXPRINTK("nicstar: TX debug enabled.\n");
279         RXPRINTK("nicstar: RX debug enabled.\n");
280         PRINTK("nicstar: General debug enabled.\n");
281 #ifdef PHY_LOOPBACK
282         printk("nicstar: using PHY loopback.\n");
283 #endif /* PHY_LOOPBACK */
284         XPRINTK("nicstar: nicstar_init() returned.\n");
285
286         if (!error) {
287                 timer_setup(&ns_timer, ns_poll, 0);
288                 ns_timer.expires = jiffies + NS_POLL_PERIOD;
289                 add_timer(&ns_timer);
290         }
291
292         return error;
293 }
294
295 static void __exit nicstar_cleanup(void)
296 {
297         XPRINTK("nicstar: nicstar_cleanup() called.\n");
298
299         del_timer(&ns_timer);
300
301         pci_unregister_driver(&nicstar_driver);
302
303         XPRINTK("nicstar: nicstar_cleanup() returned.\n");
304 }
305
306 static u32 ns_read_sram(ns_dev * card, u32 sram_address)
307 {
308         unsigned long flags;
309         u32 data;
310         sram_address <<= 2;
311         sram_address &= 0x0007FFFC;     /* address must be dword aligned */
312         sram_address |= 0x50000000;     /* SRAM read command */
313         spin_lock_irqsave(&card->res_lock, flags);
314         while (CMD_BUSY(card)) ;
315         writel(sram_address, card->membase + CMD);
316         while (CMD_BUSY(card)) ;
317         data = readl(card->membase + DR0);
318         spin_unlock_irqrestore(&card->res_lock, flags);
319         return data;
320 }
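/*
 * Usage sketch (illustrative only): reading back a single word of SRAM, for
 * instance the first word of a receive connection table entry; "idx" is just
 * a placeholder for a VPI/VCI index:
 *
 *      u32 word = ns_read_sram(card, NS_RCT + idx * NS_RCT_ENTRY_SIZE);
 */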
321
322 static void ns_write_sram(ns_dev * card, u32 sram_address, u32 * value,
323                           int count)
324 {
325         unsigned long flags;
326         int i, c;
327         count--;                /* count range now is 0..3 instead of 1..4 */
328         c = count;
329         c <<= 2;                /* to use increments of 4 */
330         spin_lock_irqsave(&card->res_lock, flags);
331         while (CMD_BUSY(card)) ;
332         for (i = 0; i <= c; i += 4)
333                 writel(*(value++), card->membase + i);
334         /* Note: DR# registers are the first 4 dwords in nicstar's memspace,
335            so card->membase + DR0 == card->membase */
336         sram_address <<= 2;
337         sram_address &= 0x0007FFFC;
338         sram_address |= (0x40000000 | count);
339         writel(sram_address, card->membase + CMD);
340         spin_unlock_irqrestore(&card->res_lock, flags);
341 }
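/*
 * Usage sketch (illustrative only): writing a full four-word entry in one
 * burst, as ns_init_card() does when it clears the receive connection table;
 * "index" is a placeholder for the RCT slot, and count must be 1..4:
 *
 *      u32 entry[4] = { 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF };
 *      ns_write_sram(card, index * 4, entry, 4);
 */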
342
343 static int ns_init_card(int i, struct pci_dev *pcidev)
344 {
345         int j;
346         struct ns_dev *card = NULL;
347         unsigned char pci_latency;
348         unsigned error;
349         u32 data;
350         u32 u32d[4];
351         u32 ns_cfg_rctsize;
352         int bcount;
353         unsigned long membase;
354
355         error = 0;
356
357         if (pci_enable_device(pcidev)) {
358                 printk("nicstar%d: can't enable PCI device\n", i);
359                 error = 2;
360                 ns_init_card_error(card, error);
361                 return error;
362         }
363         if (dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32)) != 0) {
364                 printk(KERN_WARNING
365                        "nicstar%d: No suitable DMA available.\n", i);
366                 error = 2;
367                 ns_init_card_error(card, error);
368                 return error;
369         }
370
371         card = kmalloc(sizeof(*card), GFP_KERNEL);
372         if (!card) {
373                 printk
374                     ("nicstar%d: can't allocate memory for device structure.\n",
375                      i);
376                 error = 2;
377                 ns_init_card_error(card, error);
378                 return error;
379         }
380         cards[i] = card;
381         spin_lock_init(&card->int_lock);
382         spin_lock_init(&card->res_lock);
383
384         pci_set_drvdata(pcidev, card);
385
386         card->index = i;
387         card->atmdev = NULL;
388         card->pcidev = pcidev;
389         membase = pci_resource_start(pcidev, 1);
390         card->membase = ioremap(membase, NS_IOREMAP_SIZE);
391         if (!card->membase) {
392                 printk("nicstar%d: can't ioremap() membase.\n", i);
393                 error = 3;
394                 ns_init_card_error(card, error);
395                 return error;
396         }
397         PRINTK("nicstar%d: membase at 0x%p.\n", i, card->membase);
398
399         pci_set_master(pcidev);
400
401         if (pci_read_config_byte(pcidev, PCI_LATENCY_TIMER, &pci_latency) != 0) {
402                 printk("nicstar%d: can't read PCI latency timer.\n", i);
403                 error = 6;
404                 ns_init_card_error(card, error);
405                 return error;
406         }
407 #ifdef NS_PCI_LATENCY
408         if (pci_latency < NS_PCI_LATENCY) {
409                 PRINTK("nicstar%d: setting PCI latency timer to %d.\n", i,
410                        NS_PCI_LATENCY);
411                 for (j = 1; j < 4; j++) {
412                         if (pci_write_config_byte
413                             (pcidev, PCI_LATENCY_TIMER, NS_PCI_LATENCY) != 0)
414                                 break;
415                 }
416                 if (j == 4) {
417                         printk
418                             ("nicstar%d: can't set PCI latency timer to %d.\n",
419                              i, NS_PCI_LATENCY);
420                         error = 7;
421                         ns_init_card_error(card, error);
422                         return error;
423                 }
424         }
425 #endif /* NS_PCI_LATENCY */
426
427         /* Clear timer overflow */
428         data = readl(card->membase + STAT);
429         if (data & NS_STAT_TMROF)
430                 writel(NS_STAT_TMROF, card->membase + STAT);
431
432         /* Software reset */
433         writel(NS_CFG_SWRST, card->membase + CFG);
434         NS_DELAY;
435         writel(0x00000000, card->membase + CFG);
436
437         /* PHY reset */
438         writel(0x00000008, card->membase + GP);
439         NS_DELAY;
440         writel(0x00000001, card->membase + GP);
441         NS_DELAY;
442         while (CMD_BUSY(card)) ;
443         writel(NS_CMD_WRITE_UTILITY | 0x00000100, card->membase + CMD); /* Sync UTOPIA with SAR clock */
444         NS_DELAY;
445
446         /* Detect PHY type */
447         while (CMD_BUSY(card)) ;
448         writel(NS_CMD_READ_UTILITY | 0x00000200, card->membase + CMD);
449         while (CMD_BUSY(card)) ;
450         data = readl(card->membase + DR0);
451         switch (data) {
452         case 0x00000009:
453                 printk("nicstar%d: PHY seems to be 25 Mbps.\n", i);
454                 card->max_pcr = ATM_25_PCR;
455                 while (CMD_BUSY(card)) ;
456                 writel(0x00000008, card->membase + DR0);
457                 writel(NS_CMD_WRITE_UTILITY | 0x00000200, card->membase + CMD);
458                 /* Clear any pending interrupt */
459                 writel(NS_STAT_SFBQF, card->membase + STAT);
460 #ifdef PHY_LOOPBACK
461                 while (CMD_BUSY(card)) ;
462                 writel(0x00000022, card->membase + DR0);
463                 writel(NS_CMD_WRITE_UTILITY | 0x00000202, card->membase + CMD);
464 #endif /* PHY_LOOPBACK */
465                 break;
466         case 0x00000030:
467         case 0x00000031:
468                 printk("nicstar%d: PHY seems to be 155 Mbps.\n", i);
469                 card->max_pcr = ATM_OC3_PCR;
470 #ifdef PHY_LOOPBACK
471                 while (CMD_BUSY(card)) ;
472                 writel(0x00000002, card->membase + DR0);
473                 writel(NS_CMD_WRITE_UTILITY | 0x00000205, card->membase + CMD);
474 #endif /* PHY_LOOPBACK */
475                 break;
476         default:
477                 printk("nicstar%d: unknown PHY type (0x%08X).\n", i, data);
478                 error = 8;
479                 ns_init_card_error(card, error);
480                 return error;
481         }
482         writel(0x00000000, card->membase + GP);
483
484         /* Determine SRAM size */
485         data = 0x76543210;
486         ns_write_sram(card, 0x1C003, &data, 1);
487         data = 0x89ABCDEF;
488         ns_write_sram(card, 0x14003, &data, 1);
489         if (ns_read_sram(card, 0x14003) == 0x89ABCDEF &&
490             ns_read_sram(card, 0x1C003) == 0x76543210)
491                 card->sram_size = 128;
492         else
493                 card->sram_size = 32;
494         PRINTK("nicstar%d: %dK x 32bit SRAM size.\n", i, card->sram_size);
495
496         card->rct_size = NS_MAX_RCTSIZE;
497
498 #if (NS_MAX_RCTSIZE == 4096)
499         if (card->sram_size == 128)
500                 printk
501                     ("nicstar%d: limiting maximum VCI. See NS_MAX_RCTSIZE in nicstar.h\n",
502                      i);
503 #elif (NS_MAX_RCTSIZE == 16384)
504         if (card->sram_size == 32) {
505                 printk
506                     ("nicstar%d: wasting memory. See NS_MAX_RCTSIZE in nicstar.h\n",
507                      i);
508                 card->rct_size = 4096;
509         }
510 #else
511 #error NS_MAX_RCTSIZE must be either 4096 or 16384 in nicstar.c
512 #endif
513
514         card->vpibits = NS_VPIBITS;
515         if (card->rct_size == 4096)
516                 card->vcibits = 12 - NS_VPIBITS;
517         else                    /* card->rct_size == 16384 */
518                 card->vcibits = 14 - NS_VPIBITS;
519
520         /* Initialize the nicstar eeprom/eprom stuff, for the MAC addr */
521         if (mac[i] == NULL)
522                 nicstar_init_eprom(card->membase);
523
524         /* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */
525         writel(0x00000000, card->membase + VPM);
526
527         /* Initialize TSQ */
528         card->tsq.org = dma_alloc_coherent(&card->pcidev->dev,
529                                            NS_TSQSIZE + NS_TSQ_ALIGNMENT,
530                                            &card->tsq.dma, GFP_KERNEL);
531         if (card->tsq.org == NULL) {
532                 printk("nicstar%d: can't allocate TSQ.\n", i);
533                 error = 10;
534                 ns_init_card_error(card, error);
535                 return error;
536         }
537         card->tsq.base = PTR_ALIGN(card->tsq.org, NS_TSQ_ALIGNMENT);
538         card->tsq.next = card->tsq.base;
539         card->tsq.last = card->tsq.base + (NS_TSQ_NUM_ENTRIES - 1);
540         for (j = 0; j < NS_TSQ_NUM_ENTRIES; j++)
541                 ns_tsi_init(card->tsq.base + j);
542         writel(0x00000000, card->membase + TSQH);
543         writel(ALIGN(card->tsq.dma, NS_TSQ_ALIGNMENT), card->membase + TSQB);
544         PRINTK("nicstar%d: TSQ base at 0x%p.\n", i, card->tsq.base);
545
546         /* Initialize RSQ */
547         card->rsq.org = dma_alloc_coherent(&card->pcidev->dev,
548                                            NS_RSQSIZE + NS_RSQ_ALIGNMENT,
549                                            &card->rsq.dma, GFP_KERNEL);
550         if (card->rsq.org == NULL) {
551                 printk("nicstar%d: can't allocate RSQ.\n", i);
552                 error = 11;
553                 ns_init_card_error(card, error);
554                 return error;
555         }
556         card->rsq.base = PTR_ALIGN(card->rsq.org, NS_RSQ_ALIGNMENT);
557         card->rsq.next = card->rsq.base;
558         card->rsq.last = card->rsq.base + (NS_RSQ_NUM_ENTRIES - 1);
559         for (j = 0; j < NS_RSQ_NUM_ENTRIES; j++)
560                 ns_rsqe_init(card->rsq.base + j);
561         writel(0x00000000, card->membase + RSQH);
562         writel(ALIGN(card->rsq.dma, NS_RSQ_ALIGNMENT), card->membase + RSQB);
563         PRINTK("nicstar%d: RSQ base at 0x%p.\n", i, card->rsq.base);
564
565         /* Initialize SCQ0, the only VBR SCQ used */
566         card->scq1 = NULL;
567         card->scq2 = NULL;
568         card->scq0 = get_scq(card, VBR_SCQSIZE, NS_VRSCD0);
569         if (card->scq0 == NULL) {
570                 printk("nicstar%d: can't get SCQ0.\n", i);
571                 error = 12;
572                 ns_init_card_error(card, error);
573                 return error;
574         }
575         u32d[0] = scq_virt_to_bus(card->scq0, card->scq0->base);
576         u32d[1] = (u32) 0x00000000;
577         u32d[2] = (u32) 0xffffffff;
578         u32d[3] = (u32) 0x00000000;
579         ns_write_sram(card, NS_VRSCD0, u32d, 4);
580         ns_write_sram(card, NS_VRSCD1, u32d, 4);        /* These last two won't be used */
581         ns_write_sram(card, NS_VRSCD2, u32d, 4);        /* but are initialized, just in case... */
582         card->scq0->scd = NS_VRSCD0;
583         PRINTK("nicstar%d: VBR-SCQ0 base at 0x%p.\n", i, card->scq0->base);
584
585         /* Initialize TSTs */
586         card->tst_addr = NS_TST0;
587         card->tst_free_entries = NS_TST_NUM_ENTRIES;
588         data = NS_TST_OPCODE_VARIABLE;
589         for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
590                 ns_write_sram(card, NS_TST0 + j, &data, 1);
591         data = ns_tste_make(NS_TST_OPCODE_END, NS_TST0);
592         ns_write_sram(card, NS_TST0 + NS_TST_NUM_ENTRIES, &data, 1);
593         for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
594                 ns_write_sram(card, NS_TST1 + j, &data, 1);
595         data = ns_tste_make(NS_TST_OPCODE_END, NS_TST1);
596         ns_write_sram(card, NS_TST1 + NS_TST_NUM_ENTRIES, &data, 1);
597         for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
598                 card->tste2vc[j] = NULL;
599         writel(NS_TST0 << 2, card->membase + TSTB);
600
601         /* Initialize RCT. AAL type is set on opening the VC. */
602 #ifdef RCQ_SUPPORT
603         u32d[0] = NS_RCTE_RAWCELLINTEN;
604 #else
605         u32d[0] = 0x00000000;
606 #endif /* RCQ_SUPPORT */
607         u32d[1] = 0x00000000;
608         u32d[2] = 0x00000000;
609         u32d[3] = 0xFFFFFFFF;
610         for (j = 0; j < card->rct_size; j++)
611                 ns_write_sram(card, j * 4, u32d, 4);
612
613         memset(card->vcmap, 0, sizeof(card->vcmap));
614
615         for (j = 0; j < NS_FRSCD_NUM; j++)
616                 card->scd2vc[j] = NULL;
617
618         /* Initialize buffer levels */
619         card->sbnr.min = MIN_SB;
620         card->sbnr.init = NUM_SB;
621         card->sbnr.max = MAX_SB;
622         card->lbnr.min = MIN_LB;
623         card->lbnr.init = NUM_LB;
624         card->lbnr.max = MAX_LB;
625         card->iovnr.min = MIN_IOVB;
626         card->iovnr.init = NUM_IOVB;
627         card->iovnr.max = MAX_IOVB;
628         card->hbnr.min = MIN_HB;
629         card->hbnr.init = NUM_HB;
630         card->hbnr.max = MAX_HB;
631
632         card->sm_handle = NULL;
633         card->sm_addr = 0x00000000;
634         card->lg_handle = NULL;
635         card->lg_addr = 0x00000000;
636
637         card->efbie = 1;        /* To prevent push_rxbufs from enabling the interrupt */
638
639         idr_init(&card->idr);
640
641         /* Pre-allocate some huge buffers */
642         skb_queue_head_init(&card->hbpool.queue);
643         card->hbpool.count = 0;
644         for (j = 0; j < NUM_HB; j++) {
645                 struct sk_buff *hb;
646                 hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
647                 if (hb == NULL) {
648                         printk
649                             ("nicstar%d: can't allocate %dth of %d huge buffers.\n",
650                              i, j, NUM_HB);
651                         error = 13;
652                         ns_init_card_error(card, error);
653                         return error;
654                 }
655                 NS_PRV_BUFTYPE(hb) = BUF_NONE;
656                 skb_queue_tail(&card->hbpool.queue, hb);
657                 card->hbpool.count++;
658         }
659
660         /* Allocate large buffers */
661         skb_queue_head_init(&card->lbpool.queue);
662         card->lbpool.count = 0; /* Not used */
663         for (j = 0; j < NUM_LB; j++) {
664                 struct sk_buff *lb;
665                 lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
666                 if (lb == NULL) {
667                         printk
668                             ("nicstar%d: can't allocate %dth of %d large buffers.\n",
669                              i, j, NUM_LB);
670                         error = 14;
671                         ns_init_card_error(card, error);
672                         return error;
673                 }
674                 NS_PRV_BUFTYPE(lb) = BUF_LG;
675                 skb_queue_tail(&card->lbpool.queue, lb);
676                 skb_reserve(lb, NS_SMBUFSIZE);
677                 push_rxbufs(card, lb);
678                 /* Due to the implementation of push_rxbufs() this is 1, not 0 */
679                 if (j == 1) {
680                         card->rcbuf = lb;
681                         card->rawcell = (struct ns_rcqe *) lb->data;
682                         card->rawch = NS_PRV_DMA(lb);
683                 }
684         }
685         /* Test for strange behaviour which leads to crashes */
686         if ((bcount =
687              ns_stat_lfbqc_get(readl(card->membase + STAT))) < card->lbnr.min) {
688                 printk
689                     ("nicstar%d: Strange... Just allocated %d large buffers and lfbqc = %d.\n",
690                      i, j, bcount);
691                 error = 14;
692                 ns_init_card_error(card, error);
693                 return error;
694         }
695
696         /* Allocate small buffers */
697         skb_queue_head_init(&card->sbpool.queue);
698         card->sbpool.count = 0; /* Not used */
699         for (j = 0; j < NUM_SB; j++) {
700                 struct sk_buff *sb;
701                 sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
702                 if (sb == NULL) {
703                         printk
704                             ("nicstar%d: can't allocate %dth of %d small buffers.\n",
705                              i, j, NUM_SB);
706                         error = 15;
707                         ns_init_card_error(card, error);
708                         return error;
709                 }
710                 NS_PRV_BUFTYPE(sb) = BUF_SM;
711                 skb_queue_tail(&card->sbpool.queue, sb);
712                 skb_reserve(sb, NS_AAL0_HEADER);
713                 push_rxbufs(card, sb);
714         }
715         /* Test for strange behaviour which leads to crashes */
716         if ((bcount =
717              ns_stat_sfbqc_get(readl(card->membase + STAT))) < card->sbnr.min) {
718                 printk
719                     ("nicstar%d: Strange... Just allocated %d small buffers and sfbqc = %d.\n",
720                      i, j, bcount);
721                 error = 15;
722                 ns_init_card_error(card, error);
723                 return error;
724         }
725
726         /* Allocate iovec buffers */
727         skb_queue_head_init(&card->iovpool.queue);
728         card->iovpool.count = 0;
729         for (j = 0; j < NUM_IOVB; j++) {
730                 struct sk_buff *iovb;
731                 iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL);
732                 if (iovb == NULL) {
733                         printk
734                             ("nicstar%d: can't allocate %dth of %d iovec buffers.\n",
735                              i, j, NUM_IOVB);
736                         error = 16;
737                         ns_init_card_error(card, error);
738                         return error;
739                 }
740                 NS_PRV_BUFTYPE(iovb) = BUF_NONE;
741                 skb_queue_tail(&card->iovpool.queue, iovb);
742                 card->iovpool.count++;
743         }
744
745         /* Configure NICStAR */
746         if (card->rct_size == 4096)
747                 ns_cfg_rctsize = NS_CFG_RCTSIZE_4096_ENTRIES;
748         else                    /* (card->rct_size == 16384) */
749                 ns_cfg_rctsize = NS_CFG_RCTSIZE_16384_ENTRIES;
750
751         card->efbie = 1;
752
753         card->intcnt = 0;
754         if (request_irq
755             (pcidev->irq, &ns_irq_handler, IRQF_SHARED, "nicstar", card) != 0) {
756                 printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq);
757                 error = 9;
758                 ns_init_card_error(card, error);
759                 return error;
760         }
761
762         /* Register device */
763         card->atmdev = atm_dev_register("nicstar", &card->pcidev->dev, &atm_ops,
764                                         -1, NULL);
765         if (card->atmdev == NULL) {
766                 printk("nicstar%d: can't register device.\n", i);
767                 error = 17;
768                 ns_init_card_error(card, error);
769                 return error;
770         }
771
772         if (mac[i] == NULL || !mac_pton(mac[i], card->atmdev->esi)) {
773                 nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET,
774                                    card->atmdev->esi, 6);
775                 if (ether_addr_equal(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00")) {
776                         nicstar_read_eprom(card->membase,
777                                            NICSTAR_EPROM_MAC_ADDR_OFFSET_ALT,
778                                            card->atmdev->esi, 6);
779                 }
780         }
781
782         printk("nicstar%d: MAC address %pM\n", i, card->atmdev->esi);
783
784         card->atmdev->dev_data = card;
785         card->atmdev->ci_range.vpi_bits = card->vpibits;
786         card->atmdev->ci_range.vci_bits = card->vcibits;
787         card->atmdev->link_rate = card->max_pcr;
788         card->atmdev->phy = NULL;
789
790 #ifdef CONFIG_ATM_NICSTAR_USE_SUNI
791         if (card->max_pcr == ATM_OC3_PCR)
792                 suni_init(card->atmdev);
793 #endif /* CONFIG_ATM_NICSTAR_USE_SUNI */
794
795 #ifdef CONFIG_ATM_NICSTAR_USE_IDT77105
796         if (card->max_pcr == ATM_25_PCR)
797                 idt77105_init(card->atmdev);
798 #endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */
799
800         if (card->atmdev->phy && card->atmdev->phy->start)
801                 card->atmdev->phy->start(card->atmdev);
802
803         writel(NS_CFG_RXPATH | NS_CFG_SMBUFSIZE | NS_CFG_LGBUFSIZE | NS_CFG_EFBIE | NS_CFG_RSQSIZE | NS_CFG_VPIBITS | ns_cfg_rctsize | NS_CFG_RXINT_NODELAY | NS_CFG_RAWIE |    /* Only enabled if RCQ_SUPPORT */
804                NS_CFG_RSQAFIE | NS_CFG_TXEN | NS_CFG_TXIE | NS_CFG_TSQFIE_OPT | /* Only enabled if ENABLE_TSQFIE */
805                NS_CFG_PHYIE, card->membase + CFG);
806
807         num_cards++;
808
809         return error;
810 }
811
812 static void ns_init_card_error(ns_dev *card, int error)
813 {
814         if (error >= 17) {
815                 writel(0x00000000, card->membase + CFG);
816         }
817         if (error >= 16) {
818                 struct sk_buff *iovb;
819                 while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL)
820                         dev_kfree_skb_any(iovb);
821         }
822         if (error >= 15) {
823                 struct sk_buff *sb;
824                 while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL)
825                         dev_kfree_skb_any(sb);
826         }
827         if (error >= 14) {
828                 struct sk_buff *lb;
829                 while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL)
830                         dev_kfree_skb_any(lb);
831         }
832         if (error >= 13) {
833                 struct sk_buff *hb;
834                 while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL)
835                         dev_kfree_skb_any(hb);
836                 free_scq(card, card->scq0, NULL);       /* SCQ0 was allocated at step 12 */
837         }
838         if (error >= 12) {
839                 dma_free_coherent(&card->pcidev->dev, NS_RSQSIZE + NS_RSQ_ALIGNMENT, card->rsq.org, card->rsq.dma);
840         }
841         if (error >= 11) {
842                 dma_free_coherent(&card->pcidev->dev, NS_TSQSIZE + NS_TSQ_ALIGNMENT, card->tsq.org, card->tsq.dma);
843         }
844         if (error >= 17) {      /* the IRQ is only requested right before atm_dev_register() */
845                 free_irq(card->pcidev->irq, card);
846         }
847         if (error >= 4) {
848                 iounmap(card->membase);
849         }
850         if (error >= 3) {
851                 pci_disable_device(card->pcidev);
852                 kfree(card);
853         }
854 }
855
856 static scq_info *get_scq(ns_dev *card, int size, u32 scd)
857 {
858         scq_info *scq;
859         int i;
860
861         if (size != VBR_SCQSIZE && size != CBR_SCQSIZE)
862                 return NULL;
863
864         scq = kmalloc(sizeof(*scq), GFP_KERNEL);
865         if (!scq)
866                 return NULL;
867         scq->org = dma_alloc_coherent(&card->pcidev->dev,
868                                       2 * size,  &scq->dma, GFP_KERNEL);
869         if (!scq->org) {
870                 kfree(scq);
871                 return NULL;
872         }
873         scq->skb = kmalloc_array(size / NS_SCQE_SIZE,
874                                  sizeof(*scq->skb),
875                                  GFP_KERNEL);
876         if (!scq->skb) {
877                 dma_free_coherent(&card->pcidev->dev,
878                                   2 * size, scq->org, scq->dma);
879                 kfree(scq);
880                 return NULL;
881         }
882         scq->num_entries = size / NS_SCQE_SIZE;
883         scq->base = PTR_ALIGN(scq->org, size);
884         scq->next = scq->base;
885         scq->last = scq->base + (scq->num_entries - 1);
886         scq->tail = scq->last;
887         scq->scd = scd;
888         scq->num_entries = size / NS_SCQE_SIZE;
889         scq->tbd_count = 0;
890         init_waitqueue_head(&scq->scqfull_waitq);
891         scq->full = 0;
892         spin_lock_init(&scq->lock);
893
894         for (i = 0; i < scq->num_entries; i++)
895                 scq->skb[i] = NULL;
896
897         return scq;
898 }
899
900 /* For a variable rate SCQ, vcc must be NULL */
901 static void free_scq(ns_dev *card, scq_info *scq, struct atm_vcc *vcc)
902 {
903         int i;
904
905         if (scq->num_entries == VBR_SCQ_NUM_ENTRIES)
906                 for (i = 0; i < scq->num_entries; i++) {
907                         if (scq->skb[i] != NULL) {
908                                 vcc = ATM_SKB(scq->skb[i])->vcc;
909                                 if (vcc->pop != NULL)
910                                         vcc->pop(vcc, scq->skb[i]);
911                                 else
912                                         dev_kfree_skb_any(scq->skb[i]);
913                         }
914         } else {                /* vcc must be != NULL */
915
916                 if (vcc == NULL) {
917                         printk
918                             ("nicstar: free_scq() called with vcc == NULL for fixed rate scq.");
919                         for (i = 0; i < scq->num_entries; i++)
920                                 dev_kfree_skb_any(scq->skb[i]);
921                 } else
922                         for (i = 0; i < scq->num_entries; i++) {
923                                 if (scq->skb[i] != NULL) {
924                                         if (vcc->pop != NULL)
925                                                 vcc->pop(vcc, scq->skb[i]);
926                                         else
927                                                 dev_kfree_skb_any(scq->skb[i]);
928                                 }
929                         }
930         }
931         kfree(scq->skb);
932         dma_free_coherent(&card->pcidev->dev,
933                           2 * (scq->num_entries == VBR_SCQ_NUM_ENTRIES ?
934                                VBR_SCQSIZE : CBR_SCQSIZE),
935                           scq->org, scq->dma);
936         kfree(scq);
937 }
938
939 /* The handle passed is the sk_buff holding the small or large buffer; it is
940    mapped to a 32-bit id via the card's idr before being handed to the hardware. */
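/*
 * Sketch of the handle scheme (illustrative): the id written to the free
 * buffer queue is later turned back into the sk_buff with the same idr,
 * e.g. as the raw cell path in ns_irq_handler() does:
 *
 *      struct sk_buff *skb = idr_find(&card->idr, id);
 */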
941 static void push_rxbufs(ns_dev * card, struct sk_buff *skb)
942 {
943         struct sk_buff *handle1, *handle2;
944         int id1, id2;
945         u32 addr1, addr2;
946         u32 stat;
947         unsigned long flags;
948
949         /* *BARF* */
950         handle2 = NULL;
951         addr2 = 0;
952         handle1 = skb;
953         addr1 = dma_map_single(&card->pcidev->dev,
954                                skb->data,
955                                (NS_PRV_BUFTYPE(skb) == BUF_SM
956                                 ? NS_SMSKBSIZE : NS_LGSKBSIZE),
957                                DMA_TO_DEVICE);
958         NS_PRV_DMA(skb) = addr1; /* save so we can unmap later */
959
960 #ifdef GENERAL_DEBUG
961         if (!addr1)
962                 printk("nicstar%d: push_rxbufs called with addr1 = 0.\n",
963                        card->index);
964 #endif /* GENERAL_DEBUG */
965
966         stat = readl(card->membase + STAT);
967         card->sbfqc = ns_stat_sfbqc_get(stat);
968         card->lbfqc = ns_stat_lfbqc_get(stat);
969         if (NS_PRV_BUFTYPE(skb) == BUF_SM) {
970                 if (!addr2) {
971                         if (card->sm_addr) {
972                                 addr2 = card->sm_addr;
973                                 handle2 = card->sm_handle;
974                                 card->sm_addr = 0x00000000;
975                                 card->sm_handle = NULL;
976                         } else {        /* (!sm_addr) */
977
978                                 card->sm_addr = addr1;
979                                 card->sm_handle = handle1;
980                         }
981                 }
982         } else {                /* buf_type == BUF_LG */
983
984                 if (!addr2) {
985                         if (card->lg_addr) {
986                                 addr2 = card->lg_addr;
987                                 handle2 = card->lg_handle;
988                                 card->lg_addr = 0x00000000;
989                                 card->lg_handle = NULL;
990                         } else {        /* (!lg_addr) */
991
992                                 card->lg_addr = addr1;
993                                 card->lg_handle = handle1;
994                         }
995                 }
996         }
997
998         if (addr2) {
999                 if (NS_PRV_BUFTYPE(skb) == BUF_SM) {
1000                         if (card->sbfqc >= card->sbnr.max) {
1001                                 skb_unlink(handle1, &card->sbpool.queue);
1002                                 dev_kfree_skb_any(handle1);
1003                                 skb_unlink(handle2, &card->sbpool.queue);
1004                                 dev_kfree_skb_any(handle2);
1005                                 return;
1006                         } else
1007                                 card->sbfqc += 2;
1008                 } else {        /* (buf_type == BUF_LG) */
1009
1010                         if (card->lbfqc >= card->lbnr.max) {
1011                                 skb_unlink(handle1, &card->lbpool.queue);
1012                                 dev_kfree_skb_any(handle1);
1013                                 skb_unlink(handle2, &card->lbpool.queue);
1014                                 dev_kfree_skb_any(handle2);
1015                                 return;
1016                         } else
1017                                 card->lbfqc += 2;
1018                 }
1019
1020                 id1 = idr_alloc(&card->idr, handle1, 0, 0, GFP_ATOMIC);
1021                 if (id1 < 0)
1022                         goto out;
1023
1024                 id2 = idr_alloc(&card->idr, handle2, 0, 0, GFP_ATOMIC);
1025                 if (id2 < 0)
1026                         goto out;
1027
1028                 spin_lock_irqsave(&card->res_lock, flags);
1029                 while (CMD_BUSY(card)) ;
1030                 writel(addr2, card->membase + DR3);
1031                 writel(id2, card->membase + DR2);
1032                 writel(addr1, card->membase + DR1);
1033                 writel(id1, card->membase + DR0);
1034                 writel(NS_CMD_WRITE_FREEBUFQ | NS_PRV_BUFTYPE(skb),
1035                        card->membase + CMD);
1036                 spin_unlock_irqrestore(&card->res_lock, flags);
1037
1038                 XPRINTK("nicstar%d: Pushing %s buffers at 0x%x and 0x%x.\n",
1039                         card->index,
1040                         (NS_PRV_BUFTYPE(skb) == BUF_SM ? "small" : "large"),
1041                         addr1, addr2);
1042         }
1043
1044         if (!card->efbie && card->sbfqc >= card->sbnr.min &&
1045             card->lbfqc >= card->lbnr.min) {
1046                 card->efbie = 1;
1047                 writel((readl(card->membase + CFG) | NS_CFG_EFBIE),
1048                        card->membase + CFG);
1049         }
1050
1051 out:
1052         return;
1053 }
1054
1055 static irqreturn_t ns_irq_handler(int irq, void *dev_id)
1056 {
1057         u32 stat_r;
1058         ns_dev *card;
1059         struct atm_dev *dev;
1060         unsigned long flags;
1061
1062         card = (ns_dev *) dev_id;
1063         dev = card->atmdev;
1064         card->intcnt++;
1065
1066         PRINTK("nicstar%d: NICStAR generated an interrupt\n", card->index);
1067
1068         spin_lock_irqsave(&card->int_lock, flags);
1069
1070         stat_r = readl(card->membase + STAT);
1071
1072         /* Transmit Status Indicator has been written to T. S. Queue */
1073         if (stat_r & NS_STAT_TSIF) {
1074                 TXPRINTK("nicstar%d: TSI interrupt\n", card->index);
1075                 process_tsq(card);
1076                 writel(NS_STAT_TSIF, card->membase + STAT);
1077         }
1078
1079         /* Incomplete CS-PDU has been transmitted */
1080         if (stat_r & NS_STAT_TXICP) {
1081                 writel(NS_STAT_TXICP, card->membase + STAT);
1082                 TXPRINTK("nicstar%d: Incomplete CS-PDU transmitted.\n",
1083                          card->index);
1084         }
1085
1086         /* Transmit Status Queue 7/8 full */
1087         if (stat_r & NS_STAT_TSQF) {
1088                 writel(NS_STAT_TSQF, card->membase + STAT);
1089                 PRINTK("nicstar%d: TSQ full.\n", card->index);
1090                 process_tsq(card);
1091         }
1092
1093         /* Timer overflow */
1094         if (stat_r & NS_STAT_TMROF) {
1095                 writel(NS_STAT_TMROF, card->membase + STAT);
1096                 PRINTK("nicstar%d: Timer overflow.\n", card->index);
1097         }
1098
1099         /* PHY device interrupt signal active */
1100         if (stat_r & NS_STAT_PHYI) {
1101                 writel(NS_STAT_PHYI, card->membase + STAT);
1102                 PRINTK("nicstar%d: PHY interrupt.\n", card->index);
1103                 if (dev->phy && dev->phy->interrupt) {
1104                         dev->phy->interrupt(dev);
1105                 }
1106         }
1107
1108         /* Small Buffer Queue is full */
1109         if (stat_r & NS_STAT_SFBQF) {
1110                 writel(NS_STAT_SFBQF, card->membase + STAT);
1111                 printk("nicstar%d: Small free buffer queue is full.\n",
1112                        card->index);
1113         }
1114
1115         /* Large Buffer Queue is full */
1116         if (stat_r & NS_STAT_LFBQF) {
1117                 writel(NS_STAT_LFBQF, card->membase + STAT);
1118                 printk("nicstar%d: Large free buffer queue is full.\n",
1119                        card->index);
1120         }
1121
1122         /* Receive Status Queue is full */
1123         if (stat_r & NS_STAT_RSQF) {
1124                 writel(NS_STAT_RSQF, card->membase + STAT);
1125                 printk("nicstar%d: RSQ full.\n", card->index);
1126                 process_rsq(card);
1127         }
1128
1129         /* Complete CS-PDU received */
1130         if (stat_r & NS_STAT_EOPDU) {
1131                 RXPRINTK("nicstar%d: End of CS-PDU received.\n", card->index);
1132                 process_rsq(card);
1133                 writel(NS_STAT_EOPDU, card->membase + STAT);
1134         }
1135
1136         /* Raw cell received */
1137         if (stat_r & NS_STAT_RAWCF) {
1138                 writel(NS_STAT_RAWCF, card->membase + STAT);
1139 #ifndef RCQ_SUPPORT
1140                 printk("nicstar%d: Raw cell received and no support yet...\n",
1141                        card->index);
1142 #endif /* RCQ_SUPPORT */
1143                 /* NOTE: the following procedure may keep a raw cell pending until the
1144                    next interrupt. As this preliminary support is only meant to
1145                    avoid buffer leakage, this is not an issue. */
1146                 while (readl(card->membase + RAWCT) != card->rawch) {
1147
1148                         if (ns_rcqe_islast(card->rawcell)) {
1149                                 struct sk_buff *oldbuf;
1150
1151                                 oldbuf = card->rcbuf;
1152                                 card->rcbuf = idr_find(&card->idr,
1153                                                        ns_rcqe_nextbufhandle(card->rawcell));
1154                                 card->rawch = NS_PRV_DMA(card->rcbuf);
1155                                 card->rawcell = (struct ns_rcqe *)
1156                                                 card->rcbuf->data;
1157                                 recycle_rx_buf(card, oldbuf);
1158                         } else {
1159                                 card->rawch += NS_RCQE_SIZE;
1160                                 card->rawcell++;
1161                         }
1162                 }
1163         }
1164
1165         /* Small buffer queue is empty */
1166         if (stat_r & NS_STAT_SFBQE) {
1167                 int i;
1168                 struct sk_buff *sb;
1169
1170                 writel(NS_STAT_SFBQE, card->membase + STAT);
1171                 printk("nicstar%d: Small free buffer queue empty.\n",
1172                        card->index);
1173                 for (i = 0; i < card->sbnr.min; i++) {
1174                         sb = dev_alloc_skb(NS_SMSKBSIZE);
1175                         if (sb == NULL) {
1176                                 writel(readl(card->membase + CFG) &
1177                                        ~NS_CFG_EFBIE, card->membase + CFG);
1178                                 card->efbie = 0;
1179                                 break;
1180                         }
1181                         NS_PRV_BUFTYPE(sb) = BUF_SM;
1182                         skb_queue_tail(&card->sbpool.queue, sb);
1183                         skb_reserve(sb, NS_AAL0_HEADER);
1184                         push_rxbufs(card, sb);
1185                 }
1186                 card->sbfqc = i;
1187                 process_rsq(card);
1188         }
1189
1190         /* Large buffer queue empty */
1191         if (stat_r & NS_STAT_LFBQE) {
1192                 int i;
1193                 struct sk_buff *lb;
1194
1195                 writel(NS_STAT_LFBQE, card->membase + STAT);
1196                 printk("nicstar%d: Large free buffer queue empty.\n",
1197                        card->index);
1198                 for (i = 0; i < card->lbnr.min; i++) {
1199                         lb = dev_alloc_skb(NS_LGSKBSIZE);
1200                         if (lb == NULL) {
1201                                 writel(readl(card->membase + CFG) &
1202                                        ~NS_CFG_EFBIE, card->membase + CFG);
1203                                 card->efbie = 0;
1204                                 break;
1205                         }
1206                         NS_PRV_BUFTYPE(lb) = BUF_LG;
1207                         skb_queue_tail(&card->lbpool.queue, lb);
1208                         skb_reserve(lb, NS_SMBUFSIZE);
1209                         push_rxbufs(card, lb);
1210                 }
1211                 card->lbfqc = i;
1212                 process_rsq(card);
1213         }
1214
1215         /* Receive Status Queue is 7/8 full */
1216         if (stat_r & NS_STAT_RSQAF) {
1217                 writel(NS_STAT_RSQAF, card->membase + STAT);
1218                 RXPRINTK("nicstar%d: RSQ almost full.\n", card->index);
1219                 process_rsq(card);
1220         }
1221
1222         spin_unlock_irqrestore(&card->int_lock, flags);
1223         PRINTK("nicstar%d: end of interrupt service\n", card->index);
1224         return IRQ_HANDLED;
1225 }
1226
1227 static int ns_open(struct atm_vcc *vcc)
1228 {
1229         ns_dev *card;
1230         vc_map *vc;
1231         unsigned long tmpl, modl;
1232         int tcr, tcra;          /* target cell rate, and absolute value */
1233         int n = 0;              /* Number of entries in the TST. Initialized to remove
1234                                    the compiler warning. */
1235         u32 u32d[4];
1236         int frscdi = 0;         /* Index of the SCD. Initialized to remove the compiler
1237                                    warning. How I wish compilers were clever enough to
1238                                    tell which variables can truly be used
1239                                    uninitialized... */
1240         int inuse;              /* tx or rx vc already in use by another vcc */
1241         short vpi = vcc->vpi;
1242         int vci = vcc->vci;
1243
1244         card = (ns_dev *) vcc->dev->dev_data;
1245         PRINTK("nicstar%d: opening vpi.vci %d.%d \n", card->index, (int)vpi,
1246                vci);
1247         if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
1248                 PRINTK("nicstar%d: unsupported AAL.\n", card->index);
1249                 return -EINVAL;
1250         }
1251
1252         vc = &(card->vcmap[vpi << card->vcibits | vci]);
1253         vcc->dev_data = vc;
1254
1255         inuse = 0;
1256         if (vcc->qos.txtp.traffic_class != ATM_NONE && vc->tx)
1257                 inuse = 1;
1258         if (vcc->qos.rxtp.traffic_class != ATM_NONE && vc->rx)
1259                 inuse += 2;
1260         if (inuse) {
1261                 printk("nicstar%d: %s vci already in use.\n", card->index,
1262                        inuse == 1 ? "tx" : inuse == 2 ? "rx" : "tx and rx");
1263                 return -EINVAL;
1264         }
1265
1266         set_bit(ATM_VF_ADDR, &vcc->flags);
1267
1268         /* NOTE: You are not allowed to modify an open connection's QOS. To change
1269            that, remove the ATM_VF_PARTIAL flag checking. There may be other changes
1270            needed to do that. */
1271         if (!test_bit(ATM_VF_PARTIAL, &vcc->flags)) {
1272                 scq_info *scq;
1273
1274                 set_bit(ATM_VF_PARTIAL, &vcc->flags);
1275                 if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1276                         /* Check requested cell rate and availability of SCD */
1277                         if (vcc->qos.txtp.max_pcr == 0 && vcc->qos.txtp.pcr == 0
1278                             && vcc->qos.txtp.min_pcr == 0) {
1279                                 PRINTK
1280                                     ("nicstar%d: trying to open a CBR vc with cell rate = 0 \n",
1281                                      card->index);
1282                                 clear_bit(ATM_VF_PARTIAL, &vcc->flags);
1283                                 clear_bit(ATM_VF_ADDR, &vcc->flags);
1284                                 return -EINVAL;
1285                         }
1286
1287                         tcr = atm_pcr_goal(&(vcc->qos.txtp));
1288                         tcra = tcr >= 0 ? tcr : -tcr;
1289
1290                         PRINTK("nicstar%d: target cell rate = %d.\n",
1291                                card->index, vcc->qos.txtp.max_pcr);
1292
1293                         tmpl =
1294                             (unsigned long)tcra *(unsigned long)
1295                             NS_TST_NUM_ENTRIES;
1296                         modl = tmpl % card->max_pcr;
1297
1298                         n = (int)(tmpl / card->max_pcr);
1299                         if (tcr > 0) {
1300                                 if (modl > 0)
1301                                         n++;
1302                         } else if (tcr == 0) {
1303                                 if ((n =
1304                                      (card->tst_free_entries -
1305                                       NS_TST_RESERVED)) <= 0) {
1306                                         PRINTK
1307                                             ("nicstar%d: no CBR bandwidth free.\n",
1308                                              card->index);
1309                                         clear_bit(ATM_VF_PARTIAL, &vcc->flags);
1310                                         clear_bit(ATM_VF_ADDR, &vcc->flags);
1311                                         return -EINVAL;
1312                                 }
1313                         }
1314
1315                         if (n == 0) {
1316                                 printk
1317                                     ("nicstar%d: selected bandwidth < granularity.\n",
1318                                      card->index);
1319                                 clear_bit(ATM_VF_PARTIAL, &vcc->flags);
1320                                 clear_bit(ATM_VF_ADDR, &vcc->flags);
1321                                 return -EINVAL;
1322                         }
1323
1324                         if (n > (card->tst_free_entries - NS_TST_RESERVED)) {
1325                                 PRINTK
1326                                     ("nicstar%d: not enough free CBR bandwidth.\n",
1327                                      card->index);
1328                                 clear_bit(ATM_VF_PARTIAL, &vcc->flags);
1329                                 clear_bit(ATM_VF_ADDR, &vcc->flags);
1330                                 return -EINVAL;
1331                         } else
1332                                 card->tst_free_entries -= n;
1333
1334                         XPRINTK("nicstar%d: writing %d tst entries.\n",
1335                                 card->index, n);
1336                         for (frscdi = 0; frscdi < NS_FRSCD_NUM; frscdi++) {
1337                                 if (card->scd2vc[frscdi] == NULL) {
1338                                         card->scd2vc[frscdi] = vc;
1339                                         break;
1340                                 }
1341                         }
1342                         if (frscdi == NS_FRSCD_NUM) {
1343                                 PRINTK
1344                                     ("nicstar%d: no SCD available for CBR channel.\n",
1345                                      card->index);
1346                                 card->tst_free_entries += n;
1347                                 clear_bit(ATM_VF_PARTIAL, &vcc->flags);
1348                                 clear_bit(ATM_VF_ADDR, &vcc->flags);
1349                                 return -EBUSY;
1350                         }
1351
1352                         vc->cbr_scd = NS_FRSCD + frscdi * NS_FRSCD_SIZE;
1353
1354                         scq = get_scq(card, CBR_SCQSIZE, vc->cbr_scd);
1355                         if (scq == NULL) {
1356                                 PRINTK("nicstar%d: can't get fixed rate SCQ.\n",
1357                                        card->index);
1358                                 card->scd2vc[frscdi] = NULL;
1359                                 card->tst_free_entries += n;
1360                                 clear_bit(ATM_VF_PARTIAL, &vcc->flags);
1361                                 clear_bit(ATM_VF_ADDR, &vcc->flags);
1362                                 return -ENOMEM;
1363                         }
1364                         vc->scq = scq;
1365                         u32d[0] = scq_virt_to_bus(scq, scq->base);
1366                         u32d[1] = (u32) 0x00000000;
1367                         u32d[2] = (u32) 0xffffffff;
1368                         u32d[3] = (u32) 0x00000000;
1369                         ns_write_sram(card, vc->cbr_scd, u32d, 4);
1370
1371                         fill_tst(card, n, vc);
1372                 } else if (vcc->qos.txtp.traffic_class == ATM_UBR) {
1373                         vc->cbr_scd = 0x00000000;
1374                         vc->scq = card->scq0;
1375                 }
1376
1377                 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
1378                         vc->tx = 1;
1379                         vc->tx_vcc = vcc;
1380                         vc->tbd_count = 0;
1381                 }
1382                 if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
1383                         u32 status;
1384
1385                         vc->rx = 1;
1386                         vc->rx_vcc = vcc;
1387                         vc->rx_iov = NULL;
1388
1389                         /* Open the connection in hardware */
1390                         if (vcc->qos.aal == ATM_AAL5)
1391                                 status = NS_RCTE_AAL5 | NS_RCTE_CONNECTOPEN;
1392                         else    /* vcc->qos.aal == ATM_AAL0 */
1393                                 status = NS_RCTE_AAL0 | NS_RCTE_CONNECTOPEN;
1394 #ifdef RCQ_SUPPORT
1395                         status |= NS_RCTE_RAWCELLINTEN;
1396 #endif /* RCQ_SUPPORT */
1397                         ns_write_sram(card,
1398                                       NS_RCT +
1399                                       (vpi << card->vcibits | vci) *
1400                                       NS_RCT_ENTRY_SIZE, &status, 1);
1401                 }
1402
1403         }
1404
1405         set_bit(ATM_VF_READY, &vcc->flags);
1406         return 0;
1407 }
1408
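/*
 * ns_close() - tear down an open VC.
 *
 * Closes the receive side in hardware and recycles any partially
 * reassembled buffers, drains the CBR SCQ (appending a TSR if needed),
 * returns the TST entries and SCD of a CBR channel, and finally drops
 * any references to the vcc still held by entries in scq0.
 */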
1409 static void ns_close(struct atm_vcc *vcc)
1410 {
1411         vc_map *vc;
1412         ns_dev *card;
1413         u32 data;
1414         int i;
1415
1416         vc = vcc->dev_data;
1417         card = vcc->dev->dev_data;
1418         PRINTK("nicstar%d: closing vpi.vci %d.%d \n", card->index,
1419                (int)vcc->vpi, vcc->vci);
1420
1421         clear_bit(ATM_VF_READY, &vcc->flags);
1422
1423         if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
1424                 u32 addr;
1425                 unsigned long flags;
1426
1427                 addr =
1428                     NS_RCT +
1429                     (vcc->vpi << card->vcibits | vcc->vci) * NS_RCT_ENTRY_SIZE;
1430                 spin_lock_irqsave(&card->res_lock, flags);
1431                 while (CMD_BUSY(card)) ;
1432                 writel(NS_CMD_CLOSE_CONNECTION | addr << 2,
1433                        card->membase + CMD);
1434                 spin_unlock_irqrestore(&card->res_lock, flags);
1435
1436                 vc->rx = 0;
1437                 if (vc->rx_iov != NULL) {
1438                         struct sk_buff *iovb;
1439                         u32 stat;
1440
1441                         stat = readl(card->membase + STAT);
1442                         card->sbfqc = ns_stat_sfbqc_get(stat);
1443                         card->lbfqc = ns_stat_lfbqc_get(stat);
1444
1445                         PRINTK
1446                             ("nicstar%d: closing a VC with pending rx buffers.\n",
1447                              card->index);
1448                         iovb = vc->rx_iov;
1449                         recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
1450                                               NS_PRV_IOVCNT(iovb));
1451                         NS_PRV_IOVCNT(iovb) = 0;
1452                         spin_lock_irqsave(&card->int_lock, flags);
1453                         recycle_iov_buf(card, iovb);
1454                         spin_unlock_irqrestore(&card->int_lock, flags);
1455                         vc->rx_iov = NULL;
1456                 }
1457         }
1458
1459         if (vcc->qos.txtp.traffic_class != ATM_NONE) {
1460                 vc->tx = 0;
1461         }
1462
1463         if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1464                 unsigned long flags;
1465                 ns_scqe *scqep;
1466                 scq_info *scq;
1467
1468                 scq = vc->scq;
1469
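                /* Spin, rescheduling between attempts, until the card has
                   drained the whole SCQ. */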
1470                 for (;;) {
1471                         spin_lock_irqsave(&scq->lock, flags);
1472                         scqep = scq->next;
1473                         if (scqep == scq->base)
1474                                 scqep = scq->last;
1475                         else
1476                                 scqep--;
1477                         if (scqep == scq->tail) {
1478                                 spin_unlock_irqrestore(&scq->lock, flags);
1479                                 break;
1480                         }
1481                         /* If the last entry is not a TSR, place one in the SCQ in order to
1482                            be able to completely drain it and then close. */
1483                         if (!ns_scqe_is_tsr(scqep) && scq->tail != scq->next) {
1484                                 ns_scqe tsr;
1485                                 u32 scdi, scqi;
1486                                 u32 data;
1487                                 int index;
1488
1489                                 tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE);
1490                                 scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE;
1491                                 scqi = scq->next - scq->base;
1492                                 tsr.word_2 = ns_tsr_mkword_2(scdi, scqi);
1493                                 tsr.word_3 = 0x00000000;
1494                                 tsr.word_4 = 0x00000000;
1495                                 *scq->next = tsr;
1496                                 index = (int)scqi;
1497                                 scq->skb[index] = NULL;
1498                                 if (scq->next == scq->last)
1499                                         scq->next = scq->base;
1500                                 else
1501                                         scq->next++;
1502                                 data = scq_virt_to_bus(scq, scq->next);
1503                                 ns_write_sram(card, scq->scd, &data, 1);
1504                         }
1505                         spin_unlock_irqrestore(&scq->lock, flags);
1506                         schedule();
1507                 }
1508
1509                 /* Free all TST entries */
1510                 data = NS_TST_OPCODE_VARIABLE;
1511                 for (i = 0; i < NS_TST_NUM_ENTRIES; i++) {
1512                         if (card->tste2vc[i] == vc) {
1513                                 ns_write_sram(card, card->tst_addr + i, &data,
1514                                               1);
1515                                 card->tste2vc[i] = NULL;
1516                                 card->tst_free_entries++;
1517                         }
1518                 }
1519
1520                 card->scd2vc[(vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE] = NULL;
1521                 free_scq(card, vc->scq, vcc);
1522         }
1523
1524         /* remove all references to vcc before deleting it */
1525         if (vcc->qos.txtp.traffic_class != ATM_NONE) {
1526                 unsigned long flags;
1527                 scq_info *scq = card->scq0;
1528
1529                 spin_lock_irqsave(&scq->lock, flags);
1530
1531                 for (i = 0; i < scq->num_entries; i++) {
1532                         if (scq->skb[i] && ATM_SKB(scq->skb[i])->vcc == vcc) {
1533                                 ATM_SKB(scq->skb[i])->vcc = NULL;
1534                                 atm_return(vcc, scq->skb[i]->truesize);
1535                                 PRINTK
1536                                     ("nicstar: deleted pending vcc mapping\n");
1537                         }
1538                 }
1539
1540                 spin_unlock_irqrestore(&scq->lock, flags);
1541         }
1542
1543         vcc->dev_data = NULL;
1544         clear_bit(ATM_VF_PARTIAL, &vcc->flags);
1545         clear_bit(ATM_VF_ADDR, &vcc->flags);
1546
1547 #ifdef RX_DEBUG
1548         {
1549                 u32 stat, cfg;
1550                 stat = readl(card->membase + STAT);
1551                 cfg = readl(card->membase + CFG);
1552                 printk("STAT = 0x%08X  CFG = 0x%08X  \n", stat, cfg);
1553                 printk
1554                     ("TSQ: base = 0x%p  next = 0x%p  last = 0x%p  TSQT = 0x%08X \n",
1555                      card->tsq.base, card->tsq.next,
1556                      card->tsq.last, readl(card->membase + TSQT));
1557                 printk
1558                     ("RSQ: base = 0x%p  next = 0x%p  last = 0x%p  RSQT = 0x%08X \n",
1559                      card->rsq.base, card->rsq.next,
1560                      card->rsq.last, readl(card->membase + RSQT));
1561                 printk("Empty free buffer queue interrupt %s \n",
1562                        card->efbie ? "enabled" : "disabled");
1563                 printk("SBCNT = %d  count = %d   LBCNT = %d count = %d \n",
1564                        ns_stat_sfbqc_get(stat), card->sbpool.count,
1565                        ns_stat_lfbqc_get(stat), card->lbpool.count);
1566                 printk("hbpool.count = %d  iovpool.count = %d \n",
1567                        card->hbpool.count, card->iovpool.count);
1568         }
1569 #endif /* RX_DEBUG */
1570 }
1571
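/*
 * fill_tst() - program n entries for a CBR channel into the transmit
 * schedule table, spreading them as evenly as possible so the cell
 * spacing matches the requested rate.
 */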
1572 static void fill_tst(ns_dev * card, int n, vc_map * vc)
1573 {
1574         u32 new_tst;
1575         unsigned long cl;
1576         int e, r;
1577         u32 data;
1578
1579         /* It would be very complicated to keep the two TSTs synchronized while
1580            assuring that writes are only made to the inactive TST. So, for now I
1581            will use only one TST. If problems occur, I will change this again */
1582
1583         new_tst = card->tst_addr;
1584
1585         /* Fill procedure */
1586
1587         for (e = 0; e < NS_TST_NUM_ENTRIES; e++) {
1588                 if (card->tste2vc[e] == NULL)
1589                         break;
1590         }
1591         if (e == NS_TST_NUM_ENTRIES) {
1592                 printk("nicstar%d: No free TST entries found. \n", card->index);
1593                 return;
1594         }
1595
1596         r = n;
1597         cl = NS_TST_NUM_ENTRIES;
1598         data = ns_tste_make(NS_TST_OPCODE_FIXED, vc->cbr_scd);
1599
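        /* Spread the n entries as evenly as possible over the table,
           using cl as an error accumulator. */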
1600         while (r > 0) {
1601                 if (cl >= NS_TST_NUM_ENTRIES && card->tste2vc[e] == NULL) {
1602                         card->tste2vc[e] = vc;
1603                         ns_write_sram(card, new_tst + e, &data, 1);
1604                         cl -= NS_TST_NUM_ENTRIES;
1605                         r--;
1606                 }
1607
1608                 if (++e == NS_TST_NUM_ENTRIES) {
1609                         e = 0;
1610                 }
1611                 cl += n;
1612         }
1613
1614         /* End of fill procedure */
1615
1616         data = ns_tste_make(NS_TST_OPCODE_END, new_tst);
1617         ns_write_sram(card, new_tst + NS_TST_NUM_ENTRIES, &data, 1);
1618         ns_write_sram(card, card->tst_addr + NS_TST_NUM_ENTRIES, &data, 1);
1619         card->tst_addr = new_tst;
1620 }
1621
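/*
 * ns_send() - transmit one skb.
 *
 * Validates the VC and the AAL, maps the payload for DMA, builds an
 * AAL5 or AAL0 transmit buffer descriptor and queues it on the
 * channel's fixed rate SCQ (CBR) or on scq0 (everything else).
 */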
1622 static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
1623 {
1624         ns_dev *card;
1625         vc_map *vc;
1626         scq_info *scq;
1627         unsigned long buflen;
1628         ns_scqe scqe;
1629         u32 flags;              /* TBD flags, not CPU flags */
1630
1631         card = vcc->dev->dev_data;
1632         TXPRINTK("nicstar%d: ns_send() called.\n", card->index);
1633         if ((vc = (vc_map *) vcc->dev_data) == NULL) {
1634                 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
1635                        card->index);
1636                 atomic_inc(&vcc->stats->tx_err);
1637                 dev_kfree_skb_any(skb);
1638                 return -EINVAL;
1639         }
1640
1641         if (!vc->tx) {
1642                 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
1643                        card->index);
1644                 atomic_inc(&vcc->stats->tx_err);
1645                 dev_kfree_skb_any(skb);
1646                 return -EINVAL;
1647         }
1648
1649         if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
1650                 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
1651                        card->index);
1652                 atomic_inc(&vcc->stats->tx_err);
1653                 dev_kfree_skb_any(skb);
1654                 return -EINVAL;
1655         }
1656
1657         if (skb_shinfo(skb)->nr_frags != 0) {
1658                 printk("nicstar%d: No scatter-gather yet.\n", card->index);
1659                 atomic_inc(&vcc->stats->tx_err);
1660                 dev_kfree_skb_any(skb);
1661                 return -EINVAL;
1662         }
1663
1664         ATM_SKB(skb)->vcc = vcc;
1665
1666         NS_PRV_DMA(skb) = dma_map_single(&card->pcidev->dev, skb->data,
1667                                          skb->len, DMA_TO_DEVICE);
1668
1669         if (vcc->qos.aal == ATM_AAL5) {
1670                 buflen = (skb->len + 47 + 8) / 48 * 48; /* payload + 8 byte trailer, rounded up to a multiple of 48 */
1671                 flags = NS_TBD_AAL5;
1672                 scqe.word_2 = cpu_to_le32(NS_PRV_DMA(skb));
1673                 scqe.word_3 = cpu_to_le32(skb->len);
1674                 scqe.word_4 =
1675                     ns_tbd_mkword_4(0, (u32) vcc->vpi, (u32) vcc->vci, 0,
1676                                     ATM_SKB(skb)->
1677                                     atm_options & ATM_ATMOPT_CLP ? 1 : 0);
1678                 flags |= NS_TBD_EOPDU;
1679         } else {                /* (vcc->qos.aal == ATM_AAL0) */
1680
1681                 buflen = ATM_CELL_PAYLOAD;      /* i.e., 48 bytes */
1682                 flags = NS_TBD_AAL0;
1683                 scqe.word_2 = cpu_to_le32(NS_PRV_DMA(skb) + NS_AAL0_HEADER);
1684                 scqe.word_3 = cpu_to_le32(0x00000000);
1685                 if (*skb->data & 0x02)  /* Payload type 1 - end of pdu */
1686                         flags |= NS_TBD_EOPDU;
1687                 scqe.word_4 =
1688                     cpu_to_le32(*((u32 *) skb->data) & ~NS_TBD_VC_MASK);
1689                 /* Force the VPI/VCI to be the same as in VCC struct */
1690                 scqe.word_4 |=
1691                     cpu_to_le32((((u32) vcc->
1692                                   vpi) << NS_TBD_VPI_SHIFT | ((u32) vcc->
1693                                                               vci) <<
1694                                  NS_TBD_VCI_SHIFT) & NS_TBD_VC_MASK);
1695         }
1696
1697         if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1698                 scqe.word_1 = ns_tbd_mkword_1_novbr(flags, (u32) buflen);
1699                 scq = ((vc_map *) vcc->dev_data)->scq;
1700         } else {
1701                 scqe.word_1 =
1702                     ns_tbd_mkword_1(flags, (u32) 1, (u32) 1, (u32) buflen);
1703                 scq = card->scq0;
1704         }
1705
1706         if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
1707                 atomic_inc(&vcc->stats->tx_err);
1708                 dev_kfree_skb_any(skb);
1709                 return -EIO;
1710         }
1711         atomic_inc(&vcc->stats->tx);
1712
1713         return 0;
1714 }
1715
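/*
 * push_scqe() - append a TBD to an SCQ and tell the card about it.
 *
 * May sleep if the queue is full (unless called in interrupt context),
 * records the skb so drain_scq() can free it once transmitted, inserts
 * a TSR when enough TBDs have been queued, and writes the updated
 * queue position to the SCD in SRAM.  Returns 0 on success, nonzero if
 * the TBD could not be queued.
 */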
1716 static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
1717                      struct sk_buff *skb)
1718 {
1719         unsigned long flags;
1720         ns_scqe tsr;
1721         u32 scdi, scqi;
1722         int scq_is_vbr;
1723         u32 data;
1724         int index;
1725
1726         spin_lock_irqsave(&scq->lock, flags);
1727         while (scq->tail == scq->next) {
1728                 if (in_interrupt()) {
1729                         spin_unlock_irqrestore(&scq->lock, flags);
1730                         printk("nicstar%d: Error pushing TBD.\n", card->index);
1731                         return 1;
1732                 }
1733
1734                 scq->full = 1;
1735                 wait_event_interruptible_lock_irq_timeout(scq->scqfull_waitq,
1736                                                           scq->tail != scq->next,
1737                                                           scq->lock,
1738                                                           SCQFULL_TIMEOUT);
1739
1740                 if (scq->full) {
1741                         spin_unlock_irqrestore(&scq->lock, flags);
1742                         printk("nicstar%d: Timeout pushing TBD.\n",
1743                                card->index);
1744                         return 1;
1745                 }
1746         }
1747         *scq->next = *tbd;
1748         index = (int)(scq->next - scq->base);
1749         scq->skb[index] = skb;
1750         XPRINTK("nicstar%d: sending skb at 0x%p (pos %d).\n",
1751                 card->index, skb, index);
1752         XPRINTK("nicstar%d: TBD written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%p.\n",
1753                 card->index, le32_to_cpu(tbd->word_1), le32_to_cpu(tbd->word_2),
1754                 le32_to_cpu(tbd->word_3), le32_to_cpu(tbd->word_4),
1755                 scq->next);
1756         if (scq->next == scq->last)
1757                 scq->next = scq->base;
1758         else
1759                 scq->next++;
1760
1761         vc->tbd_count++;
1762         if (scq->num_entries == VBR_SCQ_NUM_ENTRIES) {
1763                 scq->tbd_count++;
1764                 scq_is_vbr = 1;
1765         } else
1766                 scq_is_vbr = 0;
1767
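        /* Queue a TSR every few TBDs so the card raises a transmit status
           interrupt and drain_scq() can release the transmitted skbs. */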
1768         if (vc->tbd_count >= MAX_TBD_PER_VC
1769             || scq->tbd_count >= MAX_TBD_PER_SCQ) {
1770                 int has_run = 0;
1771
1772                 while (scq->tail == scq->next) {
1773                         if (in_interrupt()) {
1774                                 data = scq_virt_to_bus(scq, scq->next);
1775                                 ns_write_sram(card, scq->scd, &data, 1);
1776                                 spin_unlock_irqrestore(&scq->lock, flags);
1777                                 printk("nicstar%d: Error pushing TSR.\n",
1778                                        card->index);
1779                                 return 0;
1780                         }
1781
1782                         scq->full = 1;
1783                         if (has_run++)
1784                                 break;
1785                         wait_event_interruptible_lock_irq_timeout(scq->scqfull_waitq,
1786                                                                   scq->tail != scq->next,
1787                                                                   scq->lock,
1788                                                                   SCQFULL_TIMEOUT);
1789                 }
1790
1791                 if (!scq->full) {
1792                         tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE);
1793                         if (scq_is_vbr)
1794                                 scdi = NS_TSR_SCDISVBR;
1795                         else
1796                                 scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE;
1797                         scqi = scq->next - scq->base;
1798                         tsr.word_2 = ns_tsr_mkword_2(scdi, scqi);
1799                         tsr.word_3 = 0x00000000;
1800                         tsr.word_4 = 0x00000000;
1801
1802                         *scq->next = tsr;
1803                         index = (int)scqi;
1804                         scq->skb[index] = NULL;
1805                         XPRINTK
1806                             ("nicstar%d: TSR written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%p.\n",
1807                              card->index, le32_to_cpu(tsr.word_1),
1808                              le32_to_cpu(tsr.word_2), le32_to_cpu(tsr.word_3),
1809                              le32_to_cpu(tsr.word_4), scq->next);
1810                         if (scq->next == scq->last)
1811                                 scq->next = scq->base;
1812                         else
1813                                 scq->next++;
1814                         vc->tbd_count = 0;
1815                         scq->tbd_count = 0;
1816                 } else
1817                         PRINTK("nicstar%d: Timeout pushing TSR.\n",
1818                                card->index);
1819         }
1820         data = scq_virt_to_bus(scq, scq->next);
1821         ns_write_sram(card, scq->scd, &data, 1);
1822
1823         spin_unlock_irqrestore(&scq->lock, flags);
1824
1825         return 0;
1826 }
1827
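/*
 * process_tsq() - service the transmit status queue.
 *
 * Looks up to two entries ahead, as required by the 77201 errata,
 * drains the SCQ referenced by each status indication and updates the
 * TSQ head register when done.
 */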
1828 static void process_tsq(ns_dev * card)
1829 {
1830         u32 scdi;
1831         scq_info *scq;
1832         ns_tsi *previous = NULL, *one_ahead, *two_ahead;
1833         int serviced_entries;   /* flag indicating at least one entry was serviced */
1834
1835         serviced_entries = 0;
1836
1837         if (card->tsq.next == card->tsq.last)
1838                 one_ahead = card->tsq.base;
1839         else
1840                 one_ahead = card->tsq.next + 1;
1841
1842         if (one_ahead == card->tsq.last)
1843                 two_ahead = card->tsq.base;
1844         else
1845                 two_ahead = one_ahead + 1;
1846
1847         while (!ns_tsi_isempty(card->tsq.next) || !ns_tsi_isempty(one_ahead) ||
1848                !ns_tsi_isempty(two_ahead))
1849                 /* At most two empty, as stated in the 77201 errata */
1850         {
1851                 serviced_entries = 1;
1852
1853                 /* Skip the one or two possible empty entries */
1854                 while (ns_tsi_isempty(card->tsq.next)) {
1855                         if (card->tsq.next == card->tsq.last)
1856                                 card->tsq.next = card->tsq.base;
1857                         else
1858                                 card->tsq.next++;
1859                 }
1860
1861                 if (!ns_tsi_tmrof(card->tsq.next)) {
1862                         scdi = ns_tsi_getscdindex(card->tsq.next);
1863                         if (scdi == NS_TSI_SCDISVBR)
1864                                 scq = card->scq0;
1865                         else {
1866                                 if (card->scd2vc[scdi] == NULL) {
1867                                         printk
1868                                             ("nicstar%d: could not find VC from SCD index.\n",
1869                                              card->index);
1870                                         ns_tsi_init(card->tsq.next);
1871                                         return;
1872                                 }
1873                                 scq = card->scd2vc[scdi]->scq;
1874                         }
1875                         drain_scq(card, scq, ns_tsi_getscqpos(card->tsq.next));
1876                         scq->full = 0;
1877                         wake_up_interruptible(&(scq->scqfull_waitq));
1878                 }
1879
1880                 ns_tsi_init(card->tsq.next);
1881                 previous = card->tsq.next;
1882                 if (card->tsq.next == card->tsq.last)
1883                         card->tsq.next = card->tsq.base;
1884                 else
1885                         card->tsq.next++;
1886
1887                 if (card->tsq.next == card->tsq.last)
1888                         one_ahead = card->tsq.base;
1889                 else
1890                         one_ahead = card->tsq.next + 1;
1891
1892                 if (one_ahead == card->tsq.last)
1893                         two_ahead = card->tsq.base;
1894                 else
1895                         two_ahead = one_ahead + 1;
1896         }
1897
1898         if (serviced_entries)
1899                 writel(PTR_DIFF(previous, card->tsq.base),
1900                        card->membase + TSQH);
1901 }
1902
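/*
 * drain_scq() - release the skbs of all SCQ entries transmitted up to
 * position pos, unmapping each buffer and handing it back through
 * vcc->pop() when available, then advance scq->tail.
 */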
1903 static void drain_scq(ns_dev * card, scq_info * scq, int pos)
1904 {
1905         struct atm_vcc *vcc;
1906         struct sk_buff *skb;
1907         int i;
1908         unsigned long flags;
1909
1910         XPRINTK("nicstar%d: drain_scq() called, scq at 0x%p, pos %d.\n",
1911                 card->index, scq, pos);
1912         if (pos >= scq->num_entries) {
1913                 printk("nicstar%d: Bad index on drain_scq().\n", card->index);
1914                 return;
1915         }
1916
1917         spin_lock_irqsave(&scq->lock, flags);
1918         i = (int)(scq->tail - scq->base);
1919         if (++i == scq->num_entries)
1920                 i = 0;
1921         while (i != pos) {
1922                 skb = scq->skb[i];
1923                 XPRINTK("nicstar%d: freeing skb at 0x%p (index %d).\n",
1924                         card->index, skb, i);
1925                 if (skb != NULL) {
1926                         dma_unmap_single(&card->pcidev->dev,
1927                                          NS_PRV_DMA(skb),
1928                                          skb->len,
1929                                          DMA_TO_DEVICE);
1930                         vcc = ATM_SKB(skb)->vcc;
1931                         if (vcc && vcc->pop != NULL) {
1932                                 vcc->pop(vcc, skb);
1933                         } else {
1934                                 dev_kfree_skb_irq(skb);
1935                         }
1936                         scq->skb[i] = NULL;
1937                 }
1938                 if (++i == scq->num_entries)
1939                         i = 0;
1940         }
1941         scq->tail = scq->base + pos;
1942         spin_unlock_irqrestore(&scq->lock, flags);
1943 }
1944
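/*
 * process_rsq() - dequeue every valid entry from the receive status
 * queue and update the RSQ head register.
 */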
1945 static void process_rsq(ns_dev * card)
1946 {
1947         ns_rsqe *previous;
1948
1949         if (!ns_rsqe_valid(card->rsq.next))
1950                 return;
1951         do {
1952                 dequeue_rx(card, card->rsq.next);
1953                 ns_rsqe_init(card->rsq.next);
1954                 previous = card->rsq.next;
1955                 if (card->rsq.next == card->rsq.last)
1956                         card->rsq.next = card->rsq.base;
1957                 else
1958                         card->rsq.next++;
1959         } while (ns_rsqe_valid(card->rsq.next));
1960         writel(PTR_DIFF(previous, card->rsq.base), card->membase + RSQH);
1961 }
1962
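/*
 * dequeue_rx() - handle a single receive status entry.
 *
 * Looks up the receive buffer by its handle and, for AAL0, pushes the
 * raw cells straight up the stack.  For AAL5 the buffers of a PDU are
 * collected in an iovec list until the end of the PDU is seen; the
 * reassembled SDU is then length/CRC checked and delivered in a small,
 * large or huge buffer depending on its size.
 */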
1963 static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
1964 {
1965         u32 vpi, vci;
1966         vc_map *vc;
1967         struct sk_buff *iovb;
1968         struct iovec *iov;
1969         struct atm_vcc *vcc;
1970         struct sk_buff *skb;
1971         unsigned short aal5_len;
1972         int len;
1973         u32 stat;
1974         u32 id;
1975
1976         stat = readl(card->membase + STAT);
1977         card->sbfqc = ns_stat_sfbqc_get(stat);
1978         card->lbfqc = ns_stat_lfbqc_get(stat);
1979
1980         id = le32_to_cpu(rsqe->buffer_handle);
1981         skb = idr_remove(&card->idr, id);
1982         if (!skb) {
1983                 RXPRINTK(KERN_ERR
1984                          "nicstar%d: skb not found!\n", card->index);
1985                 return;
1986         }
1987         dma_sync_single_for_cpu(&card->pcidev->dev,
1988                                 NS_PRV_DMA(skb),
1989                                 (NS_PRV_BUFTYPE(skb) == BUF_SM
1990                                  ? NS_SMSKBSIZE : NS_LGSKBSIZE),
1991                                 DMA_FROM_DEVICE);
1992         dma_unmap_single(&card->pcidev->dev,
1993                          NS_PRV_DMA(skb),
1994                          (NS_PRV_BUFTYPE(skb) == BUF_SM
1995                           ? NS_SMSKBSIZE : NS_LGSKBSIZE),
1996                          DMA_FROM_DEVICE);
1997         vpi = ns_rsqe_vpi(rsqe);
1998         vci = ns_rsqe_vci(rsqe);
1999         if (vpi >= 1UL << card->vpibits || vci >= 1UL << card->vcibits) {
2000                 printk("nicstar%d: SDU received for out-of-range vc %d.%d.\n",
2001                        card->index, vpi, vci);
2002                 recycle_rx_buf(card, skb);
2003                 return;
2004         }
2005
2006         vc = &(card->vcmap[vpi << card->vcibits | vci]);
2007         if (!vc->rx) {
2008                 RXPRINTK("nicstar%d: SDU received on non-rx vc %d.%d.\n",
2009                          card->index, vpi, vci);
2010                 recycle_rx_buf(card, skb);
2011                 return;
2012         }
2013
2014         vcc = vc->rx_vcc;
2015
2016         if (vcc->qos.aal == ATM_AAL0) {
2017                 struct sk_buff *sb;
2018                 unsigned char *cell;
2019                 int i;
2020
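                /* Hand each raw cell to the upper layer as a separate small
                   buffer, with the rebuilt cell header in front of the
                   48 byte payload. */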
2021                 cell = skb->data;
2022                 for (i = ns_rsqe_cellcount(rsqe); i; i--) {
2023                         sb = dev_alloc_skb(NS_SMSKBSIZE);
2024                         if (!sb) {
2025                                 printk
2026                                     ("nicstar%d: Can't allocate buffers for aal0.\n",
2027                                      card->index);
2028                                 atomic_add(i, &vcc->stats->rx_drop);
2029                                 break;
2030                         }
2031                         if (!atm_charge(vcc, sb->truesize)) {
2032                                 RXPRINTK
2033                                     ("nicstar%d: atm_charge() dropped aal0 packets.\n",
2034                                      card->index);
2035                                 atomic_add(i - 1, &vcc->stats->rx_drop);        /* atm_charge() already counted one dropped cell */
2036                                 dev_kfree_skb_any(sb);
2037                                 break;
2038                         }
2039                         /* Rebuild the header */
2040                         *((u32 *) sb->data) = le32_to_cpu(rsqe->word_1) << 4 |
2041                             (ns_rsqe_clp(rsqe) ? 0x00000001 : 0x00000000);
2042                         if (i == 1 && ns_rsqe_eopdu(rsqe))
2043                                 *((u32 *) sb->data) |= 0x00000002;
2044                         skb_put(sb, NS_AAL0_HEADER);
2045                         memcpy(skb_tail_pointer(sb), cell, ATM_CELL_PAYLOAD);
2046                         skb_put(sb, ATM_CELL_PAYLOAD);
2047                         ATM_SKB(sb)->vcc = vcc;
2048                         __net_timestamp(sb);
2049                         vcc->push(vcc, sb);
2050                         atomic_inc(&vcc->stats->rx);
2051                         cell += ATM_CELL_PAYLOAD;
2052                 }
2053
2054                 recycle_rx_buf(card, skb);
2055                 return;
2056         }
2057
2058         /* To reach this point, the AAL layer can only be AAL5 */
2059
2060         if ((iovb = vc->rx_iov) == NULL) {
2061                 iovb = skb_dequeue(&(card->iovpool.queue));
2062                 if (iovb == NULL) {     /* No buffers in the queue */
2063                         iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC);
2064                         if (iovb == NULL) {
2065                                 printk("nicstar%d: Out of iovec buffers.\n",
2066                                        card->index);
2067                                 atomic_inc(&vcc->stats->rx_drop);
2068                                 recycle_rx_buf(card, skb);
2069                                 return;
2070                         }
2071                         NS_PRV_BUFTYPE(iovb) = BUF_NONE;
2072                 } else if (--card->iovpool.count < card->iovnr.min) {
2073                         struct sk_buff *new_iovb;
2074                         if ((new_iovb =
2075                              alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC)) != NULL) {
2076                                 NS_PRV_BUFTYPE(new_iovb) = BUF_NONE;
2077                                 skb_queue_tail(&card->iovpool.queue, new_iovb);
2078                                 card->iovpool.count++;
2079                         }
2080                 }
2081                 vc->rx_iov = iovb;
2082                 NS_PRV_IOVCNT(iovb) = 0;
2083                 iovb->len = 0;
2084                 iovb->data = iovb->head;
2085                 skb_reset_tail_pointer(iovb);
2086                 /* IMPORTANT: a pointer to the sk_buff containing the small or large
2087                    buffer is stored as iovec base, NOT a pointer to the
2088                    small or large buffer itself. */
2089         } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
2090                 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
2091                 atomic_inc(&vcc->stats->rx_err);
2092                 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
2093                                       NS_MAX_IOVECS);
2094                 NS_PRV_IOVCNT(iovb) = 0;
2095                 iovb->len = 0;
2096                 iovb->data = iovb->head;
2097                 skb_reset_tail_pointer(iovb);
2098         }
2099         iov = &((struct iovec *)iovb->data)[NS_PRV_IOVCNT(iovb)++];
2100         iov->iov_base = (void *)skb;
2101         iov->iov_len = ns_rsqe_cellcount(rsqe) * 48;
2102         iovb->len += iov->iov_len;
2103
2104 #ifdef EXTRA_DEBUG
2105         if (NS_PRV_IOVCNT(iovb) == 1) {
2106                 if (NS_PRV_BUFTYPE(skb) != BUF_SM) {
2107                         printk
2108                             ("nicstar%d: Expected a small buffer, and this is not one.\n",
2109                              card->index);
2110                         which_list(card, skb);
2111                         atomic_inc(&vcc->stats->rx_err);
2112                         recycle_rx_buf(card, skb);
2113                         vc->rx_iov = NULL;
2114                         recycle_iov_buf(card, iovb);
2115                         return;
2116                 }
2117         } else {                /* NS_PRV_IOVCNT(iovb) >= 2 */
2118
2119                 if (NS_PRV_BUFTYPE(skb) != BUF_LG) {
2120                         printk
2121                             ("nicstar%d: Expected a large buffer, and this is not one.\n",
2122                              card->index);
2123                         which_list(card, skb);
2124                         atomic_inc(&vcc->stats->rx_err);
2125                         recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
2126                                               NS_PRV_IOVCNT(iovb));
2127                         vc->rx_iov = NULL;
2128                         recycle_iov_buf(card, iovb);
2129                         return;
2130                 }
2131         }
2132 #endif /* EXTRA_DEBUG */
2133
2134         if (ns_rsqe_eopdu(rsqe)) {
2135                 /* This works correctly regardless of the endianness of the host */
2136                 unsigned char *L1L2 = (unsigned char *)
2137                                                 (skb->data + iov->iov_len - 6);
2138                 aal5_len = L1L2[0] << 8 | L1L2[1];
2139                 len = (aal5_len == 0x0000) ? 0x10000 : aal5_len;
2140                 if (ns_rsqe_crcerr(rsqe) ||
2141                     len + 8 > iovb->len || len + (47 + 8) < iovb->len) {
2142                         printk("nicstar%d: AAL5 CRC error", card->index);
2143                         if (len + 8 > iovb->len || len + (47 + 8) < iovb->len)
2144                                 printk(" - PDU size mismatch.\n");
2145                         else
2146                                 printk(".\n");
2147                         atomic_inc(&vcc->stats->rx_err);
2148                         recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
2149                                               NS_PRV_IOVCNT(iovb));
2150                         vc->rx_iov = NULL;
2151                         recycle_iov_buf(card, iovb);
2152                         return;
2153                 }
2154
2155                 /* By this point we (hopefully) have a complete SDU without errors. */
2156
2157                 if (NS_PRV_IOVCNT(iovb) == 1) { /* Just a small buffer */
2158                         /* skb points to a small buffer */
2159                         if (!atm_charge(vcc, skb->truesize)) {
2160                                 push_rxbufs(card, skb);
2161                                 atomic_inc(&vcc->stats->rx_drop);
2162                         } else {
2163                                 skb_put(skb, len);
2164                                 dequeue_sm_buf(card, skb);
2165                                 ATM_SKB(skb)->vcc = vcc;
2166                                 __net_timestamp(skb);
2167                                 vcc->push(vcc, skb);
2168                                 atomic_inc(&vcc->stats->rx);
2169                         }
2170                 } else if (NS_PRV_IOVCNT(iovb) == 2) {  /* One small plus one large buffer */
2171                         struct sk_buff *sb;
2172
2173                         sb = (struct sk_buff *)(iov - 1)->iov_base;
2174                         /* skb points to a large buffer */
2175
2176                         if (len <= NS_SMBUFSIZE) {
2177                                 if (!atm_charge(vcc, sb->truesize)) {
2178                                         push_rxbufs(card, sb);
2179                                         atomic_inc(&vcc->stats->rx_drop);
2180                                 } else {
2181                                         skb_put(sb, len);
2182                                         dequeue_sm_buf(card, sb);
2183                                         ATM_SKB(sb)->vcc = vcc;
2184                                         __net_timestamp(sb);
2185                                         vcc->push(vcc, sb);
2186                                         atomic_inc(&vcc->stats->rx);
2187                                 }
2188
2189                                 push_rxbufs(card, skb);
2190
2191                         } else {        /* len > NS_SMBUFSIZE, the usual case */
2192
2193                                 if (!atm_charge(vcc, skb->truesize)) {
2194                                         push_rxbufs(card, skb);
2195                                         atomic_inc(&vcc->stats->rx_drop);
2196                                 } else {
2197                                         dequeue_lg_buf(card, skb);
2198                                         skb_push(skb, NS_SMBUFSIZE);
2199                                         skb_copy_from_linear_data(sb, skb->data,
2200                                                                   NS_SMBUFSIZE);
2201                                         skb_put(skb, len - NS_SMBUFSIZE);
2202                                         ATM_SKB(skb)->vcc = vcc;
2203                                         __net_timestamp(skb);
2204                                         vcc->push(vcc, skb);
2205                                         atomic_inc(&vcc->stats->rx);
2206                                 }
2207
2208                                 push_rxbufs(card, sb);
2209
2210                         }
2211
2212                 } else {        /* Must push a huge buffer */
2213
2214                         struct sk_buff *hb, *sb, *lb;
2215                         int remaining, tocopy;
2216                         int j;
2217
2218                         hb = skb_dequeue(&(card->hbpool.queue));
2219                         if (hb == NULL) {       /* No buffers in the queue */
2220
2221                                 hb = dev_alloc_skb(NS_HBUFSIZE);
2222                                 if (hb == NULL) {
2223                                         printk
2224                                             ("nicstar%d: Out of huge buffers.\n",
2225                                              card->index);
2226                                         atomic_inc(&vcc->stats->rx_drop);
2227                                         recycle_iovec_rx_bufs(card,
2228                                                               (struct iovec *)
2229                                                               iovb->data,
2230                                                               NS_PRV_IOVCNT(iovb));
2231                                         vc->rx_iov = NULL;
2232                                         recycle_iov_buf(card, iovb);
2233                                         return;
2234                                 } else if (card->hbpool.count < card->hbnr.min) {
2235                                         struct sk_buff *new_hb;
2236                                         if ((new_hb =
2237                                              dev_alloc_skb(NS_HBUFSIZE)) !=
2238                                             NULL) {
2239                                                 skb_queue_tail(&card->hbpool.
2240                                                                queue, new_hb);
2241                                                 card->hbpool.count++;
2242                                         }
2243                                 }
2244                                 NS_PRV_BUFTYPE(hb) = BUF_NONE;
2245                         } else if (--card->hbpool.count < card->hbnr.min) {
2246                                 struct sk_buff *new_hb;
2247                                 if ((new_hb =
2248                                      dev_alloc_skb(NS_HBUFSIZE)) != NULL) {
2249                                         NS_PRV_BUFTYPE(new_hb) = BUF_NONE;
2250                                         skb_queue_tail(&card->hbpool.queue,
2251                                                        new_hb);
2252                                         card->hbpool.count++;
2253                                 }
2254                                 if (card->hbpool.count < card->hbnr.min) {
2255                                         if ((new_hb =
2256                                              dev_alloc_skb(NS_HBUFSIZE)) !=
2257                                             NULL) {
2258                                                 NS_PRV_BUFTYPE(new_hb) =
2259                                                     BUF_NONE;
2260                                                 skb_queue_tail(&card->hbpool.
2261                                                                queue, new_hb);
2262                                                 card->hbpool.count++;
2263                                         }
2264                                 }
2265                         }
2266
2267                         iov = (struct iovec *)iovb->data;
2268
2269                         if (!atm_charge(vcc, hb->truesize)) {
2270                                 recycle_iovec_rx_bufs(card, iov,
2271                                                       NS_PRV_IOVCNT(iovb));
2272                                 if (card->hbpool.count < card->hbnr.max) {
2273                                         skb_queue_tail(&card->hbpool.queue, hb);
2274                                         card->hbpool.count++;
2275                                 } else
2276                                         dev_kfree_skb_any(hb);
2277                                 atomic_inc(&vcc->stats->rx_drop);
2278                         } else {
2279                                 /* Copy the small buffer to the huge buffer */
2280                                 sb = (struct sk_buff *)iov->iov_base;
2281                                 skb_copy_from_linear_data(sb, hb->data,
2282                                                           iov->iov_len);
2283                                 skb_put(hb, iov->iov_len);
2284                                 remaining = len - iov->iov_len;
2285                                 iov++;
2286                                 /* Free the small buffer */
2287                                 push_rxbufs(card, sb);
2288
2289                                 /* Copy all large buffers to the huge buffer and free them */
2290                                 for (j = 1; j < NS_PRV_IOVCNT(iovb); j++) {
2291                                         lb = (struct sk_buff *)iov->iov_base;
2292                                         tocopy =
2293                                             min_t(int, remaining, iov->iov_len);
2294                                         skb_copy_from_linear_data(lb,
2295                                                                   skb_tail_pointer
2296                                                                   (hb), tocopy);
2297                                         skb_put(hb, tocopy);
2298                                         iov++;
2299                                         remaining -= tocopy;
2300                                         push_rxbufs(card, lb);
2301                                 }
2302 #ifdef EXTRA_DEBUG
2303                                 if (remaining != 0 || hb->len != len)
2304                                         printk
2305                                             ("nicstar%d: Huge buffer len mismatch.\n",
2306                                              card->index);
2307 #endif /* EXTRA_DEBUG */
2308                                 ATM_SKB(hb)->vcc = vcc;
2309                                 __net_timestamp(hb);
2310                                 vcc->push(vcc, hb);
2311                                 atomic_inc(&vcc->stats->rx);
2312                         }
2313                 }
2314
2315                 vc->rx_iov = NULL;
2316                 recycle_iov_buf(card, iovb);
2317         }
2318
2319 }
2320
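/* Return a small or large receive buffer to the card's free buffer queue. */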
2321 static void recycle_rx_buf(ns_dev * card, struct sk_buff *skb)
2322 {
2323         if (unlikely(NS_PRV_BUFTYPE(skb) == BUF_NONE)) {
2324                 printk("nicstar%d: What kind of rx buffer is this?\n",
2325                        card->index);
2326                 dev_kfree_skb_any(skb);
2327         } else
2328                 push_rxbufs(card, skb);
2329 }
2330
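/* Recycle every receive buffer referenced by an iovec list. */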
2331 static void recycle_iovec_rx_bufs(ns_dev * card, struct iovec *iov, int count)
2332 {
2333         while (count-- > 0)
2334                 recycle_rx_buf(card, (struct sk_buff *)(iov++)->iov_base);
2335 }
2336
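/* Put an iovec buffer back in the pool, or free it if the pool is full. */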
2337 static void recycle_iov_buf(ns_dev * card, struct sk_buff *iovb)
2338 {
2339         if (card->iovpool.count < card->iovnr.max) {
2340                 skb_queue_tail(&card->iovpool.queue, iovb);
2341                 card->iovpool.count++;
2342         } else
2343                 dev_kfree_skb_any(iovb);
2344 }
2345
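/*
 * dequeue_sm_buf() - account for a small buffer consumed by the card,
 * replenishing the free buffer queue (with up to two new buffers) when
 * it drops below the configured init level.
 */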
2346 static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb)
2347 {
2348         skb_unlink(sb, &card->sbpool.queue);
2349         if (card->sbfqc < card->sbnr.init) {
2350                 struct sk_buff *new_sb;
2351                 if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) {
2352                         NS_PRV_BUFTYPE(new_sb) = BUF_SM;
2353                         skb_queue_tail(&card->sbpool.queue, new_sb);
2354                         skb_reserve(new_sb, NS_AAL0_HEADER);
2355                         push_rxbufs(card, new_sb);
2356                 }
2357         }
2358         if (card->sbfqc < card->sbnr.init) {
2360                 struct sk_buff *new_sb;
2361                 if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) {
2362                         NS_PRV_BUFTYPE(new_sb) = BUF_SM;
2363                         skb_queue_tail(&card->sbpool.queue, new_sb);
2364                         skb_reserve(new_sb, NS_AAL0_HEADER);
2365                         push_rxbufs(card, new_sb);
2366                 }
2367         }
2368 }
2369
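/* Same as dequeue_sm_buf(), but for large buffers. */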
2370 static void dequeue_lg_buf(ns_dev * card, struct sk_buff *lb)
2371 {
2372         skb_unlink(lb, &card->lbpool.queue);
2373         if (card->lbfqc < card->lbnr.init) {
2374                 struct sk_buff *new_lb;
2375                 if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) {
2376                         NS_PRV_BUFTYPE(new_lb) = BUF_LG;
2377                         skb_queue_tail(&card->lbpool.queue, new_lb);
2378                         skb_reserve(new_lb, NS_SMBUFSIZE);
2379                         push_rxbufs(card, new_lb);
2380                 }
2381         }
2382         if (card->lbfqc < card->lbnr.init) {
2384                 struct sk_buff *new_lb;
2385                 if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) {
2386                         NS_PRV_BUFTYPE(new_lb) = BUF_LG;
2387                         skb_queue_tail(&card->lbpool.queue, new_lb);
2388                         skb_reserve(new_lb, NS_SMBUFSIZE);
2389                         push_rxbufs(card, new_lb);
2390                 }
2391         }
2392 }
2393
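/*
 * ns_proc_read() - /proc interface; reports one line per call with the
 * buffer pool levels and the interrupt counter.
 */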
2394 static int ns_proc_read(struct atm_dev *dev, loff_t * pos, char *page)
2395 {
2396         u32 stat;
2397         ns_dev *card;
2398         int left;
2399
2400         left = (int)*pos;
2401         card = (ns_dev *) dev->dev_data;
2402         stat = readl(card->membase + STAT);
2403         if (!left--)
2404                 return sprintf(page, "Pool   count    min   init    max \n");
2405         if (!left--)
2406                 return sprintf(page, "Small  %5d  %5d  %5d  %5d \n",
2407                                ns_stat_sfbqc_get(stat), card->sbnr.min,
2408                                card->sbnr.init, card->sbnr.max);
2409         if (!left--)
2410                 return sprintf(page, "Large  %5d  %5d  %5d  %5d \n",
2411                                ns_stat_lfbqc_get(stat), card->lbnr.min,
2412                                card->lbnr.init, card->lbnr.max);
2413         if (!left--)
2414                 return sprintf(page, "Huge   %5d  %5d  %5d  %5d \n",
2415                                card->hbpool.count, card->hbnr.min,
2416                                card->hbnr.init, card->hbnr.max);
2417         if (!left--)
2418                 return sprintf(page, "Iovec  %5d  %5d  %5d  %5d \n",
2419                                card->iovpool.count, card->iovnr.min,
2420                                card->iovnr.init, card->iovnr.max);
2421         if (!left--) {
2422                 int retval;
2423                 retval =
2424                     sprintf(page, "Interrupt counter: %u \n", card->intcnt);
2425                 card->intcnt = 0;
2426                 return retval;
2427         }
2428 #if 0
2429         /* Dump 25.6 Mbps PHY registers */
2430         /* Now there's a 25.6 Mbps PHY driver this code isn't needed. I left it
2431         /* Now that there's a 25.6 Mbps PHY driver, this code isn't needed. I left it
2432         if (card->max_pcr == ATM_25_PCR && !left--) {
2433                 u32 phy_regs[4];
2434                 u32 i;
2435
2436                 for (i = 0; i < 4; i++) {
2437                         while (CMD_BUSY(card)) ;
2438                         writel(NS_CMD_READ_UTILITY | 0x00000200 | i,
2439                                card->membase + CMD);
2440                         while (CMD_BUSY(card)) ;
2441                         phy_regs[i] = readl(card->membase + DR0) & 0x000000FF;
2442                 }
2443
2444                 return sprintf(page, "PHY regs: 0x%02X 0x%02X 0x%02X 0x%02X \n",
2445                                phy_regs[0], phy_regs[1], phy_regs[2],
2446                                phy_regs[3]);
2447         }
2448 #endif /* 0 - Dump 25.6 Mbps PHY registers */
2449 #if 0
2450         /* Dump TST */
2451         if (left-- < NS_TST_NUM_ENTRIES) {
2452                 if (card->tste2vc[left + 1] == NULL)
2453                         return sprintf(page, "%5d - VBR/UBR \n", left + 1);
2454                 else
2455                         return sprintf(page, "%5d - %d %d \n", left + 1,
2456                                        card->tste2vc[left + 1]->tx_vcc->vpi,
2457                                        card->tste2vc[left + 1]->tx_vcc->vci);
2458         }
2459 #endif /* 0 */
2460         return 0;
2461 }
2462
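/*
 * ns_ioctl() - driver private ioctls: report pool levels (NS_GETPSTAT),
 * set the pool watermarks (NS_SETBUFLEV) and adjust the pools towards
 * their init levels (NS_ADJBUFLEV).
 */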
2463 static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg)
2464 {
2465         ns_dev *card;
2466         pool_levels pl;
2467         long btype;
2468         unsigned long flags;
2469
2470         card = dev->dev_data;
2471         switch (cmd) {
2472         case NS_GETPSTAT:
2473                 if (get_user
2474                     (pl.buftype, &((pool_levels __user *) arg)->buftype))
2475                         return -EFAULT;
2476                 switch (pl.buftype) {
2477                 case NS_BUFTYPE_SMALL:
2478                         pl.count =
2479                             ns_stat_sfbqc_get(readl(card->membase + STAT));
2480                         pl.level.min = card->sbnr.min;
2481                         pl.level.init = card->sbnr.init;
2482                         pl.level.max = card->sbnr.max;
2483                         break;
2484
2485                 case NS_BUFTYPE_LARGE:
2486                         pl.count =
2487                             ns_stat_lfbqc_get(readl(card->membase + STAT));
2488                         pl.level.min = card->lbnr.min;
2489                         pl.level.init = card->lbnr.init;
2490                         pl.level.max = card->lbnr.max;
2491                         break;
2492
2493                 case NS_BUFTYPE_HUGE:
2494                         pl.count = card->hbpool.count;
2495                         pl.level.min = card->hbnr.min;
2496                         pl.level.init = card->hbnr.init;
2497                         pl.level.max = card->hbnr.max;
2498                         break;
2499
2500                 case NS_BUFTYPE_IOVEC:
2501                         pl.count = card->iovpool.count;
2502                         pl.level.min = card->iovnr.min;
2503                         pl.level.init = card->iovnr.init;
2504                         pl.level.max = card->iovnr.max;
2505                         break;
2506
2507                 default:
2508                         return -ENOIOCTLCMD;
2509
2510                 }
2511                 if (!copy_to_user((pool_levels __user *) arg, &pl, sizeof(pl)))
2512                         return (sizeof(pl));
2513                 else
2514                         return -EFAULT;
2515
2516         case NS_SETBUFLEV:
2517                 if (!capable(CAP_NET_ADMIN))
2518                         return -EPERM;
2519                 if (copy_from_user(&pl, (pool_levels __user *) arg, sizeof(pl)))
2520                         return -EFAULT;
2521                 if (pl.level.min >= pl.level.init
2522                     || pl.level.init >= pl.level.max)
2523                         return -EINVAL;
2524                 if (pl.level.min == 0)
2525                         return -EINVAL;
2526                 switch (pl.buftype) {
2527                 case NS_BUFTYPE_SMALL:
2528                         if (pl.level.max > TOP_SB)
2529                                 return -EINVAL;
2530                         card->sbnr.min = pl.level.min;
2531                         card->sbnr.init = pl.level.init;
2532                         card->sbnr.max = pl.level.max;
2533                         break;
2534
2535                 case NS_BUFTYPE_LARGE:
2536                         if (pl.level.max > TOP_LB)
2537                                 return -EINVAL;
2538                         card->lbnr.min = pl.level.min;
2539                         card->lbnr.init = pl.level.init;
2540                         card->lbnr.max = pl.level.max;
2541                         break;
2542
2543                 case NS_BUFTYPE_HUGE:
2544                         if (pl.level.max > TOP_HB)
2545                                 return -EINVAL;
2546                         card->hbnr.min = pl.level.min;
2547                         card->hbnr.init = pl.level.init;
2548                         card->hbnr.max = pl.level.max;
2549                         break;
2550
2551                 case NS_BUFTYPE_IOVEC:
2552                         if (pl.level.max > TOP_IOVB)
2553                                 return -EINVAL;
2554                         card->iovnr.min = pl.level.min;
2555                         card->iovnr.init = pl.level.init;
2556                         card->iovnr.max = pl.level.max;
2557                         break;
2558
2559                 default:
2560                         return -EINVAL;
2561
2562                 }
2563                 return 0;
2564
2565         case NS_ADJBUFLEV:
2566                 if (!capable(CAP_NET_ADMIN))
2567                         return -EPERM;
2568                 btype = (long)arg;      /* a long is the same size as a pointer or bigger */
2569                 switch (btype) {
2570                 case NS_BUFTYPE_SMALL:
2571                         while (card->sbfqc < card->sbnr.init) {
2572                                 struct sk_buff *sb;
2573
2574                                 sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
2575                                 if (sb == NULL)
2576                                         return -ENOMEM;
2577                                 NS_PRV_BUFTYPE(sb) = BUF_SM;
2578                                 skb_queue_tail(&card->sbpool.queue, sb);
2579                                 skb_reserve(sb, NS_AAL0_HEADER);
2580                                 push_rxbufs(card, sb);
2581                         }
2582                         break;
2583
2584                 case NS_BUFTYPE_LARGE:
2585                         while (card->lbfqc < card->lbnr.init) {
2586                                 struct sk_buff *lb;
2587
2588                                 lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
2589                                 if (lb == NULL)
2590                                         return -ENOMEM;
2591                                 NS_PRV_BUFTYPE(lb) = BUF_LG;
2592                                 skb_queue_tail(&card->lbpool.queue, lb);
2593                                 skb_reserve(lb, NS_SMBUFSIZE);
2594                                 push_rxbufs(card, lb);
2595                         }
2596                         break;
2597
2598                 case NS_BUFTYPE_HUGE:
2599                         while (card->hbpool.count > card->hbnr.init) {
2600                                 struct sk_buff *hb;
2601
2602                                 spin_lock_irqsave(&card->int_lock, flags);
2603                                 hb = skb_dequeue(&card->hbpool.queue);
2604                                 card->hbpool.count--;
2605                                 spin_unlock_irqrestore(&card->int_lock, flags);
2606                                 if (hb == NULL)
2607                                         printk("nicstar%d: huge buffer count inconsistent.\n",
2608                                                card->index);
2610                                 else
2611                                         dev_kfree_skb_any(hb);
2612
2613                         }
2614                         while (card->hbpool.count < card->hbnr.init) {
2615                                 struct sk_buff *hb;
2616
2617                                 hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
2618                                 if (hb == NULL)
2619                                         return -ENOMEM;
2620                                 NS_PRV_BUFTYPE(hb) = BUF_NONE;
2621                                 spin_lock_irqsave(&card->int_lock, flags);
2622                                 skb_queue_tail(&card->hbpool.queue, hb);
2623                                 card->hbpool.count++;
2624                                 spin_unlock_irqrestore(&card->int_lock, flags);
2625                         }
2626                         break;
2627
2628                 case NS_BUFTYPE_IOVEC:
2629                         while (card->iovpool.count > card->iovnr.init) {
2630                                 struct sk_buff *iovb;
2631
2632                                 spin_lock_irqsave(&card->int_lock, flags);
2633                                 iovb = skb_dequeue(&card->iovpool.queue);
2634                                 card->iovpool.count--;
2635                                 spin_unlock_irqrestore(&card->int_lock, flags);
2636                                 if (iovb == NULL)
2637                                         printk("nicstar%d: iovec buffer count inconsistent.\n",
2638                                                card->index);
2640                                 else
2641                                         dev_kfree_skb_any(iovb);
2642
2643                         }
2644                         while (card->iovpool.count < card->iovnr.init) {
2645                                 struct sk_buff *iovb;
2646
2647                                 iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL);
2648                                 if (iovb == NULL)
2649                                         return -ENOMEM;
2650                                 NS_PRV_BUFTYPE(iovb) = BUF_NONE;
2651                                 spin_lock_irqsave(&card->int_lock, flags);
2652                                 skb_queue_tail(&card->iovpool.queue, iovb);
2653                                 card->iovpool.count++;
2654                                 spin_unlock_irqrestore(&card->int_lock, flags);
2655                         }
2656                         break;
2657
2658                 default:
2659                         return -EINVAL;
2660
2661                 }
2662                 return 0;
2663
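        /*
         * Anything else is passed on to the PHY driver's ioctl hook, if one
         * is registered; otherwise explain why and return -ENOIOCTLCMD.
         */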
2664         default:
2665                 if (dev->phy && dev->phy->ioctl) {
2666                         return dev->phy->ioctl(dev, cmd, arg);
2667                 } else {
2668                         printk("nicstar%d: %s == NULL\n", card->index,
2669                                dev->phy ? "dev->phy->ioctl" : "dev->phy");
2670                         return -ENOIOCTLCMD;
2671                 }
2672         }
2673 }
2674
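/*
 * EXTRA_DEBUG helper: dumps the private buffer type of an skb (the card
 * argument is unused; the name suggests this once identified which buffer
 * queue the skb was sitting on).
 */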
2675 #ifdef EXTRA_DEBUG
2676 static void which_list(ns_dev * card, struct sk_buff *skb)
2677 {
2678         printk("skb buf_type: 0x%08x\n", NS_PRV_BUFTYPE(skb));
2679 }
2680 #endif /* EXTRA_DEBUG */
2681
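/*
 * Periodic safety net driven by ns_timer: for every card, service the TSQ
 * and RSQ and write back any TSIF/EOPDU bits found set in STAT, so the
 * driver keeps making progress even if an interrupt was missed.  The poll
 * uses spin_trylock_irqsave() so it never contends with the interrupt
 * handler; a busy card is simply skipped until the next NS_POLL_PERIOD.
 */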
2682 static void ns_poll(struct timer_list *unused)
2683 {
2684         int i;
2685         ns_dev *card;
2686         unsigned long flags;
2687         u32 stat_r, stat_w;
2688
2689         PRINTK("nicstar: Entering ns_poll().\n");
2690         for (i = 0; i < num_cards; i++) {
2691                 card = cards[i];
2692                 if (!spin_trylock_irqsave(&card->int_lock, flags)) {
2693                         /* Lock is busy; rather than spin, skip this card until the next poll */
2694                         continue;
2695                 }
2696
2697                 stat_w = 0;
2698                 stat_r = readl(card->membase + STAT);
2699                 if (stat_r & NS_STAT_TSIF)
2700                         stat_w |= NS_STAT_TSIF;
2701                 if (stat_r & NS_STAT_EOPDU)
2702                         stat_w |= NS_STAT_EOPDU;
2703
2704                 process_tsq(card);
2705                 process_rsq(card);
2706
2707                 writel(stat_w, card->membase + STAT);
2708                 spin_unlock_irqrestore(&card->int_lock, flags);
2709         }
2710         mod_timer(&ns_timer, jiffies + NS_POLL_PERIOD);
2711         PRINTK("nicstar: Leaving ns_poll().\n");
2712 }
2713
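/*
 * PHY register access, exposed to the PHY drivers (SUNI / IDT77105) through
 * the atmdev_ops phy_put/phy_get hooks.  Both helpers go through the
 * NICStAR utility command interface: the 8-bit register address sits in the
 * low byte of the command word (the 0x00000200 bit presumably selects the
 * utility bus the PHY hangs off), data travels via DR0, and res_lock plus
 * the CMD_BUSY busy-waits serialize access to the command registers.
 */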
2714 static void ns_phy_put(struct atm_dev *dev, unsigned char value,
2715                        unsigned long addr)
2716 {
2717         ns_dev *card;
2718         unsigned long flags;
2719
2720         card = dev->dev_data;
2721         spin_lock_irqsave(&card->res_lock, flags);
2722         while (CMD_BUSY(card)) ;
2723         writel((u32) value, card->membase + DR0);
2724         writel(NS_CMD_WRITE_UTILITY | 0x00000200 | (addr & 0x000000FF),
2725                card->membase + CMD);
2726         spin_unlock_irqrestore(&card->res_lock, flags);
2727 }
2728
2729 static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr)
2730 {
2731         ns_dev *card;
2732         unsigned long flags;
2733         u32 data;
2734
2735         card = dev->dev_data;
2736         spin_lock_irqsave(&card->res_lock, flags);
2737         while (CMD_BUSY(card)) ;
2738         writel(NS_CMD_READ_UTILITY | 0x00000200 | (addr & 0x000000FF),
2739                card->membase + CMD);
2740         while (CMD_BUSY(card)) ;
2741         data = readl(card->membase + DR0) & 0x000000FF;
2742         spin_unlock_irqrestore(&card->res_lock, flags);
2743         return (unsigned char)data;
2744 }
2745
2746 module_init(nicstar_init);
2747 module_exit(nicstar_cleanup);