drivers/net/qla3xxx.c: Update logging message style
/*
 * QLogic QLA3xxx NIC HBA Driver
 * Copyright (c)  2003-2006 QLogic Corporation
 *
 * See LICENSE.qla3xxx for copyright and licensing details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>

#include "qla3xxx.h"

#define DRV_NAME        "qla3xxx"
#define DRV_STRING      "QLogic ISP3XXX Network Driver"
#define DRV_VERSION     "v2.03.00-k5"

static const char ql3xxx_driver_name[] = DRV_NAME;
static const char ql3xxx_driver_version[] = DRV_VERSION;

#define TIMED_OUT_MSG                                                   \
"Timed out waiting for management port to get free before issuing command\n"

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg
    = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;          /* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static int msi;
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");

static DEFINE_PCI_DEVICE_TABLE(ql3xxx_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
        {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
        /* required last entry */
        {0,}
};

MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);

/*
 *  These are the known PHYs which are used
 */
typedef enum {
   PHY_TYPE_UNKNOWN   = 0,
   PHY_VITESSE_VSC8211,
   PHY_AGERE_ET1011C,
   MAX_PHY_DEV_TYPES
} PHY_DEVICE_et;

typedef struct {
        PHY_DEVICE_et phyDevice;
        u32             phyIdOUI;
        u16             phyIdModel;
        char            *name;
} PHY_DEVICE_INFO_t;

static const PHY_DEVICE_INFO_t PHY_DEVICES[] = {
        {PHY_TYPE_UNKNOWN,    0x000000, 0x0, "PHY_TYPE_UNKNOWN"},
        {PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"},
        {PHY_AGERE_ET1011C,   0x00a0bc, 0x1, "PHY_AGERE_ET1011C"},
};

/*
 * Caller must take hw_lock.
 */
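/*
 * The semaphore register uses a write-mask scheme: the upper 16 bits of
 * sem_mask select which semaphore bits the write may touch, so writing
 * (sem_mask | sem_bits) requests ownership and reading the register back
 * shows whether we actually got it.
 */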
static int ql_sem_spinlock(struct ql3_adapter *qdev,
                            u32 sem_mask, u32 sem_bits)
{
        struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
        u32 value;
        unsigned int seconds = 3;

        do {
                writel((sem_mask | sem_bits),
                       &port_regs->CommonRegs.semaphoreReg);
                value = readl(&port_regs->CommonRegs.semaphoreReg);
                if ((value & (sem_mask >> 16)) == sem_bits)
                        return 0;
                ssleep(1);
        } while (--seconds);
        return -1;
}

static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
{
        struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
        writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
        readl(&port_regs->CommonRegs.semaphoreReg);
}

static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
{
        struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
        u32 value;

        writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
        value = readl(&port_regs->CommonRegs.semaphoreReg);
        return ((value & (sem_mask >> 16)) == sem_bits);
}

/*
 * Caller holds hw_lock.
 */
static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
{
        int i = 0;

        do {
                if (ql_sem_lock(qdev,
                                QL_DRVR_SEM_MASK,
                                (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
                                 * 2) << 1)) {
                        netdev_printk(KERN_DEBUG, qdev->ndev,
                                      "driver lock acquired\n");
                        return 1;
                }
                ssleep(1);
        } while (++i < 10);

        netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
        return 0;
}

static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
{
        struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

        writel(((ISP_CONTROL_NP_MASK << 16) | page),
                        &port_regs->CommonRegs.ispControlStatus);
        readl(&port_regs->CommonRegs.ispControlStatus);
        qdev->current_page = page;
}

static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
{
        u32 value;
        unsigned long hw_flags;

        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
        value = readl(reg);
        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

        return value;
}

static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
{
        return readl(reg);
}

static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
{
        u32 value;
        unsigned long hw_flags;

        spin_lock_irqsave(&qdev->hw_lock, hw_flags);

        if (qdev->current_page != 0)
                ql_set_register_page(qdev, 0);
        value = readl(reg);

        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
        return value;
}

static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
{
        if (qdev->current_page != 0)
                ql_set_register_page(qdev, 0);
        return readl(reg);
}

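/*
 * Each write helper below reads the register back immediately after the
 * writel(); the readl() flushes the posted PCI write so the value is
 * guaranteed to have reached the chip before the caller continues.
 */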
static void ql_write_common_reg_l(struct ql3_adapter *qdev,
                                u32 __iomem *reg, u32 value)
{
        unsigned long hw_flags;

        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
        writel(value, reg);
        readl(reg);
        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
}

static void ql_write_common_reg(struct ql3_adapter *qdev,
                                u32 __iomem *reg, u32 value)
{
        writel(value, reg);
        readl(reg);
}

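/*
 * NVRAM accesses are bit-banged through the serial port interface; the
 * extra udelay(1) after each write gives the slow serial EEPROM logic
 * time to latch the new pin state.
 */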
static void ql_write_nvram_reg(struct ql3_adapter *qdev,
                                u32 __iomem *reg, u32 value)
{
        writel(value, reg);
        readl(reg);
        udelay(1);
}

static void ql_write_page0_reg(struct ql3_adapter *qdev,
                               u32 __iomem *reg, u32 value)
{
        if (qdev->current_page != 0)
                ql_set_register_page(qdev, 0);
        writel(value, reg);
        readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page1_reg(struct ql3_adapter *qdev,
                               u32 __iomem *reg, u32 value)
{
        if (qdev->current_page != 1)
                ql_set_register_page(qdev, 1);
        writel(value, reg);
        readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page2_reg(struct ql3_adapter *qdev,
                               u32 __iomem *reg, u32 value)
{
        if (qdev->current_page != 2)
                ql_set_register_page(qdev, 2);
        writel(value, reg);
        readl(reg);
}

static void ql_disable_interrupts(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

        ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
                            (ISP_IMR_ENABLE_INT << 16));
}

static void ql_enable_interrupts(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

        ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
                            ((0xff << 16) | ISP_IMR_ENABLE_INT));
}

static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
                                            struct ql_rcv_buf_cb *lrg_buf_cb)
{
        dma_addr_t map;
        int err;

        lrg_buf_cb->next = NULL;

        if (qdev->lrg_buf_free_tail == NULL) {  /* The list is empty  */
                qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
        } else {
                qdev->lrg_buf_free_tail->next = lrg_buf_cb;
                qdev->lrg_buf_free_tail = lrg_buf_cb;
        }

        if (!lrg_buf_cb->skb) {
                lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
                                                   qdev->lrg_buffer_len);
                if (unlikely(!lrg_buf_cb->skb)) {
                        netdev_err(qdev->ndev, "failed netdev_alloc_skb()\n");
                        qdev->lrg_buf_skb_check++;
                } else {
                        /*
                         * We save some space to copy the ethhdr from the
                         * first buffer
                         */
                        skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
                        map = pci_map_single(qdev->pdev,
                                             lrg_buf_cb->skb->data,
                                             qdev->lrg_buffer_len -
                                             QL_HEADER_SPACE,
                                             PCI_DMA_FROMDEVICE);
                        err = pci_dma_mapping_error(qdev->pdev, map);
                        if (err) {
                                netdev_err(qdev->ndev,
                                           "PCI mapping failed with error: %d\n",
                                           err);
                                dev_kfree_skb(lrg_buf_cb->skb);
                                lrg_buf_cb->skb = NULL;

                                qdev->lrg_buf_skb_check++;
                                return;
                        }

                        lrg_buf_cb->buf_phy_addr_low =
                            cpu_to_le32(LS_64BITS(map));
                        lrg_buf_cb->buf_phy_addr_high =
                            cpu_to_le32(MS_64BITS(map));
                        dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
                        dma_unmap_len_set(lrg_buf_cb, maplen,
                                          qdev->lrg_buffer_len -
                                          QL_HEADER_SPACE);
                }
        }

        qdev->lrg_buf_free_count++;
}

static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
                                                           *qdev)
{
        struct ql_rcv_buf_cb *lrg_buf_cb;

        if ((lrg_buf_cb = qdev->lrg_buf_free_head) != NULL) {
                if ((qdev->lrg_buf_free_head = lrg_buf_cb->next) == NULL)
                        qdev->lrg_buf_free_tail = NULL;
                qdev->lrg_buf_free_count--;
        }

        return lrg_buf_cb;
}

static u32 addrBits = EEPROM_NO_ADDR_BITS;
static u32 dataBits = EEPROM_NO_DATA_BITS;

static void fm93c56a_deselect(struct ql3_adapter *qdev);
static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
                            unsigned short *value);

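/*
 * The FM93C56A EEPROM is reached by bit-banging chip select, clock and
 * data through serialPortInterfaceReg: select the part, clock out a
 * command and address one bit at a time, then clock the result back in.
 */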
/*
 * Caller holds hw_lock.
 */
static void fm93c56a_select(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;

        qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
        ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
                            ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
        ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
                            ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
{
        int i;
        u32 mask;
        u32 dataBit;
        u32 previousBit;
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;

        /* Clock in a zero, then do the start bit */
        ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
                            ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
                            AUBURN_EEPROM_DO_1);
        ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
                            ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
                            AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE);
        ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
                            ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
                            AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL);

        mask = 1 << (FM93C56A_CMD_BITS - 1);
        /* Force the previous data bit to be different */
        previousBit = 0xffff;
        for (i = 0; i < FM93C56A_CMD_BITS; i++) {
                dataBit =
                    (cmd & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0;
                if (previousBit != dataBit) {
                        /*
                         * If the bit changed, then change the DO state to
                         * match
                         */
                        ql_write_nvram_reg(qdev,
                                            &port_regs->CommonRegs.
                                            serialPortInterfaceReg,
                                            ISP_NVRAM_MASK |
                                            qdev->eeprom_cmd_data | dataBit);
                        previousBit = dataBit;
                }
                ql_write_nvram_reg(qdev,
                                    &port_regs->CommonRegs.
                                    serialPortInterfaceReg,
                                    ISP_NVRAM_MASK |
                                    qdev->eeprom_cmd_data | dataBit |
                                    AUBURN_EEPROM_CLK_RISE);
                ql_write_nvram_reg(qdev,
                                    &port_regs->CommonRegs.
                                    serialPortInterfaceReg,
                                    ISP_NVRAM_MASK |
                                    qdev->eeprom_cmd_data | dataBit |
                                    AUBURN_EEPROM_CLK_FALL);
                cmd = cmd << 1;
        }

        mask = 1 << (addrBits - 1);
        /* Force the previous data bit to be different */
        previousBit = 0xffff;
        for (i = 0; i < addrBits; i++) {
                dataBit =
                    (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 :
                    AUBURN_EEPROM_DO_0;
                if (previousBit != dataBit) {
                        /*
                         * If the bit changed, then change the DO state to
                         * match
                         */
                        ql_write_nvram_reg(qdev,
                                            &port_regs->CommonRegs.
                                            serialPortInterfaceReg,
                                            ISP_NVRAM_MASK |
                                            qdev->eeprom_cmd_data | dataBit);
                        previousBit = dataBit;
                }
                ql_write_nvram_reg(qdev,
                                    &port_regs->CommonRegs.
                                    serialPortInterfaceReg,
                                    ISP_NVRAM_MASK |
                                    qdev->eeprom_cmd_data | dataBit |
                                    AUBURN_EEPROM_CLK_RISE);
                ql_write_nvram_reg(qdev,
                                    &port_regs->CommonRegs.
                                    serialPortInterfaceReg,
                                    ISP_NVRAM_MASK |
                                    qdev->eeprom_cmd_data | dataBit |
                                    AUBURN_EEPROM_CLK_FALL);
                eepromAddr = eepromAddr << 1;
        }
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_deselect(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;

        qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
        ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
                            ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
{
        int i;
        u32 data = 0;
        u32 dataBit;
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;

        /* Read the data bits */
        /* The first bit is a dummy.  Clock right over it. */
        for (i = 0; i < dataBits; i++) {
                ql_write_nvram_reg(qdev,
                                    &port_regs->CommonRegs.
                                    serialPortInterfaceReg,
                                    ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
                                    AUBURN_EEPROM_CLK_RISE);
                ql_write_nvram_reg(qdev,
                                    &port_regs->CommonRegs.
                                    serialPortInterfaceReg,
                                    ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
                                    AUBURN_EEPROM_CLK_FALL);
                dataBit =
                    (ql_read_common_reg
                     (qdev,
                      &port_regs->CommonRegs.
                      serialPortInterfaceReg) & AUBURN_EEPROM_DI_1) ? 1 : 0;
                data = (data << 1) | dataBit;
        }
        *value = (u16) data;
}

/*
 * Caller holds hw_lock.
 */
static void eeprom_readword(struct ql3_adapter *qdev,
                            u32 eepromAddr, unsigned short *value)
{
        fm93c56a_select(qdev);
        fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
        fm93c56a_datain(qdev, value);
        fm93c56a_deselect(qdev);
}

static void ql_set_mac_addr(struct net_device *ndev, u16 *addr)
{
        __le16 *p = (__le16 *)ndev->dev_addr;
        p[0] = cpu_to_le16(addr[0]);
        p[1] = cpu_to_le16(addr[1]);
        p[2] = cpu_to_le16(addr[2]);
}

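/*
 * Pull the entire NVRAM image into qdev->nvram_data and validate it:
 * the 16-bit words of a good image sum to zero, so any nonzero total
 * means the image is corrupt.
 */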
static int ql_get_nvram_params(struct ql3_adapter *qdev)
{
        u16 *pEEPROMData;
        u16 checksum = 0;
        u32 index;
        unsigned long hw_flags;

        spin_lock_irqsave(&qdev->hw_lock, hw_flags);

        pEEPROMData = (u16 *)&qdev->nvram_data;
        qdev->eeprom_cmd_data = 0;
        if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
                        (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
                         2) << 10)) {
                pr_err("%s: Failed ql_sem_spinlock()\n", __func__);
                spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
                return -1;
        }

        for (index = 0; index < EEPROM_SIZE; index++) {
                eeprom_readword(qdev, index, pEEPROMData);
                checksum += *pEEPROMData;
                pEEPROMData++;
        }
        ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);

        if (checksum != 0) {
                netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n",
                           checksum);
                spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
                return -1;
        }

        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
        return checksum;
}

static const u32 PHYAddr[2] = {
        PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
};

static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 temp;
        int count = 1000;

        while (count) {
                temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
                if (!(temp & MAC_MII_STATUS_BSY))
                        return 0;
                udelay(10);
                count--;
        }
        return -1;
}

static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 scanControl;

        if (qdev->numPorts > 1) {
                /* Auto scan will cycle through multiple ports */
                scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
        } else {
                scanControl = MAC_MII_CONTROL_SC;
        }

        /*
         * Scan register 1 of PHY/PETBI,
         * Set up to scan both devices
         * The autoscan starts from the first register, completes
         * the last one before rolling over to the first
         */
        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
                           PHYAddr[0] | MII_SCAN_REGISTER);

        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
                           (scanControl) |
                           ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
}

static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
{
        u8 ret;
        struct ql3xxx_port_registers __iomem *port_regs =
                                        qdev->mem_map_registers;

        /* See if scan mode is enabled before we turn it off */
        if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
            (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
                /* Scan is enabled */
                ret = 1;
        } else {
                /* Scan is disabled */
                ret = 0;
        }

        /*
         * When disabling scan mode you must first change the MII register
         * address
         */
        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
                           PHYAddr[0] | MII_SCAN_REGISTER);

        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
                           ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
                             MAC_MII_CONTROL_RC) << 16));

        return ret;
}

static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
                               u16 regAddr, u16 value, u32 phyAddr)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u8 scanWasEnabled;

        scanWasEnabled = ql_mii_disable_scan_mode(qdev);

        if (ql_wait_for_mii_ready(qdev)) {
                netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
                return -1;
        }

        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
                           phyAddr | regAddr);

        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

        /* Wait for write to complete */
        if (ql_wait_for_mii_ready(qdev)) {
                netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
                return -1;
        }

        if (scanWasEnabled)
                ql_mii_enable_scan_mode(qdev);

        return 0;
}

static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
                              u16 *value, u32 phyAddr)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u8 scanWasEnabled;
        u32 temp;

        scanWasEnabled = ql_mii_disable_scan_mode(qdev);

        if (ql_wait_for_mii_ready(qdev)) {
                netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
                return -1;
        }

        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
                           phyAddr | regAddr);

        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
                           (MAC_MII_CONTROL_RC << 16));

        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
                           (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

        /* Wait for the read to complete */
        if (ql_wait_for_mii_ready(qdev)) {
                netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
                return -1;
        }

        temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
        *value = (u16) temp;

        if (scanWasEnabled)
                ql_mii_enable_scan_mode(qdev);

        return 0;
}

static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;

        ql_mii_disable_scan_mode(qdev);

        if (ql_wait_for_mii_ready(qdev)) {
                netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
                return -1;
        }

        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
                           qdev->PHYAddr | regAddr);

        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

        /* Wait for write to complete. */
        if (ql_wait_for_mii_ready(qdev)) {
                netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
                return -1;
        }

        ql_mii_enable_scan_mode(qdev);

        return 0;
}

static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
{
        u32 temp;
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;

        ql_mii_disable_scan_mode(qdev);

        if (ql_wait_for_mii_ready(qdev)) {
                netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
                return -1;
        }

        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
                           qdev->PHYAddr | regAddr);

        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
                           (MAC_MII_CONTROL_RC << 16));

        ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
                           (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

        /* Wait for the read to complete */
        if (ql_wait_for_mii_ready(qdev)) {
                netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
                return -1;
        }

        temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
        *value = (u16) temp;

        ql_mii_enable_scan_mode(qdev);

        return 0;
}

static void ql_petbi_reset(struct ql3_adapter *qdev)
{
        ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
}

static void ql_petbi_start_neg(struct ql3_adapter *qdev)
{
        u16 reg;

        /* Enable Auto-negotiation sense */
        ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
        reg |= PETBI_TBI_AUTO_SENSE;
        ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);

        ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
                         PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);

        ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
                         PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
                         PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);
}

static void ql_petbi_reset_ex(struct ql3_adapter *qdev)
{
        ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
                            PHYAddr[qdev->mac_index]);
}

static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
{
        u16 reg;

        /* Enable Auto-negotiation sense */
        ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
                           PHYAddr[qdev->mac_index]);
        reg |= PETBI_TBI_AUTO_SENSE;
        ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
                            PHYAddr[qdev->mac_index]);

        ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
                            PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX,
                            PHYAddr[qdev->mac_index]);

        ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
                            PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
                            PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
                            PHYAddr[qdev->mac_index]);
}

static void ql_petbi_init(struct ql3_adapter *qdev)
{
        ql_petbi_reset(qdev);
        ql_petbi_start_neg(qdev);
}

static void ql_petbi_init_ex(struct ql3_adapter *qdev)
{
        ql_petbi_reset_ex(qdev);
        ql_petbi_start_neg_ex(qdev);
}

static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
{
        u16 reg;

        if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
                return 0;

        return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
}

static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
{
        netdev_info(qdev->ndev, "enabling Agere specific PHY\n");
        /* power down device bit 11 = 1 */
        ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
        /* enable diagnostic mode bit 2 = 1 */
        ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr);
        /* 1000MB amplitude adjust (see Agere errata) */
        ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr);
        /* 1000MB amplitude adjust (see Agere errata) */
        ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr);
        /* 100MB amplitude adjust (see Agere errata) */
        ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr);
        /* 100MB amplitude adjust (see Agere errata) */
        ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr);
        /* 10MB amplitude adjust (see Agere errata) */
        ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr);
        /* 10MB amplitude adjust (see Agere errata) */
        ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr);
        /* point to hidden reg 0x2806 */
        ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
        /* Write new PHYAD w/bit 5 set */
        ql_mii_write_reg_ex(qdev, 0x11,
                            0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
        /*
         * Disable diagnostic mode bit 2 = 0
         * Power up device bit 11 = 0
         * Link up (on) and activity (blink)
         */
        ql_mii_write_reg(qdev, 0x12, 0x840a);
        ql_mii_write_reg(qdev, 0x00, 0x1140);
        ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
}

static PHY_DEVICE_et getPhyType(struct ql3_adapter *qdev,
                                u16 phyIdReg0, u16 phyIdReg1)
{
        PHY_DEVICE_et result = PHY_TYPE_UNKNOWN;
        u32   oui;
        u16   model;
        int i;

        if (phyIdReg0 == 0xffff)
                return result;

        if (phyIdReg1 == 0xffff)
                return result;

        /* oui is split between two registers */
        oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);

        model = (phyIdReg1 & PHY_MODEL_MASK) >> 4;

        /* Scan table for this PHY */
        for (i = 0; i < MAX_PHY_DEV_TYPES; i++) {
                if ((oui == PHY_DEVICES[i].phyIdOUI) &&
                    (model == PHY_DEVICES[i].phyIdModel)) {
                        result = PHY_DEVICES[i].phyDevice;

                        netdev_info(qdev->ndev, "Phy: %s\n",
                                    PHY_DEVICES[i].name);

                        break;
                }
        }

        return result;
}

static int ql_phy_get_speed(struct ql3_adapter *qdev)
{
        u16 reg;

        switch (qdev->phyType) {
        case PHY_AGERE_ET1011C:
                if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
                        return 0;

                reg = (reg >> 8) & 3;
                break;
        default:
                if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
                        return 0;

                reg = (((reg & 0x18) >> 3) & 3);
        }

        switch (reg) {
        case 2:
                return SPEED_1000;
        case 1:
                return SPEED_100;
        case 0:
                return SPEED_10;
        default:
                return -1;
        }
}

static int ql_is_full_dup(struct ql3_adapter *qdev)
{
        u16 reg;

        switch (qdev->phyType) {
        case PHY_AGERE_ET1011C:
                if (ql_mii_read_reg(qdev, 0x1A, &reg))
                        return 0;

                return ((reg & 0x0080) && (reg & 0x1000)) != 0;
        case PHY_VITESSE_VSC8211:
        default:
                if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
                        return 0;
                return (reg & PHY_AUX_DUPLEX_STAT) != 0;
        }
}

static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
{
        u16 reg;

        if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
                return 0;

        return (reg & PHY_NEG_PAUSE) != 0;
}

static int PHY_Setup(struct ql3_adapter *qdev)
{
        u16   reg1;
        u16   reg2;
        bool  agereAddrChangeNeeded = false;
        u32 miiAddr = 0;
        int err;

        /*  Determine the PHY we are using by reading the ID's */
        err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
        if (err != 0) {
                netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n");
                return err;
        }

        err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
        if (err != 0) {
                netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n");
                return err;
        }

        /*  Check if we have an Agere PHY */
        if ((reg1 == 0xffff) || (reg2 == 0xffff)) {

                /* Determine which MII address we should be using,
                   based on the index of the card */
                if (qdev->mac_index == 0)
                        miiAddr = MII_AGERE_ADDR_1;
                else
                        miiAddr = MII_AGERE_ADDR_2;

                err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
                if (err != 0) {
                        netdev_err(qdev->ndev,
                                   "Could not read from reg PHY_ID_0_REG after Agere detected\n");
                        return err;
                }

                err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
                if (err != 0) {
                        netdev_err(qdev->ndev,
                                   "Could not read from reg PHY_ID_1_REG after Agere detected\n");
                        return err;
                }

                /*  We need to remember to initialize the Agere PHY */
                agereAddrChangeNeeded = true;
        }

        /*  Determine the particular PHY we have on board to apply
            PHY specific initializations */
        qdev->phyType = getPhyType(qdev, reg1, reg2);

        if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) {
                /* need this here so address gets changed */
                phyAgereSpecificInit(qdev, miiAddr);
        } else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
                netdev_err(qdev->ndev, "PHY is unknown\n");
                return -EIO;
        }

        return 0;
}

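/*
 * The MAC configuration registers treat the upper 16 bits of a write as
 * a mask: only fields whose mask bit is set are modified. That lets each
 * helper below set or clear a single control bit without a full
 * read-modify-write of the register.
 */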
/*
 * Caller holds hw_lock.
 */
static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 value;

        if (enable)
                value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
        else
                value = (MAC_CONFIG_REG_PE << 16);

        if (qdev->mac_index)
                ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
        else
                ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 value;

        if (enable)
                value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
        else
                value = (MAC_CONFIG_REG_SR << 16);

        if (qdev->mac_index)
                ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
        else
                ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 value;

        if (enable)
                value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
        else
                value = (MAC_CONFIG_REG_GM << 16);

        if (qdev->mac_index)
                ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
        else
                ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 value;

        if (enable)
                value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
        else
                value = (MAC_CONFIG_REG_FD << 16);

        if (qdev->mac_index)
                ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
        else
                ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 value;

        if (enable)
                value =
                    ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
                     ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
        else
                value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);

        if (qdev->mac_index)
                ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
        else
                ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static int ql_is_fiber(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 bitToCheck = 0;
        u32 temp;

        switch (qdev->mac_index) {
        case 0:
                bitToCheck = PORT_STATUS_SM0;
                break;
        case 1:
                bitToCheck = PORT_STATUS_SM1;
                break;
        }

        temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
        return (temp & bitToCheck) != 0;
}

static int ql_is_auto_cfg(struct ql3_adapter *qdev)
{
        u16 reg;

        ql_mii_read_reg(qdev, 0x00, &reg);
        return (reg & 0x1000) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 bitToCheck = 0;
        u32 temp;

        switch (qdev->mac_index) {
        case 0:
                bitToCheck = PORT_STATUS_AC0;
                break;
        case 1:
                bitToCheck = PORT_STATUS_AC1;
                break;
        }

        temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
        if (temp & bitToCheck) {
                netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n");
                return 1;
        }
        netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n");
        return 0;
}

/*
 *  ql_is_neg_pause() returns 1 if pause was negotiated to be on
 */
static int ql_is_neg_pause(struct ql3_adapter *qdev)
{
        if (ql_is_fiber(qdev))
                return ql_is_petbi_neg_pause(qdev);
        else
                return ql_is_phy_neg_pause(qdev);
}

static int ql_auto_neg_error(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 bitToCheck = 0;
        u32 temp;

        switch (qdev->mac_index) {
        case 0:
                bitToCheck = PORT_STATUS_AE0;
                break;
        case 1:
                bitToCheck = PORT_STATUS_AE1;
                break;
        }
        temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
        return (temp & bitToCheck) != 0;
}

static u32 ql_get_link_speed(struct ql3_adapter *qdev)
{
        if (ql_is_fiber(qdev))
                return SPEED_1000;
        else
                return ql_phy_get_speed(qdev);
}

static int ql_is_link_full_dup(struct ql3_adapter *qdev)
{
        if (ql_is_fiber(qdev))
                return 1;
        else
                return ql_is_full_dup(qdev);
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 bitToCheck = 0;
        u32 temp;

        switch (qdev->mac_index) {
        case 0:
                bitToCheck = ISP_CONTROL_LINK_DN_0;
                break;
        case 1:
                bitToCheck = ISP_CONTROL_LINK_DN_1;
                break;
        }

        temp =
            ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
        return (temp & bitToCheck) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;

        switch (qdev->mac_index) {
        case 0:
                ql_write_common_reg(qdev,
                                    &port_regs->CommonRegs.ispControlStatus,
                                    (ISP_CONTROL_LINK_DN_0) |
                                    (ISP_CONTROL_LINK_DN_0 << 16));
                break;

        case 1:
                ql_write_common_reg(qdev,
                                    &port_regs->CommonRegs.ispControlStatus,
                                    (ISP_CONTROL_LINK_DN_1) |
                                    (ISP_CONTROL_LINK_DN_1 << 16));
                break;

        default:
                return 1;
        }

        return 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 bitToCheck = 0;
        u32 temp;

        switch (qdev->mac_index) {
        case 0:
                bitToCheck = PORT_STATUS_F1_ENABLED;
                break;
        case 1:
                bitToCheck = PORT_STATUS_F3_ENABLED;
                break;
        default:
                break;
        }

        temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
        if (temp & bitToCheck) {
                netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
                             "not link master\n");
                return 0;
        }

        netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n");
        return 1;
}

static void ql_phy_reset_ex(struct ql3_adapter *qdev)
{
        ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
                            PHYAddr[qdev->mac_index]);
}

static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
{
        u16 reg;
        u16 portConfiguration;

        if (qdev->phyType == PHY_AGERE_ET1011C) {
                /* turn off external loopback */
                ql_mii_write_reg(qdev, 0x13, 0x0000);
        }

        if (qdev->mac_index == 0)
                portConfiguration =
                        qdev->nvram_data.macCfg_port0.portConfiguration;
        else
                portConfiguration =
                        qdev->nvram_data.macCfg_port1.portConfiguration;

        /*  Some HBAs in the field are set to 0 and they need to
            be reinterpreted with a default value */
        if (portConfiguration == 0)
                portConfiguration = PORT_CONFIG_DEFAULT;

        /* Set the 1000 advertisements */
        ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
                           PHYAddr[qdev->mac_index]);
        reg &= ~PHY_GIG_ALL_PARAMS;

        if (portConfiguration & PORT_CONFIG_1000MB_SPEED) {
                if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
                        reg |= PHY_GIG_ADV_1000F;
                else
                        reg |= PHY_GIG_ADV_1000H;
        }

        ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
                            PHYAddr[qdev->mac_index]);

        /* Set the 10/100 & pause negotiation advertisements */
        ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
                           PHYAddr[qdev->mac_index]);
        reg &= ~PHY_NEG_ALL_PARAMS;

        if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
                reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;

        if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
                if (portConfiguration & PORT_CONFIG_100MB_SPEED)
                        reg |= PHY_NEG_ADV_100F;

                if (portConfiguration & PORT_CONFIG_10MB_SPEED)
                        reg |= PHY_NEG_ADV_10F;
        }

        if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
                if (portConfiguration & PORT_CONFIG_100MB_SPEED)
                        reg |= PHY_NEG_ADV_100H;

                if (portConfiguration & PORT_CONFIG_10MB_SPEED)
                        reg |= PHY_NEG_ADV_10H;
        }

        if (portConfiguration & PORT_CONFIG_1000MB_SPEED)
                reg |= 1;

        ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
                            PHYAddr[qdev->mac_index]);

        ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);

        ql_mii_write_reg_ex(qdev, CONTROL_REG,
                            reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
                            PHYAddr[qdev->mac_index]);
}

static void ql_phy_init_ex(struct ql3_adapter *qdev)
{
        ql_phy_reset_ex(qdev);
        PHY_Setup(qdev);
        ql_phy_start_neg_ex(qdev);
}

/*
 * Caller holds hw_lock.
 */
static u32 ql_get_link_state(struct ql3_adapter *qdev)
{
        struct ql3xxx_port_registers __iomem *port_regs =
                        qdev->mem_map_registers;
        u32 bitToCheck = 0;
        u32 temp, linkState;

        switch (qdev->mac_index) {
        case 0:
                bitToCheck = PORT_STATUS_UP0;
                break;
        case 1:
                bitToCheck = PORT_STATUS_UP1;
                break;
        }

        temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
        if (temp & bitToCheck)
                linkState = LS_UP;
        else
                linkState = LS_DOWN;

        return linkState;
}

static int ql_port_start(struct ql3_adapter *qdev)
{
        if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
                (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
                         2) << 7)) {
                netdev_err(qdev->ndev, "Could not get hw lock for GIO\n");
                return -1;
        }

        if (ql_is_fiber(qdev)) {
                ql_petbi_init(qdev);
        } else {
                /* Copper port */
                ql_phy_init_ex(qdev);
        }

        ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
        return 0;
}

static int ql_finish_auto_neg(struct ql3_adapter *qdev)
{
        if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
                (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
                         2) << 7))
                return -1;

        if (!ql_auto_neg_error(qdev)) {
                if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
                        /* configure the MAC */
                        netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
                                     "Configuring link\n");
                        ql_mac_cfg_soft_reset(qdev, 1);
                        ql_mac_cfg_gig(qdev,
                                       (ql_get_link_speed(qdev) ==
                                        SPEED_1000));
                        ql_mac_cfg_full_dup(qdev, ql_is_link_full_dup(qdev));
                        ql_mac_cfg_pause(qdev, ql_is_neg_pause(qdev));
                        ql_mac_cfg_soft_reset(qdev, 0);

                        /* enable the MAC */
                        netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
                                     "Enabling mac\n");
                        ql_mac_enable(qdev, 1);
                }

                qdev->port_link_state = LS_UP;
                netif_start_queue(qdev->ndev);
                netif_carrier_on(qdev->ndev);
                netif_info(qdev, link, qdev->ndev,
                           "Link is up at %d Mbps, %s duplex\n",
                           ql_get_link_speed(qdev),
                           ql_is_link_full_dup(qdev) ? "full" : "half");

        } else {        /* Remote error detected */

                if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
                        netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
                                     "Remote error detected. Calling ql_port_start()\n");
                        /*
                         * ql_port_start() is shared code and needs
                         * to lock the PHY on its own.
                         */
                        ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
                        if (ql_port_start(qdev)) /* Restart port */
                                return -1;
                        return 0;
                }
        }
        ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
        return 0;
}

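/*
 * Periodic link-state worker (rearmed via the adapter timer). Under
 * hw_lock it compares the port's current state with the cached
 * port_link_state, (re)starting negotiation or bringing the MAC up as
 * needed, then restarts the timer.
 */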
1549 static void ql_link_state_machine_work(struct work_struct *work)
1550 {
1551         struct ql3_adapter *qdev =
1552                 container_of(work, struct ql3_adapter, link_state_work.work);
1553
1554         u32 curr_link_state;
1555         unsigned long hw_flags;
1556
1557         spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1558
1559         curr_link_state = ql_get_link_state(qdev);
1560
1561         if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
1562                 netif_info(qdev, link, qdev->ndev,
1563                            "Reset in progress, skip processing link state\n");
1564
1565                 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1566
1567                 /* Restart timer on 1 second interval. */
1568                 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
1569
1570                 return;
1571         }
1572
1573         switch (qdev->port_link_state) {
1574         default:
1575                 if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
1576                         ql_port_start(qdev);
1577                 }
1578                 qdev->port_link_state = LS_DOWN;
1579                 /* Fall Through */
1580
1581         case LS_DOWN:
1582                 if (curr_link_state == LS_UP) {
1583                         netif_info(qdev, link, qdev->ndev, "Link is up\n");
1584                         if (ql_is_auto_neg_complete(qdev))
1585                                 ql_finish_auto_neg(qdev);
1586
1587                         if (qdev->port_link_state == LS_UP)
1588                                 ql_link_down_detect_clear(qdev);
1589
1590                         qdev->port_link_state = LS_UP;
1591                 }
1592                 break;
1593
1594         case LS_UP:
1595                 /*
1596                  * See if the link is currently down or went down and came
1597                  * back up
1598                  */
1599                 if (curr_link_state == LS_DOWN) {
1600                         netif_info(qdev, link, qdev->ndev, "Link is down\n");
1601                         qdev->port_link_state = LS_DOWN;
1602                 }
1603                 if (ql_link_down_detect(qdev))
1604                         qdev->port_link_state = LS_DOWN;
1605                 break;
1606         }
1607         spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1608
1609         /* Restart timer on 1 second interval. */
1610         mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
1611 }
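
/*
 * The switch in ql_link_state_machine_work() reduces to a small pure
 * function over the port state.  A condensed sketch of the transitions
 * (illustrative only; side effects such as ql_port_start() and the
 * autoneg completion are omitted, and this helper is not used by the
 * driver):
 */
static u32 ql_next_link_state_sketch(u32 state, u32 curr_link_state,
                                     int link_down_detected)
{
        if (state != LS_UP && state != LS_DOWN)
                state = LS_DOWN;        /* default case, then fall through */
        if (state == LS_DOWN)
                return (curr_link_state == LS_UP) ? LS_UP : LS_DOWN;
        /* state == LS_UP */
        return (curr_link_state == LS_DOWN || link_down_detected) ?
                LS_DOWN : LS_UP;
}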
1612
1613 /*
1614  * Caller must take hw_lock and QL_PHY_GIO_SEM.
1615  */
1616 static void ql_get_phy_owner(struct ql3_adapter *qdev)
1617 {
1618         if (ql_this_adapter_controls_port(qdev))
1619                 set_bit(QL_LINK_MASTER, &qdev->flags);
1620         else
1621                 clear_bit(QL_LINK_MASTER, &qdev->flags);
1622 }
1623
1624 /*
1625  * Caller must take hw_lock and QL_PHY_GIO_SEM.
1626  */
1627 static void ql_init_scan_mode(struct ql3_adapter *qdev)
1628 {
1629         ql_mii_enable_scan_mode(qdev);
1630
1631         if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
1632                 if (ql_this_adapter_controls_port(qdev))
1633                         ql_petbi_init_ex(qdev);
1634         } else {
1635                 if (ql_this_adapter_controls_port(qdev))
1636                         ql_phy_init_ex(qdev);
1637         }
1638 }
1639
1640 /*
1641  * MII_Setup needs to be called before taking the PHY out of reset so that the
1642  * management interface clock speed can be set properly.  It would be better if
1643  * we had a way to disable MDC until after the PHY is out of reset, but we
1644  * don't have that capability.
1645  */
1646 static int ql_mii_setup(struct ql3_adapter *qdev)
1647 {
1648         u32 reg;
1649         struct ql3xxx_port_registers __iomem *port_regs =
1650                         qdev->mem_map_registers;
1651
1652         if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1653                         (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1654                          2) << 7))
1655                 return -1;
1656
1657         if (qdev->device_id == QL3032_DEVICE_ID)
1658                 ql_write_page0_reg(qdev,
1659                         &port_regs->macMIIMgmtControlReg, 0x0f00000);
1660
1661         /* Divide 125MHz clock by 28 to meet PHY timing requirements */
1662         reg = MAC_MII_CONTROL_CLK_SEL_DIV28;
1663
1664         ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
1665                            reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));
1666
1667         ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1668         return 0;
1669 }
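
/*
 * A quick sanity check of the divider chosen above, as a sketch: the
 * 125 MHz reference divided by 28 yields a management clock of roughly
 * 4.46 MHz.  Illustrative only; not used by the driver.
 */
static inline u32 ql_mii_mgmt_clock_khz_sketch(void)
{
        return 125000 / 28;     /* ~4464 kHz MDC with CLK_SEL_DIV28 */
}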
1670
1671 static u32 ql_supported_modes(struct ql3_adapter *qdev)
1672 {
1673         u32 supported;
1674
1675         if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
1676                 supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
1677                     | SUPPORTED_Autoneg;
1678         } else {
1679                 supported = SUPPORTED_10baseT_Half
1680                     | SUPPORTED_10baseT_Full
1681                     | SUPPORTED_100baseT_Half
1682                     | SUPPORTED_100baseT_Full
1683                     | SUPPORTED_1000baseT_Half
1684                     | SUPPORTED_1000baseT_Full
1685                     | SUPPORTED_Autoneg | SUPPORTED_TP;
1686         }
1687
1688         return supported;
1689 }
1690
1691 static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
1692 {
1693         int status;
1694         unsigned long hw_flags;
1695         spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1696         if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1697                 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1698                          2) << 7)) {
1699                 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1700                 return 0;
1701         }
1702         status = ql_is_auto_cfg(qdev);
1703         ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1704         spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1705         return status;
1706 }
1707
1708 static u32 ql_get_speed(struct ql3_adapter *qdev)
1709 {
1710         u32 status;
1711         unsigned long hw_flags;
1712         spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1713         if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1714                 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1715                          2) << 7)) {
1716                 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1717                 return 0;
1718         }
1719         status = ql_get_link_speed(qdev);
1720         ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1721         spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1722         return status;
1723 }
1724
1725 static int ql_get_full_dup(struct ql3_adapter *qdev)
1726 {
1727         int status;
1728         unsigned long hw_flags;
1729         spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1730         if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1731                 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1732                          2) << 7)) {
1733                 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1734                 return 0;
1735         }
1736         status = ql_is_link_full_dup(qdev);
1737         ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1738         spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1739         return status;
1740 }
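
/*
 * ql_get_auto_cfg_status(), ql_get_speed() and ql_get_full_dup() all
 * repeat the same hw_lock/PHY-semaphore dance around one query.  A
 * hypothetical helper capturing the shared pattern (a sketch, modulo
 * the differing return types; not part of the driver):
 */
static u32 ql_query_under_phy_sem_sketch(struct ql3_adapter *qdev,
                                         u32 (*query)(struct ql3_adapter *))
{
        unsigned long hw_flags;
        u32 status;

        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
        if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
                            (QL_RESOURCE_BITS_BASE_CODE |
                             (qdev->mac_index) * 2) << 7)) {
                spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
                return 0;
        }
        status = query(qdev);
        ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
        return status;
}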
1741
1742
1743 static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
1744 {
1745         struct ql3_adapter *qdev = netdev_priv(ndev);
1746
1747         ecmd->transceiver = XCVR_INTERNAL;
1748         ecmd->supported = ql_supported_modes(qdev);
1749
1750         if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
1751                 ecmd->port = PORT_FIBRE;
1752         } else {
1753                 ecmd->port = PORT_TP;
1754                 ecmd->phy_address = qdev->PHYAddr;
1755         }
1756         ecmd->advertising = ql_supported_modes(qdev);
1757         ecmd->autoneg = ql_get_auto_cfg_status(qdev);
1758         ecmd->speed = ql_get_speed(qdev);
1759         ecmd->duplex = ql_get_full_dup(qdev);
1760         return 0;
1761 }
1762
1763 static void ql_get_drvinfo(struct net_device *ndev,
1764                            struct ethtool_drvinfo *drvinfo)
1765 {
1766         struct ql3_adapter *qdev = netdev_priv(ndev);
1767         strlcpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver));
1768         strlcpy(drvinfo->version, ql3xxx_driver_version, sizeof(drvinfo->version));
1769         strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
1770         strlcpy(drvinfo->bus_info, pci_name(qdev->pdev), sizeof(drvinfo->bus_info));
1771         drvinfo->regdump_len = 0;
1772         drvinfo->eedump_len = 0;
1773 }
1774
1775 static u32 ql_get_msglevel(struct net_device *ndev)
1776 {
1777         struct ql3_adapter *qdev = netdev_priv(ndev);
1778         return qdev->msg_enable;
1779 }
1780
1781 static void ql_set_msglevel(struct net_device *ndev, u32 value)
1782 {
1783         struct ql3_adapter *qdev = netdev_priv(ndev);
1784         qdev->msg_enable = value;
1785 }
1786
1787 static void ql_get_pauseparam(struct net_device *ndev,
1788                               struct ethtool_pauseparam *pause)
1789 {
1790         struct ql3_adapter *qdev = netdev_priv(ndev);
1791         struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
1792
1793         u32 reg;
1794         if (qdev->mac_index == 0)
1795                 reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg);
1796         else
1797                 reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg);
1798
1799         pause->autoneg  = ql_get_auto_cfg_status(qdev);
1800         pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2;
1801         pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1;
1802 }
1803
1804 static const struct ethtool_ops ql3xxx_ethtool_ops = {
1805         .get_settings = ql_get_settings,
1806         .get_drvinfo = ql_get_drvinfo,
1807         .get_link = ethtool_op_get_link,
1808         .get_msglevel = ql_get_msglevel,
1809         .set_msglevel = ql_set_msglevel,
1810         .get_pauseparam = ql_get_pauseparam,
1811 };
1812
1813 static int ql_populate_free_queue(struct ql3_adapter *qdev)
1814 {
1815         struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
1816         dma_addr_t map;
1817         int err;
1818
1819         while (lrg_buf_cb) {
1820                 if (!lrg_buf_cb->skb) {
1821                         lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
1822                                                            qdev->lrg_buffer_len);
1823                         if (unlikely(!lrg_buf_cb->skb)) {
1824                                 netdev_printk(KERN_DEBUG, qdev->ndev,
1825                                               "Failed netdev_alloc_skb()\n");
1826                                 break;
1827                         } else {
1828                                 /*
1829                                  * We save some space to copy the ethhdr from
1830                                  * first buffer
1831                                  */
1832                                 skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
1833                                 map = pci_map_single(qdev->pdev,
1834                                                      lrg_buf_cb->skb->data,
1835                                                      qdev->lrg_buffer_len -
1836                                                      QL_HEADER_SPACE,
1837                                                      PCI_DMA_FROMDEVICE);
1838
1839                                 err = pci_dma_mapping_error(qdev->pdev, map);
1840                                 if (err) {
1841                                         netdev_err(qdev->ndev,
1842                                                    "PCI mapping failed with error: %d\n",
1843                                                    err);
1844                                         dev_kfree_skb(lrg_buf_cb->skb);
1845                                         lrg_buf_cb->skb = NULL;
1846                                         break;
1847                                 }
1848
1850                                 lrg_buf_cb->buf_phy_addr_low =
1851                                     cpu_to_le32(LS_64BITS(map));
1852                                 lrg_buf_cb->buf_phy_addr_high =
1853                                     cpu_to_le32(MS_64BITS(map));
1854                                 dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
1855                                 dma_unmap_len_set(lrg_buf_cb, maplen,
1856                                                   qdev->lrg_buffer_len -
1857                                                   QL_HEADER_SPACE);
1858                                 --qdev->lrg_buf_skb_check;
1859                                 if (!qdev->lrg_buf_skb_check)
1860                                         return 1;
1861                         }
1862                 }
1863                 lrg_buf_cb = lrg_buf_cb->next;
1864         }
1865         return 0;
1866 }
1867
1868 /*
1869  * Caller holds hw_lock.
1870  */
1871 static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
1872 {
1873         struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
1874         if (qdev->small_buf_release_cnt >= 16) {
1875                 while (qdev->small_buf_release_cnt >= 16) {
1876                         qdev->small_buf_q_producer_index++;
1877
1878                         if (qdev->small_buf_q_producer_index ==
1879                             NUM_SBUFQ_ENTRIES)
1880                                 qdev->small_buf_q_producer_index = 0;
1881                         qdev->small_buf_release_cnt -= 8;
1882                 }
1883                 wmb();
1884                 writel(qdev->small_buf_q_producer_index,
1885                         &port_regs->CommonRegs.rxSmallQProducerIndex);
1886         }
1887 }
1888
1889 /*
1890  * Caller holds hw_lock.
1891  */
1892 static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
1893 {
1894         struct bufq_addr_element *lrg_buf_q_ele;
1895         int i;
1896         struct ql_rcv_buf_cb *lrg_buf_cb;
1897         struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
1898
1899         if ((qdev->lrg_buf_free_count >= 8) &&
1900             (qdev->lrg_buf_release_cnt >= 16)) {
1901
1902                 if (qdev->lrg_buf_skb_check)
1903                         if (!ql_populate_free_queue(qdev))
1904                                 return;
1905
1906                 lrg_buf_q_ele = qdev->lrg_buf_next_free;
1907
1908                 while ((qdev->lrg_buf_release_cnt >= 16) &&
1909                        (qdev->lrg_buf_free_count >= 8)) {
1910
1911                         for (i = 0; i < 8; i++) {
1912                                 lrg_buf_cb =
1913                                     ql_get_from_lrg_buf_free_list(qdev);
1914                                 lrg_buf_q_ele->addr_high =
1915                                     lrg_buf_cb->buf_phy_addr_high;
1916                                 lrg_buf_q_ele->addr_low =
1917                                     lrg_buf_cb->buf_phy_addr_low;
1918                                 lrg_buf_q_ele++;
1919
1920                                 qdev->lrg_buf_release_cnt--;
1921                         }
1922
1923                         qdev->lrg_buf_q_producer_index++;
1924
1925                         if (qdev->lrg_buf_q_producer_index == qdev->num_lbufq_entries)
1926                                 qdev->lrg_buf_q_producer_index = 0;
1927
1928                         if (qdev->lrg_buf_q_producer_index ==
1929                             (qdev->num_lbufq_entries - 1)) {
1930                                 lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
1931                         }
1932                 }
1933                 wmb();
1934                 qdev->lrg_buf_next_free = lrg_buf_q_ele;
1935                 writel(qdev->lrg_buf_q_producer_index,
1936                         &port_regs->CommonRegs.rxLargeQProducerIndex);
1937         }
1938 }
1939
1940 static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
1941                                    struct ob_mac_iocb_rsp *mac_rsp)
1942 {
1943         struct ql_tx_buf_cb *tx_cb;
1944         int i;
1945         int retval = 0;
1946
1947         if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
1948                 netdev_warn(qdev->ndev,
1949                             "Frame too short but it was padded and sent\n");
1950         }
1951
1952         tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
1953
1954         /*  Check the transmit response flags for any errors */
1955         if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
1956                 netdev_err(qdev->ndev,
1957                            "Frame too short to be legal, frame not sent\n");
1958
1959                 qdev->ndev->stats.tx_errors++;
1960                 retval = -EIO;
1961                 goto frame_not_sent;
1962         }
1963
1964         if (tx_cb->seg_count == 0) {
1965                 netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n",
1966                            mac_rsp->transaction_id);
1967
1968                 qdev->ndev->stats.tx_errors++;
1969                 retval = -EIO;
1970                 goto invalid_seg_count;
1971         }
1972
1973         pci_unmap_single(qdev->pdev,
1974                          dma_unmap_addr(&tx_cb->map[0], mapaddr),
1975                          dma_unmap_len(&tx_cb->map[0], maplen),
1976                          PCI_DMA_TODEVICE);
1977         tx_cb->seg_count--;
1978         if (tx_cb->seg_count) {
1979                 for (i = 1; i < tx_cb->seg_count; i++) {
1980                         pci_unmap_page(qdev->pdev,
1981                                        dma_unmap_addr(&tx_cb->map[i],
1982                                                       mapaddr),
1983                                        dma_unmap_len(&tx_cb->map[i], maplen),
1984                                        PCI_DMA_TODEVICE);
1985                 }
1986         }
1987         qdev->ndev->stats.tx_packets++;
1988         qdev->ndev->stats.tx_bytes += tx_cb->skb->len;
1989
1990 frame_not_sent:
1991         dev_kfree_skb_irq(tx_cb->skb);
1992         tx_cb->skb = NULL;
1993
1994 invalid_seg_count:
1995         atomic_inc(&qdev->tx_count);
1996 }
1997
1998 static void ql_get_sbuf(struct ql3_adapter *qdev)
1999 {
2000         if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
2001                 qdev->small_buf_index = 0;
2002         qdev->small_buf_release_cnt++;
2003 }
2004
2005 static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
2006 {
2007         struct ql_rcv_buf_cb *lrg_buf_cb =
2008                         &qdev->lrg_buf[qdev->lrg_buf_index];
2009         qdev->lrg_buf_release_cnt++;
2010         if (++qdev->lrg_buf_index == qdev->num_large_buffers)
2011                 qdev->lrg_buf_index = 0;
2012         return lrg_buf_cb;
2013 }
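
/*
 * Both helpers above advance a ring index with wrap-around.  The shared
 * pattern as a hypothetical helper (illustrative only):
 */
static inline u32 ql_ring_next_index_sketch(u32 index, u32 num_entries)
{
        return (index + 1 == num_entries) ? 0 : index + 1;
}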
2014
2015 /*
2016  * The difference between 3022 and 3032 for inbound completions:
2017  * 3022 uses two buffers per completion.  The first buffer contains
2018  * (some) header info, the second the remainder of the headers plus
2019  * the data.  For this chip we reserve some space at the top of the
2020  * receive buffer so that the header info in buffer one can be
2021  * prepended to buffer two.  Buffer two is then sent up while
2022  * buffer one is returned to the hardware to be reused.
2023  * 3032 receives all of its data and headers in one buffer for a
2024  * simpler process.  3032 also supports checksum verification as
2025  * can be seen in ql_process_macip_rx_intr().
2026  */
2027 static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
2028                                    struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
2029 {
2030         struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
2031         struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
2032         struct sk_buff *skb;
2033         u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);
2034
2035         /*
2036          * Get the inbound address list (small buffer).
2037          */
2038         ql_get_sbuf(qdev);
2039
2040         if (qdev->device_id == QL3022_DEVICE_ID)
2041                 lrg_buf_cb1 = ql_get_lbuf(qdev);
2042
2043         /* start of second buffer */
2044         lrg_buf_cb2 = ql_get_lbuf(qdev);
2045         skb = lrg_buf_cb2->skb;
2046
2047         qdev->ndev->stats.rx_packets++;
2048         qdev->ndev->stats.rx_bytes += length;
2049
2050         skb_put(skb, length);
2051         pci_unmap_single(qdev->pdev,
2052                          dma_unmap_addr(lrg_buf_cb2, mapaddr),
2053                          dma_unmap_len(lrg_buf_cb2, maplen),
2054                          PCI_DMA_FROMDEVICE);
2055         prefetch(skb->data);
2056         skb->ip_summed = CHECKSUM_NONE;
2057         skb->protocol = eth_type_trans(skb, qdev->ndev);
2058
2059         netif_receive_skb(skb);
2060         lrg_buf_cb2->skb = NULL;
2061
2062         if (qdev->device_id == QL3022_DEVICE_ID)
2063                 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
2064         ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
2065 }
2066
2067 static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
2068                                      struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
2069 {
2070         struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
2071         struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
2072         struct sk_buff *skb1 = NULL, *skb2;
2073         struct net_device *ndev = qdev->ndev;
2074         u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
2075         u16 size = 0;
2076
2077         /*
2078          * Get the inbound address list (small buffer).
2079          */
2080
2081         ql_get_sbuf(qdev);
2082
2083         if (qdev->device_id == QL3022_DEVICE_ID) {
2084                 /* start of first buffer on 3022 */
2085                 lrg_buf_cb1 = ql_get_lbuf(qdev);
2086                 skb1 = lrg_buf_cb1->skb;
2087                 size = ETH_HLEN;
2088                 if (*((u16 *) skb1->data) != 0xFFFF)
2089                         size += VLAN_ETH_HLEN - ETH_HLEN;
2090         }
2091
2092         /* start of second buffer */
2093         lrg_buf_cb2 = ql_get_lbuf(qdev);
2094         skb2 = lrg_buf_cb2->skb;
2095
2096         skb_put(skb2, length);  /* Just the second buffer length here. */
2097         pci_unmap_single(qdev->pdev,
2098                          dma_unmap_addr(lrg_buf_cb2, mapaddr),
2099                          dma_unmap_len(lrg_buf_cb2, maplen),
2100                          PCI_DMA_FROMDEVICE);
2101         prefetch(skb2->data);
2102
2103         skb2->ip_summed = CHECKSUM_NONE;
2104         if (qdev->device_id == QL3022_DEVICE_ID) {
2105                 /*
2106                  * Copy the ethhdr from first buffer to second. This
2107                  * is necessary for 3022 IP completions.
2108                  */
2109                 skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN,
2110                                                  skb_push(skb2, size), size);
2111         } else {
2112                 u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
2113                 if (checksum &
2114                         (IB_IP_IOCB_RSP_3032_ICE |
2115                          IB_IP_IOCB_RSP_3032_CE)) {
2116                         netdev_err(ndev,
2117                                    "%s: Bad checksum for this %s packet, checksum = %x\n",
2118                                    __func__,
2119                                    ((checksum & IB_IP_IOCB_RSP_3032_TCP) ?
2120                                     "TCP" : "UDP"), checksum);
2121                 } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) ||
2122                                 (checksum & IB_IP_IOCB_RSP_3032_UDP &&
2123                                 !(checksum & IB_IP_IOCB_RSP_3032_NUC))) {
2124                         skb2->ip_summed = CHECKSUM_UNNECESSARY;
2125                 }
2126         }
2127         skb2->protocol = eth_type_trans(skb2, qdev->ndev);
2128
2129         netif_receive_skb(skb2);
2130         ndev->stats.rx_packets++;
2131         ndev->stats.rx_bytes += length;
2132         lrg_buf_cb2->skb = NULL;
2133
2134         if (qdev->device_id == QL3022_DEVICE_ID)
2135                 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
2136         ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
2137 }
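
/*
 * A minimal sketch of the 3022 header fix-up performed above, using
 * plain buffers instead of sk_buffs (illustrative only): buffer two is
 * posted with QL_HEADER_SPACE of headroom, and the Ethernet (or VLAN)
 * header that arrived in buffer one is copied in front of its payload,
 * which is what skb_push() plus skb_copy_from_linear_data_offset() do.
 */
static u8 *ql_prepend_hdr_sketch(const u8 *hdr, u8 *payload, size_t hdr_len)
{
        u8 *frame = payload - hdr_len;  /* like skb_push(skb2, size) */

        memcpy(frame, hdr, hdr_len);    /* header taken from buffer one */
        return frame;
}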
2138
2139 static int ql_tx_rx_clean(struct ql3_adapter *qdev,
2140                           int *tx_cleaned, int *rx_cleaned, int work_to_do)
2141 {
2142         struct net_rsp_iocb *net_rsp;
2143         struct net_device *ndev = qdev->ndev;
2144         int work_done = 0;
2145
2146         /* While there are entries in the completion queue. */
2147         while ((le32_to_cpu(*(qdev->prsp_producer_index)) !=
2148                 qdev->rsp_consumer_index) && (work_done < work_to_do)) {
2149
2150                 net_rsp = qdev->rsp_current;
2151                 rmb();
2152                 /*
2153                  * Fix 3032 chip's undocumented "feature" where bit-8 is set if the
2154                  * inbound completion is for a VLAN.
2155                  */
2156                 if (qdev->device_id == QL3032_DEVICE_ID)
2157                         net_rsp->opcode &= 0x7f;
2158                 switch (net_rsp->opcode) {
2159
2160                 case OPCODE_OB_MAC_IOCB_FN0:
2161                 case OPCODE_OB_MAC_IOCB_FN2:
2162                         ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
2163                                                net_rsp);
2164                         (*tx_cleaned)++;
2165                         break;
2166
2167                 case OPCODE_IB_MAC_IOCB:
2168                 case OPCODE_IB_3032_MAC_IOCB:
2169                         ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
2170                                                net_rsp);
2171                         (*rx_cleaned)++;
2172                         break;
2173
2174                 case OPCODE_IB_IP_IOCB:
2175                 case OPCODE_IB_3032_IP_IOCB:
2176                         ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
2177                                                  net_rsp);
2178                         (*rx_cleaned)++;
2179                         break;
2180                 default:
2181                         {
2182                                 u32 *tmp = (u32 *) net_rsp;
2183                                 netdev_err(ndev,
2184                                            "Hit default case, not handled!\n"
2185                                            "    dropping the packet, opcode = %x\n"
2186                                            "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
2187                                            net_rsp->opcode,
2188                                            (unsigned long int)tmp[0],
2189                                            (unsigned long int)tmp[1],
2190                                            (unsigned long int)tmp[2],
2191                                            (unsigned long int)tmp[3]);
2192                         }
2193                 }
2194
2195                 qdev->rsp_consumer_index++;
2196
2197                 if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
2198                         qdev->rsp_consumer_index = 0;
2199                         qdev->rsp_current = qdev->rsp_q_virt_addr;
2200                 } else {
2201                         qdev->rsp_current++;
2202                 }
2203
2204                 work_done = *tx_cleaned + *rx_cleaned;
2205         }
2206
2207         return work_done;
2208 }
2209
2210 static int ql_poll(struct napi_struct *napi, int budget)
2211 {
2212         struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi);
2213         int rx_cleaned = 0, tx_cleaned = 0;
2214         unsigned long hw_flags;
2215         struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2216
2217         ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget);
2218
2219         if (tx_cleaned + rx_cleaned != budget) {
2220                 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
2221                 __napi_complete(napi);
2222                 ql_update_small_bufq_prod_index(qdev);
2223                 ql_update_lrg_bufq_prod_index(qdev);
2224                 writel(qdev->rsp_consumer_index,
2225                             &port_regs->CommonRegs.rspQConsumerIndex);
2226                 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
2227
2228                 ql_enable_interrupts(qdev);
2229         }
2230         return tx_cleaned + rx_cleaned;
2231 }
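
/*
 * ql_poll() follows the standard NAPI contract: consume up to 'budget'
 * completions, and only when the budget was *not* exhausted complete
 * NAPI, push the buffer-queue producer indices and the response-queue
 * consumer index to the chip, and re-enable interrupts.  Returning the
 * full budget keeps the adapter on the poll list for another pass.
 */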
2232
2233 static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
2234 {
2235
2236         struct net_device *ndev = dev_id;
2237         struct ql3_adapter *qdev = netdev_priv(ndev);
2238         struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2239         u32 value;
2240         int handled = 1;
2241         u32 var;
2242
2245         value =
2246             ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
2247
2248         if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
2249                 spin_lock(&qdev->adapter_lock);
2250                 netif_stop_queue(qdev->ndev);
2251                 netif_carrier_off(qdev->ndev);
2252                 ql_disable_interrupts(qdev);
2253                 qdev->port_link_state = LS_DOWN;
2254                 set_bit(QL_RESET_ACTIVE, &qdev->flags);
2255
2256                 if (value & ISP_CONTROL_FE) {
2257                         /*
2258                          * Chip Fatal Error.
2259                          */
2260                         var =
2261                             ql_read_page0_reg_l(qdev,
2262                                               &port_regs->PortFatalErrStatus);
2263                         netdev_warn(ndev,
2264                                     "Resetting chip. PortFatalErrStatus register = 0x%x\n",
2265                                     var);
2266                         set_bit(QL_RESET_START, &qdev->flags);
2267                 } else {
2268                         /*
2269                          * Soft Reset Requested.
2270                          */
2271                         set_bit(QL_RESET_PER_SCSI, &qdev->flags);
2272                         netdev_err(ndev,
2273                                    "Another function issued a reset to the chip. ISR value = %x\n",
2274                                    value);
2275                 }
2276                 queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
2277                 spin_unlock(&qdev->adapter_lock);
2278         } else if (value & ISP_IMR_DISABLE_CMPL_INT) {
2279                 ql_disable_interrupts(qdev);
2280                 if (likely(napi_schedule_prep(&qdev->napi))) {
2281                         __napi_schedule(&qdev->napi);
2282                 }
2283         } else {
2284                 return IRQ_NONE;
2285         }
2286
2287         return IRQ_RETVAL(handled);
2288 }
2289
2290 /*
2291  * Get the total number of segments needed for the
2292  * given number of fragments.  This is necessary because
2293  * outbound address lists (OAL) will be used when more than
2294  * two frags are given.  Each address list has 5 addr/len
2295  * pairs.  The 5th pair in each AOL is used to  point to
2296  * pairs.  The 5th pair in each OAL is used to point to
2297  * the next OAL if more frags are coming.
2298  * That is why the frags:segment count ratio is not linear.
2299 static int ql_get_seg_count(struct ql3_adapter *qdev,
2300                             unsigned short frags)
2301 {
2302         if (qdev->device_id == QL3022_DEVICE_ID)
2303                 return 1;
2304
2305         switch (frags) {
2306         case 0: return 1;       /* just the skb->data seg */
2307         case 1: return 2;       /* skb->data + 1 frag */
2308         case 2: return 3;       /* skb->data + 2 frags */
2309         case 3: return 5;       /* skb->data + 1 frag + 1 OAL containing 2 frags */
2310         case 4: return 6;
2311         case 5: return 7;
2312         case 6: return 8;
2313         case 7: return 10;
2314         case 8: return 11;
2315         case 9: return 12;
2316         case 10: return 13;
2317         case 11: return 15;
2318         case 12: return 16;
2319         case 13: return 17;
2320         case 14: return 18;
2321         case 15: return 20;
2322         case 16: return 21;
2323         case 17: return 22;
2324         case 18: return 23;
2325         }
2326         return -1;
2327 }
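
/*
 * The table above can also be computed in closed form: one segment for
 * skb->data, one per fragment, and one continuation entry for roughly
 * every four fragments beyond what the IOCB itself holds.  A sketch
 * that reproduces every entry of the table (illustrative only):
 */
static int ql_get_seg_count_sketch(unsigned short frags)
{
        if (frags <= 2)
                return 1 + frags;               /* fits in the IOCB ALPs */
        return 1 + frags + (frags + 1) / 4;     /* plus OAL continuations */
}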
2328
2329 static void ql_hw_csum_setup(const struct sk_buff *skb,
2330                              struct ob_mac_iocb_req *mac_iocb_ptr)
2331 {
2332         const struct iphdr *ip = ip_hdr(skb);
2333
2334         mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb);
2335         mac_iocb_ptr->ip_hdr_len = ip->ihl;
2336
2337         if (ip->protocol == IPPROTO_TCP) {
2338                 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC |
2339                         OB_3032MAC_IOCB_REQ_IC;
2340         } else {
2341                 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC |
2342                         OB_3032MAC_IOCB_REQ_IC;
2343         }
2344
2345 }
2346
2347 /*
2348  * Map the buffers for this transmit.  This will return
2349  * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
2350  */
2351 static int ql_send_map(struct ql3_adapter *qdev,
2352                                 struct ob_mac_iocb_req *mac_iocb_ptr,
2353                                 struct ql_tx_buf_cb *tx_cb,
2354                                 struct sk_buff *skb)
2355 {
2356         struct oal *oal;
2357         struct oal_entry *oal_entry;
2358         int len = skb_headlen(skb);
2359         dma_addr_t map;
2360         int err;
2361         int completed_segs, i;
2362         int seg_cnt, seg = 0;
2363         int frag_cnt = (int)skb_shinfo(skb)->nr_frags;
2364
2365         seg_cnt = tx_cb->seg_count;
2366         /*
2367          * Map the skb buffer first.
2368          */
2369         map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
2370
2371         err = pci_dma_mapping_error(qdev->pdev, map);
2372         if (err) {
2373                 netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n",
2374                            err);
2375
2376                 return NETDEV_TX_BUSY;
2377         }
2378
2379         oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
2380         oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2381         oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2382         oal_entry->len = cpu_to_le32(len);
2383         dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2384         dma_unmap_len_set(&tx_cb->map[seg], maplen, len);
2385         seg++;
2386
2387         if (seg_cnt == 1) {
2388                 /* Terminate the last segment. */
2389                 oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
2390         } else {
2391                 oal = tx_cb->oal;
2392                 for (completed_segs = 0; completed_segs < frag_cnt; completed_segs++, seg++) {
2393                         skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs];
2394                         oal_entry++;
2395                         if ((seg == 2 && seg_cnt > 3) ||        /* Check for continuation */
2396                             (seg == 7 && seg_cnt > 8) ||        /* requirements. It's strange */
2397                             (seg == 12 && seg_cnt > 13) ||      /* but necessary. */
2398                             (seg == 17 && seg_cnt > 18)) {
2399                                 /* Continuation entry points to outbound address list. */
2400                                 map = pci_map_single(qdev->pdev, oal,
2401                                                      sizeof(struct oal),
2402                                                      PCI_DMA_TODEVICE);
2403
2404                                 err = pci_dma_mapping_error(qdev->pdev, map);
2405                                 if (err) {
2407                                         netdev_err(qdev->ndev,
2408                                                    "PCI mapping outbound address list with error: %d\n",
2409                                                    err);
2410                                         goto map_error;
2411                                 }
2412
2413                                 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2414                                 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2415                                 oal_entry->len =
2416                                     cpu_to_le32(sizeof(struct oal) |
2417                                                 OAL_CONT_ENTRY);
2418                                 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr,
2419                                                    map);
2420                                 dma_unmap_len_set(&tx_cb->map[seg], maplen,
2421                                                   sizeof(struct oal));
2422                                 oal_entry = (struct oal_entry *)oal;
2423                                 oal++;
2424                                 seg++;
2425                         }
2426
2427                         map =
2428                             pci_map_page(qdev->pdev, frag->page,
2429                                          frag->page_offset, frag->size,
2430                                          PCI_DMA_TODEVICE);
2431
2432                         err = pci_dma_mapping_error(qdev->pdev, map);
2433                         if (err) {
2434                                 netdev_err(qdev->ndev,
2435                                            "PCI mapping frags failed with error: %d\n",
2436                                            err);
2437                                 goto map_error;
2438                         }
2439
2440                         oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2441                         oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2442                         oal_entry->len = cpu_to_le32(frag->size);
2443                         dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2444                         dma_unmap_len_set(&tx_cb->map[seg], maplen,
2445                                           frag->size);
2446                 }
2447                 /* Terminate the last segment. */
2448                 oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
2449         }
2450
2451         return NETDEV_TX_OK;
2452
2453 map_error:
2454         /*
2455          * A PCI mapping failed, so back out: walk the OALs and the
2456          * pages that were already mapped and unmap them.
2457          */
2458
2459         seg = 1;
2460         oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
2461         oal = tx_cb->oal;
2462         for (i = 0; i < completed_segs; i++, seg++) {
2463                 oal_entry++;
2464
2465                 if ((seg == 2 && seg_cnt > 3) ||        /* Check for continuation */
2466                    (seg == 7 && seg_cnt > 8) ||        /* requirements. It's strange */
2467                    (seg == 12 && seg_cnt > 13) ||      /* but necessary. */
2468                    (seg == 17 && seg_cnt > 18)) {
2469                         pci_unmap_single(qdev->pdev,
2470                                 dma_unmap_addr(&tx_cb->map[seg], mapaddr),
2471                                 dma_unmap_len(&tx_cb->map[seg], maplen),
2472                                  PCI_DMA_TODEVICE);
2473                         oal++;
2474                         seg++;
2475                 }
2476
2477                 pci_unmap_page(qdev->pdev,
2478                                dma_unmap_addr(&tx_cb->map[seg], mapaddr),
2479                                dma_unmap_len(&tx_cb->map[seg], maplen),
2480                                PCI_DMA_TODEVICE);
2481         }
2482
2483         pci_unmap_single(qdev->pdev,
2484                          dma_unmap_addr(&tx_cb->map[0], mapaddr),
2485                          dma_unmap_len(&tx_cb->map[0], maplen),
2486                          PCI_DMA_TODEVICE);
2487
2488         return NETDEV_TX_BUSY;
2489
2490 }
2491
2492 /*
2493  * The difference between 3022 and 3032 sends:
2494  * 3022 only supports a simple single segment transmission.
2495  * 3032 supports checksumming and scatter/gather lists (fragments).
2496  * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
2497  * in the IOCB plus a chain of outbound address lists (OAL) that
2498  * each contain 5 ALPs.  The last ALP of the IOCB (3rd) or OAL (5th)
2499  * will be used to point to an OAL when more ALP entries are required.
2500  * The IOCB is always the top of the chain followed by one or more
2501  * OALs (when necessary).
2502  */
2503 static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
2504                                struct net_device *ndev)
2505 {
2506         struct ql3_adapter *qdev = netdev_priv(ndev);
2507         struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2508         struct ql_tx_buf_cb *tx_cb;
2509         u32 tot_len = skb->len;
2510         struct ob_mac_iocb_req *mac_iocb_ptr;
2511
2512         if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
2513                 return NETDEV_TX_BUSY;
2514         }
2515
2516         tx_cb = &qdev->tx_buf[qdev->req_producer_index];
2517         tx_cb->seg_count = ql_get_seg_count(qdev, skb_shinfo(skb)->nr_frags);
2518         if (tx_cb->seg_count == -1) {
2519                 netdev_err(ndev, "%s: invalid segment count!\n", __func__);
2520                 return NETDEV_TX_OK;
2521         }
2522
2523         mac_iocb_ptr = tx_cb->queue_entry;
2524         memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
2525         mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
2526         mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X;
2527         mac_iocb_ptr->flags |= qdev->mb_bit_mask;
2528         mac_iocb_ptr->transaction_id = qdev->req_producer_index;
2529         mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
2530         tx_cb->skb = skb;
2531         if (qdev->device_id == QL3032_DEVICE_ID &&
2532             skb->ip_summed == CHECKSUM_PARTIAL)
2533                 ql_hw_csum_setup(skb, mac_iocb_ptr);
2534
2535         if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) {
2536                 netdev_err(ndev, "%s: Could not map the segments!\n", __func__);
2537                 return NETDEV_TX_BUSY;
2538         }
2539
2540         wmb();
2541         qdev->req_producer_index++;
2542         if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
2543                 qdev->req_producer_index = 0;
2544         wmb();
2545         ql_write_common_reg_l(qdev,
2546                             &port_regs->CommonRegs.reqQProducerIndex,
2547                             qdev->req_producer_index);
2548
2549         netif_printk(qdev, tx_queued, KERN_DEBUG, ndev,
2550                      "tx queued, slot %d, len %d\n",
2551                      qdev->req_producer_index, skb->len);
2552
2553         atomic_dec(&qdev->tx_count);
2554         return NETDEV_TX_OK;
2555 }
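
/*
 * The tail of ql3xxx_send() is the usual single-producer ring publish:
 * order the IOCB writes, advance the producer index with wrap, then
 * write the new index to the chip.  A condensed sketch (illustrative
 * only; not used by the driver):
 */
static u32 ql_ring_publish_sketch(struct ql3_adapter *qdev,
                                  u32 producer_index, u32 num_entries,
                                  u32 __iomem *doorbell)
{
        wmb();                          /* IOCB contents before the index */
        if (++producer_index == num_entries)
                producer_index = 0;
        wmb();
        ql_write_common_reg_l(qdev, doorbell, producer_index);
        return producer_index;
}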
2556
2557 static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
2558 {
2559         qdev->req_q_size =
2560             (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));
2561
2562         qdev->req_q_virt_addr =
2563             pci_alloc_consistent(qdev->pdev,
2564                                  (size_t) qdev->req_q_size,
2565                                  &qdev->req_q_phy_addr);
2566
2567         if ((qdev->req_q_virt_addr == NULL) ||
2568             LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
2569                 netdev_err(qdev->ndev, "reqQ failed\n");
2570                 return -ENOMEM;
2571         }
2572
2573         qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
2574
2575         qdev->rsp_q_virt_addr =
2576             pci_alloc_consistent(qdev->pdev,
2577                                  (size_t) qdev->rsp_q_size,
2578                                  &qdev->rsp_q_phy_addr);
2579
2580         if ((qdev->rsp_q_virt_addr == NULL) ||
2581             LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
2582                 netdev_err(qdev->ndev, "rspQ allocation failed\n");
2583                 pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
2584                                     qdev->req_q_virt_addr,
2585                                     qdev->req_q_phy_addr);
2586                 return -ENOMEM;
2587         }
2588
2589         set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
2590
2591         return 0;
2592 }
2593
2594 static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
2595 {
2596         if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) {
2597                 netdev_info(qdev->ndev, "Already done\n");
2598                 return;
2599         }
2600
2601         pci_free_consistent(qdev->pdev,
2602                             qdev->req_q_size,
2603                             qdev->req_q_virt_addr, qdev->req_q_phy_addr);
2604
2605         qdev->req_q_virt_addr = NULL;
2606
2607         pci_free_consistent(qdev->pdev,
2608                             qdev->rsp_q_size,
2609                             qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);
2610
2611         qdev->rsp_q_virt_addr = NULL;
2612
2613         clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
2614 }
2615
2616 static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
2617 {
2618         /* Create Large Buffer Queue */
2619         qdev->lrg_buf_q_size =
2620             qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
2621         if (qdev->lrg_buf_q_size < PAGE_SIZE)
2622                 qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
2623         else
2624                 qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
2625
2626         qdev->lrg_buf = kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb), GFP_KERNEL);
2627         if (qdev->lrg_buf == NULL) {
2628                 netdev_err(qdev->ndev, "qdev->lrg_buf alloc failed\n");
2629                 return -ENOMEM;
2630         }
2631
2632         qdev->lrg_buf_q_alloc_virt_addr =
2633             pci_alloc_consistent(qdev->pdev,
2634                                  qdev->lrg_buf_q_alloc_size,
2635                                  &qdev->lrg_buf_q_alloc_phy_addr);
2636
2637         if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
2638                 netdev_err(qdev->ndev, "lBufQ failed\n");
2639                 return -ENOMEM;
2640         }
2641         qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
2642         qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;
2643
2644         /* Create Small Buffer Queue */
2645         qdev->small_buf_q_size =
2646             NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
2647         if (qdev->small_buf_q_size < PAGE_SIZE)
2648                 qdev->small_buf_q_alloc_size = PAGE_SIZE;
2649         else
2650                 qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
2651
2652         qdev->small_buf_q_alloc_virt_addr =
2653             pci_alloc_consistent(qdev->pdev,
2654                                  qdev->small_buf_q_alloc_size,
2655                                  &qdev->small_buf_q_alloc_phy_addr);
2656
2657         if (qdev->small_buf_q_alloc_virt_addr == NULL) {
2658                 netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n");
2659                 pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
2660                                     qdev->lrg_buf_q_alloc_virt_addr,
2661                                     qdev->lrg_buf_q_alloc_phy_addr);
2662                 return -ENOMEM;
2663         }
2664
2665         qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
2666         qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
2667         set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
2668         return 0;
2669 }
2670
2671 static void ql_free_buffer_queues(struct ql3_adapter *qdev)
2672 {
2673         if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) {
2674                 netdev_info(qdev->ndev, "Already done\n");
2675                 return;
2676         }
2677         kfree(qdev->lrg_buf);
2678         pci_free_consistent(qdev->pdev,
2679                             qdev->lrg_buf_q_alloc_size,
2680                             qdev->lrg_buf_q_alloc_virt_addr,
2681                             qdev->lrg_buf_q_alloc_phy_addr);
2682
2683         qdev->lrg_buf_q_virt_addr = NULL;
2684
2685         pci_free_consistent(qdev->pdev,
2686                             qdev->small_buf_q_alloc_size,
2687                             qdev->small_buf_q_alloc_virt_addr,
2688                             qdev->small_buf_q_alloc_phy_addr);
2689
2690         qdev->small_buf_q_virt_addr = NULL;
2691
2692         clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
2693 }
2694
2695 static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
2696 {
2697         int i;
2698         struct bufq_addr_element *small_buf_q_entry;
2699
2700         /* Currently we allocate one chunk of memory and use it for small buffers */
2701         qdev->small_buf_total_size =
2702             (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
2703              QL_SMALL_BUFFER_SIZE);
2704
2705         qdev->small_buf_virt_addr =
2706             pci_alloc_consistent(qdev->pdev,
2707                                  qdev->small_buf_total_size,
2708                                  &qdev->small_buf_phy_addr);
2709
2710         if (qdev->small_buf_virt_addr == NULL) {
2711                 netdev_err(qdev->ndev, "Failed to get small buffer memory\n");
2712                 return -ENOMEM;
2713         }
2714
2715         qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
2716         qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);
2717
2718         small_buf_q_entry = qdev->small_buf_q_virt_addr;
2719
2720         /* Initialize the small buffer queue. */
2721         for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) {
2722                 small_buf_q_entry->addr_high =
2723                     cpu_to_le32(qdev->small_buf_phy_addr_high);
2724                 small_buf_q_entry->addr_low =
2725                     cpu_to_le32(qdev->small_buf_phy_addr_low +
2726                                 (i * QL_SMALL_BUFFER_SIZE));
2727                 small_buf_q_entry++;
2728         }
2729         qdev->small_buf_index = 0;
2730         set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags);
2731         return 0;
2732 }
2733
2734 static void ql_free_small_buffers(struct ql3_adapter *qdev)
2735 {
2736         if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) {
2737                 netdev_info(qdev->ndev, "Already done\n");
2738                 return;
2739         }
2740         if (qdev->small_buf_virt_addr != NULL) {
2741                 pci_free_consistent(qdev->pdev,
2742                                     qdev->small_buf_total_size,
2743                                     qdev->small_buf_virt_addr,
2744                                     qdev->small_buf_phy_addr);
2745
2746                 qdev->small_buf_virt_addr = NULL;
2747         }
2748 }
2749
2750 static void ql_free_large_buffers(struct ql3_adapter *qdev)
2751 {
2752         int i = 0;
2753         struct ql_rcv_buf_cb *lrg_buf_cb;
2754
2755         for (i = 0; i < qdev->num_large_buffers; i++) {
2756                 lrg_buf_cb = &qdev->lrg_buf[i];
2757                 if (lrg_buf_cb->skb) {
2758                         dev_kfree_skb(lrg_buf_cb->skb);
2759                         pci_unmap_single(qdev->pdev,
2760                                          dma_unmap_addr(lrg_buf_cb, mapaddr),
2761                                          dma_unmap_len(lrg_buf_cb, maplen),
2762                                          PCI_DMA_FROMDEVICE);
2763                         memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
2764                 } else {
2765                         break;
2766                 }
2767         }
2768 }
2769
2770 static void ql_init_large_buffers(struct ql3_adapter *qdev)
2771 {
2772         int i;
2773         struct ql_rcv_buf_cb *lrg_buf_cb;
2774         struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;
2775
2776         for (i = 0; i < qdev->num_large_buffers; i++) {
2777                 lrg_buf_cb = &qdev->lrg_buf[i];
2778                 buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high;
2779                 buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low;
2780                 buf_addr_ele++;
2781         }
2782         qdev->lrg_buf_index = 0;
2783         qdev->lrg_buf_skb_check = 0;
2784 }
2785
2786 static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
2787 {
2788         int i;
2789         struct ql_rcv_buf_cb *lrg_buf_cb;
2790         struct sk_buff *skb;
2791         dma_addr_t map;
2792         int err;
2793
2794         for (i = 0; i < qdev->num_large_buffers; i++) {
2795                 skb = netdev_alloc_skb(qdev->ndev,
2796                                        qdev->lrg_buffer_len);
2797                 if (unlikely(!skb)) {
2798                         /* Better luck next round */
2799                         netdev_err(qdev->ndev,
2800                                    "large buff alloc failed for %d bytes at index %d\n",
2801                                    qdev->lrg_buffer_len * 2, i);
2802                         ql_free_large_buffers(qdev);
2803                         return -ENOMEM;
2804                 } else {
2805
2806                         lrg_buf_cb = &qdev->lrg_buf[i];
2807                         memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
2808                         lrg_buf_cb->index = i;
2809                         lrg_buf_cb->skb = skb;
2810                         /*
2811                          * We save some space to copy the ethhdr from first
2812                          * buffer
2813                          */
2814                         skb_reserve(skb, QL_HEADER_SPACE);
2815                         map = pci_map_single(qdev->pdev,
2816                                              skb->data,
2817                                              qdev->lrg_buffer_len -
2818                                              QL_HEADER_SPACE,
2819                                              PCI_DMA_FROMDEVICE);
2820
2821                         err = pci_dma_mapping_error(qdev->pdev, map);
2822                         if (err) {
2823                                 netdev_err(qdev->ndev,
2824                                            "PCI mapping failed with error: %d\n",
2825                                            err);
2826                                 ql_free_large_buffers(qdev);
2827                                 return -ENOMEM;
2828                         }
2829
2830                         dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
2831                         dma_unmap_len_set(lrg_buf_cb, maplen,
2832                                           qdev->lrg_buffer_len -
2833                                           QL_HEADER_SPACE);
2834                         lrg_buf_cb->buf_phy_addr_low =
2835                             cpu_to_le32(LS_64BITS(map));
2836                         lrg_buf_cb->buf_phy_addr_high =
2837                             cpu_to_le32(MS_64BITS(map));
2838                 }
2839         }
2840         return 0;
2841 }
2842
2843 static void ql_free_send_free_list(struct ql3_adapter *qdev)
2844 {
2845         struct ql_tx_buf_cb *tx_cb;
2846         int i;
2847
2848         tx_cb = &qdev->tx_buf[0];
2849         for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
2850                 if (tx_cb->oal) {
2851                         kfree(tx_cb->oal);
2852                         tx_cb->oal = NULL;
2853                 }
2854                 tx_cb++;
2855         }
2856 }
2857
2858 static int ql_create_send_free_list(struct ql3_adapter *qdev)
2859 {
2860         struct ql_tx_buf_cb *tx_cb;
2861         int i;
2862         struct ob_mac_iocb_req *req_q_curr =
2863                                         qdev->req_q_virt_addr;
2864
2865         /* Create free list of transmit buffers */
2866         for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
2867
2868                 tx_cb = &qdev->tx_buf[i];
2869                 tx_cb->skb = NULL;
2870                 tx_cb->queue_entry = req_q_curr;
2871                 req_q_curr++;
2872                 tx_cb->oal = kmalloc(512, GFP_KERNEL);
2873                 if (tx_cb->oal == NULL)
2874                         return -1;
2875         }
2876         return 0;
2877 }
2878
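     /*
      * Allocate everything the port needs in one pass: shadow registers,
      * request/response rings, buffer queues, small/large receive buffers
      * and the transmit free list.  Any failure unwinds what has already
      * been allocated via the error labels below and returns -ENOMEM.
      */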
2879 static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
2880 {
2881         if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
2882                 qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES;
2883                 qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
2884         } else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
2885                 /* Bigger buffers, so fewer of them. */
2889                 qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
2890                 qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
2891         } else {
2892                 netdev_err(qdev->ndev, "Invalid MTU size: %d. Only %d and %d are accepted.\n",
2893                            qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE);
2894                 return -ENOMEM;
2895         }
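             /*
              * Pad each large receive buffer so a VLAN-tagged maximum-size
              * frame plus the reserved header-copy area fits.  The resulting
              * max_frame_size covers the frame as seen on the wire, including
              * the CRC but not the reserved space.
              */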
2896         qdev->num_large_buffers = qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
2897         qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
2898         qdev->max_frame_size =
2899             (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
2900
2901         /*
2902          * First allocate a page of shared memory and use it for shadow
2903          * locations of Network Request Queue Consumer Address Register and
2904          * Network Completion Queue Producer Index Register
2905          */
2906         qdev->shadow_reg_virt_addr =
2907             pci_alloc_consistent(qdev->pdev,
2908                                  PAGE_SIZE, &qdev->shadow_reg_phy_addr);
2909
2910         if (qdev->shadow_reg_virt_addr != NULL) {
2911                 qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr;
2912                 qdev->req_consumer_index_phy_addr_high =
2913                     MS_64BITS(qdev->shadow_reg_phy_addr);
2914                 qdev->req_consumer_index_phy_addr_low =
2915                     LS_64BITS(qdev->shadow_reg_phy_addr);
2916
2917                 qdev->prsp_producer_index =
2918                     (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8);
2919                 qdev->rsp_producer_index_phy_addr_high =
2920                     qdev->req_consumer_index_phy_addr_high;
2921                 qdev->rsp_producer_index_phy_addr_low =
2922                     qdev->req_consumer_index_phy_addr_low + 8;
2923         } else {
2924                 netdev_err(qdev->ndev, "shadowReg Alloc failed\n");
2925                 return -ENOMEM;
2926         }
2927
2928         if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
2929                 netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n");
2930                 goto err_req_rsp;
2931         }
2932
2933         if (ql_alloc_buffer_queues(qdev) != 0) {
2934                 netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n");
2935                 goto err_buffer_queues;
2936         }
2937
2938         if (ql_alloc_small_buffers(qdev) != 0) {
2939                 netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n");
2940                 goto err_small_buffers;
2941         }
2942
2943         if (ql_alloc_large_buffers(qdev) != 0) {
2944                 netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n");
2945                 goto err_small_buffers;
2946         }
2947
2948         /* Initialize the large buffer queue. */
2949         ql_init_large_buffers(qdev);
2950         if (ql_create_send_free_list(qdev))
2951                 goto err_free_list;
2952
2953         qdev->rsp_current = qdev->rsp_q_virt_addr;
2954
2955         return 0;
2956 err_free_list:
2957         ql_free_send_free_list(qdev);
2958 err_small_buffers:
2959         ql_free_buffer_queues(qdev);
2960 err_buffer_queues:
2961         ql_free_net_req_rsp_queues(qdev);
2962 err_req_rsp:
2963         pci_free_consistent(qdev->pdev,
2964                             PAGE_SIZE,
2965                             qdev->shadow_reg_virt_addr,
2966                             qdev->shadow_reg_phy_addr);
2967
2968         return -ENOMEM;
2969 }
2970
2971 static void ql_free_mem_resources(struct ql3_adapter *qdev)
2972 {
2973         ql_free_send_free_list(qdev);
2974         ql_free_large_buffers(qdev);
2975         ql_free_small_buffers(qdev);
2976         ql_free_buffer_queues(qdev);
2977         ql_free_net_req_rsp_queues(qdev);
2978         if (qdev->shadow_reg_virt_addr != NULL) {
2979                 pci_free_consistent(qdev->pdev,
2980                                     PAGE_SIZE,
2981                                     qdev->shadow_reg_virt_addr,
2982                                     qdev->shadow_reg_phy_addr);
2983                 qdev->shadow_reg_virt_addr = NULL;
2984         }
2985 }
2986
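     /*
      * Program the layout of the chip's local RAM (buflet pool, IP and TCP
      * hash tables, NCB and DRB tables) from the values stored in NVRAM.
      * The region is guarded by the DDR RAM hardware semaphore.
      */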
2987 static int ql_init_misc_registers(struct ql3_adapter *qdev)
2988 {
2989         struct ql3xxx_local_ram_registers __iomem *local_ram =
2990             (void __iomem *)qdev->mem_map_registers;
2991
2992         if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
2993                             (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
2994                              2) << 4))
2995                 return -1;
2996
2997         ql_write_page2_reg(qdev,
2998                            &local_ram->bufletSize, qdev->nvram_data.bufletSize);
2999
3000         ql_write_page2_reg(qdev,
3001                            &local_ram->maxBufletCount,
3002                            qdev->nvram_data.bufletCount);
3003
3004         ql_write_page2_reg(qdev,
3005                            &local_ram->freeBufletThresholdLow,
3006                            (qdev->nvram_data.tcpWindowThreshold25 << 16) |
3007                            (qdev->nvram_data.tcpWindowThreshold0));
3008
3009         ql_write_page2_reg(qdev,
3010                            &local_ram->freeBufletThresholdHigh,
3011                            qdev->nvram_data.tcpWindowThreshold50);
3012
3013         ql_write_page2_reg(qdev,
3014                            &local_ram->ipHashTableBase,
3015                            (qdev->nvram_data.ipHashTableBaseHi << 16) |
3016                            qdev->nvram_data.ipHashTableBaseLo);
3017         ql_write_page2_reg(qdev,
3018                            &local_ram->ipHashTableCount,
3019                            qdev->nvram_data.ipHashTableSize);
3020         ql_write_page2_reg(qdev,
3021                            &local_ram->tcpHashTableBase,
3022                            (qdev->nvram_data.tcpHashTableBaseHi << 16) |
3023                            qdev->nvram_data.tcpHashTableBaseLo);
3024         ql_write_page2_reg(qdev,
3025                            &local_ram->tcpHashTableCount,
3026                            qdev->nvram_data.tcpHashTableSize);
3027         ql_write_page2_reg(qdev,
3028                            &local_ram->ncbBase,
3029                            (qdev->nvram_data.ncbTableBaseHi << 16) |
3030                            qdev->nvram_data.ncbTableBaseLo);
3031         ql_write_page2_reg(qdev,
3032                            &local_ram->maxNcbCount,
3033                            qdev->nvram_data.ncbTableSize);
3034         ql_write_page2_reg(qdev,
3035                            &local_ram->drbBase,
3036                            (qdev->nvram_data.drbTableBaseHi << 16) |
3037                            qdev->nvram_data.drbTableBaseLo);
3038         ql_write_page2_reg(qdev,
3039                            &local_ram->maxDrbCount,
3040                            qdev->nvram_data.drbTableSize);
3041         ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
3042         return 0;
3043 }
3044
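     /*
      * One-time hardware bring-up for this port: take the PHY out of reset,
      * point the chip at the request/response rings and receive buffer
      * queues, program the MAC address, then wait for the port to report
      * initialization complete (PORT_STATUS_IC) before enabling the
      * Ethernet function.
      */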
3045 static int ql_adapter_initialize(struct ql3_adapter *qdev)
3046 {
3047         u32 value;
3048         struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
3049         struct ql3xxx_host_memory_registers __iomem *hmem_regs =
3050                                                 (void __iomem *)port_regs;
3051         u32 delay = 10;
3052         int status = 0;
3053         unsigned long hw_flags = 0;
3054
3055         if (ql_mii_setup(qdev))
3056                 return -1;
3057
3058         /* Bring the PHY out of reset. */
3059         ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
3060                             (ISP_SERIAL_PORT_IF_WE |
3061                              (ISP_SERIAL_PORT_IF_WE << 16)));
3062         /* Give the PHY time to come out of reset. */
3063         mdelay(100);
3064         qdev->port_link_state = LS_DOWN;
3065         netif_carrier_off(qdev->ndev);
3066
3067         /* V2 chip fix for ARS-39168. */
3068         ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
3069                             (ISP_SERIAL_PORT_IF_SDE |
3070                              (ISP_SERIAL_PORT_IF_SDE << 16)));
3071
3072         /* Request Queue Registers */
3073         *((u32 *) (qdev->preq_consumer_index)) = 0;
3074         atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES);
3075         qdev->req_producer_index = 0;
3076
3077         ql_write_page1_reg(qdev,
3078                            &hmem_regs->reqConsumerIndexAddrHigh,
3079                            qdev->req_consumer_index_phy_addr_high);
3080         ql_write_page1_reg(qdev,
3081                            &hmem_regs->reqConsumerIndexAddrLow,
3082                            qdev->req_consumer_index_phy_addr_low);
3083
3084         ql_write_page1_reg(qdev,
3085                            &hmem_regs->reqBaseAddrHigh,
3086                            MS_64BITS(qdev->req_q_phy_addr));
3087         ql_write_page1_reg(qdev,
3088                            &hmem_regs->reqBaseAddrLow,
3089                            LS_64BITS(qdev->req_q_phy_addr));
3090         ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);
3091
3092         /* Response Queue Registers */
3093         *((__le16 *) (qdev->prsp_producer_index)) = 0;
3094         qdev->rsp_consumer_index = 0;
3095         qdev->rsp_current = qdev->rsp_q_virt_addr;
3096
3097         ql_write_page1_reg(qdev,
3098                            &hmem_regs->rspProducerIndexAddrHigh,
3099                            qdev->rsp_producer_index_phy_addr_high);
3100
3101         ql_write_page1_reg(qdev,
3102                            &hmem_regs->rspProducerIndexAddrLow,
3103                            qdev->rsp_producer_index_phy_addr_low);
3104
3105         ql_write_page1_reg(qdev,
3106                            &hmem_regs->rspBaseAddrHigh,
3107                            MS_64BITS(qdev->rsp_q_phy_addr));
3108
3109         ql_write_page1_reg(qdev,
3110                            &hmem_regs->rspBaseAddrLow,
3111                            LS_64BITS(qdev->rsp_q_phy_addr));
3112
3113         ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);
3114
3115         /* Large Buffer Queue */
3116         ql_write_page1_reg(qdev,
3117                            &hmem_regs->rxLargeQBaseAddrHigh,
3118                            MS_64BITS(qdev->lrg_buf_q_phy_addr));
3119
3120         ql_write_page1_reg(qdev,
3121                            &hmem_regs->rxLargeQBaseAddrLow,
3122                            LS_64BITS(qdev->lrg_buf_q_phy_addr));
3123
3124         ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength, qdev->num_lbufq_entries);
3125
3126         ql_write_page1_reg(qdev,
3127                            &hmem_regs->rxLargeBufferLength,
3128                            qdev->lrg_buffer_len);
3129
3130         /* Small Buffer Queue */
3131         ql_write_page1_reg(qdev,
3132                            &hmem_regs->rxSmallQBaseAddrHigh,
3133                            MS_64BITS(qdev->small_buf_q_phy_addr));
3134
3135         ql_write_page1_reg(qdev,
3136                            &hmem_regs->rxSmallQBaseAddrLow,
3137                            LS_64BITS(qdev->small_buf_q_phy_addr));
3138
3139         ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
3140         ql_write_page1_reg(qdev,
3141                            &hmem_regs->rxSmallBufferLength,
3142                            QL_SMALL_BUFFER_SIZE);
3143
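             /*
              * Both receive buffer queues start out fully populated, so the
              * producer indices begin at the last entry.  The release
              * counters appear to batch replenishment so the producer index
              * is only pushed back to the chip every eight returned buffers.
              */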
3144         qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
3145         qdev->small_buf_release_cnt = 8;
3146         qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1;
3147         qdev->lrg_buf_release_cnt = 8;
3148         qdev->lrg_buf_next_free =
3149             (struct bufq_addr_element *)qdev->lrg_buf_q_virt_addr;
3150         qdev->small_buf_index = 0;
3151         qdev->lrg_buf_index = 0;
3152         qdev->lrg_buf_free_count = 0;
3153         qdev->lrg_buf_free_head = NULL;
3154         qdev->lrg_buf_free_tail = NULL;
3155
3156         ql_write_common_reg(qdev,
3157                             &port_regs->CommonRegs.
3158                             rxSmallQProducerIndex,
3159                             qdev->small_buf_q_producer_index);
3160         ql_write_common_reg(qdev,
3161                             &port_regs->CommonRegs.
3162                             rxLargeQProducerIndex,
3163                             qdev->lrg_buf_q_producer_index);
3164
3165         /*
3166          * Find out if the chip has already been initialized.  If it has, then
3167          * we skip some of the initialization.
3168          */
3169         clear_bit(QL_LINK_MASTER, &qdev->flags);
3170         value = ql_read_page0_reg(qdev, &port_regs->portStatus);
3171         if ((value & PORT_STATUS_IC) == 0) {
3172
3173                 /* Chip has not been configured yet, so let it rip. */
3174                 if (ql_init_misc_registers(qdev)) {
3175                         status = -1;
3176                         goto out;
3177                 }
3178
3179                 value = qdev->nvram_data.tcpMaxWindowSize;
3180                 ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);
3181
3182                 value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;
3183
3184                 if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
3185                                     (QL_RESOURCE_BITS_BASE_CODE |
3186                                      (qdev->mac_index) * 2) << 13)) {
3187                         status = -1;
3188                         goto out;
3189                 }
3190                 ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
3191                 ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
3192                                    (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) <<
3193                                      16) | (INTERNAL_CHIP_SD |
3194                                             INTERNAL_CHIP_WE)));
3195                 ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
3196         }
3197
3198         if (qdev->mac_index)
3199                 ql_write_page0_reg(qdev,
3200                                    &port_regs->mac1MaxFrameLengthReg,
3201                                    qdev->max_frame_size);
3202         else
3203                 ql_write_page0_reg(qdev,
3204                                    &port_regs->mac0MaxFrameLengthReg,
3205                                    qdev->max_frame_size);
3206
3207         if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
3208                             (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
3209                              2) << 7)) {
3210                 status = -1;
3211                 goto out;
3212         }
3213
3214         PHY_Setup(qdev);
3215         ql_init_scan_mode(qdev);
3216         ql_get_phy_owner(qdev);
3217
3218         /* Load the MAC Configuration */
3219
3220         /* Program lower 32 bits of the MAC address */
3221         ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3222                            (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
3223         ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3224                            ((qdev->ndev->dev_addr[2] << 24)
3225                             | (qdev->ndev->dev_addr[3] << 16)
3226                             | (qdev->ndev->dev_addr[4] << 8)
3227                             | qdev->ndev->dev_addr[5]));
3228
3229         /* Program top 16 bits of the MAC address */
3230         ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3231                            ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
3232         ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3233                            ((qdev->ndev->dev_addr[0] << 8)
3234                             | qdev->ndev->dev_addr[1]));
3235
3236         /* Enable Primary MAC */
3237         ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3238                            ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) |
3239                             MAC_ADDR_INDIRECT_PTR_REG_PE));
3240
3241         /* Clear Primary and Secondary IP addresses */
3242         ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
3243                            ((IP_ADDR_INDEX_REG_MASK << 16) |
3244                             (qdev->mac_index << 2)));
3245         ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
3246
3247         ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
3248                            ((IP_ADDR_INDEX_REG_MASK << 16) |
3249                             ((qdev->mac_index << 2) + 1)));
3250         ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
3251
3252         ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
3253
3254         /* Indicate Configuration Complete */
3255         ql_write_page0_reg(qdev,
3256                            &port_regs->portControl,
3257                            ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC));
3258
3259         do {
3260                 value = ql_read_page0_reg(qdev, &port_regs->portStatus);
3261                 if (value & PORT_STATUS_IC)
3262                         break;
3263                 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3264                 msleep(500);
3265                 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3266         } while (--delay);
3267
3268         if (delay == 0) {
3269                 netdev_err(qdev->ndev, "HW initialization timed out\n");
3270                 status = -1;
3271                 goto out;
3272         }
3273
3274         /* Enable Ethernet Function */
3275         if (qdev->device_id == QL3032_DEVICE_ID) {
3276                 value =
3277                     (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE |
3278                      QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4 |
3279                         QL3032_PORT_CONTROL_ET);
3280                 ql_write_page0_reg(qdev, &port_regs->functionControl,
3281                                    ((value << 16) | value));
3282         } else {
3283                 value =
3284                     (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
3285                      PORT_CONTROL_HH);
3286                 ql_write_page0_reg(qdev, &port_regs->portControl,
3287                                    ((value << 16) | value));
3288         }
3289
3290
3291 out:
3292         return status;
3293 }
3294
3295 /*
3296  * Caller holds hw_lock.
3297  */
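     /*
      * Reset sequence: assert the soft reset bit (SR) and poll up to 5
      * seconds for the firmware to clear it, clearing any latched network
      * reset interrupt (RI) left behind.  If SR never clears, escalate to
      * a force soft reset (FSR) and poll again.  Returns non-zero if the
      * chip never comes back.
      */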
3298 static int ql_adapter_reset(struct ql3_adapter *qdev)
3299 {
3300         struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
3301         int status = 0;
3302         u16 value;
3303         int max_wait_time;
3304
3305         set_bit(QL_RESET_ACTIVE, &qdev->flags);
3306         clear_bit(QL_RESET_DONE, &qdev->flags);
3307
3308         /*
3309          * Issue soft reset to chip.
3310          */
3311         netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n");
3312         ql_write_common_reg(qdev,
3313                             &port_regs->CommonRegs.ispControlStatus,
3314                             ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));
3315
3316         /* Wait up to 5 seconds for the reset to complete. */
3317         netdev_printk(KERN_DEBUG, qdev->ndev,
3318                       "Waiting up to 5 seconds for reset to complete\n");
3319
3320         /* Wait until the firmware tells us the Soft Reset is done */
3321         max_wait_time = 5;
3322         do {
3323                 value =
3324                     ql_read_common_reg(qdev,
3325                                        &port_regs->CommonRegs.ispControlStatus);
3326                 if ((value & ISP_CONTROL_SR) == 0)
3327                         break;
3328
3329                 ssleep(1);
3330         } while (--max_wait_time);
3331
3332         /*
3333          * Also, make sure that the Network Reset Interrupt bit has been
3334          * cleared after the soft reset has taken place.
3335          */
3336         value =
3337             ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
3338         if (value & ISP_CONTROL_RI) {
3339                 netdev_printk(KERN_DEBUG, qdev->ndev,
3340                               "clearing RI after reset\n");
3341                 ql_write_common_reg(qdev,
3342                                     &port_regs->CommonRegs.
3343                                     ispControlStatus,
3344                                     ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
3345         }
3346
3347         if (max_wait_time == 0) {
3348                 /* Issue Force Soft Reset */
3349                 ql_write_common_reg(qdev,
3350                                     &port_regs->CommonRegs.
3351                                     ispControlStatus,
3352                                     ((ISP_CONTROL_FSR << 16) |
3353                                      ISP_CONTROL_FSR));
3354                 /*
3355                  * Wait until the firmware tells us the Force Soft Reset is
3356                  * done
3357                  */
3358                 max_wait_time = 5;
3359                 do {
3360                         value =
3361                             ql_read_common_reg(qdev,
3362                                                &port_regs->CommonRegs.
3363                                                ispControlStatus);
3364                         if ((value & ISP_CONTROL_FSR) == 0)
3365                                 break;
3367                         ssleep(1);
3368                 } while (--max_wait_time);
3369         }
3370         if (max_wait_time == 0)
3371                 status = 1;
3372
3373         clear_bit(QL_RESET_ACTIVE, &qdev->flags);
3374         set_bit(QL_RESET_DONE, &qdev->flags);
3375         return status;
3376 }
3377
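     /*
      * Decode ispControlStatus to determine which network function of the
      * dual-port chip this instance is driving (MAC 0 or MAC 1), and set
      * the outbound opcode, mailbox bit mask and PHY address to match.
      * The port status media bits indicate an optical or copper link.
      */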
3378 static void ql_set_mac_info(struct ql3_adapter *qdev)
3379 {
3380         struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
3381         u32 value, port_status;
3382         u8 func_number;
3383
3384         /* Get the function number */
3385         value =
3386             ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
3387         func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK);
3388         port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
3389         switch (value & ISP_CONTROL_FN_MASK) {
3390         case ISP_CONTROL_FN0_NET:
3391                 qdev->mac_index = 0;
3392                 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
3393                 qdev->mb_bit_mask = FN0_MA_BITS_MASK;
3394                 qdev->PHYAddr = PORT0_PHY_ADDRESS;
3395                 if (port_status & PORT_STATUS_SM0)
3396                         set_bit(QL_LINK_OPTICAL, &qdev->flags);
3397                 else
3398                         clear_bit(QL_LINK_OPTICAL, &qdev->flags);
3399                 break;
3400
3401         case ISP_CONTROL_FN1_NET:
3402                 qdev->mac_index = 1;
3403                 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
3404                 qdev->mb_bit_mask = FN1_MA_BITS_MASK;
3405                 qdev->PHYAddr = PORT1_PHY_ADDRESS;
3406                 if (port_status & PORT_STATUS_SM1)
3407                         set_bit(QL_LINK_OPTICAL, &qdev->flags);
3408                 else
3409                         clear_bit(QL_LINK_OPTICAL, &qdev->flags);
3410                 break;
3411
3412         case ISP_CONTROL_FN0_SCSI:
3413         case ISP_CONTROL_FN1_SCSI:
3414         default:
3415                 netdev_printk(KERN_DEBUG, qdev->ndev,
3416                               "Invalid function number, ispControlStatus = 0x%x\n",
3417                               value);
3418                 break;
3419         }
3420         qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8;
3421 }
3422
3423 static void ql_display_dev_info(struct net_device *ndev)
3424 {
3425         struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
3426         struct pci_dev *pdev = qdev->pdev;
3427
3428         netdev_info(ndev,
3429                     "%s Adapter %d RevisionID %d found %s on PCI slot %d\n",
3430                     DRV_NAME, qdev->index, qdev->chip_rev_id,
3431                     (qdev->device_id == QL3032_DEVICE_ID) ? "QLA3032" : "QLA3022",
3432                     qdev->pci_slot);
3433         netdev_info(ndev, "%s Interface\n",
3434                 test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER");
3435
3436         /*
3437          * Print PCI bus width/type.
3438          */
3439         netdev_info(ndev, "Bus interface is %s %s\n",
3440                     ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
3441                     ((qdev->pci_x) ? "PCI-X" : "PCI"));
3442
3443         netdev_info(ndev, "mem IO base address adjusted = 0x%p\n",
3444                     qdev->mem_map_registers);
3445         netdev_info(ndev, "Interrupt number = %d\n", pdev->irq);
3446
3447         netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr);
3448 }
3449
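     /*
      * Tear down in roughly the reverse order of ql_adapter_up(): quiesce
      * the netdev, disable and free the interrupt (dropping MSI if it was
      * enabled), stop the watchdog timer and NAPI, optionally soft-reset
      * the chip under the driver lock, and free all memory resources.
      */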
3450 static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
3451 {
3452         struct net_device *ndev = qdev->ndev;
3453         int retval = 0;
3454
3455         netif_stop_queue(ndev);
3456         netif_carrier_off(ndev);
3457
3458         clear_bit(QL_ADAPTER_UP, &qdev->flags);
3459         clear_bit(QL_LINK_MASTER, &qdev->flags);
3460
3461         ql_disable_interrupts(qdev);
3462
3463         free_irq(qdev->pdev->irq, ndev);
3464
3465         if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3466                 netdev_info(qdev->ndev, "calling pci_disable_msi()\n");
3467                 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3468                 pci_disable_msi(qdev->pdev);
3469         }
3470
3471         del_timer_sync(&qdev->adapter_timer);
3472
3473         napi_disable(&qdev->napi);
3474
3475         if (do_reset) {
3477                 unsigned long hw_flags;
3478
3479                 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3480                 if (ql_wait_for_drvr_lock(qdev)) {
3481                         if (ql_adapter_reset(qdev))
3482                                 netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n",
3483                                            qdev->index);
3485                         netdev_err(ndev,
3486                                    "Releasing driver lock via chip reset\n");
3487                 } else {
3488                         netdev_err(ndev,
3489                                    "Could not acquire driver lock to do reset!\n");
3490                         retval = -1;
3491                 }
3492                 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3493         }
3494         ql_free_mem_resources(qdev);
3495         return retval;
3496 }
3497
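     /*
      * Bring the port up: allocate memory resources, enable MSI when
      * requested (falling back to a shared legacy interrupt), hook the
      * ISR, and initialize the hardware under the driver semaphore before
      * turning on NAPI and interrupts.
      */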
3498 static int ql_adapter_up(struct ql3_adapter *qdev)
3499 {
3500         struct net_device *ndev = qdev->ndev;
3501         int err;
3502         unsigned long irq_flags = IRQF_SAMPLE_RANDOM | IRQF_SHARED;
3503         unsigned long hw_flags;
3504
3505         if (ql_alloc_mem_resources(qdev)) {
3506                 netdev_err(ndev, "Unable to allocate buffers\n");
3507                 return -ENOMEM;
3508         }
3509
3510         if (qdev->msi) {
3511                 if (pci_enable_msi(qdev->pdev)) {
3512                         netdev_err(ndev,
3513                                    "User requested MSI, but MSI failed to initialize.  Continuing without MSI.\n");
3514                         qdev->msi = 0;
3515                 } else {
3516                         netdev_info(ndev, "MSI Enabled...\n");
3517                         set_bit(QL_MSI_ENABLED, &qdev->flags);
3518                         irq_flags &= ~IRQF_SHARED;
3519                 }
3520         }
3521
3522         err = request_irq(qdev->pdev->irq, ql3xxx_isr, irq_flags,
3523                           ndev->name, ndev);
3524         if (err) {
3525                 netdev_err(ndev,
3526                            "Failed to reserve interrupt %d, already in use\n",
3527                            qdev->pdev->irq);
3528                 goto err_irq;
3529         }
3530
3531         spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3532
3533         if ((err = ql_wait_for_drvr_lock(qdev))) {
3534                 if ((err = ql_adapter_initialize(qdev))) {
3535                         netdev_err(ndev, "Unable to initialize adapter\n");
3536                         goto err_init;
3537                 }
3538                 netdev_err(ndev, "Releasing driver lock\n");
3539                 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3540         } else {
3541                 netdev_err(ndev, "Could not acquire driver lock\n");
                     /* don't report success when the lock was never taken */
                     err = -ENODEV;
3542                 goto err_lock;
3543         }
3544
3545         spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3546
3547         set_bit(QL_ADAPTER_UP,&qdev->flags);
3548
3549         mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
3550
3551         napi_enable(&qdev->napi);
3552         ql_enable_interrupts(qdev);
3553         return 0;
3554
3555 err_init:
3556         ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3557 err_lock:
3558         spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3559         free_irq(qdev->pdev->irq, ndev);
3560 err_irq:
3561         if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3562                 netdev_info(ndev, "calling pci_disable_msi()\n");
3563                 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3564                 pci_disable_msi(qdev->pdev);
3565         }
3566         return err;
3567 }
3568
3569 static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
3570 {
3571         if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) {
3572                 netdev_err(qdev->ndev,
3573                            "Driver up/down cycle failed, closing device\n");
3574                 rtnl_lock();
3575                 dev_close(qdev->ndev);
3576                 rtnl_unlock();
3577                 return -1;
3578         }
3579         return 0;
3580 }
3581
3582 static int ql3xxx_close(struct net_device *ndev)
3583 {
3584         struct ql3_adapter *qdev = netdev_priv(ndev);
3585
3586         /*
3587          * Wait for device to recover from a reset.
3588          * (Rarely happens, but possible.)
3589          */
3590         while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3591                 msleep(50);
3592
3593         ql_adapter_down(qdev, QL_DO_RESET);
3594         return 0;
3595 }
3596
3597 static int ql3xxx_open(struct net_device *ndev)
3598 {
3599         struct ql3_adapter *qdev = netdev_priv(ndev);
3600         return ql_adapter_up(qdev);
3601 }
3602
3603 static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
3604 {
3605         struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
3606         struct ql3xxx_port_registers __iomem *port_regs =
3607                         qdev->mem_map_registers;
3608         struct sockaddr *addr = p;
3609         unsigned long hw_flags;
3610
3611         if (netif_running(ndev))
3612                 return -EBUSY;
3613
3614         if (!is_valid_ether_addr(addr->sa_data))
3615                 return -EADDRNOTAVAIL;
3616
3617         memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3618
3619         spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3620         /* Program lower 32 bits of the MAC address */
3621         ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3622                            (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
3623         ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3624                            ((ndev->dev_addr[2] << 24)
3625                             | (ndev->dev_addr[3] << 16)
3626                             | (ndev->dev_addr[4] << 8) | ndev->dev_addr[5]));
3627
3628         /* Program top 16 bits of the MAC address */
3629         ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3630                            ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
3631         ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3632                            ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1]));
3633         spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3634
3635         return 0;
3636 }
3637
3638 static void ql3xxx_tx_timeout(struct net_device *ndev)
3639 {
3640         struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
3641
3642         netdev_err(ndev, "Resetting...\n");
3643         /*
3644          * Stop the queues, we've got a problem.
3645          */
3646         netif_stop_queue(ndev);
3647
3648         /*
3649          * Wake up the worker to process this event.
3650          */
3651         queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
3652 }
3653
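     /*
      * Deferred reset handler: unmap and free any skbs still held by the
      * send queue, then wait up to 10 seconds for the chip's soft reset to
      * complete before cycling the adapter.  If the reset never completes,
      * cycle with a full chip reset instead.
      */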
3654 static void ql_reset_work(struct work_struct *work)
3655 {
3656         struct ql3_adapter *qdev =
3657                 container_of(work, struct ql3_adapter, reset_work.work);
3658         struct net_device *ndev = qdev->ndev;
3659         u32 value;
3660         struct ql_tx_buf_cb *tx_cb;
3661         int max_wait_time, i;
3662         struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
3663         unsigned long hw_flags;
3664
3665         /* The flags are bit numbers; test each one individually. */
             if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
                 test_bit(QL_RESET_START, &qdev->flags)) {
3666                 clear_bit(QL_LINK_MASTER, &qdev->flags);
3667
3668                 /*
3669                  * Loop through the active list and return the skb.
3670                  */
3671                 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
3672                         int j;
3673                         tx_cb = &qdev->tx_buf[i];
3674                         if (tx_cb->skb) {
3675                                 netdev_printk(KERN_DEBUG, ndev,
3676                                               "Freeing lost SKB\n");
3677                                 pci_unmap_single(qdev->pdev,
3678                                          dma_unmap_addr(&tx_cb->map[0], mapaddr),
3679                                          dma_unmap_len(&tx_cb->map[0], maplen),
3680                                          PCI_DMA_TODEVICE);
3681                                 for (j = 1; j < tx_cb->seg_count; j++) {
3682                                         pci_unmap_page(qdev->pdev,
3683                                                dma_unmap_addr(&tx_cb->map[j], mapaddr),
3684                                                dma_unmap_len(&tx_cb->map[j], maplen),
3685                                                PCI_DMA_TODEVICE);
3686                                 }
3687                                 dev_kfree_skb(tx_cb->skb);
3688                                 tx_cb->skb = NULL;
3689                         }
3690                 }
3691
3692                 netdev_err(ndev, "Clearing NRI after reset\n");
3693                 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3694                 ql_write_common_reg(qdev,
3695                                     &port_regs->CommonRegs.
3696                                     ispControlStatus,
3697                                     ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
3698                 /*
3699                  * Wait for the Soft Reset to complete.
3700                  */
3701                 max_wait_time = 10;
3702                 do {
3703                         value = ql_read_common_reg(qdev,
3704                                                    &port_regs->CommonRegs.
3706                                                    ispControlStatus);
3707                         if ((value & ISP_CONTROL_SR) == 0) {
3708                                 netdev_printk(KERN_DEBUG, ndev,
3709                                               "reset completed\n");
3710                                 break;
3711                         }
3712
3713                         if (value & ISP_CONTROL_RI) {
3714                                 netdev_printk(KERN_DEBUG, ndev,
3715                                               "clearing NRI after reset\n");
3716                                 ql_write_common_reg(qdev,
3717                                                     &port_regs->
3718                                                     CommonRegs.
3719                                                     ispControlStatus,
3720                                                     ((ISP_CONTROL_RI <<
3721                                                       16) | ISP_CONTROL_RI));
3722                         }
3723
3724                         spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3725                         ssleep(1);
3726                         spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3727                 } while (--max_wait_time);
3728                 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3729
3730                 if (value & ISP_CONTROL_SR) {
3731
3732                         /*
3733                          * Set the reset flags and clear the board again.
3734                          * Nothing else to do...
3735                          */
3736                         netdev_err(ndev,
3737                                    "Timed out waiting for reset to complete\n");
3738                         netdev_err(ndev, "Performing a full adapter reset\n");
3739                         clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
3740                         clear_bit(QL_RESET_START, &qdev->flags);
3741                         ql_cycle_adapter(qdev, QL_DO_RESET);
3742                         return;
3743                 }
3744
3745                 clear_bit(QL_RESET_ACTIVE, &qdev->flags);
3746                 clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
3747                 clear_bit(QL_RESET_START, &qdev->flags);
3748                 ql_cycle_adapter(qdev, QL_NO_RESET);
3749         }
3750 }
3751
3752 static void ql_tx_timeout_work(struct work_struct *work)
3753 {
3754         struct ql3_adapter *qdev =
3755                 container_of(work, struct ql3_adapter, tx_timeout_work.work);
3756
3757         ql_cycle_adapter(qdev, QL_DO_RESET);
3758 }
3759
3760 static void ql_get_board_info(struct ql3_adapter *qdev)
3761 {
3762         struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
3763         u32 value;
3764
3765         value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);
3766
3767         qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
3768         if (value & PORT_STATUS_64)
3769                 qdev->pci_width = 64;
3770         else
3771                 qdev->pci_width = 32;
3772         if (value & PORT_STATUS_X)
3773                 qdev->pci_x = 1;
3774         else
3775                 qdev->pci_x = 0;
3776         qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
3777 }
3778
3779 static void ql3xxx_timer(unsigned long ptr)
3780 {
3781         struct ql3_adapter *qdev = (struct ql3_adapter *)ptr;
3782         queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0);
3783 }
3784
3785 static const struct net_device_ops ql3xxx_netdev_ops = {
3786         .ndo_open               = ql3xxx_open,
3787         .ndo_start_xmit         = ql3xxx_send,
3788         .ndo_stop               = ql3xxx_close,
3789         .ndo_set_multicast_list = NULL, /* not allowed on NIC side */
3790         .ndo_change_mtu         = eth_change_mtu,
3791         .ndo_validate_addr      = eth_validate_addr,
3792         .ndo_set_mac_address    = ql3xxx_set_mac_address,
3793         .ndo_tx_timeout         = ql3xxx_tx_timeout,
3794 };
3795
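     /*
      * PCI probe: map the register BAR, read the MAC address and MTU from
      * NVRAM, prefer 64-bit DMA when the platform supports it, and register
      * the net_device together with the workqueue and watchdog timer used
      * for resets and link-state handling.
      */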
3796 static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3797                                   const struct pci_device_id *pci_entry)
3798 {
3799         struct net_device *ndev = NULL;
3800         struct ql3_adapter *qdev = NULL;
3801         static int cards_found;
3802         int uninitialized_var(pci_using_dac), err;
3803
3804         err = pci_enable_device(pdev);
3805         if (err) {
3806                 pr_err("%s cannot enable PCI device\n", pci_name(pdev));
3807                 goto err_out;
3808         }
3809
3810         err = pci_request_regions(pdev, DRV_NAME);
3811         if (err) {
3812                 pr_err("%s cannot obtain PCI resources\n", pci_name(pdev));
3813                 goto err_out_disable_pdev;
3814         }
3815
3816         pci_set_master(pdev);
3817
3818         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3819                 pci_using_dac = 1;
3820                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3821         } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
3822                 pci_using_dac = 0;
3823                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3824         }
3825
3826         if (err) {
3827                 pr_err("%s no usable DMA configuration\n", pci_name(pdev));
3828                 goto err_out_free_regions;
3829         }
3830
3831         ndev = alloc_etherdev(sizeof(struct ql3_adapter));
3832         if (!ndev) {
3833                 pr_err("%s could not alloc etherdev\n", pci_name(pdev));
3834                 err = -ENOMEM;
3835                 goto err_out_free_regions;
3836         }
3837
3838         SET_NETDEV_DEV(ndev, &pdev->dev);
3839
3840         pci_set_drvdata(pdev, ndev);
3841
3842         qdev = netdev_priv(ndev);
3843         qdev->index = cards_found;
3844         qdev->ndev = ndev;
3845         qdev->pdev = pdev;
3846         qdev->device_id = pci_entry->device;
3847         qdev->port_link_state = LS_DOWN;
3848         if (msi)
3849                 qdev->msi = 1;
3850
3851         qdev->msg_enable = netif_msg_init(debug, default_msg);
3852
3853         if (pci_using_dac)
3854                 ndev->features |= NETIF_F_HIGHDMA;
3855         if (qdev->device_id == QL3032_DEVICE_ID)
3856                 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
3857
3858         qdev->mem_map_registers = pci_ioremap_bar(pdev, 1);
3859         if (!qdev->mem_map_registers) {
3860                 pr_err("%s: cannot map device registers\n", pci_name(pdev));
3861                 err = -EIO;
3862                 goto err_out_free_ndev;
3863         }
3864
3865         spin_lock_init(&qdev->adapter_lock);
3866         spin_lock_init(&qdev->hw_lock);
3867
3868         /* Set driver entry points */
3869         ndev->netdev_ops = &ql3xxx_netdev_ops;
3870         SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
3871         ndev->watchdog_timeo = 5 * HZ;
3872
3873         netif_napi_add(ndev, &qdev->napi, ql_poll, 64);
3874
3875         ndev->irq = pdev->irq;
3876
3877         /* make sure the EEPROM is good */
3878         if (ql_get_nvram_params(qdev)) {
3879                 pr_alert("%s: Adapter #%d, Invalid NVRAM parameters\n",
3880                          __func__, qdev->index);
3881                 err = -EIO;
3882                 goto err_out_iounmap;
3883         }
3884
3885         ql_set_mac_info(qdev);
3886
3887         /* Validate and set parameters */
3888         if (qdev->mac_index) {
3889                 ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac;
3890                 ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress);
3891         } else {
3892                 ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac;
3893                 ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress);
3894         }
3895         memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
3896
3897         ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;
3898
3899         /* Record PCI bus information. */
3900         ql_get_board_info(qdev);
3901
3902         /*
3903          * Set the Maximum Memory Read Byte Count value. We do this to handle
3904          * jumbo frames.
3905          */
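             /*
              * 0x4e is presumably the PCI-X Command register in this
              * adapter's configuration space, and 0x0036 its desired
              * maximum memory read byte count; both magic values are
              * inherited unchanged from the original driver.
              */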
3906         if (qdev->pci_x)
3907                 pci_write_config_word(pdev, 0x4e, 0x0036);
3909
3910         err = register_netdev(ndev);
3911         if (err) {
3912                 pr_err("%s: cannot register net device\n", pci_name(pdev));
3913                 goto err_out_iounmap;
3914         }
3915
3916         /* we're going to reset, so assume we have no link for now */
3917
3918         netif_carrier_off(ndev);
3919         netif_stop_queue(ndev);
3920
3921         qdev->workqueue = create_singlethread_workqueue(ndev->name);
3922         INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
3923         INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
3924         INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work);
3925
3926         init_timer(&qdev->adapter_timer);
3927         qdev->adapter_timer.function = ql3xxx_timer;
3928         qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */
3929         qdev->adapter_timer.data = (unsigned long)qdev;
3930
3931         if (!cards_found) {
3932                 pr_alert("%s\n", DRV_STRING);
3933                 pr_alert("Driver name: %s, Version: %s\n",
3934                          DRV_NAME, DRV_VERSION);
3935         }
3936         ql_display_dev_info(ndev);
3937
3938         cards_found++;
3939         return 0;
3940
3941 err_out_iounmap:
3942         iounmap(qdev->mem_map_registers);
3943 err_out_free_ndev:
3944         free_netdev(ndev);
3945 err_out_free_regions:
3946         pci_release_regions(pdev);
3947 err_out_disable_pdev:
3948         pci_disable_device(pdev);
3949         pci_set_drvdata(pdev, NULL);
3950 err_out:
3951         return err;
3952 }
3953
3954 static void __devexit ql3xxx_remove(struct pci_dev *pdev)
3955 {
3956         struct net_device *ndev = pci_get_drvdata(pdev);
3957         struct ql3_adapter *qdev = netdev_priv(ndev);
3958
3959         unregister_netdev(ndev);
3960
3961         ql_disable_interrupts(qdev);
3962
3963         if (qdev->workqueue) {
3964                 cancel_delayed_work(&qdev->reset_work);
3965                 cancel_delayed_work(&qdev->tx_timeout_work);
3966                 destroy_workqueue(qdev->workqueue);
3967                 qdev->workqueue = NULL;
3968         }
3969
3970         iounmap(qdev->mem_map_registers);
3971         pci_release_regions(pdev);
3972         pci_set_drvdata(pdev, NULL);
3973         free_netdev(ndev);
3974 }
3975
3976 static struct pci_driver ql3xxx_driver = {
3978         .name = DRV_NAME,
3979         .id_table = ql3xxx_pci_tbl,
3980         .probe = ql3xxx_probe,
3981         .remove = __devexit_p(ql3xxx_remove),
3982 };
3983
3984 static int __init ql3xxx_init_module(void)
3985 {
3986         return pci_register_driver(&ql3xxx_driver);
3987 }
3988
3989 static void __exit ql3xxx_exit(void)
3990 {
3991         pci_unregister_driver(&ql3xxx_driver);
3992 }
3993
3994 module_init(ql3xxx_init_module);
3995 module_exit(ql3xxx_exit);