1 /*
2  * QLogic qlge NIC HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  * See LICENSE.qlge for copyright and licensing details.
5  * Author:     Linux qlge network device driver by
6  *                      Ron Mercer <ron.mercer@qlogic.com>
7  */
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/types.h>
11 #include <linux/module.h>
12 #include <linux/list.h>
13 #include <linux/pci.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/pagemap.h>
16 #include <linux/sched.h>
17 #include <linux/slab.h>
18 #include <linux/dmapool.h>
19 #include <linux/mempool.h>
20 #include <linux/spinlock.h>
21 #include <linux/kthread.h>
22 #include <linux/interrupt.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/in.h>
26 #include <linux/ip.h>
27 #include <linux/ipv6.h>
28 #include <net/ipv6.h>
29 #include <linux/tcp.h>
30 #include <linux/udp.h>
31 #include <linux/if_arp.h>
32 #include <linux/if_ether.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/ethtool.h>
36 #include <linux/skbuff.h>
37 #include <linux/if_vlan.h>
38 #include <linux/delay.h>
39 #include <linux/mm.h>
40 #include <linux/vmalloc.h>
41 #include <net/ip6_checksum.h>
42
43 #include "qlge.h"
44
45 char qlge_driver_name[] = DRV_NAME;
46 const char qlge_driver_version[] = DRV_VERSION;
47
48 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
49 MODULE_DESCRIPTION(DRV_STRING " ");
50 MODULE_LICENSE("GPL");
51 MODULE_VERSION(DRV_VERSION);
52
53 static const u32 default_msg =
54     NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
55 /* NETIF_MSG_TIMER |    */
56     NETIF_MSG_IFDOWN |
57     NETIF_MSG_IFUP |
58     NETIF_MSG_RX_ERR |
59     NETIF_MSG_TX_ERR |
60 /*  NETIF_MSG_TX_QUEUED | */
61 /*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
62 /* NETIF_MSG_PKTDATA | */
63     NETIF_MSG_HW | NETIF_MSG_WOL | 0;
64
65 static int debug = 0x00007fff;  /* defaults above */
66 module_param(debug, int, 0);
67 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
68
69 #define MSIX_IRQ 0
70 #define MSI_IRQ 1
71 #define LEG_IRQ 2
72 static int irq_type = MSIX_IRQ;
73 module_param(irq_type, int, 0);
74 MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
75
76 static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
77         {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
78         {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
79         /* required last entry */
80         {0,}
81 };
82
83 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
84
85 /* This hardware semaphore grants exclusive access to
86  * resources shared between the NIC driver, MPI firmware,
87  * FCoE firmware and the FC driver.
88  */
89 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
90 {
91         u32 sem_bits = 0;
92
93         switch (sem_mask) {
94         case SEM_XGMAC0_MASK:
95                 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
96                 break;
97         case SEM_XGMAC1_MASK:
98                 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
99                 break;
100         case SEM_ICB_MASK:
101                 sem_bits = SEM_SET << SEM_ICB_SHIFT;
102                 break;
103         case SEM_MAC_ADDR_MASK:
104                 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
105                 break;
106         case SEM_FLASH_MASK:
107                 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
108                 break;
109         case SEM_PROBE_MASK:
110                 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
111                 break;
112         case SEM_RT_IDX_MASK:
113                 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
114                 break;
115         case SEM_PROC_REG_MASK:
116                 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
117                 break;
118         default:
119                 QPRINTK(qdev, PROBE, ALERT, "Bad semaphore mask.\n");
120                 return -EINVAL;
121         }
122
123         ql_write32(qdev, SEM, sem_bits | sem_mask);
124         return !(ql_read32(qdev, SEM) & sem_bits);
125 }
126
127 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
128 {
129         unsigned int wait_count = 30;
130         do {
131                 if (!ql_sem_trylock(qdev, sem_mask))
132                         return 0;
133                 udelay(100);
134         } while (--wait_count);
135         return -ETIMEDOUT;
136 }
137
138 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
139 {
140         ql_write32(qdev, SEM, sem_mask);
141         ql_read32(qdev, SEM);   /* flush */
142 }
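
/* Usage sketch: callers bracket access to the shared resource with an
 * acquire/release pair, as the flash readers further down do:
 *
 *	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *		return -ETIMEDOUT;
 *	... access the flash via FLASH_ADDR/FLASH_DATA ...
 *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 */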
143
144 /* This function waits for a specific bit to come ready
145  * in a given register.  It is used mostly by the initialization
146  * process, but is also used by netdev operations such as
147  * set_multi, set_mac_address and vlan_rx_add_vid.
148  */
149 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
150 {
151         u32 temp;
152         int count = UDELAY_COUNT;
153
154         while (count) {
155                 temp = ql_read32(qdev, reg);
156
157                 /* check for errors */
158                 if (temp & err_bit) {
159                         QPRINTK(qdev, PROBE, ALERT,
160                                 "register 0x%.08x access error, value = 0x%.08x.\n",
161                                 reg, temp);
162                         return -EIO;
163                 } else if (temp & bit)
164                         return 0;
165                 udelay(UDELAY_DELAY);
166                 count--;
167         }
168         QPRINTK(qdev, PROBE, ALERT,
169                 "Timed out waiting for reg %x to come ready.\n", reg);
170         return -ETIMEDOUT;
171 }
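
/* Typical caller pattern (the CAM accessors below use it): poll for the
 * ready bit before each indirect register access, e.g.:
 *
 *	status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 *	if (status)
 *		return status;
 *	ql_write32(qdev, MAC_ADDR_IDX, ...);
 */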
172
173 /* The CFG register is used to download TX and RX control blocks
174  * to the chip. This function waits for an operation to complete.
175  */
176 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
177 {
178         int count = UDELAY_COUNT;
179         u32 temp;
180
181         while (count) {
182                 temp = ql_read32(qdev, CFG);
183                 if (temp & CFG_LE)
184                         return -EIO;
185                 if (!(temp & bit))
186                         return 0;
187                 udelay(UDELAY_DELAY);
188                 count--;
189         }
190         return -ETIMEDOUT;
191 }
192
193
194 /* Used to issue init control blocks to hw. Maps control block,
195  * sets address, triggers download, waits for completion.
196  */
197 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
198                  u16 q_id)
199 {
200         u64 map;
201         int status = 0;
202         int direction;
203         u32 mask;
204         u32 value;
205
206         direction =
207             (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
208             PCI_DMA_FROMDEVICE;
209
210         /* Take the semaphore first so an error exit can't leak the mapping. */
211         status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
212         if (status)
213                 return status;
214         map = pci_map_single(qdev->pdev, ptr, size, direction);
215         if (pci_dma_mapping_error(qdev->pdev, map)) {
216                 QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
217                 ql_sem_unlock(qdev, SEM_ICB_MASK);
218                 return -ENOMEM;
219         }
220         status = ql_wait_cfg(qdev, bit);
221         if (status) {
222                 QPRINTK(qdev, IFUP, ERR,
223                         "Timed out waiting for CFG to come ready.\n");
224                 goto exit;
225         }
226
227         ql_write32(qdev, ICB_L, (u32) map);
228         ql_write32(qdev, ICB_H, (u32) (map >> 32));
229
230         mask = CFG_Q_MASK | (bit << 16);
231         value = bit | (q_id << CFG_Q_SHIFT);
232         ql_write32(qdev, CFG, (mask | value));
233
234         /*
235          * Wait for the bit to clear after signaling hw.
236          */
237         status = ql_wait_cfg(qdev, bit);
238 exit:
239         ql_sem_unlock(qdev, SEM_ICB_MASK);      /* does flush too */
240         pci_unmap_single(qdev->pdev, map, size, direction);
241         return status;
242 }
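
/* Sketch of a typical caller, roughly how the queue-bringup code later
 * in this driver loads a completion queue control block (cqicb and
 * cq_id come from the rx_ring being started):
 *
 *	status = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
 *			      CFG_LCQ, rx_ring->cq_id);
 */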
243
244 /* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
245 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
246                         u32 *value)
247 {
248         u32 offset = 0;
249         int status;
250
251         switch (type) {
252         case MAC_ADDR_TYPE_MULTI_MAC:
253         case MAC_ADDR_TYPE_CAM_MAC:
254                 {
255                         status =
256                             ql_wait_reg_rdy(qdev,
257                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
258                         if (status)
259                                 goto exit;
260                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
261                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
262                                    MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
263                         status =
264                             ql_wait_reg_rdy(qdev,
265                                 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
266                         if (status)
267                                 goto exit;
268                         *value++ = ql_read32(qdev, MAC_ADDR_DATA);
269                         status =
270                             ql_wait_reg_rdy(qdev,
271                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
272                         if (status)
273                                 goto exit;
274                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
275                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
276                                    MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
277                         status =
278                             ql_wait_reg_rdy(qdev,
279                                 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
280                         if (status)
281                                 goto exit;
282                         *value++ = ql_read32(qdev, MAC_ADDR_DATA);
283                         if (type == MAC_ADDR_TYPE_CAM_MAC) {
284                                 status =
285                                     ql_wait_reg_rdy(qdev,
286                                         MAC_ADDR_IDX, MAC_ADDR_MW, 0);
287                                 if (status)
288                                         goto exit;
289                                 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
290                                            (index << MAC_ADDR_IDX_SHIFT) | /* index */
291                                            MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
292                                 status =
293                                     ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
294                                                     MAC_ADDR_MR, 0);
295                                 if (status)
296                                         goto exit;
297                                 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
298                         }
299                         break;
300                 }
301         case MAC_ADDR_TYPE_VLAN:
302         case MAC_ADDR_TYPE_MULTI_FLTR:
303         default:
304                 QPRINTK(qdev, IFUP, CRIT,
305                         "Address type %d not yet supported.\n", type);
306                 status = -EPERM;
307         }
308 exit:
309         return status;
310 }
311
312 /* Set up a MAC, multicast or VLAN address for the
313  * inbound frame matching.
314  */
315 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
316                                u16 index)
317 {
318         u32 offset = 0;
319         int status = 0;
320
321         switch (type) {
322         case MAC_ADDR_TYPE_MULTI_MAC:
323                 {
324                         u32 upper = (addr[0] << 8) | addr[1];
325                         u32 lower = (addr[2] << 24) | (addr[3] << 16) |
326                                         (addr[4] << 8) | (addr[5]);
327
328                         status =
329                                 ql_wait_reg_rdy(qdev,
330                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
331                         if (status)
332                                 goto exit;
333                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
334                                 (index << MAC_ADDR_IDX_SHIFT) |
335                                 type | MAC_ADDR_E);
336                         ql_write32(qdev, MAC_ADDR_DATA, lower);
337                         status =
338                                 ql_wait_reg_rdy(qdev,
339                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
340                         if (status)
341                                 goto exit;
342                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
343                                 (index << MAC_ADDR_IDX_SHIFT) |
344                                 type | MAC_ADDR_E);
345
346                         ql_write32(qdev, MAC_ADDR_DATA, upper);
347                         status =
348                                 ql_wait_reg_rdy(qdev,
349                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
350                         if (status)
351                                 goto exit;
352                         break;
353                 }
354         case MAC_ADDR_TYPE_CAM_MAC:
355                 {
356                         u32 cam_output;
357                         u32 upper = (addr[0] << 8) | addr[1];
358                         u32 lower =
359                             (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
360                             (addr[5]);
361
362                         QPRINTK(qdev, IFUP, DEBUG,
363                                 "Adding %s address %pM"
364                                 " at index %d in the CAM.\n",
365                                 ((type ==
366                                   MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
367                                  "UNICAST"), addr, index);
368
369                         status =
370                             ql_wait_reg_rdy(qdev,
371                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
372                         if (status)
373                                 goto exit;
374                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
375                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
376                                    type);       /* type */
377                         ql_write32(qdev, MAC_ADDR_DATA, lower);
378                         status =
379                             ql_wait_reg_rdy(qdev,
380                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
381                         if (status)
382                                 goto exit;
383                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
384                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
385                                    type);       /* type */
386                         ql_write32(qdev, MAC_ADDR_DATA, upper);
387                         status =
388                             ql_wait_reg_rdy(qdev,
389                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
390                         if (status)
391                                 goto exit;
392                         ql_write32(qdev, MAC_ADDR_IDX, (offset) |       /* offset */
393                                    (index << MAC_ADDR_IDX_SHIFT) |      /* index */
394                                    type);       /* type */
395                         /* This field should also include the queue id
396                            and possibly the function id.  Right now we hardcode
397                            the route field to NIC core.
398                          */
399                         cam_output = (CAM_OUT_ROUTE_NIC |
400                                       (qdev->
401                                        func << CAM_OUT_FUNC_SHIFT) |
402                                         (0 << CAM_OUT_CQ_ID_SHIFT));
403                         if (qdev->vlgrp)
404                                 cam_output |= CAM_OUT_RV;
405                         /* route to NIC core */
406                         ql_write32(qdev, MAC_ADDR_DATA, cam_output);
407                         break;
408                 }
409         case MAC_ADDR_TYPE_VLAN:
410                 {
411                         u32 enable_bit = *((u32 *) &addr[0]);
412                         /* For VLAN, the addr actually holds a bit that
413                          * either enables or disables the vlan id we are
414                          * addressing. It's either MAC_ADDR_E on or off.
415                          * That's bit-27 we're talking about.
416                          */
417                         QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
418                                 (enable_bit ? "Adding" : "Removing"),
419                                 index, (enable_bit ? "to" : "from"));
420
421                         status =
422                             ql_wait_reg_rdy(qdev,
423                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
424                         if (status)
425                                 goto exit;
426                         ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
427                                    (index << MAC_ADDR_IDX_SHIFT) |      /* index */
428                                    type |       /* type */
429                                    enable_bit); /* enable/disable */
430                         break;
431                 }
432         case MAC_ADDR_TYPE_MULTI_FLTR:
433         default:
434                 QPRINTK(qdev, IFUP, CRIT,
435                         "Address type %d not yet supported.\n", type);
436                 status = -EPERM;
437         }
438 exit:
439         return status;
440 }
441
442 /* Set or clear MAC address in hardware. We sometimes
443  * have to clear it to prevent wrong frame routing
444  * especially in a bonding environment.
445  */
446 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
447 {
448         int status;
449         char zero_mac_addr[ETH_ALEN];
450         char *addr;
451
452         if (set) {
453                 addr = &qdev->ndev->dev_addr[0];
454                 QPRINTK(qdev, IFUP, DEBUG,
455                         "Set Mac addr %02x:%02x:%02x:%02x:%02x:%02x\n",
456                         addr[0], addr[1], addr[2], addr[3],
457                         addr[4], addr[5]);
458         } else {
459                 memset(zero_mac_addr, 0, ETH_ALEN);
460                 addr = &zero_mac_addr[0];
461                 QPRINTK(qdev, IFUP, DEBUG,
462                                 "Clearing MAC address on %s\n",
463                                 qdev->ndev->name);
464         }
465         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
466         if (status)
467                 return status;
468         status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
469                         MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
470         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
471         if (status)
472                 QPRINTK(qdev, IFUP, ERR, "Failed to init mac "
473                         "address.\n");
474         return status;
475 }
476
477 void ql_link_on(struct ql_adapter *qdev)
478 {
479         QPRINTK(qdev, LINK, ERR, "%s: Link is up.\n",
480                                  qdev->ndev->name);
481         netif_carrier_on(qdev->ndev);
482         ql_set_mac_addr(qdev, 1);
483 }
484
485 void ql_link_off(struct ql_adapter *qdev)
486 {
487         QPRINTK(qdev, LINK, ERR, "%s: Link is down.\n",
488                                  qdev->ndev->name);
489         netif_carrier_off(qdev->ndev);
490         ql_set_mac_addr(qdev, 0);
491 }
492
493 /* Get a specific frame routing value from the CAM.
494  * Used for debug and reg dump.
495  */
496 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
497 {
498         int status = 0;
499
500         status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
501         if (status)
502                 goto exit;
503
504         ql_write32(qdev, RT_IDX,
505                    RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
506         status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
507         if (status)
508                 goto exit;
509         *value = ql_read32(qdev, RT_DATA);
510 exit:
511         return status;
512 }
513
514 /* The NIC function for this chip has 16 routing indexes.  Each one can be used
515  * to route different frame types to various inbound queues.  We send broadcast/
516  * multicast/error frames to the default queue for slow handling,
517  * and CAM hit/RSS frames to the fast handling queues.
518  */
519 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
520                               int enable)
521 {
522         int status = -EINVAL; /* Return error if no mask match. */
523         u32 value = 0;
524
525         QPRINTK(qdev, IFUP, DEBUG,
526                 "%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
527                 (enable ? "Adding" : "Removing"),
528                 ((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
529                 ((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
530                 ((index ==
531                   RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
532                 ((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
533                 ((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
534                 ((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
535                 ((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
536                 ((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
537                 ((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
538                 ((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
539                 ((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
540                 ((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
541                 ((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
542                 ((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
543                 ((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
544                 ((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
545                 (enable ? "to" : "from"));
546
547         switch (mask) {
548         case RT_IDX_CAM_HIT:
549                 {
550                         value = RT_IDX_DST_CAM_Q |      /* dest */
551                             RT_IDX_TYPE_NICQ |  /* type */
552                             (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
553                         break;
554                 }
555         case RT_IDX_VALID:      /* Promiscuous Mode frames. */
556                 {
557                         value = RT_IDX_DST_DFLT_Q |     /* dest */
558                             RT_IDX_TYPE_NICQ |  /* type */
559                             (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
560                         break;
561                 }
562         case RT_IDX_ERR:        /* Pass up MAC,IP,TCP/UDP error frames. */
563                 {
564                         value = RT_IDX_DST_DFLT_Q |     /* dest */
565                             RT_IDX_TYPE_NICQ |  /* type */
566                             (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
567                         break;
568                 }
569         case RT_IDX_BCAST:      /* Pass up Broadcast frames to default Q. */
570                 {
571                         value = RT_IDX_DST_DFLT_Q |     /* dest */
572                             RT_IDX_TYPE_NICQ |  /* type */
573                             (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
574                         break;
575                 }
576         case RT_IDX_MCAST:      /* Pass up All Multicast frames. */
577                 {
578                         value = RT_IDX_DST_DFLT_Q |     /* dest */
579                             RT_IDX_TYPE_NICQ |  /* type */
580                             (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
581                         break;
582                 }
583         case RT_IDX_MCAST_MATCH:        /* Pass up matched Multicast frames. */
584                 {
585                         value = RT_IDX_DST_DFLT_Q |     /* dest */
586                             RT_IDX_TYPE_NICQ |  /* type */
587                             (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
588                         break;
589                 }
590         case RT_IDX_RSS_MATCH:  /* Pass up matched RSS frames. */
591                 {
592                         value = RT_IDX_DST_RSS |        /* dest */
593                             RT_IDX_TYPE_NICQ |  /* type */
594                             (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
595                         break;
596                 }
597         case 0:         /* Clear the E-bit on an entry. */
598                 {
599                         value = RT_IDX_DST_DFLT_Q |     /* dest */
600                             RT_IDX_TYPE_NICQ |  /* type */
601                             (index << RT_IDX_IDX_SHIFT);/* index */
602                         break;
603                 }
604         default:
605                 QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
606                         mask);
607                 status = -EPERM;
608                 goto exit;
609         }
610
611         if (value) {
612                 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
613                 if (status)
614                         goto exit;
615                 value |= (enable ? RT_IDX_E : 0);
616                 ql_write32(qdev, RT_IDX, value);
617                 ql_write32(qdev, RT_DATA, enable ? mask : 0);
618         }
619 exit:
620         return status;
621 }
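
/* Example (mirroring the routing-init path): send all broadcast frames
 * to the default queue by enabling the broadcast slot:
 *
 *	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT,
 *				    RT_IDX_BCAST, 1);
 *	if (status)
 *		return status;
 */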
622
623 static void ql_enable_interrupts(struct ql_adapter *qdev)
624 {
625         ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
626 }
627
628 static void ql_disable_interrupts(struct ql_adapter *qdev)
629 {
630         ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
631 }
632
633 /* If we're running with multiple MSI-X vectors then we enable on the fly.
634  * Otherwise, we may have multiple outstanding workers and don't want to
635  * enable until the last one finishes. In this case, the irq_cnt gets
636  * incremented every time we queue a worker and decremented every time
637  * a worker finishes.  Once it hits zero we enable the interrupt.
638  */
639 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
640 {
641         u32 var = 0;
642         unsigned long hw_flags = 0;
643         struct intr_context *ctx = qdev->intr_context + intr;
644
645         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
646                 /* Always enable if we're running multiple MSI-X
647                  * vectors and it's not the default (zeroth) interrupt.
648                  */
649                 ql_write32(qdev, INTR_EN,
650                            ctx->intr_en_mask);
651                 var = ql_read32(qdev, STS);
652                 return var;
653         }
654
655         spin_lock_irqsave(&qdev->hw_lock, hw_flags);
656         if (atomic_dec_and_test(&ctx->irq_cnt)) {
657                 ql_write32(qdev, INTR_EN,
658                            ctx->intr_en_mask);
659                 var = ql_read32(qdev, STS);
660         }
661         spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
662         return var;
663 }
664
665 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
666 {
667         u32 var = 0;
668         struct intr_context *ctx;
669
670         /* HW disables for us if we're running multiple MSI-X
671          * vectors and it's not the default (zeroth) interrupt.
672          */
673         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
674                 return 0;
675
676         ctx = qdev->intr_context + intr;
677         spin_lock(&qdev->hw_lock);
678         if (!atomic_read(&ctx->irq_cnt)) {
679                 ql_write32(qdev, INTR_EN,
680                 ctx->intr_dis_mask);
681                 var = ql_read32(qdev, STS);
682         }
683         atomic_inc(&ctx->irq_cnt);
684         spin_unlock(&qdev->hw_lock);
685         return var;
686 }
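
/* Sketch of the intended pairing on the non-MSI-X path: a worker
 * disables (bumping irq_cnt), services the queue, then re-enables;
 * the enable is only written to hardware once irq_cnt drops to zero:
 *
 *	ql_disable_completion_interrupt(qdev, intr);
 *	... service the completion queue ...
 *	ql_enable_completion_interrupt(qdev, intr);
 */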
687
688 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
689 {
690         int i;
691         for (i = 0; i < qdev->intr_count; i++) {
692                 /* The enable call does an atomic_dec_and_test
693                  * and enables only if the result is zero.
694                  * So we precharge it here.
695                  */
696                 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
697                         i == 0))
698                         atomic_set(&qdev->intr_context[i].irq_cnt, 1);
699                 ql_enable_completion_interrupt(qdev, i);
700         }
701
702 }
703
704 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
705 {
706         int status, i;
707         u16 csum = 0;
708         __le16 *flash = (__le16 *)&qdev->flash;
709
710         status = strncmp((char *)&qdev->flash, str, 4);
711         if (status) {
712                 QPRINTK(qdev, IFUP, ERR, "Invalid flash signature.\n");
713                 return  status;
714         }
715
716         for (i = 0; i < size; i++)
717                 csum += le16_to_cpu(*flash++);
718
719         if (csum)
720                 QPRINTK(qdev, IFUP, ERR,
721                         "Invalid flash checksum, csum = 0x%.04x.\n", csum);
722
723         return csum;
724 }
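
/* The check above is a plain additive checksum: all __le16 words of a
 * valid image sum to zero mod 2^16, presumably arranged by writing a
 * compensating word when the image is built.  Illustrative builder-side
 * arithmetic (not part of this driver):
 *
 *	u16 sum = 0;
 *	for (i = 0; i < size - 1; i++)
 *		sum += image[i];
 *	image[size - 1] = (u16)-sum;	// forces the total to zero
 */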
725
726 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
727 {
728         int status = 0;
729         /* wait for reg to come ready */
730         status = ql_wait_reg_rdy(qdev,
731                         FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
732         if (status)
733                 goto exit;
734         /* set up for reg read */
735         ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
736         /* wait for reg to come ready */
737         status = ql_wait_reg_rdy(qdev,
738                         FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
739         if (status)
740                 goto exit;
741         /* This data is stored on flash as an array of
742          * __le32.  Since ql_read32() returns cpu endian
743          * we need to swap it back.
744          */
745         *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
746 exit:
747         return status;
748 }
749
750 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
751 {
752         u32 i, size;
753         int status;
754         __le32 *p = (__le32 *)&qdev->flash;
755         u32 offset;
756         u8 mac_addr[6];
757
758         /* Get flash offset for function and adjust
759          * for dword access.
760          */
761         if (!qdev->port)
762                 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
763         else
764                 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
765
766         if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
767                 return -ETIMEDOUT;
768
769         size = sizeof(struct flash_params_8000) / sizeof(u32);
770         for (i = 0; i < size; i++, p++) {
771                 status = ql_read_flash_word(qdev, i+offset, p);
772                 if (status) {
773                         QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
774                         goto exit;
775                 }
776         }
777
778         status = ql_validate_flash(qdev,
779                         sizeof(struct flash_params_8000) / sizeof(u16),
780                         "8000");
781         if (status) {
782                 QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
783                 status = -EINVAL;
784                 goto exit;
785         }
786
787         /* Extract either manufacturer or BOFM modified
788          * MAC address.
789          */
790         if (qdev->flash.flash_params_8000.data_type1 == 2)
791                 memcpy(mac_addr,
792                         qdev->flash.flash_params_8000.mac_addr1,
793                         qdev->ndev->addr_len);
794         else
795                 memcpy(mac_addr,
796                         qdev->flash.flash_params_8000.mac_addr,
797                         qdev->ndev->addr_len);
798
799         if (!is_valid_ether_addr(mac_addr)) {
800                 QPRINTK(qdev, IFUP, ERR, "Invalid MAC address.\n");
801                 status = -EINVAL;
802                 goto exit;
803         }
804
805         memcpy(qdev->ndev->dev_addr,
806                 mac_addr,
807                 qdev->ndev->addr_len);
808
809 exit:
810         ql_sem_unlock(qdev, SEM_FLASH_MASK);
811         return status;
812 }
813
814 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
815 {
816         int i;
817         int status;
818         __le32 *p = (__le32 *)&qdev->flash;
819         u32 offset = 0;
820         u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
821
822         /* Second function's parameters follow the first
823          * function's.
824          */
825         if (qdev->port)
826                 offset = size;
827
828         if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
829                 return -ETIMEDOUT;
830
831         for (i = 0; i < size; i++, p++) {
832                 status = ql_read_flash_word(qdev, i+offset, p);
833                 if (status) {
834                         QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
835                         goto exit;
836                 }
837
838         }
839
840         status = ql_validate_flash(qdev,
841                         sizeof(struct flash_params_8012) / sizeof(u16),
842                         "8012");
843         if (status) {
844                 QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
845                 status = -EINVAL;
846                 goto exit;
847         }
848
849         if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
850                 status = -EINVAL;
851                 goto exit;
852         }
853
854         memcpy(qdev->ndev->dev_addr,
855                 qdev->flash.flash_params_8012.mac_addr,
856                 qdev->ndev->addr_len);
857
858 exit:
859         ql_sem_unlock(qdev, SEM_FLASH_MASK);
860         return status;
861 }
862
863 /* xgmac registers are located behind the xgmac_addr and xgmac_data
864  * register pair.  Each read/write requires us to wait for the ready
865  * bit before reading/writing the data.
866  */
867 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
868 {
869         int status;
870         /* wait for reg to come ready */
871         status = ql_wait_reg_rdy(qdev,
872                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
873         if (status)
874                 return status;
875         /* write the data to the data reg */
876         ql_write32(qdev, XGMAC_DATA, data);
877         /* trigger the write */
878         ql_write32(qdev, XGMAC_ADDR, reg);
879         return status;
880 }
881
882 /* xgmac registers are located behind the xgmac_addr and xgmac_data
883  * register pair.  Each read/write requires us to wait for the ready
884  * bit before reading/writing the data.
885  */
886 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
887 {
888         int status = 0;
889         /* wait for reg to come ready */
890         status = ql_wait_reg_rdy(qdev,
891                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
892         if (status)
893                 goto exit;
894         /* set up for reg read */
895         ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
896         /* wait for reg to come ready */
897         status = ql_wait_reg_rdy(qdev,
898                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
899         if (status)
900                 goto exit;
901         /* get the data */
902         *data = ql_read32(qdev, XGMAC_DATA);
903 exit:
904         return status;
905 }
906
907 /* This is used for reading the 64-bit statistics regs. */
908 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
909 {
910         int status = 0;
911         u32 hi = 0;
912         u32 lo = 0;
913
914         status = ql_read_xgmac_reg(qdev, reg, &lo);
915         if (status)
916                 goto exit;
917
918         status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
919         if (status)
920                 goto exit;
921
922         *data = (u64) lo | ((u64) hi << 32);
923
924 exit:
925         return status;
926 }
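
/* Usage sketch for the statistics path; the register offset here is
 * illustrative, not an authoritative XGMAC stat address:
 *
 *	u64 pkts = 0;
 *
 *	if (ql_sem_spinlock(qdev, qdev->xg_sem_mask))
 *		return;
 *	ql_read_xgmac_reg64(qdev, 0x200, &pkts);
 *	ql_sem_unlock(qdev, qdev->xg_sem_mask);
 */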
927
928 static int ql_8000_port_initialize(struct ql_adapter *qdev)
929 {
930         int status;
931         /*
932          * Get MPI firmware version for driver banner
933          * and ethtool info.
934          */
935         status = ql_mb_about_fw(qdev);
936         if (status)
937                 goto exit;
938         status = ql_mb_get_fw_state(qdev);
939         if (status)
940                 goto exit;
941         /* Wake up a worker to get/set the TX/RX frame sizes. */
942         queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
943 exit:
944         return status;
945 }
946
947 /* Take the MAC Core out of reset.
948  * Enable statistics counting.
949  * Take the transmitter/receiver out of reset.
950  * This functionality may be done in the MPI firmware at a
951  * later date.
952  */
953 static int ql_8012_port_initialize(struct ql_adapter *qdev)
954 {
955         int status = 0;
956         u32 data;
957
958         if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
959                 /* Another function has the semaphore, so
960                  * wait for the port init bit to come ready.
961                  */
962                 QPRINTK(qdev, LINK, INFO,
963                         "Another function has the semaphore, so wait for the port init bit to come ready.\n");
964                 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
965                 if (status) {
966                         QPRINTK(qdev, LINK, CRIT,
967                                 "Port initialize timed out.\n");
968                 }
969                 return status;
970         }
971
972         QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore.\n");
973         /* Set the core reset. */
974         status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
975         if (status)
976                 goto end;
977         data |= GLOBAL_CFG_RESET;
978         status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
979         if (status)
980                 goto end;
981
982         /* Clear the core reset and turn on jumbo for receiver. */
983         data &= ~GLOBAL_CFG_RESET;      /* Clear core reset. */
984         data |= GLOBAL_CFG_JUMBO;       /* Turn on jumbo. */
985         data |= GLOBAL_CFG_TX_STAT_EN;
986         data |= GLOBAL_CFG_RX_STAT_EN;
987         status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
988         if (status)
989                 goto end;
990
991         /* Enable the transmitter and clear its reset. */
992         status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
993         if (status)
994                 goto end;
995         data &= ~TX_CFG_RESET;  /* Clear the TX MAC reset. */
996         data |= TX_CFG_EN;      /* Enable the transmitter. */
997         status = ql_write_xgmac_reg(qdev, TX_CFG, data);
998         if (status)
999                 goto end;
1000
1001         /* Enable the receiver and clear its reset. */
1002         status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1003         if (status)
1004                 goto end;
1005         data &= ~RX_CFG_RESET;  /* Clear the RX MAC reset. */
1006         data |= RX_CFG_EN;      /* Enable the receiver. */
1007         status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1008         if (status)
1009                 goto end;
1010
1011         /* Turn on jumbo with a 9600-byte (0x2580) max frame size. */
1012         status =
1013             ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1014         if (status)
1015                 goto end;
1016         status =
1017             ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1018         if (status)
1019                 goto end;
1020
1021         /* Signal to the world that the port is enabled.        */
1022         ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1023 end:
1024         ql_sem_unlock(qdev, qdev->xg_sem_mask);
1025         return status;
1026 }
1027
1028 /* Get the next large buffer. */
1029 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1030 {
1031         struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1032         rx_ring->lbq_curr_idx++;
1033         if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1034                 rx_ring->lbq_curr_idx = 0;
1035         rx_ring->lbq_free_cnt++;
1036         return lbq_desc;
1037 }
1038
1039 /* Get the next small buffer. */
1040 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1041 {
1042         struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1043         rx_ring->sbq_curr_idx++;
1044         if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1045                 rx_ring->sbq_curr_idx = 0;
1046         rx_ring->sbq_free_cnt++;
1047         return sbq_desc;
1048 }
1049
1050 /* Update an rx ring index. */
1051 static void ql_update_cq(struct rx_ring *rx_ring)
1052 {
1053         rx_ring->cnsmr_idx++;
1054         rx_ring->curr_entry++;
1055         if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1056                 rx_ring->cnsmr_idx = 0;
1057                 rx_ring->curr_entry = rx_ring->cq_base;
1058         }
1059 }
1060
1061 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1062 {
1063         ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1064 }
1065
1066 /* Process (refill) a large buffer queue. */
1067 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1068 {
1069         u32 clean_idx = rx_ring->lbq_clean_idx;
1070         u32 start_idx = clean_idx;
1071         struct bq_desc *lbq_desc;
1072         u64 map;
1073         int i;
1074
1075         while (rx_ring->lbq_free_cnt > 16) {
1076                 for (i = 0; i < 16; i++) {
1077                         QPRINTK(qdev, RX_STATUS, DEBUG,
1078                                 "lbq: try cleaning clean_idx = %d.\n",
1079                                 clean_idx);
1080                         lbq_desc = &rx_ring->lbq[clean_idx];
1081                         if (lbq_desc->p.lbq_page == NULL) {
1082                                 QPRINTK(qdev, RX_STATUS, DEBUG,
1083                                         "lbq: getting new page for index %d.\n",
1084                                         lbq_desc->index);
1085                                 lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
1086                                 if (lbq_desc->p.lbq_page == NULL) {
1087                                         rx_ring->lbq_clean_idx = clean_idx;
1088                                         QPRINTK(qdev, RX_STATUS, ERR,
1089                                                 "Couldn't get a page.\n");
1090                                         return;
1091                                 }
1092                                 map = pci_map_page(qdev->pdev,
1093                                                    lbq_desc->p.lbq_page,
1094                                                    0, PAGE_SIZE,
1095                                                    PCI_DMA_FROMDEVICE);
1096                                 if (pci_dma_mapping_error(qdev->pdev, map)) {
1097                                         rx_ring->lbq_clean_idx = clean_idx;
1098                                         put_page(lbq_desc->p.lbq_page);
1099                                         lbq_desc->p.lbq_page = NULL;
1100                                         QPRINTK(qdev, RX_STATUS, ERR,
1101                                                 "PCI mapping failed.\n");
1102                                         return;
1103                                 }
1104                                 pci_unmap_addr_set(lbq_desc, mapaddr, map);
1105                                 pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
1106                                 *lbq_desc->addr = cpu_to_le64(map);
1107                         }
1108                         clean_idx++;
1109                         if (clean_idx == rx_ring->lbq_len)
1110                                 clean_idx = 0;
1111                 }
1112
1113                 rx_ring->lbq_clean_idx = clean_idx;
1114                 rx_ring->lbq_prod_idx += 16;
1115                 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1116                         rx_ring->lbq_prod_idx = 0;
1117                 rx_ring->lbq_free_cnt -= 16;
1118         }
1119
1120         if (start_idx != clean_idx) {
1121                 QPRINTK(qdev, RX_STATUS, DEBUG,
1122                         "lbq: updating prod idx = %d.\n",
1123                         rx_ring->lbq_prod_idx);
1124                 ql_write_db_reg(rx_ring->lbq_prod_idx,
1125                                 rx_ring->lbq_prod_idx_db_reg);
1126         }
1127 }
1128
1129 /* Process (refill) a small buffer queue. */
1130 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1131 {
1132         u32 clean_idx = rx_ring->sbq_clean_idx;
1133         u32 start_idx = clean_idx;
1134         struct bq_desc *sbq_desc;
1135         u64 map;
1136         int i;
1137
1138         while (rx_ring->sbq_free_cnt > 16) {
1139                 for (i = 0; i < 16; i++) {
1140                         sbq_desc = &rx_ring->sbq[clean_idx];
1141                         QPRINTK(qdev, RX_STATUS, DEBUG,
1142                                 "sbq: try cleaning clean_idx = %d.\n",
1143                                 clean_idx);
1144                         if (sbq_desc->p.skb == NULL) {
1145                                 QPRINTK(qdev, RX_STATUS, DEBUG,
1146                                         "sbq: getting new skb for index %d.\n",
1147                                         sbq_desc->index);
1148                                 sbq_desc->p.skb =
1149                                     netdev_alloc_skb(qdev->ndev,
1150                                                      rx_ring->sbq_buf_size);
1151                                 if (sbq_desc->p.skb == NULL) {
1152                                         QPRINTK(qdev, RX_STATUS, ERR,
1153                                                 "Couldn't get an skb.\n");
1154                                         rx_ring->sbq_clean_idx = clean_idx;
1155                                         return;
1156                                 }
1157                                 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1158                                 map = pci_map_single(qdev->pdev,
1159                                                      sbq_desc->p.skb->data,
1160                                                      rx_ring->sbq_buf_size /
1161                                                      2, PCI_DMA_FROMDEVICE);
1162                                 if (pci_dma_mapping_error(qdev->pdev, map)) {
1163                                         QPRINTK(qdev, RX_STATUS, ERR, "PCI mapping failed.\n");
1164                                         rx_ring->sbq_clean_idx = clean_idx;
1165                                         dev_kfree_skb_any(sbq_desc->p.skb);
1166                                         sbq_desc->p.skb = NULL;
1167                                         return;
1168                                 }
1169                                 pci_unmap_addr_set(sbq_desc, mapaddr, map);
1170                                 pci_unmap_len_set(sbq_desc, maplen,
1171                                                   rx_ring->sbq_buf_size / 2);
1172                                 *sbq_desc->addr = cpu_to_le64(map);
1173                         }
1174
1175                         clean_idx++;
1176                         if (clean_idx == rx_ring->sbq_len)
1177                                 clean_idx = 0;
1178                 }
1179                 rx_ring->sbq_clean_idx = clean_idx;
1180                 rx_ring->sbq_prod_idx += 16;
1181                 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1182                         rx_ring->sbq_prod_idx = 0;
1183                 rx_ring->sbq_free_cnt -= 16;
1184         }
1185
1186         if (start_idx != clean_idx) {
1187                 QPRINTK(qdev, RX_STATUS, DEBUG,
1188                         "sbq: updating prod idx = %d.\n",
1189                         rx_ring->sbq_prod_idx);
1190                 ql_write_db_reg(rx_ring->sbq_prod_idx,
1191                                 rx_ring->sbq_prod_idx_db_reg);
1192         }
1193 }
1194
1195 static void ql_update_buffer_queues(struct ql_adapter *qdev,
1196                                     struct rx_ring *rx_ring)
1197 {
1198         ql_update_sbq(qdev, rx_ring);
1199         ql_update_lbq(qdev, rx_ring);
1200 }
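
/* Sketch: the RX completion handler is expected to call this after it
 * consumes small/large buffers, so the chip never starves:
 *
 *	... process inbound completions, consuming sbq/lbq buffers ...
 *	ql_update_buffer_queues(qdev, rx_ring);
 */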
1201
1202 /* Unmaps tx buffers.  Can be called from send() if a pci mapping
1203  * fails at some stage, or from the interrupt when a tx completes.
1204  */
1205 static void ql_unmap_send(struct ql_adapter *qdev,
1206                           struct tx_ring_desc *tx_ring_desc, int mapped)
1207 {
1208         int i;
1209         for (i = 0; i < mapped; i++) {
1210                 if (i == 0 || (i == 7 && mapped > 7)) {
1211                         /*
1212                          * Unmap the skb->data area, or the
1213                          * external sglist (AKA the Outbound
1214                          * Address List (OAL)).
1215                          * If it's the zeroth element, then it's
1216                          * the skb->data area.  If it's the 7th
1217                          * element and there are more than 7 frags,
1218                          * then it's an OAL.
1219                          */
1220                         if (i == 7) {
1221                                 QPRINTK(qdev, TX_DONE, DEBUG,
1222                                         "unmapping OAL area.\n");
1223                         }
1224                         pci_unmap_single(qdev->pdev,
1225                                          pci_unmap_addr(&tx_ring_desc->map[i],
1226                                                         mapaddr),
1227                                          pci_unmap_len(&tx_ring_desc->map[i],
1228                                                        maplen),
1229                                          PCI_DMA_TODEVICE);
1230                 } else {
1231                         QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
1232                                 i);
1233                         pci_unmap_page(qdev->pdev,
1234                                        pci_unmap_addr(&tx_ring_desc->map[i],
1235                                                       mapaddr),
1236                                        pci_unmap_len(&tx_ring_desc->map[i],
1237                                                      maplen), PCI_DMA_TODEVICE);
1238                 }
1239         }
1240
1241 }
1242
1243 /* Map the buffers for this transmit.  This will return
1244  * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1245  */
1246 static int ql_map_send(struct ql_adapter *qdev,
1247                        struct ob_mac_iocb_req *mac_iocb_ptr,
1248                        struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1249 {
1250         int len = skb_headlen(skb);
1251         dma_addr_t map;
1252         int frag_idx, err, map_idx = 0;
1253         struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1254         int frag_cnt = skb_shinfo(skb)->nr_frags;
1255
1256         if (frag_cnt) {
1257                 QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
1258         }
1259         /*
1260          * Map the skb buffer first.
1261          */
1262         map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1263
1264         err = pci_dma_mapping_error(qdev->pdev, map);
1265         if (err) {
1266                 QPRINTK(qdev, TX_QUEUED, ERR,
1267                         "PCI mapping failed with error: %d\n", err);
1268
1269                 return NETDEV_TX_BUSY;
1270         }
1271
1272         tbd->len = cpu_to_le32(len);
1273         tbd->addr = cpu_to_le64(map);
1274         pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1275         pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1276         map_idx++;
1277
1278         /*
1279          * This loop fills the remainder of the 8 address descriptors
1280          * in the IOCB.  If there are more than 7 fragments, then the
1281          * eighth address desc will point to an external list (OAL).
1282          * When this happens, the remainder of the frags will be stored
1283          * in this list.
1284          */
1285         for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1286                 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1287                 tbd++;
1288                 if (frag_idx == 6 && frag_cnt > 7) {
1289                         /* Let's tack on an sglist.
1290                          * Our control block will now
1291                          * look like this:
1292                          * iocb->seg[0] = skb->data
1293                          * iocb->seg[1] = frag[0]
1294                          * iocb->seg[2] = frag[1]
1295                          * iocb->seg[3] = frag[2]
1296                          * iocb->seg[4] = frag[3]
1297                          * iocb->seg[5] = frag[4]
1298                          * iocb->seg[6] = frag[5]
1299                          * iocb->seg[7] = ptr to OAL (external sglist)
1300                          * oal->seg[0] = frag[6]
1301                          * oal->seg[1] = frag[7]
1302                          * oal->seg[2] = frag[8]
1303                          * oal->seg[3] = frag[9]
1304                          * oal->seg[4] = frag[10]
1305                          *      etc...
1306                          */
1307                         /* Tack on the OAL in the eighth segment of IOCB. */
1308                         map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1309                                              sizeof(struct oal),
1310                                              PCI_DMA_TODEVICE);
1311                         err = pci_dma_mapping_error(qdev->pdev, map);
1312                         if (err) {
1313                                 QPRINTK(qdev, TX_QUEUED, ERR,
1314                                         "PCI mapping outbound address list with error: %d\n",
1315                                         err);
1316                                 goto map_error;
1317                         }
1318
1319                         tbd->addr = cpu_to_le64(map);
1320                         /*
1321                          * The length is the number of fragments
1322                          * that remain to be mapped times the size
1323                          * of a single OAL segment descriptor.
1324                          */
1325                         tbd->len =
1326                             cpu_to_le32((sizeof(struct tx_buf_desc) *
1327                                          (frag_cnt - frag_idx)) | TX_DESC_C);
1328                         pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1329                                            map);
1330                         pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1331                                           sizeof(struct oal));
1332                         tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1333                         map_idx++;
1334                 }
1335
1336                 map =
1337                     pci_map_page(qdev->pdev, frag->page,
1338                                  frag->page_offset, frag->size,
1339                                  PCI_DMA_TODEVICE);
1340
1341                 err = pci_dma_mapping_error(qdev->pdev, map);
1342                 if (err) {
1343                         QPRINTK(qdev, TX_QUEUED, ERR,
1344                                 "PCI mapping frags failed with error: %d.\n",
1345                                 err);
1346                         goto map_error;
1347                 }
1348
1349                 tbd->addr = cpu_to_le64(map);
1350                 tbd->len = cpu_to_le32(frag->size);
1351                 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1352                 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1353                                   frag->size);
1354
1355         }
1356         /* Save the number of segments we've mapped. */
1357         tx_ring_desc->map_cnt = map_idx;
1358         /* Terminate the last segment. */
1359         tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1360         return NETDEV_TX_OK;
1361
1362 map_error:
1363         /*
1364          * If the first frag mapping failed, then map_idx will be zero.
1365          * This causes the unmap of the skb->data area.  Otherwise
1366          * we pass in the number of frags that mapped successfully
1367          * so they can be unmapped.
1368          */
1369         ql_unmap_send(qdev, tx_ring_desc, map_idx);
1370         return NETDEV_TX_BUSY;
1371 }
1372
1373 static void ql_realign_skb(struct sk_buff *skb, int len)
1374 {
1375         void *temp_addr = skb->data;
1376
1377         /* Undo the skb_reserve(skb,32) we did before
1378          * giving to hardware, and realign data on
1379          * a 2-byte boundary.
1380          */
1381         skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1382         skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1383         skb_copy_to_linear_data(skb, temp_addr,
1384                 (unsigned int)len);
1385 }
1386
1387 /*
1388  * This function builds an skb for the given inbound
1389  * completion.  It will be rewritten for readability in the near
1390  * future, but for now it works well.
1391  */
1392 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1393                                        struct rx_ring *rx_ring,
1394                                        struct ib_mac_iocb_rsp *ib_mac_rsp)
1395 {
1396         struct bq_desc *lbq_desc;
1397         struct bq_desc *sbq_desc;
1398         struct sk_buff *skb = NULL;
1399         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1400         u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1401
1402         /*
1403          * Handle the header buffer if present.
1404          */
1405         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1406             ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1407                 QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len);
1408                 /*
1409                  * Headers fit nicely into a small buffer.
1410                  */
1411                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1412                 pci_unmap_single(qdev->pdev,
1413                                 pci_unmap_addr(sbq_desc, mapaddr),
1414                                 pci_unmap_len(sbq_desc, maplen),
1415                                 PCI_DMA_FROMDEVICE);
1416                 skb = sbq_desc->p.skb;
1417                 ql_realign_skb(skb, hdr_len);
1418                 skb_put(skb, hdr_len);
1419                 sbq_desc->p.skb = NULL;
1420         }
1421
1422         /*
1423          * Handle the data buffer(s).
1424          */
1425         if (unlikely(!length)) {        /* Is there data too? */
1426                 QPRINTK(qdev, RX_STATUS, DEBUG,
1427                         "No Data buffer in this packet.\n");
1428                 return skb;
1429         }
1430
1431         if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1432                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1433                         QPRINTK(qdev, RX_STATUS, DEBUG,
1434                                 "Headers in small, data of %d bytes in small, combine them.\n", length);
1435                         /*
1436                          * Data is less than small buffer size so it's
1437                          * stuffed in a small buffer.
1438                          * For this case we append the data
1439                          * from the "data" small buffer to the "header" small
1440                          * buffer.
1441                          */
1442                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1443                         pci_dma_sync_single_for_cpu(qdev->pdev,
1444                                                     pci_unmap_addr
1445                                                     (sbq_desc, mapaddr),
1446                                                     pci_unmap_len
1447                                                     (sbq_desc, maplen),
1448                                                     PCI_DMA_FROMDEVICE);
1449                         memcpy(skb_put(skb, length),
1450                                sbq_desc->p.skb->data, length);
1451                         pci_dma_sync_single_for_device(qdev->pdev,
1452                                                        pci_unmap_addr
1453                                                        (sbq_desc,
1454                                                         mapaddr),
1455                                                        pci_unmap_len
1456                                                        (sbq_desc,
1457                                                         maplen),
1458                                                        PCI_DMA_FROMDEVICE);
1459                 } else {
1460                         QPRINTK(qdev, RX_STATUS, DEBUG,
1461                                 "%d bytes in a single small buffer.\n", length);
1462                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1463                         skb = sbq_desc->p.skb;
1464                         ql_realign_skb(skb, length);
1465                         skb_put(skb, length);
1466                         pci_unmap_single(qdev->pdev,
1467                                          pci_unmap_addr(sbq_desc,
1468                                                         mapaddr),
1469                                          pci_unmap_len(sbq_desc,
1470                                                        maplen),
1471                                          PCI_DMA_FROMDEVICE);
1472                         sbq_desc->p.skb = NULL;
1473                 }
1474         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1475                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1476                         QPRINTK(qdev, RX_STATUS, DEBUG,
1477                                 "Header in small, %d bytes in large. Chain large to small!\n", length);
1478                         /*
1479                          * The data is in a single large buffer.  We
1480                          * chain it to the header buffer's skb and let
1481                          * it rip.
1482                          */
1483                         lbq_desc = ql_get_curr_lbuf(rx_ring);
1484                         pci_unmap_page(qdev->pdev,
1485                                        pci_unmap_addr(lbq_desc,
1486                                                       mapaddr),
1487                                        pci_unmap_len(lbq_desc, maplen),
1488                                        PCI_DMA_FROMDEVICE);
1489                         QPRINTK(qdev, RX_STATUS, DEBUG,
1490                                 "Chaining page to skb.\n");
1491                         skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
1492                                            0, length);
1493                         skb->len += length;
1494                         skb->data_len += length;
1495                         skb->truesize += length;
1496                         lbq_desc->p.lbq_page = NULL;
1497                 } else {
1498                         /*
1499                          * The headers and data are in a single large buffer. We
1500                          * copy it to a new skb and let it go. This can happen with
1501                          * jumbo mtu on a non-TCP/UDP frame.
1502                          */
1503                         lbq_desc = ql_get_curr_lbuf(rx_ring);
1504                         skb = netdev_alloc_skb(qdev->ndev, length);
1505                         if (skb == NULL) {
1506                                 QPRINTK(qdev, PROBE, DEBUG,
1507                                         "No skb available, drop the packet.\n");
1508                                 return NULL;
1509                         }
1510                         pci_unmap_page(qdev->pdev,
1511                                        pci_unmap_addr(lbq_desc,
1512                                                       mapaddr),
1513                                        pci_unmap_len(lbq_desc, maplen),
1514                                        PCI_DMA_FROMDEVICE);
1515                         skb_reserve(skb, NET_IP_ALIGN);
1516                         QPRINTK(qdev, RX_STATUS, DEBUG,
1517                                 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
1518                         skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
1519                                            0, length);
1520                         skb->len += length;
1521                         skb->data_len += length;
1522                         skb->truesize += length;
1523                         length = 0;     /* the entire frame is in this one page */
1524                         lbq_desc->p.lbq_page = NULL;
1525                         __pskb_pull_tail(skb,
1526                                 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1527                                 VLAN_ETH_HLEN : ETH_HLEN);
1528                 }
1529         } else {
1530                 /*
1531                  * The data is in a chain of large buffers
1532                  * pointed to by a small buffer.  We loop
1533          * through and chain them to our small header
1534          * buffer's skb.
1535          * frags:  There are at most 18 frags and our small
1536          *         buffer will hold 32 of them.  The thing is,
1537          *         we'll use at most 3 for our 9000 byte jumbo
1538          *         frames.  If the MTU goes up we could
1539          *         eventually be in trouble.
1540                  */
1541                 int size, offset, i = 0;
1542                 __le64 *bq, bq_array[8];
1543                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1544                 pci_unmap_single(qdev->pdev,
1545                                  pci_unmap_addr(sbq_desc, mapaddr),
1546                                  pci_unmap_len(sbq_desc, maplen),
1547                                  PCI_DMA_FROMDEVICE);
1548                 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1549                         /*
1550                          * This is an non TCP/UDP IP frame, so
1551                          * the headers aren't split into a small
1552                          * buffer.  We have to use the small buffer
1553                          * that contains our sg list as our skb to
1554                          * send upstairs. Copy the sg list here to
1555                          * a local buffer and use it to find the
1556                          * pages to chain.
1557                          */
1558                         QPRINTK(qdev, RX_STATUS, DEBUG,
1559                                 "%d bytes of headers & data in chain of large.\n", length);
1560                         skb = sbq_desc->p.skb;
1561                         bq = &bq_array[0];
1562                         memcpy(bq, skb->data, sizeof(bq_array));
1563                         sbq_desc->p.skb = NULL;
1564                         skb_reserve(skb, NET_IP_ALIGN);
1565                 } else {
1566                         QPRINTK(qdev, RX_STATUS, DEBUG,
1567                                 "Headers in small, %d bytes of data in chain of large.\n", length);
1568                         bq = (__le64 *)sbq_desc->p.skb->data;
1569                 }
1570                 while (length > 0) {
1571                         lbq_desc = ql_get_curr_lbuf(rx_ring);
1572                         pci_unmap_page(qdev->pdev,
1573                                        pci_unmap_addr(lbq_desc,
1574                                                       mapaddr),
1575                                        pci_unmap_len(lbq_desc,
1576                                                      maplen),
1577                                        PCI_DMA_FROMDEVICE);
1578                         size = (length < PAGE_SIZE) ? length : PAGE_SIZE;
1579                         offset = 0;
1580
1581                         QPRINTK(qdev, RX_STATUS, DEBUG,
1582                                 "Adding page %d to skb for %d bytes.\n",
1583                                 i, size);
1584                         skb_fill_page_desc(skb, i, lbq_desc->p.lbq_page,
1585                                            offset, size);
1586                         skb->len += size;
1587                         skb->data_len += size;
1588                         skb->truesize += size;
1589                         length -= size;
1590                         lbq_desc->p.lbq_page = NULL;
1591                         bq++;
1592                         i++;
1593                 }
1594                 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1595                                 VLAN_ETH_HLEN : ETH_HLEN);
1596         }
1597         return skb;
1598 }
1599
1600 /* Process an inbound completion from an rx ring. */
1601 static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1602                                    struct rx_ring *rx_ring,
1603                                    struct ib_mac_iocb_rsp *ib_mac_rsp)
1604 {
1605         struct net_device *ndev = qdev->ndev;
1606         struct sk_buff *skb = NULL;
1607         u16 vlan_id = (le16_to_cpu(ib_mac_rsp->vlan_id) &
1608                         IB_MAC_IOCB_RSP_VLAN_MASK);
1609
1610         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1611
1612         skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1613         if (unlikely(!skb)) {
1614                 QPRINTK(qdev, RX_STATUS, DEBUG,
1615                         "No skb available, drop packet.\n");
1616                 return;
1617         }
1618
1619         /* Frame error, so drop the packet. */
1620         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1621                 QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
1622                                         ib_mac_rsp->flags2);
1623                 dev_kfree_skb_any(skb);
1624                 return;
1625         }
1626
1627         /* The max framesize filter on this chip is set higher than
1628          * MTU since FCoE uses 2k frames.
1629          */
1630         if (skb->len > ndev->mtu + ETH_HLEN) {
1631                 dev_kfree_skb_any(skb);
1632                 return;
1633         }
1634
1635         prefetch(skb->data);
1636         skb->dev = ndev;
1637         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1638                 QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
1639                         (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1640                         IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
1641                         (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1642                         IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
1643                         (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1644                         IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1645         }
1646         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1647                 QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
1648         }
1649
1650         skb->protocol = eth_type_trans(skb, ndev);
1651         skb->ip_summed = CHECKSUM_NONE;
1652
1653         /* If rx checksum is on, and there are no
1654          * csum or frame errors.
1655          */
1656         if (qdev->rx_csum &&
1657                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1658                 /* TCP frame. */
1659                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1660                         QPRINTK(qdev, RX_STATUS, DEBUG,
1661                                         "TCP checksum done!\n");
1662                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1663                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1664                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1665                 /* Unfragmented ipv4 UDP frame. */
1666                         struct iphdr *iph = (struct iphdr *) skb->data;
1667                         if (!(iph->frag_off &
1668                                 cpu_to_be16(IP_MF|IP_OFFSET))) {
1669                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1670                                 QPRINTK(qdev, RX_STATUS, DEBUG,
1671                                                 "UDP checksum done!\n");
1672                         }
1673                 }
1674         }
1675
1676         qdev->stats.rx_packets++;
1677         qdev->stats.rx_bytes += skb->len;
1678         skb_record_rx_queue(skb, rx_ring->cq_id);
1679         if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1680                 if (qdev->vlgrp &&
1681                         (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1682                         (vlan_id != 0))
1683                         vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
1684                                 vlan_id, skb);
1685                 else
1686                         napi_gro_receive(&rx_ring->napi, skb);
1687         } else {
1688                 if (qdev->vlgrp &&
1689                         (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1690                         (vlan_id != 0))
1691                         vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1692                 else
1693                         netif_receive_skb(skb);
1694         }
1695 }
1696
1697 /* Process an outbound completion from an rx ring. */
1698 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
1699                                    struct ob_mac_iocb_rsp *mac_rsp)
1700 {
1701         struct tx_ring *tx_ring;
1702         struct tx_ring_desc *tx_ring_desc;
1703
1704         QL_DUMP_OB_MAC_RSP(mac_rsp);
1705         tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
1706         tx_ring_desc = &tx_ring->q[mac_rsp->tid];
1707         ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
1708         qdev->stats.tx_bytes += (tx_ring_desc->skb)->len;
1709         qdev->stats.tx_packets++;
1710         dev_kfree_skb(tx_ring_desc->skb);
1711         tx_ring_desc->skb = NULL;
1712
1713         if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
1714                                         OB_MAC_IOCB_RSP_S |
1715                                         OB_MAC_IOCB_RSP_L |
1716                                         OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
1717                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
1718                         QPRINTK(qdev, TX_DONE, WARNING,
1719                                 "Total descriptor length did not match transfer length.\n");
1720                 }
1721                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
1722                         QPRINTK(qdev, TX_DONE, WARNING,
1723                                 "Frame too short to be legal, not sent.\n");
1724                 }
1725                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
1726                         QPRINTK(qdev, TX_DONE, WARNING,
1727                                 "Frame too long, but sent anyway.\n");
1728                 }
1729                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
1730                         QPRINTK(qdev, TX_DONE, WARNING,
1731                                 "PCI backplane error. Frame not sent.\n");
1732                 }
1733         }
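        /* tx_count tracks free slots in the tx ring; this completion
         * frees one up.
         */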
1734         atomic_inc(&tx_ring->tx_count);
1735 }
1736
1737 /* Fire up a handler to reset the MPI processor. */
1738 void ql_queue_fw_error(struct ql_adapter *qdev)
1739 {
1740         ql_link_off(qdev);
1741         queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
1742 }
1743
1744 void ql_queue_asic_error(struct ql_adapter *qdev)
1745 {
1746         ql_link_off(qdev);
1747         ql_disable_interrupts(qdev);
1748         /* Clear adapter up bit to signal the recovery
1749          * process that it shouldn't kill the reset worker
1750          * thread
1751          */
1752         clear_bit(QL_ADAPTER_UP, &qdev->flags);
1753         queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
1754 }
1755
1756 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
1757                                     struct ib_ae_iocb_rsp *ib_ae_rsp)
1758 {
1759         switch (ib_ae_rsp->event) {
1760         case MGMT_ERR_EVENT:
1761                 QPRINTK(qdev, RX_ERR, ERR,
1762                         "Management Processor Fatal Error.\n");
1763                 ql_queue_fw_error(qdev);
1764                 return;
1765
1766         case CAM_LOOKUP_ERR_EVENT:
1767                 QPRINTK(qdev, LINK, ERR,
1768                         "Multiple CAM hits occurred on lookup.\n");
1769                 QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
1770                 ql_queue_asic_error(qdev);
1771                 return;
1772
1773         case SOFT_ECC_ERROR_EVENT:
1774                 QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
1775                 ql_queue_asic_error(qdev);
1776                 break;
1777
1778         case PCI_ERR_ANON_BUF_RD:
1779                 QPRINTK(qdev, RX_ERR, ERR,
1780                         "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
1781                         ib_ae_rsp->q_id);
1782                 ql_queue_asic_error(qdev);
1783                 break;
1784
1785         default:
1786                 QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
1787                         ib_ae_rsp->event);
1788                 ql_queue_asic_error(qdev);
1789                 break;
1790         }
1791 }
1792
1793 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
1794 {
1795         struct ql_adapter *qdev = rx_ring->qdev;
1796         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1797         struct ob_mac_iocb_rsp *net_rsp = NULL;
1798         int count = 0;
1799
1800         struct tx_ring *tx_ring;
1801         /* While there are entries in the completion queue. */
1802         while (prod != rx_ring->cnsmr_idx) {
1803
1804                 QPRINTK(qdev, RX_STATUS, DEBUG,
1805                         "cq_id = %d, prod = %d, cnsmr = %d.\n", rx_ring->cq_id,
1806                         prod, rx_ring->cnsmr_idx);
1807
1808                 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
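                /* The chip posts the producer index to the shadow
                 * register via DMA; rmb() below makes sure we read
                 * the IOCB contents only after seeing that update.
                 */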
1809                 rmb();
1810                 switch (net_rsp->opcode) {
1811
1812                 case OPCODE_OB_MAC_TSO_IOCB:
1813                 case OPCODE_OB_MAC_IOCB:
1814                         ql_process_mac_tx_intr(qdev, net_rsp);
1815                         break;
1816                 default:
1817                         QPRINTK(qdev, RX_STATUS, DEBUG,
1818                                 "Hit default case, not handled! Ignoring the completion, opcode = %x.\n",
1819                                 net_rsp->opcode);
1820                 }
1821                 count++;
1822                 ql_update_cq(rx_ring);
1823                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1824         }
1825         ql_write_cq_idx(rx_ring);
1826         if (net_rsp != NULL) {
1827                 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
1828                 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id) &&
1829                     atomic_read(&tx_ring->queue_stopped) &&
1830                     (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
1831                         /*
1832                          * The queue got stopped because the tx_ring was full.
1833                          * Wake it up, because it's now at least 25% empty.
1834                          */
1835                         netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
1836         }
1837
1838         return count;
1839 }
1840
1841 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
1842 {
1843         struct ql_adapter *qdev = rx_ring->qdev;
1844         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1845         struct ql_net_rsp_iocb *net_rsp;
1846         int count = 0;
1847
1848         /* While there are entries in the completion queue. */
1849         while (prod != rx_ring->cnsmr_idx) {
1850
1851                 QPRINTK(qdev, RX_STATUS, DEBUG,
1852                         "cq_id = %d, prod = %d, cnsmr = %d.\n", rx_ring->cq_id,
1853                         prod, rx_ring->cnsmr_idx);
1854
1855                 net_rsp = rx_ring->curr_entry;
1856                 rmb();
1857                 switch (net_rsp->opcode) {
1858                 case OPCODE_IB_MAC_IOCB:
1859                         ql_process_mac_rx_intr(qdev, rx_ring,
1860                                                (struct ib_mac_iocb_rsp *)
1861                                                net_rsp);
1862                         break;
1863
1864                 case OPCODE_IB_AE_IOCB:
1865                         ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
1866                                                 net_rsp);
1867                         break;
1868                 default:
1869                         {
1870                                 QPRINTK(qdev, RX_STATUS, DEBUG,
1871                                         "Hit default case, not handled! dropping the packet, opcode = %x.\n",
1872                                         net_rsp->opcode);
1873                         }
1874                 }
1875                 count++;
1876                 ql_update_cq(rx_ring);
1877                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
1878                 if (count == budget)
1879                         break;
1880         }
1881         ql_update_buffer_queues(qdev, rx_ring);
1882         ql_write_cq_idx(rx_ring);
1883         return count;
1884 }
1885
1886 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
1887 {
1888         struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
1889         struct ql_adapter *qdev = rx_ring->qdev;
1890         struct rx_ring *trx_ring;
1891         int i, work_done = 0;
1892         struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
1893
1894         QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
1895                 rx_ring->cq_id);
1896
1897         /* Service the TX rings first.  They start
1898          * right after the RSS rings. */
1899         for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
1900                 trx_ring = &qdev->rx_ring[i];
1901                 /* If this TX completion ring belongs to this vector and
1902                  * it's not empty then service it.
1903                  */
1904                 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
1905                         (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
1906                                         trx_ring->cnsmr_idx)) {
1907                         QPRINTK(qdev, INTR, DEBUG,
1908                                 "%s: Servicing TX completion ring %d.\n",
1909                                 __func__, trx_ring->cq_id);
1910                         ql_clean_outbound_rx_ring(trx_ring);
1911                 }
1912         }
1913
1914         /*
1915          * Now service the RSS ring if it's active.
1916          */
1917         if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
1918                                         rx_ring->cnsmr_idx) {
1919                 QPRINTK(qdev, INTR, DEBUG,
1920                         "%s: Servicing RX completion ring %d.\n",
1921                         __func__, rx_ring->cq_id);
1922                 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
1923         }
1924
1925         if (work_done < budget) {
1926                 napi_complete(napi);
1927                 ql_enable_completion_interrupt(qdev, rx_ring->irq);
1928         }
1929         return work_done;
1930 }
1931
1932 static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
1933 {
1934         struct ql_adapter *qdev = netdev_priv(ndev);
1935
1936         qdev->vlgrp = grp;
1937         if (grp) {
1938                 QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
1939                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
1940                            NIC_RCV_CFG_VLAN_MATCH_AND_NON);
1941         } else {
1942                 QPRINTK(qdev, IFUP, DEBUG,
1943                         "Turning off VLAN in NIC_RCV_CFG.\n");
1944                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
1945         }
1946 }
1947
1948 static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
1949 {
1950         struct ql_adapter *qdev = netdev_priv(ndev);
1951         u32 enable_bit = MAC_ADDR_E;
1952         int status;
1953
1954         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
1955         if (status)
1956                 return;
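        /* MAC_ADDR_E marks the VLAN CAM entry as enabled; the kill
         * path below writes a zero to disable the entry for this vid.
         */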
1957         if (ql_set_mac_addr_reg
1958             (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
1959                 QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
1960         }
1961         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
1962 }
1963
1964 static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
1965 {
1966         struct ql_adapter *qdev = netdev_priv(ndev);
1967         u32 enable_bit = 0;
1968         int status;
1969
1970         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
1971         if (status)
1972                 return;
1973
1974         if (ql_set_mac_addr_reg
1975             (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
1976                 QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
1977         }
1978         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
1979
1980 }
1981
1982 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
1983 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
1984 {
1985         struct rx_ring *rx_ring = dev_id;
1986         napi_schedule(&rx_ring->napi);
1987         return IRQ_HANDLED;
1988 }
1989
1990 /* This handles a fatal error, MPI activity, and the default
1991  * rx_ring in an MSI-X multiple vector environment.
1992  * In an MSI/Legacy environment it also processes the rest of
1993  * the rx_rings.
1994  */
1995 static irqreturn_t qlge_isr(int irq, void *dev_id)
1996 {
1997         struct rx_ring *rx_ring = dev_id;
1998         struct ql_adapter *qdev = rx_ring->qdev;
1999         struct intr_context *intr_context = &qdev->intr_context[0];
2000         u32 var;
2001         int work_done = 0;
2002
2003         spin_lock(&qdev->hw_lock);
2004         if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2005                 QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
2006                 spin_unlock(&qdev->hw_lock);
2007                 return IRQ_NONE;
2008         }
2009         spin_unlock(&qdev->hw_lock);
2010
2011         var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2012
2013         /*
2014          * Check for fatal error.
2015          */
2016         if (var & STS_FE) {
2017                 ql_queue_asic_error(qdev);
2018                 QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
2019                 var = ql_read32(qdev, ERR_STS);
2020                 QPRINTK(qdev, INTR, ERR,
2021                         "Resetting chip. Error Status Register = 0x%x\n", var);
2022                 return IRQ_HANDLED;
2023         }
2024
2025         /*
2026          * Check MPI processor activity.
2027          */
2028         if ((var & STS_PI) &&
2029                 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2030                 /*
2031                  * We've got an async event or mailbox completion.
2032                  * Handle it and clear the source of the interrupt.
2033                  */
2034                 QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
2035                 ql_disable_completion_interrupt(qdev, intr_context->intr);
2036                 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2037                 queue_delayed_work_on(smp_processor_id(),
2038                                 qdev->workqueue, &qdev->mpi_work, 0);
2039                 work_done++;
2040         }
2041
2042         /*
2043          * Get the bit-mask that shows the active queues for this
2044          * pass.  Compare it to the queues that this irq services
2045          * and call napi if there's a match.
2046          */
2047         var = ql_read32(qdev, ISR1);
2048         if (var & intr_context->irq_mask) {
2049                 QPRINTK(qdev, INTR, INFO,
2050                         "Waking handler for rx_ring[0].\n");
2051                 ql_disable_completion_interrupt(qdev, intr_context->intr);
2052                 napi_schedule(&rx_ring->napi);
2053                 work_done++;
2054         }
2055         ql_enable_completion_interrupt(qdev, intr_context->intr);
2056         return work_done ? IRQ_HANDLED : IRQ_NONE;
2057 }
2058
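/* Set up the IOCB for TSO if the skb requires it.  Returns 1 if a
 * TSO request was built, 0 if the skb is not GSO, or a negative
 * errno if a cloned header could not be expanded.
 */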
2059 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2060 {
2061
2062         if (skb_is_gso(skb)) {
2063                 int err;
2064                 if (skb_header_cloned(skb)) {
2065                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2066                         if (err)
2067                                 return err;
2068                 }
2069
2070                 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2071                 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2072                 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2073                 mac_iocb_ptr->total_hdrs_len =
2074                     cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2075                 mac_iocb_ptr->net_trans_offset =
2076                     cpu_to_le16(skb_network_offset(skb) |
2077                                 skb_transport_offset(skb)
2078                                 << OB_MAC_TRANSPORT_HDR_SHIFT);
2079                 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2080                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2081                 if (likely(skb->protocol == htons(ETH_P_IP))) {
2082                         struct iphdr *iph = ip_hdr(skb);
2083                         iph->check = 0;
2084                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2085                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2086                                                                  iph->daddr, 0,
2087                                                                  IPPROTO_TCP,
2088                                                                  0);
2089                 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2090                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2091                         tcp_hdr(skb)->check =
2092                             ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2093                                              &ipv6_hdr(skb)->daddr,
2094                                              0, IPPROTO_TCP, 0);
2095                 }
2096                 return 1;
2097         }
2098         return 0;
2099 }
2100
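/* Build a TCP/UDP checksum offload request.  The packet's checksum
 * field is seeded with the pseudo-header checksum so the chip can
 * compute and insert the rest on transmit.
 */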
2101 static void ql_hw_csum_setup(struct sk_buff *skb,
2102                              struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2103 {
2104         int len;
2105         struct iphdr *iph = ip_hdr(skb);
2106         __sum16 *check;
2107         mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2108         mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2109         mac_iocb_ptr->net_trans_offset =
2110                 cpu_to_le16(skb_network_offset(skb) |
2111                 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2112
2113         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2114         len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2115         if (likely(iph->protocol == IPPROTO_TCP)) {
2116                 check = &(tcp_hdr(skb)->check);
2117                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2118                 mac_iocb_ptr->total_hdrs_len =
2119                     cpu_to_le16(skb_transport_offset(skb) +
2120                                 (tcp_hdr(skb)->doff << 2));
2121         } else {
2122                 check = &(udp_hdr(skb)->check);
2123                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2124                 mac_iocb_ptr->total_hdrs_len =
2125                     cpu_to_le16(skb_transport_offset(skb) +
2126                                 sizeof(struct udphdr));
2127         }
2128         *check = ~csum_tcpudp_magic(iph->saddr,
2129                                     iph->daddr, len, iph->protocol, 0);
2130 }
2131
2132 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2133 {
2134         struct tx_ring_desc *tx_ring_desc;
2135         struct ob_mac_iocb_req *mac_iocb_ptr;
2136         struct ql_adapter *qdev = netdev_priv(ndev);
2137         int tso;
2138         struct tx_ring *tx_ring;
2139         u32 tx_ring_idx = (u32) skb->queue_mapping;
2140
2141         tx_ring = &qdev->tx_ring[tx_ring_idx];
2142
2143         if (skb_padto(skb, ETH_ZLEN))
2144                 return NETDEV_TX_OK;
2145
2146         if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2147                 QPRINTK(qdev, TX_QUEUED, INFO,
2148                         "%s: shutting down tx queue %d due to lack of resources.\n",
2149                         __func__, tx_ring_idx);
2150                 netif_stop_subqueue(ndev, tx_ring->wq_id);
2151                 atomic_inc(&tx_ring->queue_stopped);
2152                 return NETDEV_TX_BUSY;
2153         }
2154         tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2155         mac_iocb_ptr = tx_ring_desc->queue_entry;
2156         memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2157
2158         mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2159         mac_iocb_ptr->tid = tx_ring_desc->index;
2160         /* Record the tx queue index for this IO.  When we get
2161          * the completion we can use it to establish the context.
2162          */
2163         mac_iocb_ptr->txq_idx = tx_ring_idx;
2164         tx_ring_desc->skb = skb;
2165
2166         mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2167
2168         if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
2169                 QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
2170                         vlan_tx_tag_get(skb));
2171                 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2172                 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2173         }
2174         tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2175         if (tso < 0) {
2176                 dev_kfree_skb_any(skb);
2177                 return NETDEV_TX_OK;
2178         } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2179                 ql_hw_csum_setup(skb,
2180                                  (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2181         }
2182         if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2183                         NETDEV_TX_OK) {
2184                 QPRINTK(qdev, TX_QUEUED, ERR,
2185                                 "Could not map the segments.\n");
2186                 return NETDEV_TX_BUSY;
2187         }
2188         QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2189         tx_ring->prod_idx++;
2190         if (tx_ring->prod_idx == tx_ring->wq_len)
2191                 tx_ring->prod_idx = 0;
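        /* Make sure the IOCB is fully written to memory before the
         * doorbell write below tells the chip to fetch it.
         */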
2192         wmb();
2193
2194         ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2195         QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
2196                 tx_ring->prod_idx, skb->len);
2197
2198         atomic_dec(&tx_ring->tx_count);
2199         return NETDEV_TX_OK;
2200 }
2201
2202 static void ql_free_shadow_space(struct ql_adapter *qdev)
2203 {
2204         if (qdev->rx_ring_shadow_reg_area) {
2205                 pci_free_consistent(qdev->pdev,
2206                                     PAGE_SIZE,
2207                                     qdev->rx_ring_shadow_reg_area,
2208                                     qdev->rx_ring_shadow_reg_dma);
2209                 qdev->rx_ring_shadow_reg_area = NULL;
2210         }
2211         if (qdev->tx_ring_shadow_reg_area) {
2212                 pci_free_consistent(qdev->pdev,
2213                                     PAGE_SIZE,
2214                                     qdev->tx_ring_shadow_reg_area,
2215                                     qdev->tx_ring_shadow_reg_dma);
2216                 qdev->tx_ring_shadow_reg_area = NULL;
2217         }
2218 }
2219
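/* Allocate a page of coherent DMA memory each for the rx and tx
 * shadow areas.  The chip posts queue index updates here (and the
 * rx page also holds the buffer queue address lists), so the driver
 * can poll ring state from memory instead of MMIO registers.
 */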
2220 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2221 {
2222         qdev->rx_ring_shadow_reg_area =
2223             pci_alloc_consistent(qdev->pdev,
2224                                  PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2225         if (qdev->rx_ring_shadow_reg_area == NULL) {
2226                 QPRINTK(qdev, IFUP, ERR,
2227                         "Allocation of RX shadow space failed.\n");
2228                 return -ENOMEM;
2229         }
2230         memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
2231         qdev->tx_ring_shadow_reg_area =
2232             pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2233                                  &qdev->tx_ring_shadow_reg_dma);
2234         if (qdev->tx_ring_shadow_reg_area == NULL) {
2235                 QPRINTK(qdev, IFUP, ERR,
2236                         "Allocation of TX shadow space failed.\n");
2237                 goto err_wqp_sh_area;
2238         }
2239         memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
2240         return 0;
2241
2242 err_wqp_sh_area:
2243         pci_free_consistent(qdev->pdev,
2244                             PAGE_SIZE,
2245                             qdev->rx_ring_shadow_reg_area,
2246                             qdev->rx_ring_shadow_reg_dma);
2247         return -ENOMEM;
2248 }
2249
2250 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2251 {
2252         struct tx_ring_desc *tx_ring_desc;
2253         int i;
2254         struct ob_mac_iocb_req *mac_iocb_ptr;
2255
2256         mac_iocb_ptr = tx_ring->wq_base;
2257         tx_ring_desc = tx_ring->q;
2258         for (i = 0; i < tx_ring->wq_len; i++) {
2259                 tx_ring_desc->index = i;
2260                 tx_ring_desc->skb = NULL;
2261                 tx_ring_desc->queue_entry = mac_iocb_ptr;
2262                 mac_iocb_ptr++;
2263                 tx_ring_desc++;
2264         }
2265         atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2266         atomic_set(&tx_ring->queue_stopped, 0);
2267 }
2268
2269 static void ql_free_tx_resources(struct ql_adapter *qdev,
2270                                  struct tx_ring *tx_ring)
2271 {
2272         if (tx_ring->wq_base) {
2273                 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2274                                     tx_ring->wq_base, tx_ring->wq_base_dma);
2275                 tx_ring->wq_base = NULL;
2276         }
2277         kfree(tx_ring->q);
2278         tx_ring->q = NULL;
2279 }
2280
2281 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2282                                  struct tx_ring *tx_ring)
2283 {
2284         tx_ring->wq_base =
2285             pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2286                                  &tx_ring->wq_base_dma);
2287
2288         if ((tx_ring->wq_base == NULL) ||
2289             (tx_ring->wq_base_dma & WQ_ADDR_ALIGN)) {
2290                 QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
2291                 return -ENOMEM;
2292         }
2293         tx_ring->q =
2294             kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2295         if (tx_ring->q == NULL)
2296                 goto err;
2297
2298         return 0;
2299 err:
2300         pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2301                             tx_ring->wq_base, tx_ring->wq_base_dma);
2302         return -ENOMEM;
2303 }
2304
2305 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2306 {
2307         int i;
2308         struct bq_desc *lbq_desc;
2309
2310         for (i = 0; i < rx_ring->lbq_len; i++) {
2311                 lbq_desc = &rx_ring->lbq[i];
2312                 if (lbq_desc->p.lbq_page) {
2313                         pci_unmap_page(qdev->pdev,
2314                                        pci_unmap_addr(lbq_desc, mapaddr),
2315                                        pci_unmap_len(lbq_desc, maplen),
2316                                        PCI_DMA_FROMDEVICE);
2317
2318                         put_page(lbq_desc->p.lbq_page);
2319                         lbq_desc->p.lbq_page = NULL;
2320                 }
2321         }
2322 }
2323
2324 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2325 {
2326         int i;
2327         struct bq_desc *sbq_desc;
2328
2329         for (i = 0; i < rx_ring->sbq_len; i++) {
2330                 sbq_desc = &rx_ring->sbq[i];
2331                 if (sbq_desc == NULL) {
2332                         QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
2333                         return;
2334                 }
2335                 if (sbq_desc->p.skb) {
2336                         pci_unmap_single(qdev->pdev,
2337                                          pci_unmap_addr(sbq_desc, mapaddr),
2338                                          pci_unmap_len(sbq_desc, maplen),
2339                                          PCI_DMA_FROMDEVICE);
2340                         dev_kfree_skb(sbq_desc->p.skb);
2341                         sbq_desc->p.skb = NULL;
2342                 }
2343         }
2344 }
2345
2346 /* Free all large and small rx buffers associated
2347  * with the completion queues for this device.
2348  */
2349 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2350 {
2351         int i;
2352         struct rx_ring *rx_ring;
2353
2354         for (i = 0; i < qdev->rx_ring_count; i++) {
2355                 rx_ring = &qdev->rx_ring[i];
2356                 if (rx_ring->lbq)
2357                         ql_free_lbq_buffers(qdev, rx_ring);
2358                 if (rx_ring->sbq)
2359                         ql_free_sbq_buffers(qdev, rx_ring);
2360         }
2361 }
2362
2363 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2364 {
2365         struct rx_ring *rx_ring;
2366         int i;
2367
2368         for (i = 0; i < qdev->rx_ring_count; i++) {
2369                 rx_ring = &qdev->rx_ring[i];
2370                 if (rx_ring->type != TX_Q)
2371                         ql_update_buffer_queues(qdev, rx_ring);
2372         }
2373 }
2374
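/* Point each large buffer control block at its slot in the
 * DMA-visible list of buffer addresses for this ring.
 */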
2375 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2376                                 struct rx_ring *rx_ring)
2377 {
2378         int i;
2379         struct bq_desc *lbq_desc;
2380         __le64 *bq = rx_ring->lbq_base;
2381
2382         memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2383         for (i = 0; i < rx_ring->lbq_len; i++) {
2384                 lbq_desc = &rx_ring->lbq[i];
2385                 memset(lbq_desc, 0, sizeof(*lbq_desc));
2386                 lbq_desc->index = i;
2387                 lbq_desc->addr = bq;
2388                 bq++;
2389         }
2390 }
2391
2392 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2393                                 struct rx_ring *rx_ring)
2394 {
2395         int i;
2396         struct bq_desc *sbq_desc;
2397         __le64 *bq = rx_ring->sbq_base;
2398
2399         memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2400         for (i = 0; i < rx_ring->sbq_len; i++) {
2401                 sbq_desc = &rx_ring->sbq[i];
2402                 memset(sbq_desc, 0, sizeof(*sbq_desc));
2403                 sbq_desc->index = i;
2404                 sbq_desc->addr = bq;
2405                 bq++;
2406         }
2407 }
2408
2409 static void ql_free_rx_resources(struct ql_adapter *qdev,
2410                                  struct rx_ring *rx_ring)
2411 {
2412         /* Free the small buffer queue. */
2413         if (rx_ring->sbq_base) {
2414                 pci_free_consistent(qdev->pdev,
2415                                     rx_ring->sbq_size,
2416                                     rx_ring->sbq_base, rx_ring->sbq_base_dma);
2417                 rx_ring->sbq_base = NULL;
2418         }
2419
2420         /* Free the small buffer queue control blocks. */
2421         kfree(rx_ring->sbq);
2422         rx_ring->sbq = NULL;
2423
2424         /* Free the large buffer queue. */
2425         if (rx_ring->lbq_base) {
2426                 pci_free_consistent(qdev->pdev,
2427                                     rx_ring->lbq_size,
2428                                     rx_ring->lbq_base, rx_ring->lbq_base_dma);
2429                 rx_ring->lbq_base = NULL;
2430         }
2431
2432         /* Free the large buffer queue control blocks. */
2433         kfree(rx_ring->lbq);
2434         rx_ring->lbq = NULL;
2435
2436         /* Free the rx queue. */
2437         if (rx_ring->cq_base) {
2438                 pci_free_consistent(qdev->pdev,
2439                                     rx_ring->cq_size,
2440                                     rx_ring->cq_base, rx_ring->cq_base_dma);
2441                 rx_ring->cq_base = NULL;
2442         }
2443 }
2444
2445 /* Allocate queues and buffers for this completions queue based
2446  * on the values in the parameter structure. */
2447 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2448                                  struct rx_ring *rx_ring)
2449 {
2450
2451         /*
2452          * Allocate the completion queue for this rx_ring.
2453          */
2454         rx_ring->cq_base =
2455             pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2456                                  &rx_ring->cq_base_dma);
2457
2458         if (rx_ring->cq_base == NULL) {
2459                 QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
2460                 return -ENOMEM;
2461         }
2462
2463         if (rx_ring->sbq_len) {
2464                 /*
2465                  * Allocate small buffer queue.
2466                  */
2467                 rx_ring->sbq_base =
2468                     pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2469                                          &rx_ring->sbq_base_dma);
2470
2471                 if (rx_ring->sbq_base == NULL) {
2472                         QPRINTK(qdev, IFUP, ERR,
2473                                 "Small buffer queue allocation failed.\n");
2474                         goto err_mem;
2475                 }
2476
2477                 /*
2478                  * Allocate small buffer queue control blocks.
2479                  */
2480                 rx_ring->sbq =
2481                     kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2482                             GFP_KERNEL);
2483                 if (rx_ring->sbq == NULL) {
2484                         QPRINTK(qdev, IFUP, ERR,
2485                                 "Small buffer queue control block allocation failed.\n");
2486                         goto err_mem;
2487                 }
2488
2489                 ql_init_sbq_ring(qdev, rx_ring);
2490         }
2491
2492         if (rx_ring->lbq_len) {
2493                 /*
2494                  * Allocate large buffer queue.
2495                  */
2496                 rx_ring->lbq_base =
2497                     pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2498                                          &rx_ring->lbq_base_dma);
2499
2500                 if (rx_ring->lbq_base == NULL) {
2501                         QPRINTK(qdev, IFUP, ERR,
2502                                 "Large buffer queue allocation failed.\n");
2503                         goto err_mem;
2504                 }
2505                 /*
2506                  * Allocate large buffer queue control blocks.
2507                  */
2508                 rx_ring->lbq =
2509                     kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2510                             GFP_KERNEL);
2511                 if (rx_ring->lbq == NULL) {
2512                         QPRINTK(qdev, IFUP, ERR,
2513                                 "Large buffer queue control block allocation failed.\n");
2514                         goto err_mem;
2515                 }
2516
2517                 ql_init_lbq_ring(qdev, rx_ring);
2518         }
2519
2520         return 0;
2521
2522 err_mem:
2523         ql_free_rx_resources(qdev, rx_ring);
2524         return -ENOMEM;
2525 }
2526
2527 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2528 {
2529         struct tx_ring *tx_ring;
2530         struct tx_ring_desc *tx_ring_desc;
2531         int i, j;
2532
2533         /*
2534          * Loop through all queues and free
2535          * any resources.
2536          */
2537         for (j = 0; j < qdev->tx_ring_count; j++) {
2538                 tx_ring = &qdev->tx_ring[j];
2539                 for (i = 0; i < tx_ring->wq_len; i++) {
2540                         tx_ring_desc = &tx_ring->q[i];
2541                         if (tx_ring_desc && tx_ring_desc->skb) {
2542                                 QPRINTK(qdev, IFDOWN, ERR,
2543                                 "Freeing lost SKB %p, from queue %d, index %d.\n",
2544                                         tx_ring_desc->skb, j,
2545                                         tx_ring_desc->index);
2546                                 ql_unmap_send(qdev, tx_ring_desc,
2547                                               tx_ring_desc->map_cnt);
2548                                 dev_kfree_skb(tx_ring_desc->skb);
2549                                 tx_ring_desc->skb = NULL;
2550                         }
2551                 }
2552         }
2553 }
2554
2555 static void ql_free_mem_resources(struct ql_adapter *qdev)
2556 {
2557         int i;
2558
2559         for (i = 0; i < qdev->tx_ring_count; i++)
2560                 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2561         for (i = 0; i < qdev->rx_ring_count; i++)
2562                 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2563         ql_free_shadow_space(qdev);
2564 }
2565
2566 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2567 {
2568         int i;
2569
2570         /* Allocate space for our shadow registers and such. */
2571         if (ql_alloc_shadow_space(qdev))
2572                 return -ENOMEM;
2573
2574         for (i = 0; i < qdev->rx_ring_count; i++) {
2575                 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2576                         QPRINTK(qdev, IFUP, ERR,
2577                                 "RX resource allocation failed.\n");
2578                         goto err_mem;
2579                 }
2580         }
2581         /* Allocate tx queue resources */
2582         for (i = 0; i < qdev->tx_ring_count; i++) {
2583                 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2584                         QPRINTK(qdev, IFUP, ERR,
2585                                 "TX resource allocation failed.\n");
2586                         goto err_mem;
2587                 }
2588         }
2589         return 0;
2590
2591 err_mem:
2592         ql_free_mem_resources(qdev);
2593         return -ENOMEM;
2594 }
2595
2596 /* Set up the rx ring control block and pass it to the chip.
2597  * The control block is defined as
2598  * "Completion Queue Initialization Control Block", or cqicb.
2599  */
2600 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2601 {
2602         struct cqicb *cqicb = &rx_ring->cqicb;
2603         void *shadow_reg = qdev->rx_ring_shadow_reg_area +
2604                 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
2605         u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
2606                 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
2607         void __iomem *doorbell_area =
2608             qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
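        /* Completion queue doorbells appear to start at doorbell page 128
         * of BAR3, one DB_PAGE_SIZE page per cq_id; the lower pages belong
         * to the TX work queues, indexed by wq_id (see ql_start_tx_ring()).
         */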
2609         int err = 0;
2610         u16 bq_len;
2611         u64 tmp;
2612         __le64 *base_indirect_ptr;
2613         int page_entries;
2614
2615         /* Set up the shadow registers for this ring. */
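        /* Layout of this ring's slice of the shadow area, as laid out by
         * the pointer arithmetic below: 8 bytes for the completion queue
         * producer index, then the lbq indirect page list, then the sbq
         * indirect page list.
         */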
2616         rx_ring->prod_idx_sh_reg = shadow_reg;
2617         rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
2618         shadow_reg += sizeof(u64);
2619         shadow_reg_dma += sizeof(u64);
2620         rx_ring->lbq_base_indirect = shadow_reg;
2621         rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
2622         shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
2623         shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
2624         rx_ring->sbq_base_indirect = shadow_reg;
2625         rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
2626
2627         /* PCI doorbell mem area + 0x00 for consumer index register */
2628         rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
2629         rx_ring->cnsmr_idx = 0;
2630         rx_ring->curr_entry = rx_ring->cq_base;
2631
2632         /* PCI doorbell mem area + 0x04 for valid register */
2633         rx_ring->valid_db_reg = doorbell_area + 0x04;
2634
2635         /* PCI doorbell mem area + 0x18 for large buffer consumer */
2636         rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
2637
2638         /* PCI doorbell mem area + 0x1c */
2639         rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
2640
2641         memset((void *)cqicb, 0, sizeof(struct cqicb));
2642         cqicb->msix_vect = rx_ring->irq;
2643
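        /* A note on the length encodings below: the length fields are
         * 16 bits wide, so a ring length of 65536 is assumed to be
         * encoded as 0 and read back by the hardware as 64K.
         */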
2644         bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
2645         cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
2646
2647         cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
2648
2649         cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
2650
2651         /*
2652          * Set up the control block load flags.
2653          */
2654         cqicb->flags = FLAGS_LC |       /* Load queue base address */
2655             FLAGS_LV |          /* Load MSI-X vector */
2656             FLAGS_LI;           /* Load irq delay values */
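        /* The lbq/sbq bases are passed indirectly: each buffer queue is
         * split into DB_PAGE_SIZE chunks, the DMA address of every chunk
         * is written into the page list built in the shadow area above,
         * and the CQICB then points at that list.
         */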
2657         if (rx_ring->lbq_len) {
2658                 cqicb->flags |= FLAGS_LL;       /* Load lbq values */
2659                 tmp = (u64)rx_ring->lbq_base_dma;
2660                 base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
2661                 page_entries = 0;
2662                 do {
2663                         *base_indirect_ptr = cpu_to_le64(tmp);
2664                         tmp += DB_PAGE_SIZE;
2665                         base_indirect_ptr++;
2666                         page_entries++;
2667                 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
2668                 cqicb->lbq_addr =
2669                     cpu_to_le64(rx_ring->lbq_base_indirect_dma);
2670                 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
2671                         (u16) rx_ring->lbq_buf_size;
2672                 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
2673                 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
2674                         (u16) rx_ring->lbq_len;
2675                 cqicb->lbq_len = cpu_to_le16(bq_len);
2676                 rx_ring->lbq_prod_idx = 0;
2677                 rx_ring->lbq_curr_idx = 0;
2678                 rx_ring->lbq_clean_idx = 0;
2679                 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
2680         }
2681         if (rx_ring->sbq_len) {
2682                 cqicb->flags |= FLAGS_LS;       /* Load sbq values */
2683                 tmp = (u64)rx_ring->sbq_base_dma;
2684                 base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
2685                 page_entries = 0;
2686                 do {
2687                         *base_indirect_ptr = cpu_to_le64(tmp);
2688                         tmp += DB_PAGE_SIZE;
2689                         base_indirect_ptr++;
2690                         page_entries++;
2691                 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
2692                 cqicb->sbq_addr =
2693                     cpu_to_le64(rx_ring->sbq_base_indirect_dma);
2694                 cqicb->sbq_buf_size =
2695                     cpu_to_le16((u16)(rx_ring->sbq_buf_size/2));
2696                 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
2697                         (u16) rx_ring->sbq_len;
2698                 cqicb->sbq_len = cpu_to_le16(bq_len);
2699                 rx_ring->sbq_prod_idx = 0;
2700                 rx_ring->sbq_curr_idx = 0;
2701                 rx_ring->sbq_clean_idx = 0;
2702                 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
2703         }
2704         switch (rx_ring->type) {
2705         case TX_Q:
2706                 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
2707                 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
2708                 break;
2709         case RX_Q:
2710                 /* Inbound completion handling rx_rings run in
2711                  * separate NAPI contexts.
2712                  */
2713                 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
2714                                64);
2715                 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
2716                 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
2717                 break;
2718         default:
2719                 QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
2720                         rx_ring->type);
2721         }
2722         QPRINTK(qdev, IFUP, DEBUG, "Initializing rx completion queue.\n");
2723         err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
2724                            CFG_LCQ, rx_ring->cq_id);
2725         if (err) {
2726                 QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
2727                 return err;
2728         }
2729         return err;
2730 }
2731
2732 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2733 {
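        /* The wqicb is assumed to be the first member of struct tx_ring
         * (see qlge.h), so the ring pointer itself can double as the
         * hardware control block.
         */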
2734         struct wqicb *wqicb = (struct wqicb *)tx_ring;
2735         void __iomem *doorbell_area =
2736             qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
2737         void *shadow_reg = qdev->tx_ring_shadow_reg_area +
2738             (tx_ring->wq_id * sizeof(u64));
2739         u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
2740             (tx_ring->wq_id * sizeof(u64));
2741         int err = 0;
2742
2743         /*
2744          * Assign doorbell registers for this tx_ring.
2745          */
2746         /* TX PCI doorbell mem area for tx producer index */
2747         tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
2748         tx_ring->prod_idx = 0;
2749         /* TX PCI doorbell mem area + 0x04 */
2750         tx_ring->valid_db_reg = doorbell_area + 0x04;
2751
2752         /*
2753          * Assign shadow registers for this tx_ring.
2754          */
2755         tx_ring->cnsmr_idx_sh_reg = shadow_reg;
2756         tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
2757
2758         wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
2759         wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
2760                                    Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
2761         wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
2762         wqicb->rid = 0;
2763         wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
2764
2765         wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
2766
2767         ql_init_tx_ring(qdev, tx_ring);
2768
2769         err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
2770                            (u16) tx_ring->wq_id);
2771         if (err) {
2772                 QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
2773                 return err;
2774         }
2775         QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded WQICB.\n");
2776         return err;
2777 }
2778
2779 static void ql_disable_msix(struct ql_adapter *qdev)
2780 {
2781         if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2782                 pci_disable_msix(qdev->pdev);
2783                 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
2784                 kfree(qdev->msi_x_entry);
2785                 qdev->msi_x_entry = NULL;
2786         } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
2787                 pci_disable_msi(qdev->pdev);
2788                 clear_bit(QL_MSI_ENABLED, &qdev->flags);
2789         }
2790 }
2791
2792 /* We start by trying to get the number of vectors
2793  * stored in qdev->intr_count. If we don't get that
2794  * many then we reduce the count and try again.
2795  */
2796 static void ql_enable_msix(struct ql_adapter *qdev)
2797 {
2798         int i, err;
2799
2800         /* Get the MSIX vectors. */
2801         if (irq_type == MSIX_IRQ) {
2802                 /* Try to alloc space for the msix struct,
2803                  * if it fails then go to MSI/legacy.
2804                  */
2805                 qdev->msi_x_entry = kcalloc(qdev->intr_count,
2806                                             sizeof(struct msix_entry),
2807                                             GFP_KERNEL);
2808                 if (!qdev->msi_x_entry) {
2809                         irq_type = MSI_IRQ;
2810                         goto msi;
2811                 }
2812
2813                 for (i = 0; i < qdev->intr_count; i++)
2814                         qdev->msi_x_entry[i].entry = i;
2815
2816                 /* Loop to get our vectors.  We start with
2817                  * what we want and settle for what we get.
2818                  */
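                /* With this kernel's API, pci_enable_msix() returns 0 on
                 * success, a negative errno on failure, and a positive
                 * count when fewer vectors are available; in that last
                 * case we retry with the count it reported.
                 */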
2819                 do {
2820                         err = pci_enable_msix(qdev->pdev,
2821                                 qdev->msi_x_entry, qdev->intr_count);
2822                         if (err > 0)
2823                                 qdev->intr_count = err;
2824                 } while (err > 0);
2825
2826                 if (err < 0) {
2827                         kfree(qdev->msi_x_entry);
2828                         qdev->msi_x_entry = NULL;
2829                         QPRINTK(qdev, IFUP, WARNING,
2830                                 "MSI-X Enable failed, trying MSI.\n");
2831                         qdev->intr_count = 1;
2832                         irq_type = MSI_IRQ;
2833                 } else if (err == 0) {
2834                         set_bit(QL_MSIX_ENABLED, &qdev->flags);
2835                         QPRINTK(qdev, IFUP, INFO,
2836                                 "MSI-X Enabled, got %d vectors.\n",
2837                                 qdev->intr_count);
2838                         return;
2839                 }
2840         }
2841 msi:
2842         qdev->intr_count = 1;
2843         if (irq_type == MSI_IRQ) {
2844                 if (!pci_enable_msi(qdev->pdev)) {
2845                         set_bit(QL_MSI_ENABLED, &qdev->flags);
2846                         QPRINTK(qdev, IFUP, INFO,
2847                                 "Running with MSI interrupts.\n");
2848                         return;
2849                 }
2850         }
2851         irq_type = LEG_IRQ;
2852         QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
2853 }
2854
2855 /* Each vector services 1 RSS ring and 1 or more
2856  * TX completion rings.  This function loops through
2857  * the TX completion rings and assigns the vector that
2858  * will service it.  An example would be if there are
2859  * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
2860  * This would mean that vector 0 would service RSS ring 0
2861  * and TX completion rings 0,1,2 and 3.  Vector 1 would
2862  * service RSS ring 1 and TX completion rings 4,5,6 and 7.
2863  */
2864 static void ql_set_tx_vect(struct ql_adapter *qdev)
2865 {
2866         int i, j, vect;
2867         u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
2868
2869         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
2870                 /* Assign irq vectors to TX rx_rings.*/
2871                 for (vect = 0, j = 0, i = qdev->rss_ring_count;
2872                                          i < qdev->rx_ring_count; i++) {
2873                         if (j == tx_rings_per_vector) {
2874                                 vect++;
2875                                 j = 0;
2876                         }
2877                         qdev->rx_ring[i].irq = vect;
2878                         j++;
2879                 }
2880         } else {
2881                 /* For single vector all rings have an irq
2882                  * of zero.
2883                  */
2884                 for (i = 0; i < qdev->rx_ring_count; i++)
2885                         qdev->rx_ring[i].irq = 0;
2886         }
2887 }
2888
2889 /* Set the interrupt mask for this vector.  Each vector
2890  * will service 1 RSS ring and 1 or more TX completion
2891  * rings.  This function sets up a bit mask per vector
2892  * that indicates which rings it services.
2893  */
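/* Worked example, following the description above: with 2 vectors and
 * 8 TX completion rings, tx_rings_per_vector is 4, so vector 1 sets
 * irq_mask bits for cq_id 1 (its RSS ring) and for cq_ids 6, 7, 8 and
 * 9 (its four TX completion rings).
 */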
2894 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
2895 {
2896         int j, vect = ctx->intr;
2897         u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
2898
2899         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
2900                 /* Add the RSS ring serviced by this vector
2901                  * to the mask.
2902                  */
2903                 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
2904                 /* Add the TX ring(s) serviced by this vector
2905                  * to the mask. */
2906                 for (j = 0; j < tx_rings_per_vector; j++) {
2907                         ctx->irq_mask |=
2908                         (1 << qdev->rx_ring[qdev->rss_ring_count +
2909                         (vect * tx_rings_per_vector) + j].cq_id);
2910                 }
2911         } else {
2912                 /* For single vector we just shift each queue's
2913                  * ID into the mask.
2914                  */
2915                 for (j = 0; j < qdev->rx_ring_count; j++)
2916                         ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
2917         }
2918 }
2919
2920 /*
2921  * Here we build the intr_context structures based on
2922  * our rx_ring count and intr vector count.
2923  * The intr_context structure is used to hook each vector
2924  * to possibly different handlers.
2925  */
2926 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
2927 {
2928         int i = 0;
2929         struct intr_context *intr_context = &qdev->intr_context[0];
2930
2931         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
2932                 /* Each rx_ring has its
2933                  * own intr_context since we have separate
2934                  * vectors for each queue.
2935                  */
2936                 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2937                         qdev->rx_ring[i].irq = i;
2938                         intr_context->intr = i;
2939                         intr_context->qdev = qdev;
2940                         /* Set up this vector's bit-mask that indicates
2941                          * which queues it services.
2942                          */
2943                         ql_set_irq_mask(qdev, intr_context);
2944                         /*
2945                          * We set up each vector's enable/disable/read bits so
2946                          * there's no bit/mask calculations in the critical path.
2947                          */
2948                         intr_context->intr_en_mask =
2949                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2950                             INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
2951                             | i;
2952                         intr_context->intr_dis_mask =
2953                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2954                             INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
2955                             INTR_EN_IHD | i;
2956                         intr_context->intr_read_mask =
2957                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2958                             INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
2959                             i;
2960                         if (i == 0) {
2961                                 /* The first vector/queue handles
2962                                  * broadcast/multicast, fatal errors,
2963                                  * and firmware events.  This in addition
2964                                  * to normal inbound NAPI processing.
2965                                  */
2966                                 intr_context->handler = qlge_isr;
2967                                 sprintf(intr_context->name, "%s-rx-%d",
2968                                         qdev->ndev->name, i);
2969                         } else {
2970                                 /*
2971                                  * Inbound queues handle unicast frames only.
2972                                  */
2973                                 intr_context->handler = qlge_msix_rx_isr;
2974                                 sprintf(intr_context->name, "%s-rx-%d",
2975                                         qdev->ndev->name, i);
2976                         }
2977                 }
2978         } else {
2979                 /*
2980                  * All rx_rings use the same intr_context since
2981                  * there is only one vector.
2982                  */
2983                 intr_context->intr = 0;
2984                 intr_context->qdev = qdev;
2985                 /*
2986                  * We set up each vector's enable/disable/read bits so
2987                  * there's no bit/mask calculations in the critical path.
2988                  */
2989                 intr_context->intr_en_mask =
2990                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
2991                 intr_context->intr_dis_mask =
2992                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
2993                     INTR_EN_TYPE_DISABLE;
2994                 intr_context->intr_read_mask =
2995                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
2996                 /*
2997                  * Single interrupt means one handler for all rings.
2998                  */
2999                 intr_context->handler = qlge_isr;
3000                 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3001                 /* Set up this vector's bit-mask that indicates
3002                  * which queues it services. In this case there is
3003                  * a single vector so it will service all RSS and
3004                  * TX completion rings.
3005                  */
3006                 ql_set_irq_mask(qdev, intr_context);
3007         }
3008         /* Tell the TX completion rings which MSIx vector
3009          * they will be using.
3010          */
3011         ql_set_tx_vect(qdev);
3012 }
3013
3014 static void ql_free_irq(struct ql_adapter *qdev)
3015 {
3016         int i;
3017         struct intr_context *intr_context = &qdev->intr_context[0];
3018
3019         for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3020                 if (intr_context->hooked) {
3021                         if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3022                                 free_irq(qdev->msi_x_entry[i].vector,
3023                                          &qdev->rx_ring[i]);
3024                                 QPRINTK(qdev, IFDOWN, DEBUG,
3025                                         "freeing msix interrupt %d.\n", i);
3026                         } else {
3027                                 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3028                                 QPRINTK(qdev, IFDOWN, DEBUG,
3029                                         "freeing msi interrupt %d.\n", i);
3030                         }
3031                 }
3032         }
3033         ql_disable_msix(qdev);
3034 }
3035
3036 static int ql_request_irq(struct ql_adapter *qdev)
3037 {
3038         int i;
3039         int status = 0;
3040         struct pci_dev *pdev = qdev->pdev;
3041         struct intr_context *intr_context = &qdev->intr_context[0];
3042
3043         ql_resolve_queues_to_irqs(qdev);
3044
3045         for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3046                 atomic_set(&intr_context->irq_cnt, 0);
3047                 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3048                         status = request_irq(qdev->msi_x_entry[i].vector,
3049                                              intr_context->handler,
3050                                              0,
3051                                              intr_context->name,
3052                                              &qdev->rx_ring[i]);
3053                         if (status) {
3054                                 QPRINTK(qdev, IFUP, ERR,
3055                                         "Failed request for MSIX interrupt %d.\n",
3056                                         i);
3057                                 goto err_irq;
3058                         } else {
3059                                 QPRINTK(qdev, IFUP, DEBUG,
3060                                         "Hooked intr %d, queue type %s%s%s, with name %s.\n",
3061                                         i,
3062                                         qdev->rx_ring[i].type ==
3063                                         DEFAULT_Q ? "DEFAULT_Q" : "",
3064                                         qdev->rx_ring[i].type ==
3065                                         TX_Q ? "TX_Q" : "",
3066                                         qdev->rx_ring[i].type ==
3067                                         RX_Q ? "RX_Q" : "", intr_context->name);
3068                         }
3069                 } else {
3070                         QPRINTK(qdev, IFUP, DEBUG,
3071                                 "trying msi or legacy interrupts.\n");
3072                         QPRINTK(qdev, IFUP, DEBUG,
3073                                 "%s: irq = %d.\n", __func__, pdev->irq);
3074                         QPRINTK(qdev, IFUP, DEBUG,
3075                                 "%s: context->name = %s.\n", __func__,
3076                                intr_context->name);
3077                         QPRINTK(qdev, IFUP, DEBUG,
3078                                 "%s: dev_id = 0x%p.\n", __func__,
3079                                &qdev->rx_ring[0]);
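                        /* MSI is exclusive to this device, so IRQF_SHARED
                         * is only needed when we fall back to legacy INTx
                         * interrupts.
                         */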
3080                         status =
3081                             request_irq(pdev->irq, qlge_isr,
3082                                         test_bit(QL_MSI_ENABLED,
3083                                                  &qdev->
3084                                                  flags) ? 0 : IRQF_SHARED,
3085                                         intr_context->name, &qdev->rx_ring[0]);
3086                         if (status)
3087                                 goto err_irq;
3088
3089                         QPRINTK(qdev, IFUP, DEBUG,
3090                                 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
3091                                 i,
3092                                 qdev->rx_ring[0].type ==
3093                                 DEFAULT_Q ? "DEFAULT_Q" : "",
3094                                 qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
3095                                 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3096                                 intr_context->name);
3097                 }
3098                 intr_context->hooked = 1;
3099         }
3100         return status;
3101 err_irq:
3102         QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!\n");
3103         ql_free_irq(qdev);
3104         return status;
3105 }
3106
3107 static int ql_start_rss(struct ql_adapter *qdev)
3108 {
3109         u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3110                                 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
3111                                 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
3112                                 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
3113                                 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
3114                                 0xbe, 0xac, 0x01, 0xfa};
3115         struct ricb *ricb = &qdev->ricb;
3116         int status = 0;
3117         int i;
3118         u8 *hash_id = (u8 *) ricb->hash_cq_id;
3119
3120         memset((void *)ricb, 0, sizeof(*ricb));
3121
3122         ricb->base_cq = RSS_L4K;
3123         ricb->flags =
3124                 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3125         ricb->mask = cpu_to_le16((u16)(0x3ff));
3126
3127         /*
3128          * Fill out the Indirection Table.
3129          */
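        /* The masking below assumes rss_ring_count is a power of two,
         * so that (i & (count - 1)) is equivalent to (i % count).
         */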
3130         for (i = 0; i < 1024; i++)
3131                 hash_id[i] = (i & (qdev->rss_ring_count - 1));
3132
3133         memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3134         memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3135
3136         QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");
3137
3138         status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3139         if (status) {
3140                 QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
3141                 return status;
3142         }
3143         QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded RICB.\n");
3144         return status;
3145 }
3146
3147 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3148 {
3149         int i, status = 0;
3150
3151         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3152         if (status)
3153                 return status;
3154         /* Clear all the entries in the routing table. */
3155         for (i = 0; i < 16; i++) {
3156                 status = ql_set_routing_reg(qdev, i, 0, 0);
3157                 if (status) {
3158                         QPRINTK(qdev, IFUP, ERR,
3159                                 "Failed to init routing register for CAM "
3160                                 "packets.\n");
3161                         break;
3162                 }
3163         }
3164         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3165         return status;
3166 }
3167
3168 /* Initialize the frame-to-queue routing. */
3169 static int ql_route_initialize(struct ql_adapter *qdev)
3170 {
3171         int status = 0;
3172
3173         /* Clear all the entries in the routing table. */
3174         status = ql_clear_routing_entries(qdev);
3175         if (status)
3176                 return status;
3177
3178         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3179         if (status)
3180                 return status;
3181
3182         status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
3183         if (status) {
3184                 QPRINTK(qdev, IFUP, ERR,
3185                         "Failed to init routing register for error packets.\n");
3186                 goto exit;
3187         }
3188         status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3189         if (status) {
3190                 QPRINTK(qdev, IFUP, ERR,
3191                         "Failed to init routing register for broadcast packets.\n");
3192                 goto exit;
3193         }
3194         /* If we have more than one inbound queue, then turn on RSS in the
3195          * routing block.
3196          */
3197         if (qdev->rss_ring_count > 1) {
3198                 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3199                                         RT_IDX_RSS_MATCH, 1);
3200                 if (status) {
3201                         QPRINTK(qdev, IFUP, ERR,
3202                                 "Failed to init routing register for MATCH RSS packets.\n");
3203                         goto exit;
3204                 }
3205         }
3206
3207         status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3208                                     RT_IDX_CAM_HIT, 1);
3209         if (status)
3210                 QPRINTK(qdev, IFUP, ERR,
3211                         "Failed to init routing register for CAM packets.\n");
3212 exit:
3213         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3214         return status;
3215 }
3216
3217 int ql_cam_route_initialize(struct ql_adapter *qdev)
3218 {
3219         int status, set;
3220
3221         /* Check if the link is up and use that to
3222          * determine whether we are setting or clearing
3223          * the MAC address in the CAM.
3224          */
3225         set = ql_read32(qdev, STS);
3226         set &= qdev->port_link_up;
3227         status = ql_set_mac_addr(qdev, set);
3228         if (status) {
3229                 QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
3230                 return status;
3231         }
3232
3233         status = ql_route_initialize(qdev);
3234         if (status)
3235                 QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
3236
3237         return status;
3238 }
3239
3240 static int ql_adapter_initialize(struct ql_adapter *qdev)
3241 {
3242         u32 value, mask;
3243         int i;
3244         int status = 0;
3245
3246         /*
3247          * Set up the System register to halt on errors.
3248          */
3249         value = SYS_EFE | SYS_FAE;
3250         mask = value << 16;
3251         ql_write32(qdev, SYS, mask | value);
3252
3253         /* Set the default queue, and VLAN behavior. */
3254         value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3255         mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
3256         ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3257
3258         /* Set the MPI interrupt to enabled. */
3259         ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3260
3261         /* Enable the function, set pagesize, enable error checking. */
3262         value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3263             FSC_EC | FSC_VM_PAGE_4K | FSC_SH;
3264
3265         /* Set/clear header splitting. */
3266         mask = FSC_VM_PAGESIZE_MASK |
3267             FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3268         ql_write32(qdev, FSC, mask | value);
3269
3270         ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
3271                 min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE));
3272
3273         /* Set RX packet routing to use the port/pci function on which
3274          * the packet arrived, in addition to the usual frame routing.
3275          * This is helpful on bonding where both interfaces can have
3276          * the same MAC address.
3277          */
3278         ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3279
3280         /* Start up the rx queues. */
3281         for (i = 0; i < qdev->rx_ring_count; i++) {
3282                 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3283                 if (status) {
3284                         QPRINTK(qdev, IFUP, ERR,
3285                                 "Failed to start rx ring[%d].\n", i);
3286                         return status;
3287                 }
3288         }
3289
3290         /* If there is more than one inbound completion queue
3291          * then download a RICB to configure RSS.
3292          */
3293         if (qdev->rss_ring_count > 1) {
3294                 status = ql_start_rss(qdev);
3295                 if (status) {
3296                         QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
3297                         return status;
3298                 }
3299         }
3300
3301         /* Start up the tx queues. */
3302         for (i = 0; i < qdev->tx_ring_count; i++) {
3303                 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3304                 if (status) {
3305                         QPRINTK(qdev, IFUP, ERR,
3306                                 "Failed to start tx ring[%d].\n", i);
3307                         return status;
3308                 }
3309         }
3310
3311         /* Initialize the port and set the max framesize. */
3312         status = qdev->nic_ops->port_initialize(qdev);
3313         if (status) {
3314                 QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
3315                 return status;
3316         }
3317
3318         /* Set up the MAC address and frame routing filter. */
3319         status = ql_cam_route_initialize(qdev);
3320         if (status) {
3321                 QPRINTK(qdev, IFUP, ERR,
3322                                 "Failed to init CAM/Routing tables.\n");
3323                 return status;
3324         }
3325
3326         /* Start NAPI for the RSS queues. */
3327         for (i = 0; i < qdev->rss_ring_count; i++) {
3328                 QPRINTK(qdev, IFUP, DEBUG, "Enabling NAPI for rx_ring[%d].\n",
3329                         i);
3330                 napi_enable(&qdev->rx_ring[i].napi);
3331         }
3332
3333         return status;
3334 }
3335
3336 /* Issue soft reset to chip. */
3337 static int ql_adapter_reset(struct ql_adapter *qdev)
3338 {
3339         u32 value;
3340         int status = 0;
3341         unsigned long end_jiffies;
3342
3343         /* Clear all the entries in the routing table. */
3344         status = ql_clear_routing_entries(qdev);
3345         if (status) {
3346                 QPRINTK(qdev, IFUP, ERR, "Failed to clear routing bits.\n");
3347                 return status;
3348         }
3349
3350         end_jiffies = jiffies +
3351                 max((unsigned long)1, usecs_to_jiffies(30));
3352
3353         /* Stop management traffic. */
3354         ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3355
3356         /* Wait for the NIC and MGMNT FIFOs to empty. */
3357         ql_wait_fifo_empty(qdev);
3358
3359         ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3360
3361         do {
3362                 value = ql_read32(qdev, RST_FO);
3363                 if ((value & RST_FO_FR) == 0)
3364                         break;
3365                 cpu_relax();
3366         } while (time_before(jiffies, end_jiffies));
3367
3368         if (value & RST_FO_FR) {
3369                 QPRINTK(qdev, IFDOWN, ERR,
3370                         "ETIMEDOUT!!! errored out of resetting the chip!\n");
3371                 status = -ETIMEDOUT;
3372         }
3373
3374         /* Resume management traffic. */
3375         ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3376         return status;
3377 }
3378
3379 static void ql_display_dev_info(struct net_device *ndev)
3380 {
3381         struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3382
3383         QPRINTK(qdev, PROBE, INFO,
3384                 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3385                 "XG Roll = %d, XG Rev = %d.\n",
3386                 qdev->func,
3387                 qdev->port,
3388                 qdev->chip_rev_id & 0x0000000f,
3389                 qdev->chip_rev_id >> 4 & 0x0000000f,
3390                 qdev->chip_rev_id >> 8 & 0x0000000f,
3391                 qdev->chip_rev_id >> 12 & 0x0000000f);
3392         QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
3393 }
3394
3395 static int ql_adapter_down(struct ql_adapter *qdev)
3396 {
3397         int i, status = 0;
3398
3399         ql_link_off(qdev);
3400
3401         /* Don't kill the reset worker thread if we
3402          * are in the process of recovery.
3403          */
3404         if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3405                 cancel_delayed_work_sync(&qdev->asic_reset_work);
3406         cancel_delayed_work_sync(&qdev->mpi_reset_work);
3407         cancel_delayed_work_sync(&qdev->mpi_work);
3408         cancel_delayed_work_sync(&qdev->mpi_idc_work);
3409         cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3410
3411         for (i = 0; i < qdev->rss_ring_count; i++)
3412                 napi_disable(&qdev->rx_ring[i].napi);
3413
3414         clear_bit(QL_ADAPTER_UP, &qdev->flags);
3415
3416         ql_disable_interrupts(qdev);
3417
3418         ql_tx_ring_clean(qdev);
3419
3420         /* Call netif_napi_del() from a common point.
3421          */
3422         for (i = 0; i < qdev->rss_ring_count; i++)
3423                 netif_napi_del(&qdev->rx_ring[i].napi);
3424
3425         ql_free_rx_buffers(qdev);
3426
3427         status = ql_adapter_reset(qdev);
3428         if (status)
3429                 QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
3430                         qdev->func);
3431         return status;
3432 }
3433
3434 static int ql_adapter_up(struct ql_adapter *qdev)
3435 {
3436         int err = 0;
3437
3438         err = ql_adapter_initialize(qdev);
3439         if (err) {
3440                 QPRINTK(qdev, IFUP, ERR, "Unable to initialize adapter.\n");
3441                 goto err_init;
3442         }
3443         set_bit(QL_ADAPTER_UP, &qdev->flags);
3444         ql_alloc_rx_buffers(qdev);
3445         /* If the port is initialized and the
3446          * link is up then turn on the carrier.
3447          */
3448         if ((ql_read32(qdev, STS) & qdev->port_init) &&
3449                         (ql_read32(qdev, STS) & qdev->port_link_up))
3450                 ql_link_on(qdev);
3451         ql_enable_interrupts(qdev);
3452         ql_enable_all_completion_interrupts(qdev);
3453         netif_tx_start_all_queues(qdev->ndev);
3454
3455         return 0;
3456 err_init:
3457         ql_adapter_reset(qdev);
3458         return err;
3459 }
3460
3461 static void ql_release_adapter_resources(struct ql_adapter *qdev)
3462 {
3463         ql_free_mem_resources(qdev);
3464         ql_free_irq(qdev);
3465 }
3466
3467 static int ql_get_adapter_resources(struct ql_adapter *qdev)
3468 {
3469         int status = 0;
3470
3471         if (ql_alloc_mem_resources(qdev)) {
3472                 QPRINTK(qdev, IFUP, ERR, "Unable to  allocate memory.\n");
3473                 return -ENOMEM;
3474         }
3475         status = ql_request_irq(qdev);
3476         return status;
3477 }
3478
3479 static int qlge_close(struct net_device *ndev)
3480 {
3481         struct ql_adapter *qdev = netdev_priv(ndev);
3482
3483         /*
3484          * Wait for device to recover from a reset.
3485          * (Rarely happens, but possible.)
3486          */
3487         while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3488                 msleep(1);
3489         ql_adapter_down(qdev);
3490         ql_release_adapter_resources(qdev);
3491         return 0;
3492 }
3493
3494 static int ql_configure_rings(struct ql_adapter *qdev)
3495 {
3496         int i;
3497         struct rx_ring *rx_ring;
3498         struct tx_ring *tx_ring;
3499         int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
3500
3501         /* In a perfect world we have one RSS ring for each CPU
3502          * and each has its own vector.  To do that we ask for
3503          * cpu_cnt vectors.  ql_enable_msix() will adjust the
3504          * vector count to what we actually get.  We then
3505          * allocate an RSS ring for each.
3506          * Essentially, we are doing min(cpu_count, msix_vector_count).
3507          */
3508         qdev->intr_count = cpu_cnt;
3509         ql_enable_msix(qdev);
3510         /* Adjust the RSS ring count to the actual vector count. */
3511         qdev->rss_ring_count = qdev->intr_count;
3512         qdev->tx_ring_count = cpu_cnt;
3513         qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
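        /* Example: a 4-CPU system granted all 4 requested vectors ends
         * up with rss_ring_count = 4, tx_ring_count = 4 and
         * rx_ring_count = 8; cq_ids 0-3 are the inbound RSS rings and
         * cq_ids 4-7 the outbound (TX completion) rings set up below.
         */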
3514
3515         for (i = 0; i < qdev->tx_ring_count; i++) {
3516                 tx_ring = &qdev->tx_ring[i];
3517                 memset((void *)tx_ring, 0, sizeof(*tx_ring));
3518                 tx_ring->qdev = qdev;
3519                 tx_ring->wq_id = i;
3520                 tx_ring->wq_len = qdev->tx_ring_size;
3521                 tx_ring->wq_size =
3522                     tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
3523
3524                 /*
3525                  * The completion queue IDs for the tx rings start
3526                  * immediately after those of the rss rings.
3527                  */
3528                 tx_ring->cq_id = qdev->rss_ring_count + i;
3529         }
3530
3531         for (i = 0; i < qdev->rx_ring_count; i++) {
3532                 rx_ring = &qdev->rx_ring[i];
3533                 memset((void *)rx_ring, 0, sizeof(*rx_ring));
3534                 rx_ring->qdev = qdev;
3535                 rx_ring->cq_id = i;
3536                 rx_ring->cpu = i % cpu_cnt;     /* CPU to run handler on. */
3537                 if (i < qdev->rss_ring_count) {
3538                         /*
3539                          * Inbound (RSS) queues.
3540                          */
3541                         rx_ring->cq_len = qdev->rx_ring_size;
3542                         rx_ring->cq_size =
3543                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3544                         rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3545                         rx_ring->lbq_size =
3546                             rx_ring->lbq_len * sizeof(__le64);
3547                         rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
3548                         rx_ring->sbq_len = NUM_SMALL_BUFFERS;
3549                         rx_ring->sbq_size =
3550                             rx_ring->sbq_len * sizeof(__le64);
3551                         rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
3552                         rx_ring->type = RX_Q;
3553                 } else {
3554                         /*
3555                          * Outbound queue handles outbound completions only.
3556                          */
3557                         /* outbound cq is same size as tx_ring it services. */
3558                         rx_ring->cq_len = qdev->tx_ring_size;
3559                         rx_ring->cq_size =
3560                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3561                         rx_ring->lbq_len = 0;
3562                         rx_ring->lbq_size = 0;
3563                         rx_ring->lbq_buf_size = 0;
3564                         rx_ring->sbq_len = 0;
3565                         rx_ring->sbq_size = 0;
3566                         rx_ring->sbq_buf_size = 0;
3567                         rx_ring->type = TX_Q;
3568                 }
3569         }
3570         return 0;
3571 }
3572
3573 static int qlge_open(struct net_device *ndev)
3574 {
3575         int err = 0;
3576         struct ql_adapter *qdev = netdev_priv(ndev);
3577
3578         err = ql_configure_rings(qdev);
3579         if (err)
3580                 return err;
3581
3582         err = ql_get_adapter_resources(qdev);
3583         if (err)
3584                 goto error_up;
3585
3586         err = ql_adapter_up(qdev);
3587         if (err)
3588                 goto error_up;
3589
3590         return err;
3591
3592 error_up:
3593         ql_release_adapter_resources(qdev);
3594         return err;
3595 }
3596
3597 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
3598 {
3599         struct ql_adapter *qdev = netdev_priv(ndev);
3600
3601         if (ndev->mtu == 1500 && new_mtu == 9000) {
3602                 QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
3603                 queue_delayed_work(qdev->workqueue,
3604                                 &qdev->mpi_port_cfg_work, 0);
3605         } else if (ndev->mtu == 9000 && new_mtu == 1500) {
3606                 QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
3607         } else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
3608                    (ndev->mtu == 9000 && new_mtu == 9000)) {
3609                 return 0;
3610         } else
3611                 return -EINVAL;
3612         ndev->mtu = new_mtu;
3613         return 0;
3614 }
3615
3616 static struct net_device_stats *qlge_get_stats(struct net_device
3617                                                *ndev)
3618 {
3619         struct ql_adapter *qdev = netdev_priv(ndev);
3620         return &qdev->stats;
3621 }
3622
3623 static void qlge_set_multicast_list(struct net_device *ndev)
3624 {
3625         struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3626         struct dev_mc_list *mc_ptr;
3627         int i, status;
3628
3629         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3630         if (status)
3631                 return;
3632         /*
3633          * Set or clear promiscuous mode if a
3634          * transition is taking place.
3635          */
3636         if (ndev->flags & IFF_PROMISC) {
3637                 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3638                         if (ql_set_routing_reg
3639                             (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
3640                                 QPRINTK(qdev, HW, ERR,
3641                                         "Failed to set promiscous mode.\n");
3642                         } else {
3643                                 set_bit(QL_PROMISCUOUS, &qdev->flags);
3644                         }
3645                 }
3646         } else {
3647                 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3648                         if (ql_set_routing_reg
3649                             (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
3650                                 QPRINTK(qdev, HW, ERR,
3651                                         "Failed to clear promiscous mode.\n");
3652                         } else {
3653                                 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3654                         }
3655                 }
3656         }
3657
3658         /*
3659          * Set or clear all multicast mode if a
3660          * transition is taking place.
3661          */
3662         if ((ndev->flags & IFF_ALLMULTI) ||
3663             (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
3664                 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
3665                         if (ql_set_routing_reg
3666                             (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
3667                                 QPRINTK(qdev, HW, ERR,
3668                                         "Failed to set all-multi mode.\n");
3669                         } else {
3670                                 set_bit(QL_ALLMULTI, &qdev->flags);
3671                         }
3672                 }
3673         } else {
3674                 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
3675                         if (ql_set_routing_reg
3676                             (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
3677                                 QPRINTK(qdev, HW, ERR,
3678                                         "Failed to clear all-multi mode.\n");
3679                         } else {
3680                                 clear_bit(QL_ALLMULTI, &qdev->flags);
3681                         }
3682                 }
3683         }
3684
3685         if (ndev->mc_count) {
3686                 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
3687                 if (status)
3688                         goto exit;
3689                 for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
3690                      i++, mc_ptr = mc_ptr->next)
3691                         if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
3692                                                 MAC_ADDR_TYPE_MULTI_MAC, i)) {
3693                                 QPRINTK(qdev, HW, ERR,
3694                                         "Failed to loadmulticast address.\n");
3695                                 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
3696                                 goto exit;
3697                         }
3698                 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
3699                 if (ql_set_routing_reg
3700                     (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
3701                         QPRINTK(qdev, HW, ERR,
3702                                 "Failed to set multicast match mode.\n");
3703                 } else {
3704                         set_bit(QL_ALLMULTI, &qdev->flags);
3705                 }
3706         }
3707 exit:
3708         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3709 }
3710
3711 static int qlge_set_mac_address(struct net_device *ndev, void *p)
3712 {
3713         struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3714         struct sockaddr *addr = p;
3715         int status;
3716
3717         if (netif_running(ndev))
3718                 return -EBUSY;
3719
3720         if (!is_valid_ether_addr(addr->sa_data))
3721                 return -EADDRNOTAVAIL;
3722         memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3723
3724         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
3725         if (status)
3726                 return status;
3727         status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
3728                         MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
3729         if (status)
3730                 QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
3731         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
3732         return status;
3733 }
3734
3735 static void qlge_tx_timeout(struct net_device *ndev)
3736 {
3737         struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3738         ql_queue_asic_error(qdev);
3739 }
3740
3741 static void ql_asic_reset_work(struct work_struct *work)
3742 {
3743         struct ql_adapter *qdev =
3744             container_of(work, struct ql_adapter, asic_reset_work.work);
3745         int status;
3746         rtnl_lock();
3747         status = ql_adapter_down(qdev);
3748         if (status)
3749                 goto error;
3750
3751         status = ql_adapter_up(qdev);
3752         if (status)
3753                 goto error;
3754
3755         /* Restore rx mode. */
3756         clear_bit(QL_ALLMULTI, &qdev->flags);
3757         clear_bit(QL_PROMISCUOUS, &qdev->flags);
3758         qlge_set_multicast_list(qdev->ndev);
3759
3760         rtnl_unlock();
3761         return;
3762 error:
3763         QPRINTK(qdev, IFUP, ALERT,
3764                 "Driver up/down cycle failed, closing device\n");
3765
3766         set_bit(QL_ADAPTER_UP, &qdev->flags);
3767         dev_close(qdev->ndev);
3768         rtnl_unlock();
3769 }
3770
3771 static struct nic_operations qla8012_nic_ops = {
3772         .get_flash              = ql_get_8012_flash_params,
3773         .port_initialize        = ql_8012_port_initialize,
3774 };
3775
3776 static struct nic_operations qla8000_nic_ops = {
3777         .get_flash              = ql_get_8000_flash_params,
3778         .port_initialize        = ql_8000_port_initialize,
3779 };
3780
3781 /* Find the pcie function number for the other NIC
3782  * on this chip.  Since both NIC functions share a
3783  * common firmware we have the lowest enabled function
3784  * do any common work.  Examples would be resetting
3785  * after a fatal firmware error, or doing a firmware
3786  * coredump.
3787  */
3788 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
3789 {
3790         int status = 0;
3791         u32 temp;
3792         u32 nic_func1, nic_func2;
3793
3794         status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
3795                         &temp);
3796         if (status)
3797                 return status;
3798
3799         nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
3800                         MPI_TEST_NIC_FUNC_MASK);
3801         nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
3802                         MPI_TEST_NIC_FUNC_MASK);
3803
3804         if (qdev->func == nic_func1)
3805                 qdev->alt_func = nic_func2;
3806         else if (qdev->func == nic_func2)
3807                 qdev->alt_func = nic_func1;
3808         else
3809                 status = -EIO;
3810
3811         return status;
3812 }
3813
3814 static int ql_get_board_info(struct ql_adapter *qdev)
3815 {
3816         int status;
3817         qdev->func =
3818             (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
3819         if (qdev->func > 3)
3820                 return -EIO;
3821
3822         status = ql_get_alt_pcie_func(qdev);
3823         if (status)
3824                 return status;
3825
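        /* The NIC function with the lower PCI function number is
         * port 0; its sibling is port 1.
         */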
3826         qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
3827         if (qdev->port) {
3828                 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
3829                 qdev->port_link_up = STS_PL1;
3830                 qdev->port_init = STS_PI1;
3831                 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
3832                 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
3833         } else {
3834                 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
3835                 qdev->port_link_up = STS_PL0;
3836                 qdev->port_init = STS_PI0;
3837                 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
3838                 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
3839         }
3840         qdev->chip_rev_id = ql_read32(qdev, REV_ID);
3841         qdev->device_id = qdev->pdev->device;
3842         if (qdev->device_id == QLGE_DEVICE_ID_8012)
3843                 qdev->nic_ops = &qla8012_nic_ops;
3844         else if (qdev->device_id == QLGE_DEVICE_ID_8000)
3845                 qdev->nic_ops = &qla8000_nic_ops;
3846         return status;
3847 }
3848
3849 static void ql_release_all(struct pci_dev *pdev)
3850 {
3851         struct net_device *ndev = pci_get_drvdata(pdev);
3852         struct ql_adapter *qdev = netdev_priv(ndev);
3853
3854         if (qdev->workqueue) {
3855                 destroy_workqueue(qdev->workqueue);
3856                 qdev->workqueue = NULL;
3857         }
3858
3859         if (qdev->reg_base)
3860                 iounmap(qdev->reg_base);
3861         if (qdev->doorbell_area)
3862                 iounmap(qdev->doorbell_area);
3863         pci_release_regions(pdev);
3864         pci_set_drvdata(pdev, NULL);
3865 }
3866
3867 static int __devinit ql_init_device(struct pci_dev *pdev,
3868                                     struct net_device *ndev, int cards_found)
3869 {
3870         struct ql_adapter *qdev = netdev_priv(ndev);
3871         int pos, err = 0;
3872         u16 val16;
3873
3874         memset((void *)qdev, 0, sizeof(*qdev));
3875         err = pci_enable_device(pdev);
3876         if (err) {
3877                 dev_err(&pdev->dev, "PCI device enable failed.\n");
3878                 return err;
3879         }
3880
3881         qdev->ndev = ndev;
3882         qdev->pdev = pdev;
3883         pci_set_drvdata(pdev, ndev);
3884         pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
3885         if (pos <= 0) {
3886                 dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
3887                         "aborting.\n");
3888                 return -EINVAL;
3889         } else {
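                /* Turn off no-snoop and enable PCIe error reporting for
                 * correctable, non-fatal, fatal and unsupported-request
                 * errors.
                 */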
3890                 pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
3891                 val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
3892                 val16 |= (PCI_EXP_DEVCTL_CERE |
3893                           PCI_EXP_DEVCTL_NFERE |
3894                           PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
3895                 pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
3896         }
3897
3898         err = pci_request_regions(pdev, DRV_NAME);
3899         if (err) {
3900                 dev_err(&pdev->dev, "PCI region request failed.\n");
3901                 return err;
3902         }
3903
3904         pci_set_master(pdev);
3905         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3906                 set_bit(QL_DMA64, &qdev->flags);
3907                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3908         } else {
3909                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3910                 if (!err)
3911                         err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3912         }
3913
3914         if (err) {
3915                 dev_err(&pdev->dev, "No usable DMA configuration.\n");
3916                 goto err_out;
3917         }
3918
3919         /* Set PCIe reset type for EEH to fundamental. */
3920         pdev->needs_freset = 1;
3921         pci_save_state(pdev);
        qdev->reg_base =
            ioremap_nocache(pci_resource_start(pdev, 1),
                            pci_resource_len(pdev, 1));
        if (!qdev->reg_base) {
                dev_err(&pdev->dev, "Register mapping failed.\n");
                err = -ENOMEM;
                goto err_out;
        }

        qdev->doorbell_area_size = pci_resource_len(pdev, 3);
        qdev->doorbell_area =
            ioremap_nocache(pci_resource_start(pdev, 3),
                            pci_resource_len(pdev, 3));
        if (!qdev->doorbell_area) {
                dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
                err = -ENOMEM;
                goto err_out;
        }

        err = ql_get_board_info(qdev);
        if (err) {
                dev_err(&pdev->dev, "Register access failed.\n");
                err = -EIO;
                goto err_out;
        }
        qdev->msg_enable = netif_msg_init(debug, default_msg);
        spin_lock_init(&qdev->hw_lock);
        spin_lock_init(&qdev->stats_lock);

        /* make sure the EEPROM is good */
        err = qdev->nic_ops->get_flash(qdev);
        if (err) {
                dev_err(&pdev->dev, "Invalid FLASH.\n");
                goto err_out;
        }

        memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

        /* Set up the default ring sizes. */
        qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
        qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

        /* Set up the coalescing parameters. */
        qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
        qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
        qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
        qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

        /*
         * Set up the operating parameters.
         */
        qdev->rx_csum = 1;
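
        /* A single-threaded workqueue serializes the deferred reset,
         * MPI and port-configuration handlers initialized below.
         */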
        qdev->workqueue = create_singlethread_workqueue(ndev->name);
        if (!qdev->workqueue) {
                err = -ENOMEM;
                goto err_out;
        }
        INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
        INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
        INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
        INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
        INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
        init_completion(&qdev->ide_completion);

        if (!cards_found) {
                dev_info(&pdev->dev, "%s\n", DRV_STRING);
                dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
                         DRV_NAME, DRV_VERSION);
        }
        return 0;
err_out:
        ql_release_all(pdev);
        pci_disable_device(pdev);
        return err;
}
static const struct net_device_ops qlge_netdev_ops = {
        .ndo_open               = qlge_open,
        .ndo_stop               = qlge_close,
        .ndo_start_xmit         = qlge_send,
        .ndo_change_mtu         = qlge_change_mtu,
        .ndo_get_stats          = qlge_get_stats,
        .ndo_set_multicast_list = qlge_set_multicast_list,
        .ndo_set_mac_address    = qlge_set_mac_address,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_tx_timeout         = qlge_tx_timeout,
        .ndo_vlan_rx_register   = ql_vlan_rx_register,
        .ndo_vlan_rx_add_vid    = ql_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = ql_vlan_rx_kill_vid,
};

static int __devinit qlge_probe(struct pci_dev *pdev,
                                const struct pci_device_id *pci_entry)
{
        struct net_device *ndev = NULL;
        struct ql_adapter *qdev = NULL;
        static int cards_found;
        int err = 0;

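        /* Allocate one TX queue per online CPU, capped at MAX_CPUS. */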
        ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
                        min(MAX_CPUS, (int)num_online_cpus()));
        if (!ndev)
                return -ENOMEM;

        err = ql_init_device(pdev, ndev, cards_found);
        if (err < 0) {
                free_netdev(ndev);
                return err;
        }

        qdev = netdev_priv(ndev);
        SET_NETDEV_DEV(ndev, &pdev->dev);
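
        /* Hardware offloads: IP checksum, scatter/gather, the TSO
         * variants and VLAN acceleration; GRO is handled by the stack.
         */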
        ndev->features = (0
                          | NETIF_F_IP_CSUM
                          | NETIF_F_SG
                          | NETIF_F_TSO
                          | NETIF_F_TSO6
                          | NETIF_F_TSO_ECN
                          | NETIF_F_HW_VLAN_TX
                          | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
        ndev->features |= NETIF_F_GRO;

        if (test_bit(QL_DMA64, &qdev->flags))
                ndev->features |= NETIF_F_HIGHDMA;

        /*
         * Set up net_device structure.
         */
        ndev->tx_queue_len = qdev->tx_ring_size;
        ndev->irq = pdev->irq;

        ndev->netdev_ops = &qlge_netdev_ops;
        SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
        ndev->watchdog_timeo = 10 * HZ;

        err = register_netdev(ndev);
        if (err) {
                dev_err(&pdev->dev, "net device registration failed.\n");
                ql_release_all(pdev);
                pci_disable_device(pdev);
                free_netdev(ndev);
                return err;
        }
        ql_link_off(qdev);
        ql_display_dev_info(ndev);
        cards_found++;
        return 0;
}

static void __devexit qlge_remove(struct pci_dev *pdev)
{
        struct net_device *ndev = pci_get_drvdata(pdev);

        unregister_netdev(ndev);
        ql_release_all(pdev);
        pci_disable_device(pdev);
        free_netdev(ndev);
}

/* Clean up resources without touching hardware. */
static void ql_eeh_close(struct net_device *ndev)
{
        int i;
        struct ql_adapter *qdev = netdev_priv(ndev);

        if (netif_carrier_ok(ndev)) {
                netif_carrier_off(ndev);
                netif_stop_queue(ndev);
        }

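        /* Flush the deferred reset and MPI handlers so nothing touches
         * the (possibly failed) hardware while it is torn down.
         */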
        if (test_bit(QL_ADAPTER_UP, &qdev->flags))
                cancel_delayed_work_sync(&qdev->asic_reset_work);
        cancel_delayed_work_sync(&qdev->mpi_reset_work);
        cancel_delayed_work_sync(&qdev->mpi_work);
        cancel_delayed_work_sync(&qdev->mpi_idc_work);
        cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);

        for (i = 0; i < qdev->rss_ring_count; i++)
                netif_napi_del(&qdev->rx_ring[i].napi);

        clear_bit(QL_ADAPTER_UP, &qdev->flags);
        ql_tx_ring_clean(qdev);
        ql_free_rx_buffers(qdev);
        ql_release_adapter_resources(qdev);
}

/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
                                               enum pci_channel_state state)
{
        struct net_device *ndev = pci_get_drvdata(pdev);

        switch (state) {
        case pci_channel_io_normal:
                return PCI_ERS_RESULT_CAN_RECOVER;
        case pci_channel_io_frozen:
                netif_device_detach(ndev);
                if (netif_running(ndev))
                        ql_eeh_close(ndev);
                pci_disable_device(pdev);
                return PCI_ERS_RESULT_NEED_RESET;
        case pci_channel_io_perm_failure:
                dev_err(&pdev->dev,
                        "%s: pci_channel_io_perm_failure.\n", __func__);
                return PCI_ERS_RESULT_DISCONNECT;
        }

        /* Request a slot reset. */
        return PCI_ERS_RESULT_NEED_RESET;
}

/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code;
 * it resembles the first half of the probe routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *ndev = pci_get_drvdata(pdev);
        struct ql_adapter *qdev = netdev_priv(ndev);

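        /* The channel is usable again once the slot has been reset;
         * reflect that before re-enabling the device.
         */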
        pdev->error_state = pci_channel_io_normal;

        pci_restore_state(pdev);
        if (pci_enable_device(pdev)) {
                QPRINTK(qdev, IFUP, ERR,
                        "Cannot re-enable PCI device after reset.\n");
                return PCI_ERS_RESULT_DISCONNECT;
        }
        pci_set_master(pdev);
        return PCI_ERS_RESULT_RECOVERED;
}

static void qlge_io_resume(struct pci_dev *pdev)
{
        struct net_device *ndev = pci_get_drvdata(pdev);
        struct ql_adapter *qdev = netdev_priv(ndev);
        int err = 0;

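        /* Bring the ASIC back to a known state, then reopen the
         * interface if it was running when the error was detected.
         */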
        if (ql_adapter_reset(qdev))
                QPRINTK(qdev, DRV, ERR, "reset FAILED!\n");
        if (netif_running(ndev)) {
                err = qlge_open(ndev);
                if (err) {
                        QPRINTK(qdev, IFUP, ERR,
                                "Device initialization failed after reset.\n");
                        return;
                }
        } else {
                QPRINTK(qdev, IFUP, ERR,
                        "Device was not running prior to EEH.\n");
        }
        netif_device_attach(ndev);
}

static struct pci_error_handlers qlge_err_handler = {
        .error_detected = qlge_io_error_detected,
        .slot_reset = qlge_io_slot_reset,
        .resume = qlge_io_resume,
};

static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *ndev = pci_get_drvdata(pdev);
        struct ql_adapter *qdev = netdev_priv(ndev);
        int err;

        netif_device_detach(ndev);

        if (netif_running(ndev)) {
                err = ql_adapter_down(qdev);
                if (err)
                        return err;
        }

        err = pci_save_state(pdev);
        if (err)
                return err;

        pci_disable_device(pdev);

        pci_set_power_state(pdev, pci_choose_state(pdev, state));

        return 0;
}

#ifdef CONFIG_PM
static int qlge_resume(struct pci_dev *pdev)
{
        struct net_device *ndev = pci_get_drvdata(pdev);
        struct ql_adapter *qdev = netdev_priv(ndev);
        int err;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        err = pci_enable_device(pdev);
        if (err) {
                QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
                return err;
        }
        pci_set_master(pdev);

        pci_enable_wake(pdev, PCI_D3hot, 0);
        pci_enable_wake(pdev, PCI_D3cold, 0);

        if (netif_running(ndev)) {
                err = ql_adapter_up(qdev);
                if (err)
                        return err;
        }

        netif_device_attach(ndev);

        return 0;
}
#endif /* CONFIG_PM */

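/* Quiesce the device for shutdown by reusing the suspend path. */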
static void qlge_shutdown(struct pci_dev *pdev)
{
        qlge_suspend(pdev, PMSG_SUSPEND);
}

static struct pci_driver qlge_driver = {
        .name = DRV_NAME,
        .id_table = qlge_pci_tbl,
        .probe = qlge_probe,
        .remove = __devexit_p(qlge_remove),
#ifdef CONFIG_PM
        .suspend = qlge_suspend,
        .resume = qlge_resume,
#endif
        .shutdown = qlge_shutdown,
        .err_handler = &qlge_err_handler,
};

static int __init qlge_init_module(void)
{
        return pci_register_driver(&qlge_driver);
}

static void __exit qlge_exit(void)
{
        pci_unregister_driver(&qlge_driver);
}

module_init(qlge_init_module);
module_exit(qlge_exit);