/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
        MAX_TXQ_ENTRIES = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES = 16384,
        MAX_RX_BUFFERS = 16384,
        MAX_RX_JUMBO_BUFFERS = 16384,
        MIN_TXQ_ENTRIES = 4,
        MIN_CTRL_TXQ_ENTRIES = 4,
        MIN_RSPQ_ENTRIES = 32,
        MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
        { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
        CH_DEVICE(0x20, 0),     /* PE9000 */
        CH_DEVICE(0x21, 1),     /* T302E */
        CH_DEVICE(0x22, 2),     /* T310E */
        CH_DEVICE(0x23, 3),     /* T320X */
        CH_DEVICE(0x24, 1),     /* T302X */
        CH_DEVICE(0x25, 3),     /* T320E */
        CH_DEVICE(0x26, 2),     /* T310X */
        CH_DEVICE(0x30, 2),     /* T3B10 */
        CH_DEVICE(0x31, 3),     /* T3B20 */
        CH_DEVICE(0x32, 1),     /* T3B02 */
        CH_DEVICE(0x35, 6),     /* T3C20-derived T3C10 */
        {0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");

/*
 * The driver enables offload by default.
 * To disable it, set ofld_disable to 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
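
/*
 * Example (illustrative, not from this file): to load the driver restricted
 * to MSI/legacy interrupts with offload disabled:
 *
 *     modprobe cxgb3 msi=1 ofld_disable=1
 */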

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;

/**
 *      link_report - show link status and link speed/duplex
 *      @dev: the port whose link state is to be reported
 *
 *      Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                printk(KERN_INFO "%s: link down\n", dev->name);
        else {
                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_config.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
                       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
        }
}

void t3_os_link_fault(struct adapter *adap, int port_id, int state)
{
        struct net_device *dev = adap->port[port_id];
        struct port_info *pi = netdev_priv(dev);

        if (state == netif_carrier_ok(dev))
                return;

        if (state) {
                struct cmac *mac = &pi->mac;

                netif_carrier_on(dev);

                /* Clear local faults */
                t3_xgm_intr_disable(adap, pi->port_id);
                t3_read_reg(adap, A_XGM_INT_STATUS +
                                    pi->mac.offset);
                t3_write_reg(adap,
                             A_XGM_INT_CAUSE + pi->mac.offset,
                             F_XGM_INT);

                t3_set_reg_field(adap,
                                 A_XGM_INT_ENABLE +
                                 pi->mac.offset,
                                 F_XGM_INT, F_XGM_INT);
                t3_xgm_intr_enable(adap, pi->port_id);

                t3_mac_enable(mac, MAC_DIRECTION_TX);
        } else
                netif_carrier_off(dev);

        link_report(dev);
}

/**
 *      t3_os_link_changed - handle link status changes
 *      @adapter: the adapter associated with the link change
 *      @port_id: the port index whose link status has changed
 *      @link_stat: the new status of the link
 *      @speed: the new speed setting
 *      @duplex: the new duplex setting
 *      @pause: the new flow-control setting
 *
 *      This is the OS-dependent handler for link status changes.  The OS
 *      neutral handler takes care of most of the processing for these events,
 *      then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
                        int speed, int duplex, int pause)
{
        struct net_device *dev = adapter->port[port_id];
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        /* Skip changes from disabled ports. */
        if (!netif_running(dev))
                return;

        if (link_stat != netif_carrier_ok(dev)) {
                if (link_stat) {
                        t3_mac_enable(mac, MAC_DIRECTION_RX);

                        /* Clear local faults */
                        t3_xgm_intr_disable(adapter, pi->port_id);
                        t3_read_reg(adapter, A_XGM_INT_STATUS +
                                    pi->mac.offset);
                        t3_write_reg(adapter,
                                     A_XGM_INT_CAUSE + pi->mac.offset,
                                     F_XGM_INT);

                        t3_set_reg_field(adapter,
                                         A_XGM_INT_ENABLE + pi->mac.offset,
                                         F_XGM_INT, F_XGM_INT);
                        t3_xgm_intr_enable(adapter, pi->port_id);

                        netif_carrier_on(dev);
                } else {
                        netif_carrier_off(dev);

                        t3_xgm_intr_disable(adapter, pi->port_id);
                        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
                        t3_set_reg_field(adapter,
                                         A_XGM_INT_ENABLE + pi->mac.offset,
                                         F_XGM_INT, 0);

                        if (is_10G(adapter))
                                pi->phy.ops->power_down(&pi->phy, 1);

                        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
                        t3_mac_disable(mac, MAC_DIRECTION_RX);
                        t3_link_start(&pi->phy, mac, &pi->link_config);
                }

                link_report(dev);
        }
}

/**
 *      t3_os_phymod_changed - handle PHY module changes
 *      @adap: the adapter whose PHY reported the module change
 *      @port_id: the port index of the PHY reporting the module change
 *
 *      This is the OS-dependent handler for PHY module changes.  It is
 *      invoked when a PHY module is removed or inserted for any OS-specific
 *      processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->phy.modtype == phy_modtype_none)
                printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
        else
                printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
                       mod_str[pi->phy.modtype]);
}

static void cxgb_set_rxmode(struct net_device *dev)
{
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_set_rx_mode(&pi->mac, &rm);
}

/**
 *      link_start - enable a port
 *      @dev: the device to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_reset(mac);
        t3_mac_set_mtu(mac, dev->mtu);
        t3_mac_set_address(mac, 0, dev->dev_addr);
        t3_mac_set_rx_mode(mac, &rm);
        t3_link_start(&pi->phy, mac, &pi->link_config);
        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
        t3_slow_intr_handler(cookie);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

        snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
        adap->msix_info[0].desc[n] = 0;

        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++) {
                        snprintf(adap->msix_info[msi_idx].desc, n,
                                 "%s-%d", d->name, pi->first_qset + i);
                        adap->msix_info[msi_idx].desc[n] = 0;
                }
        }
}

static int request_msix_data_irqs(struct adapter *adap)
{
        int i, j, err, qidx = 0;

        for_each_port(adap, i) {
                int nqsets = adap2pinfo(adap, i)->nqsets;

                for (j = 0; j < nqsets; ++j) {
                        err = request_irq(adap->msix_info[qidx + 1].vec,
                                          t3_intr_handler(adap,
                                                          adap->sge.qs[qidx].
                                                          rspq.polling), 0,
                                          adap->msix_info[qidx + 1].desc,
                                          &adap->sge.qs[qidx]);
                        if (err) {
                                while (--qidx >= 0)
                                        free_irq(adap->msix_info[qidx + 1].vec,
                                                 &adap->sge.qs[qidx]);
                                return err;
                        }
                        qidx++;
                }
        }
        return 0;
}

static void free_irq_resources(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                int i, n = 0;

                free_irq(adapter->msix_info[0].vec, adapter);
                for_each_port(adapter, i)
                        n += adap2pinfo(adapter, i)->nqsets;

                for (i = 0; i < n; ++i)
                        free_irq(adapter->msix_info[i + 1].vec,
                                 &adapter->sge.qs[i]);
        } else
                free_irq(adapter->pdev->irq, adapter);
}

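/*
 * Poll qset 0's offload packet counter until it has advanced by @n replies
 * beyond @init_cnt, giving up after a few 10 ms sleeps.
 */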
static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
                              unsigned long n)
{
        int attempts = 5;

        while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
                if (!--attempts)
                        return -ETIMEDOUT;
                msleep(10);
        }
        return 0;
}

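/*
 * Write every SMT, L2T, and routing-table entry plus one TCB field so that
 * the TP's parity-protected memories start out with valid parity, then wait
 * for all the management replies to come back.
 */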
static int init_tp_parity(struct adapter *adap)
{
        int i;
        struct sk_buff *skb;
        struct cpl_set_tcb_field *greq;
        unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

        t3_tp_set_offload_mode(adap, 1);

        for (i = 0; i < 16; i++) {
                struct cpl_smt_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
                req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
                req->iff = i;
                t3_mgmt_tx(adap, skb);
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_l2t_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
                req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
                req->params = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_rte_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
                req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
                req->l2t_idx = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
        }

        skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
        greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
        memset(greq, 0, sizeof(*greq));
        greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
        greq->mask = cpu_to_be64(1);
        t3_mgmt_tx(adap, skb);

        i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
        t3_tp_set_offload_mode(adap, 0);
        return i;
}

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS to distribute packets to multiple receive queues.  We
 *      configure the RSS CPU lookup table to distribute to the number of HW
 *      receive queues, and the response queue lookup table to narrow that
 *      down to the response queues actually configured for each port.
 *      We always configure the RSS mapping for two ports since the mapping
 *      table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
        int i;
        unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
        unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
        u8 cpus[SGE_QSETS + 1];
        u16 rspq_map[RSS_TABLE_SIZE];

        for (i = 0; i < SGE_QSETS; ++i)
                cpus[i] = i;
        cpus[SGE_QSETS] = 0xff; /* terminator */

        for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
                rspq_map[i] = i % nq0;
                rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
        }

        t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
                      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
                      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}

static void init_napi(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++) {
                struct sge_qset *qs = &adap->sge.qs[i];

                if (qs->adap)
                        netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
                                       64);
        }

        /*
         * netif_napi_add() can be called only once per napi_struct because it
         * adds each new napi_struct to a list.  Be careful not to call it a
         * second time, e.g., during EEH recovery, by making a note of it.
         */
        adap->flags |= NAPI_INIT;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
        int i;
        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_enable(&adap->sge.qs[i].napi);
}

/**
 *      set_qset_lro - Turn a queue set's LRO capability on and off
 *      @dev: the device the qset is attached to
 *      @qset_idx: the queue set index
 *      @val: the LRO switch
 *
 *      Sets LRO on or off for a particular queue set, recording the new
 *      setting both in the adapter's SGE parameters and in the queue set's
 *      own state.
 */
static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        adapter->params.sge.qset[qset_idx].lro = !!val;
        adapter->sge.qs[qset_idx].lro_enabled = !!val;
}

/**
 *      setup_sge_qsets - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
        int i, j, err, irq_idx = 0, qset_idx = 0;
        unsigned int ntxq = SGE_TXQ_PER_SET;

        if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
                irq_idx = -1;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);

                pi->qs = &adap->sge.qs[pi->first_qset];
                for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
                     ++j, ++qset_idx) {
                        set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
                        err = t3_sge_alloc_qset(adap, qset_idx, 1,
                                (adap->flags & USING_MSIX) ? qset_idx + 1 :
                                                             irq_idx,
                                &adap->params.sge.qset[qset_idx], ntxq, dev,
                                netdev_get_tx_queue(dev, j));
                        if (err) {
                                t3_free_sge_resources(adap);
                                return err;
                        }
                }
        }

        return 0;
}

static ssize_t attr_show(struct device *d, char *buf,
                         ssize_t(*format) (struct net_device *, char *))
{
        ssize_t len;

        /* Synchronize with ioctls that may shut down the device */
        rtnl_lock();
        len = (*format) (to_net_dev(d), buf);
        rtnl_unlock();
        return len;
}

static ssize_t attr_store(struct device *d,
                          const char *buf, size_t len,
                          ssize_t(*set) (struct net_device *, unsigned int),
                          unsigned int min_val, unsigned int max_val)
{
        char *endp;
        ssize_t ret;
        unsigned int val;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val < min_val || val > max_val)
                return -EINVAL;

        rtnl_lock();
        ret = (*set) (to_net_dev(d), val);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
        struct port_info *pi = netdev_priv(dev); \
        struct adapter *adap = pi->adapter; \
        return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return attr_show(d, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val && adap->params.rev == 0)
                return -EINVAL;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
            min_tids)
                return -EINVAL;
        adap->params.mc5.nfilters = val;
        return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
            MC5_MIN_TIDS)
                return -EINVAL;
        adap->params.mc5.nservers = val;
        return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
        &dev_attr_cam_size.attr,
        &dev_attr_nfilters.attr,
        &dev_attr_nservers.attr,
        NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };

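/*
 * Each TP_TM rate-limit register holds two schedulers: the even-numbered one
 * in the low 16 bits, the odd one in the high 16 bits.  A scheduler's rate is
 * encoded as bytes-per-tick (bpt) and core clocks-per-tick (cpt); with the
 * core clock in kHz, (cclk * 1000 / cpt) * bpt gives bytes/sec, and dividing
 * by 125 converts that to Kbps.
 */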
static ssize_t tm_attr_show(struct device *d,
                            char *buf, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int v, addr, bpt, cpt;
        ssize_t len;

        addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
        rtnl_lock();
        t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
        v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
        if (sched & 1)
                v >>= 16;
        bpt = (v >> 8) & 0xff;
        cpt = v & 0xff;
        if (!cpt)
                len = sprintf(buf, "disabled\n");
        else {
                v = (adap->params.vpd.cclk * 1000) / cpt;
                len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
        }
        rtnl_unlock();
        return len;
}

static ssize_t tm_attr_store(struct device *d,
                             const char *buf, size_t len, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int val;
        char *endp;
        ssize_t ret;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val > 10000000)
                return -EINVAL;

        rtnl_lock();
        ret = t3_config_sched(adap, val, sched);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
                            const char *buf, size_t len) \
{ \
        return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
        &dev_attr_sched0.attr,
        &dev_attr_sched1.attr,
        &dev_attr_sched2.attr,
        &dev_attr_sched3.attr,
        &dev_attr_sched4.attr,
        &dev_attr_sched5.attr,
        &dev_attr_sched6.attr,
        &dev_attr_sched7.attr,
        NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
        int ret;

        local_bh_disable();
        ret = t3_offload_tx(tdev, skb);
        local_bh_enable();
        return ret;
}

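/*
 * Build a CPL_SMT_WRITE_REQ installing port @idx's MAC address in the
 * corresponding source MAC table entry and hand it to the offload queue.
 */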
static int write_smt_entry(struct adapter *adapter, int idx)
{
        struct cpl_smt_write_req *req;
        struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

        if (!skb)
                return -ENOMEM;

        req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
        req->mtu_idx = NMTUS - 1;       /* should be 0 but there's a T3 bug */
        req->iff = idx;
        memset(req->src_mac1, 0, sizeof(req->src_mac1));
        memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
        skb->priority = 1;
        offload_tx(&adapter->tdev, skb);
        return 0;
}

static int init_smt(struct adapter *adapter)
{
        int i;

        for_each_port(adapter, i)
            write_smt_entry(adapter, i);
        return 0;
}

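/*
 * Program the per-port MTU table register; port 1's MTU, if present,
 * occupies the upper 16 bits.
 */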
static void init_port_mtus(struct adapter *adapter)
{
        unsigned int mtus = adapter->port[0]->mtu;

        if (adapter->port[1])
                mtus |= adapter->port[1]->mtu << 16;
        t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

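/*
 * Send a management work request that binds TX queue @qidx of packet
 * scheduler @sched to @port, with @lo and @hi as the scheduler's min/max
 * parameters.
 */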
static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
                              int hi, int port)
{
        struct sk_buff *skb;
        struct mngt_pktsched_wr *req;
        int ret;

        skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
        req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
        req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
        req->sched = sched;
        req->idx = qidx;
        req->min = lo;
        req->max = hi;
        req->binding = port;
        ret = t3_mgmt_tx(adap, skb);

        return ret;
}

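/* Bind each port's queue sets to TX packet scheduler 1. */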
static int bind_qsets(struct adapter *adap)
{
        int i, j, err = 0;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                for (j = 0; j < pi->nqsets; ++j) {
                        int ret = send_pktsched_cmd(adap, 1,
                                                    pi->first_qset + j, -1,
                                                    -1, i);
                        if (ret)
                                err = ret;
                }
        }

        return err;
}

#define FW_FNAME "cxgb3/t3fw-%d.%d.%d.bin"
#define TPSRAM_NAME "cxgb3/t3%c_psram-%d.%d.%d.bin"

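/*
 * Fetch the firmware image matching the version the driver expects via
 * request_firmware() and program it into the adapter with t3_load_fw().
 */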
static int upgrade_fw(struct adapter *adap)
{
        int ret;
        char buf[64];
        const struct firmware *fw;
        struct device *dev = &adap->pdev->dev;

        snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
                 FW_VERSION_MINOR, FW_VERSION_MICRO);
        ret = request_firmware(&fw, buf, dev);
        if (ret < 0) {
                dev_err(dev, "could not upgrade firmware: unable to load %s\n",
                        buf);
                return ret;
        }
        ret = t3_load_fw(adap, fw->data, fw->size);
        release_firmware(fw);

        if (ret == 0)
                dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
                         FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
        else
                dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
                        FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

        return ret;
}

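/*
 * Map the chip revision to the character used in TP SRAM image file names;
 * returns 0 for revisions that do not take a protocol SRAM image.
 */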
static inline char t3rev2char(struct adapter *adapter)
{
        char rev = 0;

        switch(adapter->params.rev) {
        case T3_REV_B:
        case T3_REV_B2:
                rev = 'b';
                break;
        case T3_REV_C:
                rev = 'c';
                break;
        }
        return rev;
}

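/*
 * Fetch the protocol SRAM image for this chip revision, validate it, and
 * load it into the TP's protocol engine.
 */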
static int update_tpsram(struct adapter *adap)
{
        const struct firmware *tpsram;
        char buf[64];
        struct device *dev = &adap->pdev->dev;
        int ret;
        char rev;

        rev = t3rev2char(adap);
        if (!rev)
                return 0;

        snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
                 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

        ret = request_firmware(&tpsram, buf, dev);
        if (ret < 0) {
                dev_err(dev, "could not load TP SRAM: unable to load %s\n",
                        buf);
                return ret;
        }

        ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
        if (ret)
                goto release_tpsram;

        ret = t3_set_proto_sram(adap, tpsram->data);
        if (ret == 0)
                dev_info(dev,
                         "successful update of protocol engine "
                         "to %d.%d.%d\n",
                         TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        else
                dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
                        TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        if (ret)
                dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
        release_firmware(tpsram);

        return ret;
}

/**
 *      cxgb_up - enable the adapter
 *      @adap: adapter being enabled
 *
 *      Called when the first port is enabled, this function performs the
 *      actions necessary to make an adapter operational, such as completing
 *      the initialization of HW modules, and enabling interrupts.
 *
 *      Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
        int err;

        if (!(adap->flags & FULL_INIT_DONE)) {
                err = t3_check_fw_version(adap);
                if (err == -EINVAL) {
                        err = upgrade_fw(adap);
                        CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
                                FW_VERSION_MAJOR, FW_VERSION_MINOR,
                                FW_VERSION_MICRO, err ? "failed" : "succeeded");
                }

                err = t3_check_tpsram_version(adap);
                if (err == -EINVAL) {
                        err = update_tpsram(adap);
                        CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
                                TP_VERSION_MAJOR, TP_VERSION_MINOR,
                                TP_VERSION_MICRO, err ? "failed" : "succeeded");
                }

                /*
                 * Clear interrupts now to catch errors if t3_init_hw fails.
                 * We clear them again later as initialization may trigger
                 * conditions that can interrupt.
                 */
                t3_intr_clear(adap);

                err = t3_init_hw(adap, 0);
                if (err)
                        goto out;

                t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
                t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

                err = setup_sge_qsets(adap);
                if (err)
                        goto out;

                setup_rss(adap);
                if (!(adap->flags & NAPI_INIT))
                        init_napi(adap);

                t3_start_sge_timers(adap);
                adap->flags |= FULL_INIT_DONE;
        }

        t3_intr_clear(adap);

        if (adap->flags & USING_MSIX) {
                name_msix_vecs(adap);
                err = request_irq(adap->msix_info[0].vec,
                                  t3_async_intr_handler, 0,
                                  adap->msix_info[0].desc, adap);
                if (err)
                        goto irq_err;

                err = request_msix_data_irqs(adap);
                if (err) {
                        free_irq(adap->msix_info[0].vec, adap);
                        goto irq_err;
                }
        } else if ((err = request_irq(adap->pdev->irq,
                                      t3_intr_handler(adap,
                                                      adap->sge.qs[0].rspq.
                                                      polling),
                                      (adap->flags & USING_MSI) ?
                                       0 : IRQF_SHARED,
                                      adap->name, adap)))
                goto irq_err;

        enable_all_napi(adap);
        t3_sge_start(adap);
        t3_intr_enable(adap);

        if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
            is_offload(adap) && init_tp_parity(adap) == 0)
                adap->flags |= TP_PARITY_INIT;

        if (adap->flags & TP_PARITY_INIT) {
                t3_write_reg(adap, A_TP_INT_CAUSE,
                             F_CMCACHEPERR | F_ARPLUTPERR);
                t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
        }

        if (!(adap->flags & QUEUES_BOUND)) {
                err = bind_qsets(adap);
                if (err) {
                        CH_ERR(adap, "failed to bind qsets, err %d\n", err);
                        t3_intr_disable(adap);
                        free_irq_resources(adap);
                        goto out;
                }
                adap->flags |= QUEUES_BOUND;
        }

out:
        return err;
irq_err:
        CH_ERR(adap, "request_irq failed, err %d\n", err);
        goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
        t3_sge_stop(adapter);
        spin_lock_irq(&adapter->work_lock);     /* sync with PHY intr task */
        t3_intr_disable(adapter);
        spin_unlock_irq(&adapter->work_lock);

        free_irq_resources(adapter);
        quiesce_rx(adapter);
        flush_workqueue(cxgb3_wq);      /* wait for external IRQ handler */
}

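/*
 * Schedule the periodic adapter check task.  linkpoll_period is in tenths
 * of a second; if it is zero we fall back to the statistics update period.
 */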
static void schedule_chk_task(struct adapter *adap)
{
        unsigned int timeo;

        timeo = adap->params.linkpoll_period ?
            (HZ * adap->params.linkpoll_period) / 10 :
            adap->params.stats_update_period * HZ;
        if (timeo)
                queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}

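/*
 * Bring up the adapter's offload capabilities: switch TP into offload mode,
 * activate the offload module, program the MTU tables and SMT, and notify
 * registered clients.
 */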
static int offload_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct t3cdev *tdev = dev2t3cdev(dev);
        int adap_up = adapter->open_device_map & PORT_MASK;
        int err;

        if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        if (!adap_up && (err = cxgb_up(adapter)) < 0)
                goto out;

        t3_tp_set_offload_mode(adapter, 1);
        tdev->lldev = adapter->port[0];
        err = cxgb3_offload_activate(adapter);
        if (err)
                goto out;

        init_port_mtus(adapter);
        t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
                     adapter->params.b_wnd,
                     adapter->params.rev == 0 ?
                     adapter->port[0]->mtu : 0xffff);
        init_smt(adapter);

        if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
                dev_dbg(&dev->dev, "cannot create sysfs group\n");

        /* Call back all registered clients */
        cxgb3_add_clients(tdev);

out:
        /* restore them in case the offload module has changed them */
        if (err) {
                t3_tp_set_offload_mode(adapter, 0);
                clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
                cxgb3_set_dummy_ops(tdev);
        }
        return err;
}

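/*
 * Tear down offload: detach registered clients, remove the sysfs group,
 * switch TP out of offload mode, and bring the adapter down if no ports
 * remain open.
 */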
static int offload_close(struct t3cdev *tdev)
{
        struct adapter *adapter = tdev2adap(tdev);

        if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        /* Call back all registered clients */
        cxgb3_remove_clients(tdev);

        sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

        /* Flush work scheduled while releasing TIDs */
        flush_scheduled_work();

        tdev->lldev = NULL;
        cxgb3_set_dummy_ops(tdev);
        t3_tp_set_offload_mode(adapter, 0);
        clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

        if (!adapter->open_device_map)
                cxgb_down(adapter);

        cxgb3_offload_deactivate(adapter);
        return 0;
}

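/*
 * Open handler for a port: bring the adapter up on the first open, mark the
 * port in the open-device map, enable offload if available, and start the
 * TX queues and the periodic check task.
 */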
static int cxgb_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int other_ports = adapter->open_device_map & PORT_MASK;
        int err;

        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
                return err;

        set_bit(pi->port_id, &adapter->open_device_map);
        if (is_offload(adapter) && !ofld_disable) {
                err = offload_open(dev);
                if (err)
                        printk(KERN_WARNING
                               "Could not initialize offload capabilities\n");
        }

        dev->real_num_tx_queues = pi->nqsets;
        link_start(dev);
        t3_port_intr_enable(adapter, pi->port_id);
        netif_tx_start_all_queues(dev);
        if (!other_ports)
                schedule_chk_task(adapter);

        return 0;
}

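/*
 * Close handler for a port: quiesce the port's interrupts, queues, PHY and
 * MAC, and bring the whole adapter down once the last port (and offload
 * user) is gone.
 */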
static int cxgb_close(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        if (!adapter->open_device_map)
                return 0;

        /* Stop link fault interrupts */
        t3_xgm_intr_disable(adapter, pi->port_id);
        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);

        t3_port_intr_disable(adapter, pi->port_id);
        netif_tx_stop_all_queues(dev);
        pi->phy.ops->power_down(&pi->phy, 1);
        netif_carrier_off(dev);
        t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

        spin_lock_irq(&adapter->work_lock);     /* sync with update task */
        clear_bit(pi->port_id, &adapter->open_device_map);
        spin_unlock_irq(&adapter->work_lock);

        if (!(adapter->open_device_map & PORT_MASK))
                cancel_delayed_work_sync(&adapter->adap_check_task);

        if (!adapter->open_device_map)
                cxgb_down(adapter);

        return 0;
}

1264
1265 static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1266 {
1267         struct port_info *pi = netdev_priv(dev);
1268         struct adapter *adapter = pi->adapter;
1269         struct net_device_stats *ns = &pi->netstats;
1270         const struct mac_stats *pstats;
1271
1272         spin_lock(&adapter->stats_lock);
1273         pstats = t3_mac_update_stats(&pi->mac);
1274         spin_unlock(&adapter->stats_lock);
1275
1276         ns->tx_bytes = pstats->tx_octets;
1277         ns->tx_packets = pstats->tx_frames;
1278         ns->rx_bytes = pstats->rx_octets;
1279         ns->rx_packets = pstats->rx_frames;
1280         ns->multicast = pstats->rx_mcast_frames;
1281
1282         ns->tx_errors = pstats->tx_underrun;
1283         ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1284             pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1285             pstats->rx_fifo_ovfl;
1286
1287         /* detailed rx_errors */
1288         ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1289         ns->rx_over_errors = 0;
1290         ns->rx_crc_errors = pstats->rx_fcs_errs;
1291         ns->rx_frame_errors = pstats->rx_symbol_errs;
1292         ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1293         ns->rx_missed_errors = pstats->rx_cong_drops;
1294
1295         /* detailed tx_errors */
1296         ns->tx_aborted_errors = 0;
1297         ns->tx_carrier_errors = 0;
1298         ns->tx_fifo_errors = pstats->tx_underrun;
1299         ns->tx_heartbeat_errors = 0;
1300         ns->tx_window_errors = 0;
1301         return ns;
1302 }
1303
static u32 get_msglevel(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        adapter->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK         ",
        "TxFramesOK         ",
        "TxMulticastFramesOK",
        "TxBroadcastFramesOK",
        "TxPauseFrames      ",
        "TxUnderrun         ",
        "TxExtUnderrun      ",

        "TxFrames64         ",
        "TxFrames65To127    ",
        "TxFrames128To255   ",
        "TxFrames256To511   ",
        "TxFrames512To1023  ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax  ",

        "RxOctetsOK         ",
        "RxFramesOK         ",
        "RxMulticastFramesOK",
        "RxBroadcastFramesOK",
        "RxPauseFrames      ",
        "RxFCSErrors        ",
        "RxSymbolErrors     ",
        "RxShortErrors      ",
        "RxJabberErrors     ",
        "RxLengthErrors     ",
        "RxFIFOoverflow     ",

        "RxFrames64         ",
        "RxFrames65To127    ",
        "RxFrames128To255   ",
        "RxFrames256To511   ",
        "RxFrames512To1023  ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax  ",

        "PhyFIFOErrors      ",
        "TSO                ",
        "VLANextractions    ",
        "VLANinsertions     ",
        "TxCsumOffload      ",
        "RxCsumGood         ",
        "LroAggregated      ",
        "LroFlushed         ",
        "LroNoDesc          ",
        "RxDrops            ",

        "CheckTXEnToggled   ",
        "CheckResets        ",

        "LinkFaults         ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(stats_strings);
        default:
                return -EOPNOTSUPP;
        }
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
        return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
        return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        u32 fw_vers = 0;
        u32 tp_vers = 0;

        spin_lock(&adapter->stats_lock);
        t3_get_fw_version(adapter, &fw_vers);
        t3_get_tp_version(adapter, &tp_vers);
        spin_unlock(&adapter->stats_lock);

        strcpy(info->driver, DRV_NAME);
        strcpy(info->version, DRV_VERSION);
        strcpy(info->bus_info, pci_name(adapter->pdev));
        if (!fw_vers)
                strcpy(info->fw_version, "N/A");
        else {
                snprintf(info->fw_version, sizeof(info->fw_version),
                         "%s %u.%u.%u TP %u.%u.%u",
                         G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
                         G_FW_VERSION_MAJOR(fw_vers),
                         G_FW_VERSION_MINOR(fw_vers),
                         G_FW_VERSION_MICRO(fw_vers),
                         G_TP_VERSION_MAJOR(tp_vers),
                         G_TP_VERSION_MINOR(tp_vers),
                         G_TP_VERSION_MICRO(tp_vers));
        }
}

static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, stats_strings, sizeof(stats_strings));
}

static unsigned long collect_sge_port_stats(struct adapter *adapter,
                                            struct port_info *p, int idx)
{
        int i;
        unsigned long tot = 0;

        for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
                tot += adapter->sge.qs[i].port_stats[idx];
        return tot;
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        const struct mac_stats *s;

        spin_lock(&adapter->stats_lock);
        s = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        *data++ = s->tx_octets;
        *data++ = s->tx_frames;
        *data++ = s->tx_mcast_frames;
        *data++ = s->tx_bcast_frames;
        *data++ = s->tx_pause;
        *data++ = s->tx_underrun;
        *data++ = s->tx_fifo_urun;

        *data++ = s->tx_frames_64;
        *data++ = s->tx_frames_65_127;
        *data++ = s->tx_frames_128_255;
        *data++ = s->tx_frames_256_511;
        *data++ = s->tx_frames_512_1023;
        *data++ = s->tx_frames_1024_1518;
        *data++ = s->tx_frames_1519_max;

        *data++ = s->rx_octets;
        *data++ = s->rx_frames;
        *data++ = s->rx_mcast_frames;
        *data++ = s->rx_bcast_frames;
        *data++ = s->rx_pause;
        *data++ = s->rx_fcs_errs;
        *data++ = s->rx_symbol_errs;
        *data++ = s->rx_short;
        *data++ = s->rx_jabber;
        *data++ = s->rx_too_long;
        *data++ = s->rx_fifo_ovfl;

        *data++ = s->rx_frames_64;
        *data++ = s->rx_frames_65_127;
        *data++ = s->rx_frames_128_255;
        *data++ = s->rx_frames_256_511;
        *data++ = s->rx_frames_512_1023;
        *data++ = s->rx_frames_1024_1518;
        *data++ = s->rx_frames_1519_max;

        *data++ = pi->phy.fifo_errors;

        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
        *data++ = 0;
        *data++ = 0;
        *data++ = 0;
        *data++ = s->rx_cong_drops;

        *data++ = s->num_toggled;
        *data++ = s->num_resets;

        *data++ = s->link_faults;
}

1508 static inline void reg_block_dump(struct adapter *ap, void *buf,
1509                                   unsigned int start, unsigned int end)
1510 {
1511         u32 *p = buf + start;
1512
1513         for (; start <= end; start += sizeof(u32))
1514                 *p++ = t3_read_reg(ap, start);
1515 }
1516
1517 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1518                      void *buf)
1519 {
1520         struct port_info *pi = netdev_priv(dev);
1521         struct adapter *ap = pi->adapter;
1522
1523         /*
1524          * Version scheme:
1525          * bits 0..9: chip version
1526          * bits 10..15: chip revision
1527          * bit 31: set for PCIe cards
1528          */
1529         regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1530
1531         /*
1532          * We skip the MAC statistics registers because they are clear-on-read.
1533          * Also reading multi-register stats would need to synchronize with the
1534          * periodic mac stats accumulation.  Hard to justify the complexity.
1535          */
1536         memset(buf, 0, T3_REGMAP_SIZE);
1537         reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1538         reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1539         reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1540         reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1541         reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1542         reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1543                        XGM_REG(A_XGM_SERDES_STAT3, 1));
1544         reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1545                        XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1546 }
1547
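/* ethtool nway_reset handler: restart autonegotiation if it is enabled */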
1548 static int restart_autoneg(struct net_device *dev)
1549 {
1550         struct port_info *p = netdev_priv(dev);
1551
1552         if (!netif_running(dev))
1553                 return -EAGAIN;
1554         if (p->link_config.autoneg != AUTONEG_ENABLE)
1555                 return -EINVAL;
1556         p->phy.ops->autoneg_restart(&p->phy);
1557         return 0;
1558 }
1559
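/*
 * ethtool phys_id handler: identify the physical port by blinking the LED
 * attached to GPIO0 for @data seconds (2 seconds if @data is 0).
 */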
1560 static int cxgb3_phys_id(struct net_device *dev, u32 data)
1561 {
1562         struct port_info *pi = netdev_priv(dev);
1563         struct adapter *adapter = pi->adapter;
1564         int i;
1565
1566         if (data == 0)
1567                 data = 2;
1568
1569         for (i = 0; i < data * 2; i++) {
1570                 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1571                                  (i & 1) ? F_GPIO0_OUT_VAL : 0);
1572                 if (msleep_interruptible(500))
1573                         break;
1574         }
1575         t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1576                          F_GPIO0_OUT_VAL);
1577         return 0;
1578 }
1579
1580 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1581 {
1582         struct port_info *p = netdev_priv(dev);
1583
1584         cmd->supported = p->link_config.supported;
1585         cmd->advertising = p->link_config.advertising;
1586
1587         if (netif_carrier_ok(dev)) {
1588                 cmd->speed = p->link_config.speed;
1589                 cmd->duplex = p->link_config.duplex;
1590         } else {
1591                 cmd->speed = -1;
1592                 cmd->duplex = -1;
1593         }
1594
1595         cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1596         cmd->phy_address = p->phy.addr;
1597         cmd->transceiver = XCVR_EXTERNAL;
1598         cmd->autoneg = p->link_config.autoneg;
1599         cmd->maxtxpkt = 0;
1600         cmd->maxrxpkt = 0;
1601         return 0;
1602 }
1603
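/* Map a fixed speed/duplex pair onto the corresponding SUPPORTED_* bit */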
1604 static int speed_duplex_to_caps(int speed, int duplex)
1605 {
1606         int cap = 0;
1607
1608         switch (speed) {
1609         case SPEED_10:
1610                 if (duplex == DUPLEX_FULL)
1611                         cap = SUPPORTED_10baseT_Full;
1612                 else
1613                         cap = SUPPORTED_10baseT_Half;
1614                 break;
1615         case SPEED_100:
1616                 if (duplex == DUPLEX_FULL)
1617                         cap = SUPPORTED_100baseT_Full;
1618                 else
1619                         cap = SUPPORTED_100baseT_Half;
1620                 break;
1621         case SPEED_1000:
1622                 if (duplex == DUPLEX_FULL)
1623                         cap = SUPPORTED_1000baseT_Full;
1624                 else
1625                         cap = SUPPORTED_1000baseT_Half;
1626                 break;
1627         case SPEED_10000:
1628                 if (duplex == DUPLEX_FULL)
1629                         cap = SUPPORTED_10000baseT_Full;
1630         }
1631         return cap;
1632 }
1633
1634 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1635                       ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1636                       ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1637                       ADVERTISED_10000baseT_Full)
1638
1639 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1640 {
1641         struct port_info *p = netdev_priv(dev);
1642         struct link_config *lc = &p->link_config;
1643
1644         if (!(lc->supported & SUPPORTED_Autoneg)) {
1645                 /*
1646                  * PHY offers a single speed/duplex.  See if that's what's
1647                  * being requested.
1648                  */
1649                 if (cmd->autoneg == AUTONEG_DISABLE) {
1650                         int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1651                         if (lc->supported & cap)
1652                                 return 0;
1653                 }
1654                 return -EINVAL;
1655         }
1656
1657         if (cmd->autoneg == AUTONEG_DISABLE) {
1658                 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1659
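                /* 1Gb/s can only be autonegotiated, never forced */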
1660                 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1661                         return -EINVAL;
1662                 lc->requested_speed = cmd->speed;
1663                 lc->requested_duplex = cmd->duplex;
1664                 lc->advertising = 0;
1665         } else {
1666                 cmd->advertising &= ADVERTISED_MASK;
1667                 cmd->advertising &= lc->supported;
1668                 if (!cmd->advertising)
1669                         return -EINVAL;
1670                 lc->requested_speed = SPEED_INVALID;
1671                 lc->requested_duplex = DUPLEX_INVALID;
1672                 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1673         }
1674         lc->autoneg = cmd->autoneg;
1675         if (netif_running(dev))
1676                 t3_link_start(&p->phy, &p->mac, lc);
1677         return 0;
1678 }
1679
1680 static void get_pauseparam(struct net_device *dev,
1681                            struct ethtool_pauseparam *epause)
1682 {
1683         struct port_info *p = netdev_priv(dev);
1684
1685         epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1686         epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1687         epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1688 }
1689
1690 static int set_pauseparam(struct net_device *dev,
1691                           struct ethtool_pauseparam *epause)
1692 {
1693         struct port_info *p = netdev_priv(dev);
1694         struct link_config *lc = &p->link_config;
1695
1696         if (epause->autoneg == AUTONEG_DISABLE)
1697                 lc->requested_fc = 0;
1698         else if (lc->supported & SUPPORTED_Autoneg)
1699                 lc->requested_fc = PAUSE_AUTONEG;
1700         else
1701                 return -EINVAL;
1702
1703         if (epause->rx_pause)
1704                 lc->requested_fc |= PAUSE_RX;
1705         if (epause->tx_pause)
1706                 lc->requested_fc |= PAUSE_TX;
1707         if (lc->autoneg == AUTONEG_ENABLE) {
1708                 if (netif_running(dev))
1709                         t3_link_start(&p->phy, &p->mac, lc);
1710         } else {
1711                 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1712                 if (netif_running(dev))
1713                         t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1714         }
1715         return 0;
1716 }
1717
1718 static u32 get_rx_csum(struct net_device *dev)
1719 {
1720         struct port_info *p = netdev_priv(dev);
1721
1722         return p->rx_offload & T3_RX_CSUM;
1723 }
1724
1725 static int set_rx_csum(struct net_device *dev, u32 data)
1726 {
1727         struct port_info *p = netdev_priv(dev);
1728
1729         if (data) {
1730                 p->rx_offload |= T3_RX_CSUM;
1731         } else {
1732                 int i;
1733
1734                 p->rx_offload &= ~(T3_RX_CSUM | T3_LRO);
1735                 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
1736                         set_qset_lro(dev, i, 0);
1737         }
1738         return 0;
1739 }
1740
1741 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1742 {
1743         struct port_info *pi = netdev_priv(dev);
1744         struct adapter *adapter = pi->adapter;
1745         const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1746
1747         e->rx_max_pending = MAX_RX_BUFFERS;
1748         e->rx_mini_max_pending = 0;
1749         e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1750         e->tx_max_pending = MAX_TXQ_ENTRIES;
1751
1752         e->rx_pending = q->fl_size;
1753         e->rx_mini_pending = q->rspq_size;
1754         e->rx_jumbo_pending = q->jumbo_size;
1755         e->tx_pending = q->txq_size[0];
1756 }
1757
1758 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1759 {
1760         struct port_info *pi = netdev_priv(dev);
1761         struct adapter *adapter = pi->adapter;
1762         struct qset_params *q;
1763         int i;
1764
1765         if (e->rx_pending > MAX_RX_BUFFERS ||
1766             e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1767             e->tx_pending > MAX_TXQ_ENTRIES ||
1768             e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1769             e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1770             e->rx_pending < MIN_FL_ENTRIES ||
1771             e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1772             e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1773                 return -EINVAL;
1774
1775         if (adapter->flags & FULL_INIT_DONE)
1776                 return -EBUSY;
1777
1778         q = &adapter->params.sge.qset[pi->first_qset];
1779         for (i = 0; i < pi->nqsets; ++i, ++q) {
1780                 q->rspq_size = e->rx_mini_pending;
1781                 q->fl_size = e->rx_pending;
1782                 q->jumbo_size = e->rx_jumbo_pending;
1783                 q->txq_size[0] = e->tx_pending;
1784                 q->txq_size[1] = e->tx_pending;
1785                 q->txq_size[2] = e->tx_pending;
1786         }
1787         return 0;
1788 }
1789
1790 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1791 {
1792         struct port_info *pi = netdev_priv(dev);
1793         struct adapter *adapter = pi->adapter;
1794         struct qset_params *qsp = &adapter->params.sge.qset[0];
1795         struct sge_qset *qs = &adapter->sge.qs[0];
1796
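        /* the SGE holdoff timer counts in units of 100 ns, hence the 10x */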
1797         if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1798                 return -EINVAL;
1799
1800         qsp->coalesce_usecs = c->rx_coalesce_usecs;
1801         t3_update_qset_coalesce(qs, qsp);
1802         return 0;
1803 }
1804
1805 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1806 {
1807         struct port_info *pi = netdev_priv(dev);
1808         struct adapter *adapter = pi->adapter;
1809         struct qset_params *q = adapter->params.sge.qset;
1810
1811         c->rx_coalesce_usecs = q->coalesce_usecs;
1812         return 0;
1813 }
1814
1815 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
                      u8 *data)
1817 {
1818         struct port_info *pi = netdev_priv(dev);
1819         struct adapter *adapter = pi->adapter;
1820         int i, err = 0;
1821
1822         u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1823         if (!buf)
1824                 return -ENOMEM;
1825
1826         e->magic = EEPROM_MAGIC;
1827         for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
                err = t3_seeprom_read(adapter, i, (__le32 *)&buf[i]);
1829
1830         if (!err)
1831                 memcpy(data, buf + e->offset, e->len);
1832         kfree(buf);
1833         return err;
1834 }
1835
1836 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
                      u8 *data)
1838 {
1839         struct port_info *pi = netdev_priv(dev);
1840         struct adapter *adapter = pi->adapter;
1841         u32 aligned_offset, aligned_len;
1842         __le32 *p;
1843         u8 *buf;
1844         int err;
1845
1846         if (eeprom->magic != EEPROM_MAGIC)
1847                 return -EINVAL;
1848
1849         aligned_offset = eeprom->offset & ~3;
1850         aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
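        /* e.g. offset 5, len 6 yields aligned_offset 4, aligned_len 8 */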
1851
1852         if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1853                 buf = kmalloc(aligned_len, GFP_KERNEL);
1854                 if (!buf)
1855                         return -ENOMEM;
                err = t3_seeprom_read(adapter, aligned_offset, (__le32 *)buf);
                if (!err && aligned_len > 4)
                        err = t3_seeprom_read(adapter,
                                              aligned_offset + aligned_len - 4,
                                              (__le32 *)&buf[aligned_len - 4]);
1861                 if (err)
1862                         goto out;
1863                 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1864         } else
1865                 buf = data;
1866
1867         err = t3_seeprom_wp(adapter, 0);
1868         if (err)
1869                 goto out;
1870
        for (p = (__le32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
1872                 err = t3_seeprom_write(adapter, aligned_offset, *p);
1873                 aligned_offset += 4;
1874         }
1875
1876         if (!err)
1877                 err = t3_seeprom_wp(adapter, 1);
1878 out:
1879         if (buf != data)
1880                 kfree(buf);
1881         return err;
1882 }
1883
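/* Wake-on-LAN is not supported; report everything as disabled */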
1884 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1885 {
1886         wol->supported = 0;
1887         wol->wolopts = 0;
1888         memset(&wol->sopass, 0, sizeof(wol->sopass));
1889 }
1890
1891 static const struct ethtool_ops cxgb_ethtool_ops = {
1892         .get_settings = get_settings,
1893         .set_settings = set_settings,
1894         .get_drvinfo = get_drvinfo,
1895         .get_msglevel = get_msglevel,
1896         .set_msglevel = set_msglevel,
1897         .get_ringparam = get_sge_param,
1898         .set_ringparam = set_sge_param,
1899         .get_coalesce = get_coalesce,
1900         .set_coalesce = set_coalesce,
1901         .get_eeprom_len = get_eeprom_len,
1902         .get_eeprom = get_eeprom,
1903         .set_eeprom = set_eeprom,
1904         .get_pauseparam = get_pauseparam,
1905         .set_pauseparam = set_pauseparam,
1906         .get_rx_csum = get_rx_csum,
1907         .set_rx_csum = set_rx_csum,
1908         .set_tx_csum = ethtool_op_set_tx_csum,
1909         .set_sg = ethtool_op_set_sg,
1910         .get_link = ethtool_op_get_link,
1911         .get_strings = get_strings,
1912         .phys_id = cxgb3_phys_id,
1913         .nway_reset = restart_autoneg,
1914         .get_sset_count = get_sset_count,
1915         .get_ethtool_stats = get_stats,
1916         .get_regs_len = get_regs_len,
1917         .get_regs = get_regs,
1918         .get_wol = get_wol,
1919         .set_tso = ethtool_op_set_tso,
1920 };
1921
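/*
 * Negative values denote "parameter not supplied" and are accepted so that
 * callers may leave individual fields of a request unchanged.
 */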
1922 static int in_range(int val, int lo, int hi)
1923 {
1924         return val < 0 || (val <= hi && val >= lo);
1925 }
1926
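/*
 * Handler for the SIOCCHIOCTL private ioctl: decode the embedded Chelsio
 * command word and dispatch to the appropriate sub-handler.
 */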
1927 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1928 {
1929         struct port_info *pi = netdev_priv(dev);
1930         struct adapter *adapter = pi->adapter;
1931         u32 cmd;
1932         int ret;
1933
1934         if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1935                 return -EFAULT;
1936
1937         switch (cmd) {
1938         case CHELSIO_SET_QSET_PARAMS:{
1939                 int i;
1940                 struct qset_params *q;
1941                 struct ch_qset_params t;
1942                 int q1 = pi->first_qset;
1943                 int nqsets = pi->nqsets;
1944
1945                 if (!capable(CAP_NET_ADMIN))
1946                         return -EPERM;
1947                 if (copy_from_user(&t, useraddr, sizeof(t)))
1948                         return -EFAULT;
1949                 if (t.qset_idx >= SGE_QSETS)
1950                         return -EINVAL;
                if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
                    !in_range(t.cong_thres, 0, 255) ||
                    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
                              MAX_TXQ_ENTRIES) ||
                    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
                              MAX_TXQ_ENTRIES) ||
                    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
                              MAX_CTRL_TXQ_ENTRIES) ||
                    !in_range(t.fl_size[0], MIN_FL_ENTRIES, MAX_RX_BUFFERS) ||
                    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
                              MAX_RX_JUMBO_BUFFERS) ||
                    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES, MAX_RSPQ_ENTRIES))
                        return -EINVAL;
1966
1967                 if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
1968                         for_each_port(adapter, i) {
1969                                 pi = adap2pinfo(adapter, i);
1970                                 if (t.qset_idx >= pi->first_qset &&
1971                                     t.qset_idx < pi->first_qset + pi->nqsets &&
1972                                     !(pi->rx_offload & T3_RX_CSUM))
1973                                         return -EINVAL;
1974                         }
1975
                if ((adapter->flags & FULL_INIT_DONE) &&
                    (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
                     t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
                     t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
                     t.polling >= 0 || t.cong_thres >= 0))
                        return -EBUSY;
1982
1983                 /* Allow setting of any available qset when offload enabled */
1984                 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
1985                         q1 = 0;
1986                         for_each_port(adapter, i) {
1987                                 pi = adap2pinfo(adapter, i);
1988                                 nqsets += pi->first_qset + pi->nqsets;
1989                         }
1990                 }
1991
1992                 if (t.qset_idx < q1)
1993                         return -EINVAL;
1994                 if (t.qset_idx > q1 + nqsets - 1)
1995                         return -EINVAL;
1996
1997                 q = &adapter->params.sge.qset[t.qset_idx];
1998
1999                 if (t.rspq_size >= 0)
2000                         q->rspq_size = t.rspq_size;
2001                 if (t.fl_size[0] >= 0)
2002                         q->fl_size = t.fl_size[0];
2003                 if (t.fl_size[1] >= 0)
2004                         q->jumbo_size = t.fl_size[1];
2005                 if (t.txq_size[0] >= 0)
2006                         q->txq_size[0] = t.txq_size[0];
2007                 if (t.txq_size[1] >= 0)
2008                         q->txq_size[1] = t.txq_size[1];
2009                 if (t.txq_size[2] >= 0)
2010                         q->txq_size[2] = t.txq_size[2];
2011                 if (t.cong_thres >= 0)
2012                         q->cong_thres = t.cong_thres;
2013                 if (t.intr_lat >= 0) {
                        struct sge_qset *qs = &adapter->sge.qs[t.qset_idx];
2016
2017                         q->coalesce_usecs = t.intr_lat;
2018                         t3_update_qset_coalesce(qs, q);
2019                 }
2020                 if (t.polling >= 0) {
2021                         if (adapter->flags & USING_MSIX)
2022                                 q->polling = t.polling;
2023                         else {
2024                                 /* No polling with INTx for T3A */
2025                                 if (adapter->params.rev == 0 &&
2026                                         !(adapter->flags & USING_MSI))
2027                                         t.polling = 0;
2028
2029                                 for (i = 0; i < SGE_QSETS; i++) {
                                        q = &adapter->params.sge.qset[i];
2032                                         q->polling = t.polling;
2033                                 }
2034                         }
2035                 }
2036                 if (t.lro >= 0)
2037                         set_qset_lro(dev, t.qset_idx, t.lro);
2038
2039                 break;
2040         }
2041         case CHELSIO_GET_QSET_PARAMS:{
2042                 struct qset_params *q;
2043                 struct ch_qset_params t;
2044                 int q1 = pi->first_qset;
2045                 int nqsets = pi->nqsets;
2046                 int i;
2047
2048                 if (copy_from_user(&t, useraddr, sizeof(t)))
2049                         return -EFAULT;
2050
2051                 /* Display qsets for all ports when offload enabled */
2052                 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2053                         q1 = 0;
2054                         for_each_port(adapter, i) {
2055                                 pi = adap2pinfo(adapter, i);
2056                                 nqsets = pi->first_qset + pi->nqsets;
2057                         }
2058                 }
2059
2060                 if (t.qset_idx >= nqsets)
2061                         return -EINVAL;
2062
2063                 q = &adapter->params.sge.qset[q1 + t.qset_idx];
2064                 t.rspq_size = q->rspq_size;
2065                 t.txq_size[0] = q->txq_size[0];
2066                 t.txq_size[1] = q->txq_size[1];
2067                 t.txq_size[2] = q->txq_size[2];
2068                 t.fl_size[0] = q->fl_size;
2069                 t.fl_size[1] = q->jumbo_size;
2070                 t.polling = q->polling;
2071                 t.lro = q->lro;
2072                 t.intr_lat = q->coalesce_usecs;
2073                 t.cong_thres = q->cong_thres;
2074                 t.qnum = q1;
2075
2076                 if (adapter->flags & USING_MSIX)
2077                         t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2078                 else
2079                         t.vector = adapter->pdev->irq;
2080
2081                 if (copy_to_user(useraddr, &t, sizeof(t)))
2082                         return -EFAULT;
2083                 break;
2084         }
2085         case CHELSIO_SET_QSET_NUM:{
2086                 struct ch_reg edata;
2087                 unsigned int i, first_qset = 0, other_qsets = 0;
2088
2089                 if (!capable(CAP_NET_ADMIN))
2090                         return -EPERM;
2091                 if (adapter->flags & FULL_INIT_DONE)
2092                         return -EBUSY;
2093                 if (copy_from_user(&edata, useraddr, sizeof(edata)))
2094                         return -EFAULT;
                if (edata.val < 1 ||
                    (edata.val > 1 && !(adapter->flags & USING_MSIX)))
                        return -EINVAL;
2098
2099                 for_each_port(adapter, i)
2100                         if (adapter->port[i] && adapter->port[i] != dev)
2101                                 other_qsets += adap2pinfo(adapter, i)->nqsets;
2102
2103                 if (edata.val + other_qsets > SGE_QSETS)
2104                         return -EINVAL;
2105
2106                 pi->nqsets = edata.val;
2107
2108                 for_each_port(adapter, i)
2109                         if (adapter->port[i]) {
2110                                 pi = adap2pinfo(adapter, i);
2111                                 pi->first_qset = first_qset;
2112                                 first_qset += pi->nqsets;
2113                         }
2114                 break;
2115         }
2116         case CHELSIO_GET_QSET_NUM:{
2117                 struct ch_reg edata;
2118
2119                 edata.cmd = CHELSIO_GET_QSET_NUM;
2120                 edata.val = pi->nqsets;
2121                 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2122                         return -EFAULT;
2123                 break;
2124         }
2125         case CHELSIO_LOAD_FW:{
2126                 u8 *fw_data;
2127                 struct ch_mem_range t;
2128
2129                 if (!capable(CAP_SYS_RAWIO))
2130                         return -EPERM;
2131                 if (copy_from_user(&t, useraddr, sizeof(t)))
2132                         return -EFAULT;
                /* XXX: t.len is never validated; an oversized request simply
                 * fails the kmalloc below. */
2134                 fw_data = kmalloc(t.len, GFP_KERNEL);
2135                 if (!fw_data)
2136                         return -ENOMEM;
2137
                if (copy_from_user(fw_data, useraddr + sizeof(t), t.len)) {
2140                         kfree(fw_data);
2141                         return -EFAULT;
2142                 }
2143
2144                 ret = t3_load_fw(adapter, fw_data, t.len);
2145                 kfree(fw_data);
2146                 if (ret)
2147                         return ret;
2148                 break;
2149         }
2150         case CHELSIO_SETMTUTAB:{
2151                 struct ch_mtus m;
2152                 int i;
2153
2154                 if (!is_offload(adapter))
2155                         return -EOPNOTSUPP;
2156                 if (!capable(CAP_NET_ADMIN))
2157                         return -EPERM;
2158                 if (offload_running(adapter))
2159                         return -EBUSY;
2160                 if (copy_from_user(&m, useraddr, sizeof(m)))
2161                         return -EFAULT;
2162                 if (m.nmtus != NMTUS)
2163                         return -EINVAL;
2164                 if (m.mtus[0] < 81)     /* accommodate SACK */
2165                         return -EINVAL;
2166
2167                 /* MTUs must be in ascending order */
2168                 for (i = 1; i < NMTUS; ++i)
2169                         if (m.mtus[i] < m.mtus[i - 1])
2170                                 return -EINVAL;
2171
2172                 memcpy(adapter->params.mtus, m.mtus,
2173                         sizeof(adapter->params.mtus));
2174                 break;
2175         }
2176         case CHELSIO_GET_PM:{
2177                 struct tp_params *p = &adapter->params.tp;
2178                 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2179
2180                 if (!is_offload(adapter))
2181                         return -EOPNOTSUPP;
2182                 m.tx_pg_sz = p->tx_pg_size;
2183                 m.tx_num_pg = p->tx_num_pgs;
2184                 m.rx_pg_sz = p->rx_pg_size;
2185                 m.rx_num_pg = p->rx_num_pgs;
2186                 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2187                 if (copy_to_user(useraddr, &m, sizeof(m)))
2188                         return -EFAULT;
2189                 break;
2190         }
2191         case CHELSIO_SET_PM:{
2192                 struct ch_pm m;
2193                 struct tp_params *p = &adapter->params.tp;
2194
2195                 if (!is_offload(adapter))
2196                         return -EOPNOTSUPP;
2197                 if (!capable(CAP_NET_ADMIN))
2198                         return -EPERM;
2199                 if (adapter->flags & FULL_INIT_DONE)
2200                         return -EBUSY;
2201                 if (copy_from_user(&m, useraddr, sizeof(m)))
2202                         return -EFAULT;
                if (!is_power_of_2(m.rx_pg_sz) || !is_power_of_2(m.tx_pg_sz))
                        return -EINVAL; /* not a power of 2 */
2206                 if (!(m.rx_pg_sz & 0x14000))
2207                         return -EINVAL; /* not 16KB or 64KB */
2208                 if (!(m.tx_pg_sz & 0x1554000))
2209                         return -EINVAL;
2210                 if (m.tx_num_pg == -1)
2211                         m.tx_num_pg = p->tx_num_pgs;
2212                 if (m.rx_num_pg == -1)
2213                         m.rx_num_pg = p->rx_num_pgs;
2214                 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2215                         return -EINVAL;
2216                 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2217                         m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2218                         return -EINVAL;
2219                 p->rx_pg_size = m.rx_pg_sz;
2220                 p->tx_pg_size = m.tx_pg_sz;
2221                 p->rx_num_pgs = m.rx_num_pg;
2222                 p->tx_num_pgs = m.tx_num_pg;
2223                 break;
2224         }
2225         case CHELSIO_GET_MEM:{
2226                 struct ch_mem_range t;
2227                 struct mc7 *mem;
2228                 u64 buf[32];
2229
2230                 if (!is_offload(adapter))
2231                         return -EOPNOTSUPP;
2232                 if (!(adapter->flags & FULL_INIT_DONE))
2233                         return -EIO;    /* need the memory controllers */
2234                 if (copy_from_user(&t, useraddr, sizeof(t)))
2235                         return -EFAULT;
2236                 if ((t.addr & 7) || (t.len & 7))
2237                         return -EINVAL;
2238                 if (t.mem_id == MEM_CM)
2239                         mem = &adapter->cm;
2240                 else if (t.mem_id == MEM_PMRX)
2241                         mem = &adapter->pmrx;
2242                 else if (t.mem_id == MEM_PMTX)
2243                         mem = &adapter->pmtx;
2244                 else
2245                         return -EINVAL;
2246
2247                 /*
2248                  * Version scheme:
2249                  * bits 0..9: chip version
2250                  * bits 10..15: chip revision
2251                  */
2252                 t.version = 3 | (adapter->params.rev << 10);
2253                 if (copy_to_user(useraddr, &t, sizeof(t)))
2254                         return -EFAULT;
2255
2256                 /*
2257                  * Read 256 bytes at a time as len can be large and we don't
2258                  * want to use huge intermediate buffers.
2259                  */
2260                 useraddr += sizeof(t);  /* advance to start of buffer */
2261                 while (t.len) {
                        unsigned int chunk = min_t(unsigned int, t.len,
                                                   sizeof(buf));

                        ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, buf);
2268                         if (ret)
2269                                 return ret;
2270                         if (copy_to_user(useraddr, buf, chunk))
2271                                 return -EFAULT;
2272                         useraddr += chunk;
2273                         t.addr += chunk;
2274                         t.len -= chunk;
2275                 }
2276                 break;
2277         }
2278         case CHELSIO_SET_TRACE_FILTER:{
2279                 struct ch_trace t;
2280                 const struct trace_params *tp;
2281
2282                 if (!capable(CAP_NET_ADMIN))
2283                         return -EPERM;
2284                 if (!offload_running(adapter))
2285                         return -EAGAIN;
2286                 if (copy_from_user(&t, useraddr, sizeof(t)))
2287                         return -EFAULT;
2288
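                /* the ch_trace fields from sip onwards mirror trace_params */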
2289                 tp = (const struct trace_params *)&t.sip;
2290                 if (t.config_tx)
2291                         t3_config_trace_filter(adapter, tp, 0,
2292                                                 t.invert_match,
2293                                                 t.trace_tx);
2294                 if (t.config_rx)
2295                         t3_config_trace_filter(adapter, tp, 1,
2296                                                 t.invert_match,
2297                                                 t.trace_rx);
2298                 break;
2299         }
2300         default:
2301                 return -EOPNOTSUPP;
2302         }
2303         return 0;
2304 }
2305
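/* Standard MII ioctls (SIOC[GS]MII*) plus the Chelsio extension ioctl */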
2306 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2307 {
2308         struct mii_ioctl_data *data = if_mii(req);
2309         struct port_info *pi = netdev_priv(dev);
2310         struct adapter *adapter = pi->adapter;
2311         int ret, mmd;
2312
2313         switch (cmd) {
2314         case SIOCGMIIPHY:
2315                 data->phy_id = pi->phy.addr;
2316                 /* FALLTHRU */
2317         case SIOCGMIIREG:{
2318                 u32 val;
2319                 struct cphy *phy = &pi->phy;
2320
2321                 if (!phy->mdio_read)
2322                         return -EOPNOTSUPP;
2323                 if (is_10G(adapter)) {
2324                         mmd = data->phy_id >> 8;
2325                         if (!mmd)
2326                                 mmd = MDIO_DEV_PCS;
2327                         else if (mmd > MDIO_DEV_VEND2)
2328                                 return -EINVAL;
2329
                        ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
                                             mmd, data->reg_num, &val);
                } else
                        ret = phy->mdio_read(adapter, data->phy_id & 0x1f, 0,
                                             data->reg_num & 0x1f, &val);
2338                 if (!ret)
2339                         data->val_out = val;
2340                 break;
2341         }
2342         case SIOCSMIIREG:{
2343                 struct cphy *phy = &pi->phy;
2344
2345                 if (!capable(CAP_NET_ADMIN))
2346                         return -EPERM;
2347                 if (!phy->mdio_write)
2348                         return -EOPNOTSUPP;
2349                 if (is_10G(adapter)) {
2350                         mmd = data->phy_id >> 8;
2351                         if (!mmd)
2352                                 mmd = MDIO_DEV_PCS;
2353                         else if (mmd > MDIO_DEV_VEND2)
2354                                 return -EINVAL;
2355
                        ret = phy->mdio_write(adapter, data->phy_id & 0x1f,
                                              mmd, data->reg_num,
                                              data->val_in);
                } else
                        ret = phy->mdio_write(adapter, data->phy_id & 0x1f, 0,
                                              data->reg_num & 0x1f,
                                              data->val_in);
2367                 break;
2368         }
2369         case SIOCCHIOCTL:
2370                 return cxgb_extension_ioctl(dev, req->ifr_data);
2371         default:
2372                 return -EOPNOTSUPP;
2373         }
2374         return ret;
2375 }
2376
2377 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2378 {
2379         struct port_info *pi = netdev_priv(dev);
2380         struct adapter *adapter = pi->adapter;
2381         int ret;
2382
2383         if (new_mtu < 81)       /* accommodate SACK */
2384                 return -EINVAL;
2385         if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2386                 return ret;
2387         dev->mtu = new_mtu;
2388         init_port_mtus(adapter);
2389         if (adapter->params.rev == 0 && offload_running(adapter))
2390                 t3_load_mtus(adapter, adapter->params.mtus,
2391                              adapter->params.a_wnd, adapter->params.b_wnd,
2392                              adapter->port[0]->mtu);
2393         return 0;
2394 }
2395
2396 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2397 {
2398         struct port_info *pi = netdev_priv(dev);
2399         struct adapter *adapter = pi->adapter;
2400         struct sockaddr *addr = p;
2401
2402         if (!is_valid_ether_addr(addr->sa_data))
2403                 return -EINVAL;
2404
2405         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2406         t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2407         if (offload_running(adapter))
2408                 write_smt_entry(adapter, pi->port_id);
2409         return 0;
2410 }
2411
2412 /**
2413  * t3_synchronize_rx - wait for current Rx processing on a port to complete
2414  * @adap: the adapter
2415  * @p: the port
2416  *
2417  * Ensures that current Rx processing on any of the queues associated with
2418  * the given port completes before returning.  We do this by acquiring and
2419  * releasing the locks of the response queues associated with the port.
2420  */
2421 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2422 {
2423         int i;
2424
2425         for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
2426                 struct sge_rspq *q = &adap->sge.qs[i].rspq;
2427
2428                 spin_lock_irq(&q->lock);
2429                 spin_unlock_irq(&q->lock);
2430         }
2431 }
2432
2433 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2434 {
2435         struct port_info *pi = netdev_priv(dev);
2436         struct adapter *adapter = pi->adapter;
2437
2438         pi->vlan_grp = grp;
2439         if (adapter->params.rev > 0)
2440                 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2441         else {
2442                 /* single control for all ports */
2443                 unsigned int i, have_vlans = 0;
                for_each_port(adapter, i)
                        have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2446
2447                 t3_set_vlan_accel(adapter, 1, have_vlans);
2448         }
2449         t3_synchronize_rx(adapter, pi);
2450 }
2451
2452 #ifdef CONFIG_NET_POLL_CONTROLLER
2453 static void cxgb_netpoll(struct net_device *dev)
2454 {
2455         struct port_info *pi = netdev_priv(dev);
2456         struct adapter *adapter = pi->adapter;
2457         int qidx;
2458
2459         for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2460                 struct sge_qset *qs = &adapter->sge.qs[qidx];
2461                 void *source;
2462
2463                 if (adapter->flags & USING_MSIX)
2464                         source = qs;
2465                 else
2466                         source = adapter;
2467
2468                 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2469         }
2470 }
2471 #endif
2472
2473 /*
2474  * Periodic accumulation of MAC statistics.
2475  */
2476 static void mac_stats_update(struct adapter *adapter)
2477 {
2478         int i;
2479
2480         for_each_port(adapter, i) {
2481                 struct net_device *dev = adapter->port[i];
2482                 struct port_info *p = netdev_priv(dev);
2483
2484                 if (netif_running(dev)) {
2485                         spin_lock(&adapter->stats_lock);
2486                         t3_mac_update_stats(&p->mac);
2487                         spin_unlock(&adapter->stats_lock);
2488                 }
2489         }
2490 }
2491
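/*
 * Poll link state on ports whose PHYs lack a usable interrupt pin and
 * service any link-fault events noted by the interrupt handler.
 */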
2492 static void check_link_status(struct adapter *adapter)
2493 {
2494         int i;
2495
2496         for_each_port(adapter, i) {
2497                 struct net_device *dev = adapter->port[i];
2498                 struct port_info *p = netdev_priv(dev);
2499                 int link_fault;
2500
2501                 spin_lock_irq(&adapter->work_lock);
2502                 link_fault = p->link_fault;
2503                 spin_unlock_irq(&adapter->work_lock);
2504
2505                 if (link_fault) {
2506                         t3_link_fault(adapter, i);
2507                         continue;
2508                 }
2509
2510                 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
2511                         t3_xgm_intr_disable(adapter, i);
2512                         t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2513
2514                         t3_link_changed(adapter, i);
2515                         t3_xgm_intr_enable(adapter, i);
2516                 }
2517         }
2518 }
2519
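/*
 * T3B2 MAC watchdog: check each running port's MAC and, depending on the
 * watchdog verdict, either count a TX-path toggle or fully reinitialize the
 * MAC and restart the link.
 */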
2520 static void check_t3b2_mac(struct adapter *adapter)
2521 {
2522         int i;
2523
2524         if (!rtnl_trylock())    /* synchronize with ifdown */
2525                 return;
2526
2527         for_each_port(adapter, i) {
2528                 struct net_device *dev = adapter->port[i];
2529                 struct port_info *p = netdev_priv(dev);
2530                 int status;
2531
2532                 if (!netif_running(dev))
2533                         continue;
2534
                status = 0;
                if (netif_carrier_ok(dev))
                        status = t3b2_mac_watchdog_task(&p->mac);
2538                 if (status == 1)
2539                         p->mac.stats.num_toggled++;
2540                 else if (status == 2) {
2541                         struct cmac *mac = &p->mac;
2542
2543                         t3_mac_set_mtu(mac, dev->mtu);
2544                         t3_mac_set_address(mac, 0, dev->dev_addr);
2545                         cxgb_set_rxmode(dev);
2546                         t3_link_start(&p->phy, mac, &p->link_config);
2547                         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2548                         t3_port_intr_enable(adapter, p->port_id);
2549                         p->mac.stats.num_resets++;
2550                 }
2551         }
2552         rtnl_unlock();
2553 }
2554
2556 static void t3_adap_check_task(struct work_struct *work)
2557 {
2558         struct adapter *adapter = container_of(work, struct adapter,
2559                                                adap_check_task.work);
2560         const struct adapter_params *p = &adapter->params;
2561         int port;
2562         unsigned int v, status, reset;
2563
2564         adapter->check_task_cnt++;
2565
2566         check_link_status(adapter);
2567
2568         /* Accumulate MAC stats if needed */
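        /* linkpoll_period is in units of 0.1 s */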
2569         if (!p->linkpoll_period ||
2570             (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2571             p->stats_update_period) {
2572                 mac_stats_update(adapter);
2573                 adapter->check_task_cnt = 0;
2574         }
2575
2576         if (p->rev == T3_REV_B2)
2577                 check_t3b2_mac(adapter);
2578
        /*
         * Scan the XGMACs for conditions which we want to monitor by periodic
         * polling rather than via interrupts: these would otherwise flood the
         * system with interrupts, and we only really need to know that they
         * are "happening".  Count each detection of a condition and then
         * clear it, ready for the next polling pass.
         */
2587         for_each_port(adapter, port) {
2588                 struct cmac *mac =  &adap2pinfo(adapter, port)->mac;
2589                 u32 cause;
2590
2591                 cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
2592                 reset = 0;
2593                 if (cause & F_RXFIFO_OVERFLOW) {
2594                         mac->stats.rx_fifo_ovfl++;
2595                         reset |= F_RXFIFO_OVERFLOW;
2596                 }
2597
2598                 t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
2599         }
2600
2601         /*
2602          * We do the same as above for FL_EMPTY interrupts.
2603          */
2604         status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2605         reset = 0;
2606
2607         if (status & F_FLEMPTY) {
2608                 struct sge_qset *qs = &adapter->sge.qs[0];
2609                 int i = 0;
2610
2611                 reset |= F_FLEMPTY;
2612
2613                 v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
2614                     0xffff;
2615
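                /* two FL-empty status bits per qset, one per free list */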
2616                 while (v) {
2617                         qs->fl[i].empty += (v & 1);
2618                         if (i)
2619                                 qs++;
2620                         i ^= 1;
2621                         v >>= 1;
2622                 }
2623         }
2624
2625         t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
2626
2627         /* Schedule the next check update if any port is active. */
2628         spin_lock_irq(&adapter->work_lock);
2629         if (adapter->open_device_map & PORT_MASK)
2630                 schedule_chk_task(adapter);
2631         spin_unlock_irq(&adapter->work_lock);
2632 }
2633
2634 /*
2635  * Processes external (PHY) interrupts in process context.
2636  */
2637 static void ext_intr_task(struct work_struct *work)
2638 {
2639         struct adapter *adapter = container_of(work, struct adapter,
2640                                                ext_intr_handler_task);
2641         int i;
2642
2643         /* Disable link fault interrupts */
2644         for_each_port(adapter, i) {
2645                 struct net_device *dev = adapter->port[i];
2646                 struct port_info *p = netdev_priv(dev);
2647
2648                 t3_xgm_intr_disable(adapter, i);
2649                 t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2650         }
2651
2652         /* Re-enable link fault interrupts */
2653         t3_phy_intr_handler(adapter);
2654
2655         for_each_port(adapter, i)
2656                 t3_xgm_intr_enable(adapter, i);
2657
2658         /* Now reenable external interrupts */
2659         spin_lock_irq(&adapter->work_lock);
2660         if (adapter->slow_intr_mask) {
2661                 adapter->slow_intr_mask |= F_T3DBG;
2662                 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2663                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2664                              adapter->slow_intr_mask);
2665         }
2666         spin_unlock_irq(&adapter->work_lock);
2667 }
2668
2669 /*
2670  * Interrupt-context handler for external (PHY) interrupts.
2671  */
2672 void t3_os_ext_intr_handler(struct adapter *adapter)
2673 {
2674         /*
2675          * Schedule a task to handle external interrupts as they may be slow
2676          * and we use a mutex to protect MDIO registers.  We disable PHY
2677          * interrupts in the meantime and let the task reenable them when
2678          * it's done.
2679          */
2680         spin_lock(&adapter->work_lock);
2681         if (adapter->slow_intr_mask) {
2682                 adapter->slow_intr_mask &= ~F_T3DBG;
2683                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2684                              adapter->slow_intr_mask);
2685                 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2686         }
2687         spin_unlock(&adapter->work_lock);
2688 }
2689
2690 void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2691 {
2692         struct net_device *netdev = adapter->port[port_id];
2693         struct port_info *pi = netdev_priv(netdev);
2694
2695         spin_lock(&adapter->work_lock);
2696         pi->link_fault = 1;
2697         spin_unlock(&adapter->work_lock);
2698 }
2699
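/*
 * Quiesce the adapter after a fatal error: notify and close the offload
 * device if active, stop all ports and SGE timers, optionally reset the
 * chip, and disable the PCI device.
 */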
2700 static int t3_adapter_error(struct adapter *adapter, int reset)
2701 {
2702         int i, ret = 0;
2703
2704         if (is_offload(adapter) &&
2705             test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2706                 cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2707                 offload_close(&adapter->tdev);
2708         }
2709
2710         /* Stop all ports */
2711         for_each_port(adapter, i) {
2712                 struct net_device *netdev = adapter->port[i];
2713
2714                 if (netif_running(netdev))
2715                         cxgb_close(netdev);
2716         }
2717
2718         /* Stop SGE timers */
2719         t3_stop_sge_timers(adapter);
2720
2721         adapter->flags &= ~FULL_INIT_DONE;
2722
2723         if (reset)
2724                 ret = t3_reset_adapter(adapter);
2725
2726         pci_disable_device(adapter->pdev);
2727
2728         return ret;
2729 }
2730
2731 static int t3_reenable_adapter(struct adapter *adapter)
2732 {
2733         if (pci_enable_device(adapter->pdev)) {
2734                 dev_err(&adapter->pdev->dev,
2735                         "Cannot re-enable PCI device after reset.\n");
2736                 goto err;
2737         }
2738         pci_set_master(adapter->pdev);
2739         pci_restore_state(adapter->pdev);
2740
2741         /* Free sge resources */
2742         t3_free_sge_resources(adapter);
2743
2744         if (t3_replay_prep_adapter(adapter))
2745                 goto err;
2746
2747         return 0;
2748 err:
2749         return -1;
2750 }
2751
2752 static void t3_resume_ports(struct adapter *adapter)
2753 {
2754         int i;
2755
2756         /* Restart the ports */
2757         for_each_port(adapter, i) {
2758                 struct net_device *netdev = adapter->port[i];
2759
2760                 if (netif_running(netdev)) {
2761                         if (cxgb_open(netdev)) {
2762                                 dev_err(&adapter->pdev->dev,
2763                                         "can't bring device back up"
2764                                         " after reset\n");
2765                                 continue;
2766                         }
2767                 }
2768         }
2769
2770         if (is_offload(adapter) && !ofld_disable)
2771                 cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
2772 }
2773
/*
 * Process a fatal error: bring the ports down, reset the chip, and bring the
 * ports back up.
 */
2778 static void fatal_error_task(struct work_struct *work)
2779 {
2780         struct adapter *adapter = container_of(work, struct adapter,
2781                                                fatal_error_handler_task);
2782         int err = 0;
2783
2784         rtnl_lock();
2785         err = t3_adapter_error(adapter, 1);
2786         if (!err)
2787                 err = t3_reenable_adapter(adapter);
2788         if (!err)
2789                 t3_resume_ports(adapter);
2790
2791         CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
2792         rtnl_unlock();
2793 }
2794
2795 void t3_fatal_err(struct adapter *adapter)
2796 {
2797         unsigned int fw_status[4];
2798
2799         if (adapter->flags & FULL_INIT_DONE) {
2800                 t3_sge_stop(adapter);
2801                 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2802                 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2803                 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2804                 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
2805
2806                 spin_lock(&adapter->work_lock);
2807                 t3_intr_disable(adapter);
2808                 queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
2809                 spin_unlock(&adapter->work_lock);
2810         }
2811         CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2812         if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2813                 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2814                          fw_status[0], fw_status[1],
2815                          fw_status[2], fw_status[3]);
2816 }
2817
2818 /**
2819  * t3_io_error_detected - called when PCI error is detected
2820  * @pdev: Pointer to PCI device
2821  * @state: The current pci connection state
2822  *
2823  * This function is called after a PCI bus error affecting
2824  * this device has been detected.
2825  */
2826 static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
2827                                              pci_channel_state_t state)
2828 {
        struct adapter *adapter = pci_get_drvdata(pdev);

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        t3_adapter_error(adapter, 0);
2836
2837         /* Request a slot reset. */
2838         return PCI_ERS_RESULT_NEED_RESET;
2839 }
2840
2841 /**
2842  * t3_io_slot_reset - called after the pci bus has been reset.
2843  * @pdev: Pointer to PCI device
2844  *
2845  * Restart the card from scratch, as if from a cold-boot.
2846  */
2847 static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
2848 {
2849         struct adapter *adapter = pci_get_drvdata(pdev);
2850
2851         if (!t3_reenable_adapter(adapter))
2852                 return PCI_ERS_RESULT_RECOVERED;
2853
2854         return PCI_ERS_RESULT_DISCONNECT;
2855 }
2856
2857 /**
2858  * t3_io_resume - called when traffic can start flowing again.
2859  * @pdev: Pointer to PCI device
2860  *
2861  * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
2863  */
2864 static void t3_io_resume(struct pci_dev *pdev)
2865 {
2866         struct adapter *adapter = pci_get_drvdata(pdev);
2867
2868         CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
2869                  t3_read_reg(adapter, A_PCIE_PEX_ERR));
2870
2871         t3_resume_ports(adapter);
2872 }
2873
2874 static struct pci_error_handlers t3_err_handler = {
2875         .error_detected = t3_io_error_detected,
2876         .slot_reset = t3_io_slot_reset,
2877         .resume = t3_io_resume,
2878 };
2879
2880 /*
2881  * Set the number of qsets based on the number of CPUs and the number of ports,
2882  * not to exceed the number of available qsets, assuming there are enough qsets
2883  * per port in HW.
2884  */
2885 static void set_nqsets(struct adapter *adap)
2886 {
2887         int i, j = 0;
2888         int num_cpus = num_online_cpus();
2889         int hwports = adap->params.nports;
2890         int nqsets = adap->msix_nvectors - 1;
2891
2892         if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
2893                 if (hwports == 2 &&
2894                     (hwports * nqsets > SGE_QSETS ||
2895                      num_cpus >= nqsets / hwports))
2896                         nqsets /= hwports;
2897                 if (nqsets > num_cpus)
2898                         nqsets = num_cpus;
2899                 if (nqsets < 1 || hwports == 4)
2900                         nqsets = 1;
2901         } else
2902                 nqsets = 1;
2903
2904         for_each_port(adap, i) {
2905                 struct port_info *pi = adap2pinfo(adap, i);
2906
2907                 pi->first_qset = j;
2908                 pi->nqsets = nqsets;
2909                 j = pi->first_qset + nqsets;
2910
2911                 dev_info(&adap->pdev->dev,
2912                          "Port %d using %d queue sets.\n", i, nqsets);
2913         }
2914 }
2915
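/*
 * Request MSI-X vectors for the qsets plus one for the slow path, accepting
 * fewer vectors if the full complement is unavailable.
 */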
2916 static int __devinit cxgb_enable_msix(struct adapter *adap)
2917 {
2918         struct msix_entry entries[SGE_QSETS + 1];
2919         int vectors;
2920         int i, err;
2921
2922         vectors = ARRAY_SIZE(entries);
2923         for (i = 0; i < vectors; ++i)
2924                 entries[i].entry = i;
2925
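        /*
         * A positive return from pci_enable_msix() tells us how many vectors
         * are actually available, so retry with that smaller count.
         */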
2926         while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
2927                 vectors = err;
2928
2929         if (err < 0)
2930                 pci_disable_msix(adap->pdev);
2931
2932         if (!err && vectors < (adap->params.nports + 1)) {
2933                 pci_disable_msix(adap->pdev);
2934                 err = -1;
2935         }
2936
2937         if (!err) {
2938                 for (i = 0; i < vectors; ++i)
2939                         adap->msix_info[i].vec = entries[i].vector;
2940                 adap->msix_nvectors = vectors;
2941         }
2942
2943         return err;
2944 }

static void __devinit print_port_info(struct adapter *adap,
                                      const struct adapter_info *ai)
{
        static const char *pci_variant[] = {
                "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
        };

        int i;
        char buf[80];

        if (is_pcie(adap))
                snprintf(buf, sizeof(buf), "%s x%d",
                         pci_variant[adap->params.pci.variant],
                         adap->params.pci.width);
        else
                snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
                         pci_variant[adap->params.pci.variant],
                         adap->params.pci.speed, adap->params.pci.width);

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                const struct port_info *pi = netdev_priv(dev);

                if (!test_bit(i, &adap->registered_device_map))
                        continue;
                printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
                       dev->name, ai->desc, pi->phy.desc,
                       is_offload(adap) ? "R" : "", adap->params.rev, buf,
                       (adap->flags & USING_MSIX) ? " MSI-X" :
                       (adap->flags & USING_MSI) ? " MSI" : "");
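                /*
                 * adap->name aliases the name of the first successfully
                 * registered interface, so the adapter-wide memory and
                 * serial-number banner below is printed exactly once.
                 */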
                if (adap->name == dev->name && adap->params.vpd.mclk)
                        printk(KERN_INFO
                               "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
                               adap->name, t3_mc7_size(&adap->cm) >> 20,
                               t3_mc7_size(&adap->pmtx) >> 20,
                               t3_mc7_size(&adap->pmrx) >> 20,
                               adap->params.vpd.sn);
        }
}

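/*
 * net_device callbacks shared by every port; per-port state lives in
 * the port_info structure hung off each net_device.
 */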
static const struct net_device_ops cxgb_netdev_ops = {
        .ndo_open               = cxgb_open,
        .ndo_stop               = cxgb_close,
        .ndo_start_xmit         = t3_eth_xmit,
        .ndo_get_stats          = cxgb_get_stats,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_multicast_list = cxgb_set_rxmode,
        .ndo_do_ioctl           = cxgb_ioctl,
        .ndo_change_mtu         = cxgb_change_mtu,
        .ndo_set_mac_address    = cxgb_set_mac_addr,
        .ndo_vlan_rx_register   = vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = cxgb_netpoll,
#endif
};

static int __devinit init_one(struct pci_dev *pdev,
                              const struct pci_device_id *ent)
{
        static int version_printed;

        int i, err, pci_using_dac = 0;
        resource_size_t mmio_start, mmio_len;
        const struct adapter_info *ai;
        struct adapter *adapter = NULL;
        struct port_info *pi;

        if (!version_printed) {
                printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
                ++version_printed;
        }

        if (!cxgb3_wq) {
                cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
                if (!cxgb3_wq) {
                        printk(KERN_ERR DRV_NAME
                               ": cannot initialize work queue\n");
                        return -ENOMEM;
                }
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                /* Just info, some other driver may have claimed the device. */
                dev_info(&pdev->dev, "cannot obtain PCI resources\n");
                return err;
        }

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "cannot enable PCI device\n");
                goto out_release_regions;
        }

        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
                pci_using_dac = 1;
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
                if (err) {
                        dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
                               "coherent allocations\n");
                        goto out_disable_device;
                }
        } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
                dev_err(&pdev->dev, "no usable DMA configuration\n");
                goto out_disable_device;
        }

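        /*
         * Enable bus mastering and snapshot the PCI configuration space
         * so it can be restored by the slot-reset error handler.
         */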
        pci_set_master(pdev);
        pci_save_state(pdev);

        mmio_start = pci_resource_start(pdev, 0);
        mmio_len = pci_resource_len(pdev, 0);
        ai = t3_get_adapter_info(ent->driver_data);

        adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
        if (!adapter) {
                err = -ENOMEM;
                goto out_disable_device;
        }

        adapter->regs = ioremap_nocache(mmio_start, mmio_len);
        if (!adapter->regs) {
                dev_err(&pdev->dev, "cannot map device registers\n");
                err = -ENOMEM;
                goto out_free_adapter;
        }

        adapter->pdev = pdev;
        adapter->name = pci_name(pdev);
        adapter->msg_enable = dflt_msg_enable;
        adapter->mmio_len = mmio_len;

        mutex_init(&adapter->mdio_lock);
        spin_lock_init(&adapter->work_lock);
        spin_lock_init(&adapter->stats_lock);

        INIT_LIST_HEAD(&adapter->adapter_list);
        INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
        INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
        INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);

        for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
                struct net_device *netdev;

                netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
                if (!netdev) {
                        err = -ENOMEM;
                        goto out_free_dev;
                }

                SET_NETDEV_DEV(netdev, &pdev->dev);

                adapter->port[i] = netdev;
                pi = netdev_priv(netdev);
                pi->adapter = adapter;
                pi->rx_offload = T3_RX_CSUM | T3_LRO;
                pi->port_id = i;
                netif_carrier_off(netdev);
                netif_tx_stop_all_queues(netdev);
                netdev->irq = pdev->irq;
                netdev->mem_start = mmio_start;
                netdev->mem_end = mmio_start + mmio_len - 1;
                netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
                netdev->features |= NETIF_F_LLTX;
                netdev->features |= NETIF_F_GRO;
                if (pci_using_dac)
                        netdev->features |= NETIF_F_HIGHDMA;

                netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
                netdev->netdev_ops = &cxgb_netdev_ops;
                SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
        }

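        /*
         * t3_prep_adapter() does the one-time SW/HW preparation: it
         * reads the VPD/EEPROM parameters and initializes the PHYs and
         * common adapter state before the net devices are registered.
         */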
        pci_set_drvdata(pdev, adapter);
        if (t3_prep_adapter(adapter, ai, 1) < 0) {
                err = -ENODEV;
                goto out_free_dev;
        }

        /*
         * The card is now ready to go.  If any errors occur during device
         * registration we do not fail the whole card but rather proceed only
         * with the ports we manage to register successfully.  However we must
         * register at least one net device.
         */
        for_each_port(adapter, i) {
                err = register_netdev(adapter->port[i]);
                if (err)
                        dev_warn(&pdev->dev,
                                 "cannot register net device %s, skipping\n",
                                 adapter->port[i]->name);
                else {
                        /*
                         * Change the name we use for messages to the name of
                         * the first successfully registered interface.
                         */
                        if (!adapter->registered_device_map)
                                adapter->name = adapter->port[i]->name;

                        __set_bit(i, &adapter->registered_device_map);
                }
        }
        if (!adapter->registered_device_map) {
                dev_err(&pdev->dev, "could not register any net devices\n");
                goto out_free_dev;
        }

        /* Driver's ready. Reflect it on LEDs */
        t3_led_ready(adapter);

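        /*
         * Offload-capable (R-series) adapters also register with the
         * cxgb3 offload layer so that upper-layer drivers (e.g. the
         * iw_cxgb3 RDMA driver) can attach to this adapter.
         */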
        if (is_offload(adapter)) {
                __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
                cxgb3_adapter_ofld(adapter);
        }

        /*
         * See what interrupts we'll be using.  The "msi" module
         * parameter selects the mode: 2 tries MSI-X first and falls
         * back to MSI, 1 tries MSI only, and 0 sticks with legacy INTx.
         */
        if (msi > 1 && cxgb_enable_msix(adapter) == 0)
                adapter->flags |= USING_MSIX;
        else if (msi > 0 && pci_enable_msi(pdev) == 0)
                adapter->flags |= USING_MSI;

        set_nqsets(adapter);

        err = sysfs_create_group(&adapter->port[0]->dev.kobj,
                                 &cxgb3_attr_group);
        if (err)
                dev_warn(&pdev->dev, "cannot create sysfs group\n");

        print_port_info(adapter, ai);
        return 0;

out_free_dev:
        iounmap(adapter->regs);
        for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
                if (adapter->port[i])
                        free_netdev(adapter->port[i]);

out_free_adapter:
        kfree(adapter);

out_disable_device:
        pci_disable_device(pdev);
out_release_regions:
        pci_release_regions(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
}

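/*
 * remove_one() unwinds init_one() in reverse: quiesce the SGE, detach
 * from the offload layer, unregister the net devices, then release the
 * SGE, interrupt, MMIO and PCI resources.
 */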
static void __devexit remove_one(struct pci_dev *pdev)
{
        struct adapter *adapter = pci_get_drvdata(pdev);

        if (adapter) {
                int i;

                t3_sge_stop(adapter);
                sysfs_remove_group(&adapter->port[0]->dev.kobj,
                                   &cxgb3_attr_group);

                if (is_offload(adapter)) {
                        cxgb3_adapter_unofld(adapter);
                        if (test_bit(OFFLOAD_DEVMAP_BIT,
                                     &adapter->open_device_map))
                                offload_close(&adapter->tdev);
                }

                for_each_port(adapter, i)
                        if (test_bit(i, &adapter->registered_device_map))
                                unregister_netdev(adapter->port[i]);

                t3_stop_sge_timers(adapter);
                t3_free_sge_resources(adapter);
                cxgb_disable_msi(adapter);

                for_each_port(adapter, i)
                        if (adapter->port[i])
                                free_netdev(adapter->port[i]);

                iounmap(adapter->regs);
                kfree(adapter);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
                pci_set_drvdata(pdev, NULL);
        }
}

static struct pci_driver driver = {
        .name = DRV_NAME,
        .id_table = cxgb3_pci_tbl,
        .probe = init_one,
        .remove = __devexit_p(remove_one),
        .err_handler = &t3_err_handler,
};

static int __init cxgb3_init_module(void)
{
        cxgb3_offload_init();

        return pci_register_driver(&driver);
}

static void __exit cxgb3_cleanup_module(void)
{
        pci_unregister_driver(&driver);
        if (cxgb3_wq)
                destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);