/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/stringify.h>
#include <asm/uaccess.h>
51 #include "cxgb3_ioctl.h"
53 #include "cxgb3_offload.h"
56 #include "cxgb3_ctl_defs.h"
58 #include "firmware_exports.h"
enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};
#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
static DEFINE_PCI_DEVICE_TABLE(cxgb3_pci_tbl) = {
	CH_DEVICE(0x20, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1),	/* T302E */
	CH_DEVICE(0x22, 2),	/* T310E */
	CH_DEVICE(0x23, 3),	/* T320X */
	CH_DEVICE(0x24, 1),	/* T302X */
	CH_DEVICE(0x25, 3),	/* T320E */
	CH_DEVICE(0x26, 2),	/* T310X */
	CH_DEVICE(0x30, 2),	/* T3B10 */
	CH_DEVICE(0x31, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1),	/* T3B02 */
	CH_DEVICE(0x35, 6),	/* T3C20-derived T3C10 */
	CH_DEVICE(0x36, 3),	/* S320E-CR */
	CH_DEVICE(0x37, 7),	/* N320E-G2 */
	{0,}
};
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
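
/*
 * Illustrative usage (not from the original source): loading the module with
 * "modprobe cxgb3 msi=0" forces legacy INTx interrupts, e.g. to work around
 * broken MSI routing on a platform.  Because the parameter is registered with
 * mode 0644, it is also visible under /sys/module/cxgb3/parameters/msi.
 */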
/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;
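
/*
 * Deferred work (e.g. the periodic adapter check task queued by
 * schedule_chk_task() below via queue_delayed_work()) runs on this private
 * workqueue instead of the shared kernel event queue.
 */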
/**
 *	link_report - show link status and link speed/duplex
 *	@dev: the port whose settings are to be reported
 *
 *	Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		printk(KERN_INFO "%s: link down\n", dev->name);
	else {
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
	}
}
static void enable_tx_fifo_drain(struct adapter *adapter,
				 struct port_info *pi)
{
	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
			 F_ENDROPPKT);
	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
	t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
}

static void disable_tx_fifo_drain(struct adapter *adapter,
				  struct port_info *pi)
{
	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
			 F_ENDROPPKT, 0);
}
void t3_os_link_fault(struct adapter *adap, int port_id, int state)
{
	struct net_device *dev = adap->port[port_id];
	struct port_info *pi = netdev_priv(dev);

	if (state == netif_carrier_ok(dev))
		return;

	if (state) {
		struct cmac *mac = &pi->mac;

		netif_carrier_on(dev);

		disable_tx_fifo_drain(adap, pi);

		/* Clear local faults */
		t3_xgm_intr_disable(adap, pi->port_id);
		t3_read_reg(adap, A_XGM_INT_STATUS +
			    pi->mac.offset);
		t3_write_reg(adap,
			     A_XGM_INT_CAUSE + pi->mac.offset,
			     F_XGM_INT);

		t3_set_reg_field(adap,
				 A_XGM_INT_ENABLE +
				 pi->mac.offset,
				 F_XGM_INT, F_XGM_INT);
		t3_xgm_intr_enable(adap, pi->port_id);

		t3_mac_enable(mac, MAC_DIRECTION_TX);
	} else {
		netif_carrier_off(dev);

		/* Flush TX FIFO */
		enable_tx_fifo_drain(adap, pi);
	}
	link_report(dev);
}
/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_stat: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@pause: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat) {
			disable_tx_fifo_drain(adapter, pi);

			t3_mac_enable(mac, MAC_DIRECTION_RX);

			/* Clear local faults */
			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS +
				    pi->mac.offset);
			t3_write_reg(adapter,
				     A_XGM_INT_CAUSE + pi->mac.offset,
				     F_XGM_INT);

			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, F_XGM_INT);
			t3_xgm_intr_enable(adapter, pi->port_id);

			netif_carrier_on(dev);
		} else {
			netif_carrier_off(dev);

			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, 0);

			if (is_10G(adapter))
				pi->phy.ops->power_down(&pi->phy, 1);

			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_mac_disable(mac, MAC_DIRECTION_RX);
			t3_link_start(&pi->phy, mac, &pi->link_config);

			/* Flush TX FIFO */
			enable_tx_fifo_drain(adapter, pi);
		}

		link_report(dev);
	}
}
/**
 *	t3_os_phymod_changed - handle PHY module changes
 *	@adap: the adapter whose PHY reported the module change
 *	@port_id: the port index of the PHY that changed
 *
 *	This is the OS-dependent handler for PHY module changes.  It is
 *	invoked when a PHY module is removed or inserted, for any OS-specific
 *	processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->phy.modtype == phy_modtype_none)
		printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
	else
		printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
		       mod_str[pi->phy.modtype]);
}
static void cxgb_set_rxmode(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	t3_mac_set_rx_mode(&pi->mac, dev);
}
/**
 *	link_start - enable a port
 *	@dev: the device to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	t3_mac_reset(mac);
	t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
	t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
	t3_mac_set_rx_mode(mac, dev);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}
/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}
/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s-%d", d->name, pi->first_qset + i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}
}
static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}
static void free_irq_resources(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
			n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);
}
static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
			      unsigned long n)
{
	int attempts = 5;

	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
		if (!--attempts)
			return -ETIMEDOUT;
		msleep(10);
	}
	return 0;
}
static int init_tp_parity(struct adapter *adap)
{
	int i;
	struct sk_buff *skb;
	struct cpl_set_tcb_field *greq;
	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

	t3_tp_set_offload_mode(adap, 1);

	for (i = 0; i < 16; i++) {
		struct cpl_smt_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
		req->mtu_idx = NMTUS - 1;
		req->iff = i;
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_l2t_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
		req->params = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_rte_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
		req->l2t_idx = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;
	if (!skb)
		goto alloc_skb_fail;

	greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
	memset(greq, 0, sizeof(*greq));
	greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
	greq->mask = cpu_to_be64(1);
	t3_mgmt_tx(adap, skb);

	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
	if (skb == adap->nofail_skb) {
		i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
		adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
	}

	t3_tp_set_offload_mode(adap, 0);
	return i;

alloc_skb_fail:
	t3_tp_set_offload_mode(adap, 0);
	return -ENOMEM;
}
/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}
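
/*
 * Worked example (hypothetical queue counts): with nq0 = nq1 = 2, the first
 * half of rspq_map cycles 0,1,0,1,... and the second half cycles 2,3,2,3,...,
 * so hash values falling in either half are spread across that port's own
 * response queues.
 */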
static void init_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
				       64);
	}

	/*
	 * netif_napi_add() can be called only once per napi_struct because it
	 * adds each new napi_struct to a list.  Be careful not to call it a
	 * second time, e.g., during EEH recovery, by making a note of it.
	 */
	adap->flags |= NAPI_INIT;
}
/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_enable(&adap->sge.qs[i].napi);
}
/**
 *	set_qset_lro - Turn a queue set's LRO capability on and off
 *	@dev: the device the qset is attached to
 *	@qset_idx: the queue set index
 *	@val: the LRO switch
 *
 *	Sets LRO on or off for a particular queue set.
 *	The device's features flag is updated to reflect the LRO
 *	capability when all queues belonging to the device are
 *	in the same state.
 */
static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	adapter->params.sge.qset[qset_idx].lro = !!val;
	adapter->sge.qs[qset_idx].lro_enabled = !!val;
}
/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
	int i, j, err, irq_idx = 0, qset_idx = 0;
	unsigned int ntxq = SGE_TXQ_PER_SET;

	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
		irq_idx = -1;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);

		pi->qs = &adap->sge.qs[pi->first_qset];
		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
			set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
							     irq_idx,
				&adap->params.sge.qset[qset_idx], ntxq, dev,
				netdev_get_tx_queue(dev, j));
			if (err) {
				t3_free_sge_resources(adap);
				return err;
			}
		}
	}

	return 0;
}
static ssize_t attr_show(struct device *d, char *buf,
			 ssize_t(*format) (struct net_device *, char *))
{
	ssize_t len;

	/* Synchronize with ioctls that may shut down the device */
	rtnl_lock();
	len = (*format) (to_net_dev(d), buf);
	rtnl_unlock();
	return len;
}

static ssize_t attr_store(struct device *d,
			  const char *buf, size_t len,
			  ssize_t(*set) (struct net_device *, unsigned int),
			  unsigned int min_val, unsigned int max_val)
{
	char *endp;
	ssize_t ret;
	unsigned int val;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val < min_val || val > max_val)
		return -EINVAL;

	rtnl_lock();
	ret = (*set) (to_net_dev(d), val);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}
#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
	struct port_info *pi = netdev_priv(dev); \
	struct adapter *adap = pi->adapter; \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return attr_show(d, buf, format_##name); \
}
static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val && adap->params.rev == 0)
		return -EINVAL;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    min_tids)
		return -EINVAL;
	adap->params.mc5.nfilters = val;
	return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nfilters, 0, ~0);
}
static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
	    MC5_MIN_TIDS)
		return -EINVAL;
	adap->params.mc5.nservers = val;
	return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nservers, 0, ~0);
}
#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
	&dev_attr_cam_size.attr,
	&dev_attr_nfilters.attr,
	&dev_attr_nservers.attr,
	NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
static ssize_t tm_attr_show(struct device *d,
			    char *buf, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int v, addr, bpt, cpt;
	ssize_t len;

	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
	rtnl_lock();	/* synchronize with deferred work */
	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v >>= 16;
	bpt = (v >> 8) & 0xff;
	cpt = v & 0xff;
	if (!cpt)
		len = sprintf(buf, "disabled\n");
	else {
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
	}
	rtnl_unlock();
	return len;
}
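
/*
 * Rate derivation (a sketch, assuming vpd.cclk is the core clock in kHz):
 * cpt is core clocks per scheduler tick and bpt is bytes sent per tick, so
 * cclk * 1000 / cpt gives ticks per second, times bpt gives bytes per second,
 * and dividing by 125 converts bytes/sec to kbits/sec (125 bytes = 1000 bits).
 */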
static ssize_t tm_attr_store(struct device *d,
			     const char *buf, size_t len, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int val;
	char *endp;
	ssize_t ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val > 10000000)
		return -EINVAL;

	rtnl_lock();
	ret = t3_config_sched(adap, val, sched);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
{ \
	return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
	&dev_attr_sched0.attr,
	&dev_attr_sched1.attr,
	&dev_attr_sched2.attr,
	&dev_attr_sched3.attr,
	&dev_attr_sched4.attr,
	&dev_attr_sched5.attr,
	&dev_attr_sched6.attr,
	&dev_attr_sched7.attr,
	NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = t3_offload_tx(tdev, skb);
	local_bh_enable();
	return ret;
}

static int write_smt_entry(struct adapter *adapter, int idx)
{
	struct cpl_smt_write_req *req;
	struct port_info *pi = netdev_priv(adapter->port[idx]);
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	req->iff = idx;
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
	skb->priority = 1;
	offload_tx(&adapter->tdev, skb);
	return 0;
}
static int init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
		write_smt_entry(adapter, i);
	return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
	unsigned int mtus = adapter->port[0]->mtu;

	if (adapter->port[1])
		mtus |= adapter->port[1]->mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}
static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			     int hi, int port)
{
	struct sk_buff *skb;
	struct mngt_pktsched_wr *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;
	if (!skb)
		return -ENOMEM;

	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	req->sched = sched;
	req->idx = qidx;
	req->min = lo;
	req->max = hi;
	req->binding = port;
	ret = t3_mgmt_tx(adap, skb);
	if (skb == adap->nofail_skb) {
		adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
					     GFP_KERNEL);
		if (!adap->nofail_skb)
			ret = -ENOMEM;
	}

	return ret;
}
static int bind_qsets(struct adapter *adap)
{
	int i, j, err = 0;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		for (j = 0; j < pi->nqsets; ++j) {
			int ret = send_pktsched_cmd(adap, 1,
						    pi->first_qset + j, -1,
						    -1, i + pi->tx_chan);
			if (ret)
				err = ret;
		}
	}

	return err;
}
#define FW_VERSION __stringify(FW_VERSION_MAJOR) "." \
	__stringify(FW_VERSION_MINOR) "." __stringify(FW_VERSION_MICRO)
#define FW_FNAME "cxgb3/t3fw-" FW_VERSION ".bin"
#define TPSRAM_VERSION __stringify(TP_VERSION_MAJOR) "." \
	__stringify(TP_VERSION_MINOR) "." __stringify(TP_VERSION_MICRO)
#define TPSRAM_NAME "cxgb3/t3%c_psram-" TPSRAM_VERSION ".bin"
#define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin"
#define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin"
#define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin"
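
/*
 * Illustrative expansion (hypothetical version numbers): with
 * FW_VERSION_{MAJOR,MINOR,MICRO} of 7, 0 and 0, FW_FNAME becomes
 * "cxgb3/t3fw-7.0.0.bin".  TPSRAM_NAME keeps a %c placeholder that
 * update_tpsram() below fills in with the chip revision ('b' or 'c').
 */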
MODULE_FIRMWARE(FW_FNAME);
MODULE_FIRMWARE("cxgb3/t3b_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE("cxgb3/t3c_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE(AEL2005_OPT_EDC_NAME);
MODULE_FIRMWARE(AEL2005_TWX_EDC_NAME);
MODULE_FIRMWARE(AEL2020_TWX_EDC_NAME);
static inline const char *get_edc_fw_name(int edc_idx)
{
	const char *fw_name = NULL;

	switch (edc_idx) {
	case EDC_OPT_AEL2005:
		fw_name = AEL2005_OPT_EDC_NAME;
		break;
	case EDC_TWX_AEL2005:
		fw_name = AEL2005_TWX_EDC_NAME;
		break;
	case EDC_TWX_AEL2020:
		fw_name = AEL2020_TWX_EDC_NAME;
		break;
	}
	return fw_name;
}
int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
{
	struct adapter *adapter = phy->adapter;
	const struct firmware *fw;
	char buf[64];
	u32 csum;
	const __be32 *p;
	u16 *cache = phy->phy_cache;
	int i, ret;

	snprintf(buf, sizeof(buf), get_edc_fw_name(edc_idx));

	ret = request_firmware(&fw, buf, &adapter->pdev->dev);
	if (ret < 0) {
		dev_err(&adapter->pdev->dev,
			"could not upgrade firmware: unable to load %s\n",
			buf);
		return ret;
	}

	/* check size, take checksum in account */
	if (fw->size > size + 4) {
		CH_ERR(adapter, "firmware image too large %u, expected %d\n",
		       (unsigned int)fw->size, size + 4);
		ret = -EINVAL;
	}

	/* compute checksum */
	p = (const __be32 *)fw->data;
	for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
		csum += ntohl(p[i]);
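
	/*
	 * The image carries a trailing 32-bit checksum word chosen so that
	 * the wrapping 32-bit sum of every word, checksum included, comes
	 * out to 0xffffffff; any other total indicates corruption.
	 */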
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		ret = -EINVAL;
	}

	for (i = 0; i < size / 4 ; i++) {
		*cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
		*cache++ = be32_to_cpu(p[i]) & 0xffff;
	}

	release_firmware(fw);

	return ret;
}
static int upgrade_fw(struct adapter *adap)
{
	int ret;
	const struct firmware *fw;
	struct device *dev = &adap->pdev->dev;

	ret = request_firmware(&fw, FW_FNAME, dev);
	if (ret < 0) {
		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
			FW_FNAME);
		return ret;
	}
	ret = t3_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);

	if (ret == 0)
		dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
			 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
	else
		dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
			FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

	return ret;
}
static inline char t3rev2char(struct adapter *adapter)
{
	char rev = 0;

	switch(adapter->params.rev) {
	case T3_REV_B:
	case T3_REV_B2:
		rev = 'b';
		break;
	case T3_REV_C:
		rev = 'c';
		break;
	}
	return rev;
}
static int update_tpsram(struct adapter *adap)
{
	const struct firmware *tpsram;
	char buf[64];
	struct device *dev = &adap->pdev->dev;
	int ret;
	char rev;

	rev = t3rev2char(adap);
	if (!rev)
		return 0;

	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);

	ret = request_firmware(&tpsram, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not load TP SRAM: unable to load %s\n",
			buf);
		return ret;
	}

	ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
	if (ret)
		goto release_tpsram;

	ret = t3_set_proto_sram(adap, tpsram->data);
	if (ret == 0)
		dev_info(dev,
			 "successful update of protocol engine "
			 "to %d.%d.%d\n",
			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	else
		dev_err(dev, "failed to update protocol engine %d.%d.%d\n",
			TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	if (ret)
		dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
	release_firmware(tpsram);

	return ret;
}
/**
 *	cxgb_up - enable the adapter
 *	@adapter: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;

	if (!(adap->flags & FULL_INIT_DONE)) {
		err = t3_check_fw_version(adap);
		if (err == -EINVAL) {
			err = upgrade_fw(adap);
			CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
				FW_VERSION_MAJOR, FW_VERSION_MINOR,
				FW_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		err = t3_check_tpsram_version(adap);
		if (err == -EINVAL) {
			err = update_tpsram(adap);
			CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
				TP_VERSION_MAJOR, TP_VERSION_MINOR,
				TP_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		/*
		 * Clear interrupts now to catch errors if t3_init_hw fails.
		 * We clear them again later as initialization may trigger
		 * conditions that can interrupt.
		 */
		t3_intr_clear(adap);

		err = t3_init_hw(adap, 0);
		if (err)
			goto out;

		t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(adap);
		if (err)
			goto out;

		setup_rss(adap);
		if (!(adap->flags & NAPI_INIT))
			init_napi(adap);

		t3_start_sge_timers(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(adap);

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_data_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else if ((err = request_irq(adap->pdev->irq,
				      t3_intr_handler(adap,
						      adap->sge.qs[0].rspq.
						      polling),
				      (adap->flags & USING_MSI) ?
				       0 : IRQF_SHARED,
				      adap->name, adap)))
		goto irq_err;

	enable_all_napi(adap);
	t3_sge_start(adap);
	t3_intr_enable(adap);

	if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
	    is_offload(adap) && init_tp_parity(adap) == 0)
		adap->flags |= TP_PARITY_INIT;

	if (adap->flags & TP_PARITY_INIT) {
		t3_write_reg(adap, A_TP_INT_CAUSE,
			     F_CMCACHEPERR | F_ARPLUTPERR);
		t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
	}

	if (!(adap->flags & QUEUES_BOUND)) {
		err = bind_qsets(adap);
		if (err) {
			CH_ERR(adap, "failed to bind qsets, err %d\n", err);
			t3_intr_disable(adap);
			free_irq_resources(adap);
			goto out;
		}
		adap->flags |= QUEUES_BOUND;
	}

out:
	return err;
irq_err:
	CH_ERR(adap, "request_irq failed, err %d\n", err);
	goto out;
}
/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
	t3_sge_stop(adapter);
	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
	t3_intr_disable(adapter);
	spin_unlock_irq(&adapter->work_lock);

	free_irq_resources(adapter);
	quiesce_rx(adapter);
	flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
}
static void schedule_chk_task(struct adapter *adap)
{
	unsigned int timeo;

	timeo = adap->params.linkpoll_period ?
	    (HZ * adap->params.linkpoll_period) / 10 :
	    adap->params.stats_update_period * HZ;
	if (timeo)
		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}
static int offload_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct t3cdev *tdev = dev2t3cdev(dev);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err;

	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	if (!adap_up && (err = cxgb_up(adapter)) < 0)
		goto out;

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0];
	err = cxgb3_offload_activate(adapter);
	if (err)
		goto out;

	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		     adapter->port[0]->mtu : 0xffff);
	init_smt(adapter);

	if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
		dev_dbg(&dev->dev, "cannot create sysfs group\n");

	/* Call back all registered clients */
	cxgb3_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);
		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
		cxgb3_set_dummy_ops(tdev);
	}
	return err;
}
static int offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);

	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	/* Call back all registered clients */
	cxgb3_remove_clients(tdev);

	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	/* Flush work scheduled while releasing TIDs */
	flush_scheduled_work();

	tdev->lldev = NULL;
	cxgb3_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	cxgb3_offload_deactivate(adapter);
	return 0;
}
static int cxgb_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int other_ports = adapter->open_device_map & PORT_MASK;
	int err;

	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
		return err;

	set_bit(pi->port_id, &adapter->open_device_map);
	if (is_offload(adapter) && !ofld_disable) {
		err = offload_open(dev);
		if (err)
			printk(KERN_WARNING
			       "Could not initialize offload capabilities\n");
	}

	dev->real_num_tx_queues = pi->nqsets;
	link_start(dev);
	t3_port_intr_enable(adapter, pi->port_id);
	netif_tx_start_all_queues(dev);
	if (!other_ports)
		schedule_chk_task(adapter);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
	return 0;
}
static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (!adapter->open_device_map)
		return 0;

	/* Stop link fault interrupts */
	t3_xgm_intr_disable(adapter, pi->port_id);
	t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);

	t3_port_intr_disable(adapter, pi->port_id);
	netif_tx_stop_all_queues(dev);
	pi->phy.ops->power_down(&pi->phy, 1);
	netif_carrier_off(dev);
	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	spin_lock_irq(&adapter->work_lock);	/* sync with update task */
	clear_bit(pi->port_id, &adapter->open_device_map);
	spin_unlock_irq(&adapter->work_lock);

	if (!(adapter->open_device_map & PORT_MASK))
		cancel_delayed_work_sync(&adapter->adap_check_task);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
	return 0;
}
static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct net_device_stats *ns = &pi->netstats;
	const struct mac_stats *pstats;

	spin_lock(&adapter->stats_lock);
	pstats = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes = pstats->tx_octets;
	ns->tx_packets = pstats->tx_frames;
	ns->rx_bytes = pstats->rx_octets;
	ns->rx_packets = pstats->rx_frames;
	ns->multicast = pstats->rx_mcast_frames;

	ns->tx_errors = pstats->tx_underrun;
	ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
	    pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
	    pstats->rx_fifo_ovfl;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = pstats->rx_fcs_errs;
	ns->rx_frame_errors = pstats->rx_symbol_errs;
	ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
	ns->rx_missed_errors = pstats->rx_cong_drops;

	/* detailed tx_errors */
	ns->tx_aborted_errors = 0;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = pstats->tx_underrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = 0;
	return ns;
}
static u32 get_msglevel(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	adapter->msg_enable = val;
}
static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK         ",
	"TxFramesOK         ",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames      ",
	"TxUnderrun         ",
	"TxExtUnderrun      ",

	"TxFrames64         ",
	"TxFrames65To127    ",
	"TxFrames128To255   ",
	"TxFrames256To511   ",
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",

	"RxOctetsOK         ",
	"RxFramesOK         ",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames      ",
	"RxFCSErrors        ",
	"RxSymbolErrors     ",
	"RxShortErrors      ",
	"RxJabberErrors     ",
	"RxLengthErrors     ",
	"RxFIFOoverflow     ",

	"RxFrames64         ",
	"RxFrames65To127    ",
	"RxFrames128To255   ",
	"RxFrames256To511   ",
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",

	"PhyFIFOErrors      ",
	"TSO                ",
	"VLANextractions    ",
	"VLANinsertions     ",
	"TxCsumOffload      ",
	"RxCsumGood         ",
	"LroAggregated      ",
	"LroFlushed         ",
	"LroNoDesc          ",
	"RxDrops            ",

	"CheckTXEnToggled   ",
	"CheckResets        ",

	"LinkFaults         ",
};
static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 fw_vers = 0;
	u32 tp_vers = 0;

	spin_lock(&adapter->stats_lock);
	t3_get_fw_version(adapter, &fw_vers);
	t3_get_tp_version(adapter, &tp_vers);
	spin_unlock(&adapter->stats_lock);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(adapter->pdev));
	if (!fw_vers)
		strcpy(info->fw_version, "N/A");
	else {
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%s %u.%u.%u TP %u.%u.%u",
			 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
			 G_FW_VERSION_MAJOR(fw_vers),
			 G_FW_VERSION_MINOR(fw_vers),
			 G_FW_VERSION_MICRO(fw_vers),
			 G_TP_VERSION_MAJOR(tp_vers),
			 G_TP_VERSION_MINOR(tp_vers),
			 G_TP_VERSION_MICRO(tp_vers));
	}
}
static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

static unsigned long collect_sge_port_stats(struct adapter *adapter,
					    struct port_info *p, int idx)
{
	int i;
	unsigned long tot = 0;

	for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
		tot += adapter->sge.qs[i].port_stats[idx];
	return tot;
}
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct mac_stats *s;

	spin_lock(&adapter->stats_lock);
	s = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	*data++ = s->tx_octets;
	*data++ = s->tx_frames;
	*data++ = s->tx_mcast_frames;
	*data++ = s->tx_bcast_frames;
	*data++ = s->tx_pause;
	*data++ = s->tx_underrun;
	*data++ = s->tx_fifo_urun;

	*data++ = s->tx_frames_64;
	*data++ = s->tx_frames_65_127;
	*data++ = s->tx_frames_128_255;
	*data++ = s->tx_frames_256_511;
	*data++ = s->tx_frames_512_1023;
	*data++ = s->tx_frames_1024_1518;
	*data++ = s->tx_frames_1519_max;

	*data++ = s->rx_octets;
	*data++ = s->rx_frames;
	*data++ = s->rx_mcast_frames;
	*data++ = s->rx_bcast_frames;
	*data++ = s->rx_pause;
	*data++ = s->rx_fcs_errs;
	*data++ = s->rx_symbol_errs;
	*data++ = s->rx_short;
	*data++ = s->rx_jabber;
	*data++ = s->rx_too_long;
	*data++ = s->rx_fifo_ovfl;

	*data++ = s->rx_frames_64;
	*data++ = s->rx_frames_65_127;
	*data++ = s->rx_frames_128_255;
	*data++ = s->rx_frames_256_511;
	*data++ = s->rx_frames_512_1023;
	*data++ = s->rx_frames_1024_1518;
	*data++ = s->rx_frames_1519_max;

	*data++ = pi->phy.fifo_errors;

	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
	*data++ = 0;	/* LroAggregated (not collected) */
	*data++ = 0;	/* LroFlushed (not collected) */
	*data++ = 0;	/* LroNoDesc (not collected) */
	*data++ = s->rx_cong_drops;

	*data++ = s->num_toggled;
	*data++ = s->num_resets;

	*data++ = s->link_faults;
}
static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for (; start <= end; start += sizeof(u32))
		*p++ = t3_read_reg(ap, start);
}
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *ap = pi->adapter;

	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
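
	/*
	 * For example (hypothetical values): chip rev 2 on a PCIe card would
	 * yield 3 | (2 << 10) | (1 << 31) = 0x80000803.
	 */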
	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation.  Hard to justify the complexity.
	 */
	memset(buf, 0, T3_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
		       XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}
static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_config.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	p->phy.ops->autoneg_restart(&p->phy);
	return 0;
}
static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i;

	if (data == 0)
		data = 2;

	for (i = 0; i < data * 2; i++) {
		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
				 (i & 1) ? F_GPIO0_OUT_VAL : 0);
		if (msleep_interruptible(500))
			break;
	}
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
	return 0;
}
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);

	cmd->supported = p->link_config.supported;
	cmd->advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = p->link_config.speed;
		cmd->duplex = p->link_config.duplex;
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = p->phy.mdio.prtad;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = p->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
static int speed_duplex_to_caps(int speed, int duplex)
{
	int cap = 0;

	switch (speed) {
	case SPEED_10:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10baseT_Full;
		else
			cap = SUPPORTED_10baseT_Half;
		break;
	case SPEED_100:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_100baseT_Full;
		else
			cap = SUPPORTED_100baseT_Half;
		break;
	case SPEED_1000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_1000baseT_Full;
		else
			cap = SUPPORTED_1000baseT_Half;
		break;
	case SPEED_10000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10000baseT_Full;
	}
	return cap;
}
#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
		      ADVERTISED_10000baseT_Full)
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg)) {
		/*
		 * PHY offers a single speed/duplex.  See if that's what's
		 * being requested.
		 */
		if (cmd->autoneg == AUTONEG_DISABLE) {
			int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
			if (lc->supported & cap)
				return 0;
		}
		return -EINVAL;
	}

	if (cmd->autoneg == AUTONEG_DISABLE) {
		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
			return -EINVAL;
		lc->requested_speed = cmd->speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t3_link_start(&p->phy, &p->mac, lc);
	return 0;
}
static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t3_link_start(&p->phy, &p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
	}
	return 0;
}
static u32 get_rx_csum(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	return p->rx_offload & T3_RX_CSUM;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
	struct port_info *p = netdev_priv(dev);

	if (data) {
		p->rx_offload |= T3_RX_CSUM;
	} else {
		int i;

		p->rx_offload &= ~(T3_RX_CSUM | T3_LRO);
		for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
			set_qset_lro(dev, i, 0);
	}
	return 0;
}
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = 0;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = q->fl_size;
	e->rx_mini_pending = q->rspq_size;
	e->rx_jumbo_pending = q->jumbo_size;
	e->tx_pending = q->txq_size[0];
}
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q;
	int i;

	if (e->rx_pending > MAX_RX_BUFFERS ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	q = &adapter->params.sge.qset[pi->first_qset];
	for (i = 0; i < pi->nqsets; ++i, ++q) {
		q->rspq_size = e->rx_mini_pending;
		q->fl_size = e->rx_pending;
		q->jumbo_size = e->rx_jumbo_pending;
		q->txq_size[0] = e->tx_pending;
		q->txq_size[1] = e->tx_pending;
		q->txq_size[2] = e->tx_pending;
	}
	return 0;
}
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *qsp = &adapter->params.sge.qset[0];
	struct sge_qset *qs = &adapter->sge.qs[0];

	if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
		return -EINVAL;

	qsp->coalesce_usecs = c->rx_coalesce_usecs;
	t3_update_qset_coalesce(qs, qsp);
	return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q = adapter->params.sge.qset;

	c->rx_coalesce_usecs = q->coalesce_usecs;
	return 0;
}
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i, err = 0;

	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 aligned_offset, aligned_len;
	__le32 *p;
	u8 *buf;
	int err;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
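
	/*
	 * Worked example: offset = 5, len = 6 covers bytes 5..10, so the
	 * aligned window is offset 4 with length (6 + 1 + 3) & ~3 = 8,
	 * i.e. bytes 4..11.
	 */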
	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
		if (!err && aligned_len > 4)
			err = t3_seeprom_read(adapter,
					      aligned_offset + aligned_len - 4,
					      (__le32 *) & buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	err = t3_seeprom_wp(adapter, 0);
	if (err)
		goto out;

	for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = t3_seeprom_write(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t3_seeprom_wp(adapter, 1);
out:
	if (buf != data)
		kfree(buf);
	return err;
}
static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings = get_settings,
	.set_settings = set_settings,
	.get_drvinfo = get_drvinfo,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_ringparam = get_sge_param,
	.set_ringparam = set_sge_param,
	.get_coalesce = get_coalesce,
	.set_coalesce = set_coalesce,
	.get_eeprom_len = get_eeprom_len,
	.get_eeprom = get_eeprom,
	.set_eeprom = set_eeprom,
	.get_pauseparam = get_pauseparam,
	.set_pauseparam = set_pauseparam,
	.get_rx_csum = get_rx_csum,
	.set_rx_csum = set_rx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.get_link = ethtool_op_get_link,
	.get_strings = get_strings,
	.phys_id = cxgb3_phys_id,
	.nway_reset = restart_autoneg,
	.get_sset_count = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.get_wol = get_wol,
	.set_tso = ethtool_op_set_tso,
};
static int in_range(int val, int lo, int hi)
{
	return val < 0 || (val <= hi && val >= lo);
}
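
/*
 * Note: a negative value is deliberately treated as "in range"; the
 * extension ioctl below uses negative fields to mean "leave this
 * parameter unchanged".
 */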
static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 cmd;
	int ret;

	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
		return -EFAULT;

	switch (cmd) {
	case CHELSIO_SET_QSET_PARAMS:{
		int i;
		struct qset_params *q;
		struct ch_qset_params t;
		int q1 = pi->first_qset;
		int nqsets = pi->nqsets;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.qset_idx >= SGE_QSETS)
			return -EINVAL;
		if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
		    !in_range(t.cong_thres, 0, 255) ||
		    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
			      MAX_CTRL_TXQ_ENTRIES) ||
		    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
			      MAX_RX_BUFFERS) ||
		    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
			      MAX_RX_JUMBO_BUFFERS) ||
		    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
			      MAX_RSPQ_ENTRIES))
			return -EINVAL;

		if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				if (t.qset_idx >= pi->first_qset &&
				    t.qset_idx < pi->first_qset + pi->nqsets &&
				    !(pi->rx_offload & T3_RX_CSUM))
					return -EINVAL;
			}

		if ((adapter->flags & FULL_INIT_DONE) &&
		    (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
		     t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
		     t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
		     t.polling >= 0 || t.cong_thres >= 0))
			return -EBUSY;

		/* Allow setting of any available qset when offload enabled */
		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
			q1 = 0;
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				nqsets += pi->first_qset + pi->nqsets;
			}
		}

		if (t.qset_idx < q1)
			return -EINVAL;
		if (t.qset_idx > q1 + nqsets - 1)
			return -EINVAL;

		q = &adapter->params.sge.qset[t.qset_idx];

		if (t.rspq_size >= 0)
			q->rspq_size = t.rspq_size;
		if (t.fl_size[0] >= 0)
			q->fl_size = t.fl_size[0];
		if (t.fl_size[1] >= 0)
			q->jumbo_size = t.fl_size[1];
		if (t.txq_size[0] >= 0)
			q->txq_size[0] = t.txq_size[0];
		if (t.txq_size[1] >= 0)
			q->txq_size[1] = t.txq_size[1];
		if (t.txq_size[2] >= 0)
			q->txq_size[2] = t.txq_size[2];
		if (t.cong_thres >= 0)
			q->cong_thres = t.cong_thres;
		if (t.intr_lat >= 0) {
			struct sge_qset *qs =
				&adapter->sge.qs[t.qset_idx];

			q->coalesce_usecs = t.intr_lat;
			t3_update_qset_coalesce(qs, q);
		}
		if (t.polling >= 0) {
			if (adapter->flags & USING_MSIX)
				q->polling = t.polling;
			else {
				/* No polling with INTx for T3A */
				if (adapter->params.rev == 0 &&
				    !(adapter->flags & USING_MSI))
					t.polling = 0;

				for (i = 0; i < SGE_QSETS; i++) {
					q = &adapter->params.sge.
						qset[i];
					q->polling = t.polling;
				}
			}
		}
		if (t.lro >= 0)
			set_qset_lro(dev, t.qset_idx, t.lro);

		break;
	}
	case CHELSIO_GET_QSET_PARAMS:{
		struct qset_params *q;
		struct ch_qset_params t;
		int q1 = pi->first_qset;
		int nqsets = pi->nqsets;
		int i;

		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		/* Display qsets for all ports when offload enabled */
		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
			q1 = 0;
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				nqsets = pi->first_qset + pi->nqsets;
			}
		}

		if (t.qset_idx >= nqsets)
			return -EINVAL;

		q = &adapter->params.sge.qset[q1 + t.qset_idx];
		t.rspq_size = q->rspq_size;
		t.txq_size[0] = q->txq_size[0];
		t.txq_size[1] = q->txq_size[1];
		t.txq_size[2] = q->txq_size[2];
		t.fl_size[0] = q->fl_size;
		t.fl_size[1] = q->jumbo_size;
		t.polling = q->polling;
		t.lro = q->lro;
		t.intr_lat = q->coalesce_usecs;
		t.cong_thres = q->cong_thres;
		t.qnum = q1;

		if (adapter->flags & USING_MSIX)
			t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
		else
			t.vector = adapter->pdev->irq;

		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_QSET_NUM:{
		struct ch_reg edata;
		unsigned int i, first_qset = 0, other_qsets = 0;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		if (edata.val < 1 ||
		    (edata.val > 1 && !(adapter->flags & USING_MSIX)))
			return -EINVAL;

		for_each_port(adapter, i)
			if (adapter->port[i] && adapter->port[i] != dev)
				other_qsets += adap2pinfo(adapter, i)->nqsets;

		if (edata.val + other_qsets > SGE_QSETS)
			return -EINVAL;

		pi->nqsets = edata.val;

		for_each_port(adapter, i)
			if (adapter->port[i]) {
				pi = adap2pinfo(adapter, i);
				pi->first_qset = first_qset;
				first_qset += pi->nqsets;
			}
		break;
	}
	case CHELSIO_GET_QSET_NUM:{
		struct ch_reg edata;

		edata.cmd = CHELSIO_GET_QSET_NUM;
		edata.val = pi->nqsets;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;
	}
	case CHELSIO_LOAD_FW:{
		u8 *fw_data;
		struct ch_mem_range t;

		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		/* Check t.len sanity ? */
		fw_data = kmalloc(t.len, GFP_KERNEL);
		if (!fw_data)
			return -ENOMEM;

		if (copy_from_user
		    (fw_data, useraddr + sizeof(t), t.len)) {
			kfree(fw_data);
			return -EFAULT;
		}

		ret = t3_load_fw(adapter, fw_data, t.len);
		kfree(fw_data);
		if (ret)
			return ret;
		break;
	}
	case CHELSIO_SETMTUTAB:{
		struct ch_mtus m;
		int i;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (offload_running(adapter))
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (m.nmtus != NMTUS)
			return -EINVAL;
		if (m.mtus[0] < 81)	/* accommodate SACK */
			return -EINVAL;

		/* MTUs must be in ascending order */
		for (i = 1; i < NMTUS; ++i)
			if (m.mtus[i] < m.mtus[i - 1])
				return -EINVAL;

		memcpy(adapter->params.mtus, m.mtus,
		       sizeof(adapter->params.mtus));
		break;
	}
	case CHELSIO_GET_PM:{
		struct tp_params *p = &adapter->params.tp;
		struct ch_pm m = {.cmd = CHELSIO_GET_PM };

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		m.tx_pg_sz = p->tx_pg_size;
		m.tx_num_pg = p->tx_num_pgs;
		m.rx_pg_sz = p->rx_pg_size;
		m.rx_num_pg = p->rx_num_pgs;
		m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
		if (copy_to_user(useraddr, &m, sizeof(m)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_PM:{
		struct ch_pm m;
		struct tp_params *p = &adapter->params.tp;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (!is_power_of_2(m.rx_pg_sz) ||
		    !is_power_of_2(m.tx_pg_sz))
			return -EINVAL;	/* not power of 2 */
		if (!(m.rx_pg_sz & 0x14000))
			return -EINVAL;	/* not 16KB or 64KB */
		if (!(m.tx_pg_sz & 0x1554000))
			return -EINVAL;
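
		/*
		 * The masks encode the acceptable power-of-2 page sizes:
		 * 0x14000 has bits 14 and 16 set (16KB or 64KB for Rx);
		 * 0x1554000 has every other bit from 14 to 24 set
		 * (16KB up to 16MB, in powers of 4, for Tx).
		 */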
		if (m.tx_num_pg == -1)
			m.tx_num_pg = p->tx_num_pgs;
		if (m.rx_num_pg == -1)
			m.rx_num_pg = p->rx_num_pgs;
		if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
			return -EINVAL;
		if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
		    m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
			return -EINVAL;
		p->rx_pg_size = m.rx_pg_sz;
		p->tx_pg_size = m.tx_pg_sz;
		p->rx_num_pgs = m.rx_num_pg;
		p->tx_num_pgs = m.tx_num_pg;
		break;
	}
	case CHELSIO_GET_MEM:{
		struct ch_mem_range t;
		struct mc7 *mem;
		u64 buf[32];

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!(adapter->flags & FULL_INIT_DONE))
			return -EIO;	/* need the memory controllers */
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if ((t.addr & 7) || (t.len & 7))
			return -EINVAL;
		if (t.mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t.mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t.mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return -EINVAL;

		/*
		 * Version scheme:
		 * bits 0..9: chip version
		 * bits 10..15: chip revision
		 */
		t.version = 3 | (adapter->params.rev << 10);
		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;

		/*
		 * Read 256 bytes at a time as len can be large and we don't
		 * want to use huge intermediate buffers.
		 */
		useraddr += sizeof(t);	/* advance to start of buffer */
		while (t.len) {
			unsigned int chunk =
			    min_t(unsigned int, t.len, sizeof(buf));

			ret =
			    t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
					   buf);
			if (ret)
				return ret;
			if (copy_to_user(useraddr, buf, chunk))
				return -EFAULT;
			useraddr += chunk;
			t.addr += chunk;
			t.len -= chunk;
		}
		break;
	}
	case CHELSIO_SET_TRACE_FILTER:{
		struct ch_trace t;
		const struct trace_params *tp;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!offload_running(adapter))
			return -EAGAIN;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		tp = (const struct trace_params *)&t.sip;
		if (t.config_tx)
			t3_config_trace_filter(adapter, tp, 0,
					       t.invert_match,
					       t.trace_tx);
		if (t.config_rx)
			t3_config_trace_filter(adapter, tp, 1,
					       t.invert_match,
					       t.trace_rx);
		break;
	}
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct mii_ioctl_data *data = if_mii(req);
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	switch (cmd) {
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		/* Convert phy_id from older PRTAD/DEVAD format */
		if (is_10G(adapter) &&
		    !mdio_phy_id_is_c45(data->phy_id) &&
		    (data->phy_id & 0x1f00) &&
		    !(data->phy_id & 0xe0e0))
			data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
						       data->phy_id & 0x1f);
		/* FALLTHRU */
	case SIOCGMIIPHY:
		return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
	case SIOCCHIOCTL:
		return cxgb_extension_ioctl(dev, req->ifr_data);
	default:
		return -EOPNOTSUPP;
	}
}
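
/*
 * Illustrative sketch only (not driver code): user space reaches
 * cxgb_extension_ioctl() above through the SIOCCHIOCTL private ioctl,
 * passing one of the ch_* request structures via ifr_data.  Roughly,
 * assuming a port named "eth0" and an open socket sockfd:
 *
 *	struct ch_reg edata = { .cmd = CHELSIO_GET_QSET_NUM };
 *	struct ifreq ifr;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&edata;
 *	if (ioctl(sockfd, SIOCCHIOCTL, &ifr) == 0)
 *		printf("%u queue sets\n", edata.val);
 */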
static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	if (new_mtu < 81)	/* accommodate SACK */
		return -EINVAL;
	if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
		return ret;
	dev->mtu = new_mtu;
	init_port_mtus(adapter);
	if (adapter->params.rev == 0 && offload_running(adapter))
		t3_load_mtus(adapter, adapter->params.mtus,
			     adapter->params.a_wnd, adapter->params.b_wnd,
			     adapter->port[0]->mtu);
	return 0;
}
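
/*
 * Program a new station MAC address: update the net_device copy, push it
 * to the MAC, and, when the offload path is active, refresh the port's
 * SMT entry so offloaded connections pick up the new source MAC as well.
 */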
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
	if (offload_running(adapter))
		write_smt_entry(adapter, pi->port_id);
	return 0;
}

/**
 * t3_synchronize_rx - wait for current Rx processing on a port to complete
 * @adap: the adapter
 * @p: the port
 *
 * Ensures that current Rx processing on any of the queues associated with
 * the given port completes before returning.  We do this by acquiring and
 * releasing the locks of the response queues associated with the port.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
	int i;

	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
		struct sge_rspq *q = &adap->sge.qs[i].rspq;

		spin_lock_irq(&q->lock);
		spin_unlock_irq(&q->lock);
	}
}
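
/*
 * Enable or disable VLAN acceleration for a port.  Revisions above 0 have
 * a per-port control; rev 0 parts have a single global control that must
 * stay on while any port still has a VLAN group.  Rx is synchronized
 * afterwards so no response queue is still using the old setting.
 */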
static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	pi->vlan_grp = grp;
	if (adapter->params.rev > 0)
		t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
	else {
		/* single control for all ports */
		unsigned int i, have_vlans = 0;

		for_each_port(adapter, i)
			have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;

		t3_set_vlan_accel(adapter, 1, have_vlans);
	}
	t3_synchronize_rx(adapter, pi);
}
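
/*
 * netpoll entry point: run each of the port's response queues through the
 * same handler the interrupt path would use, with the interrupt source
 * chosen to match the MSI-X (per-qset) or MSI/INTx (per-adapter) wiring.
 */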
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int qidx;

	for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
		struct sge_qset *qs = &adapter->sge.qs[qidx];
		void *source;

		if (adapter->flags & USING_MSIX)
			source = qs;
		else
			source = adapter;

		t3_intr_handler(adapter, qs->rspq.polling) (0, source);
	}
}
#endif

/*
 * Periodic accumulation of MAC statistics.
 */
static void mac_stats_update(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (netif_running(dev)) {
			spin_lock(&adapter->stats_lock);
			t3_mac_update_stats(&p->mac);
			spin_unlock(&adapter->stats_lock);
		}
	}
}
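
/*
 * Poll-driven link maintenance: report any pending link fault, and for
 * PHYs without a working interrupt pin re-sample the link state while the
 * XGMAC interrupt is briefly masked to avoid racing the interrupt path.
 */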
static void check_link_status(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);
		int link_fault;

		spin_lock_irq(&adapter->work_lock);
		link_fault = p->link_fault;
		spin_unlock_irq(&adapter->work_lock);

		if (link_fault) {
			t3_link_fault(adapter, i);
			continue;
		}

		if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
			t3_xgm_intr_disable(adapter, i);
			t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);

			t3_link_changed(adapter, i);
			t3_xgm_intr_enable(adapter, i);
		}
	}
}
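
/*
 * T3B2 MAC watchdog.  Runs the per-port watchdog task under the RTNL
 * (taken with trylock so we never deadlock against ifdown); a return of 1
 * means the MAC was toggled, 2 means it was reset and must be fully
 * reprogrammed, as done below.
 */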
static void check_t3b2_mac(struct adapter *adapter)
{
	int i;

	if (!rtnl_trylock())	/* synchronize with ifdown */
		return;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);
		int status;

		if (!netif_running(dev))
			continue;

		status = 0;
		if (netif_running(dev) && netif_carrier_ok(dev))
			status = t3b2_mac_watchdog_task(&p->mac);
		if (status == 1)
			p->mac.stats.num_toggled++;
		else if (status == 2) {
			struct cmac *mac = &p->mac;

			t3_mac_set_mtu(mac, dev->mtu);
			t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
			cxgb_set_rxmode(dev);
			t3_link_start(&p->phy, mac, &p->link_config);
			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
			t3_port_intr_enable(adapter, p->port_id);
			p->mac.stats.num_resets++;
		}
	}
	rtnl_unlock();
}

static void t3_adap_check_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       adap_check_task.work);
	const struct adapter_params *p = &adapter->params;
	int port;
	unsigned int v, status, reset;

	adapter->check_task_cnt++;

	check_link_status(adapter);

	/* Accumulate MAC stats if needed */
	if (!p->linkpoll_period ||
	    (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
	    p->stats_update_period) {
		mac_stats_update(adapter);
		adapter->check_task_cnt = 0;
	}

	if (p->rev == T3_REV_B2)
		check_t3b2_mac(adapter);

	/*
	 * Scan the XGMACs to check for various conditions which we want to
	 * monitor in a periodic polling manner rather than via an interrupt
	 * condition.  This is used for conditions which would otherwise flood
	 * the system with interrupts and we only really need to know that the
	 * conditions are "happening" ...  For each condition we count the
	 * detection of the condition and reset it for the next polling loop.
	 */
	for_each_port(adapter, port) {
		struct cmac *mac = &adap2pinfo(adapter, port)->mac;
		u32 cause;

		cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
		reset = 0;
		if (cause & F_RXFIFO_OVERFLOW) {
			mac->stats.rx_fifo_ovfl++;
			reset |= F_RXFIFO_OVERFLOW;
		}

		t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
	}

	/*
	 * We do the same as above for FL_EMPTY interrupts.
	 */
	status = t3_read_reg(adapter, A_SG_INT_CAUSE);
	reset = 0;

	if (status & F_FLEMPTY) {
		struct sge_qset *qs = &adapter->sge.qs[0];
		int i = 0;

		reset |= F_FLEMPTY;

		v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
		    0xffff;

		while (v) {
			qs->fl[i].empty += (v & 1);
			if (i)
				qs++;
			i ^= 1;
			v >>= 1;
		}
	}

	t3_write_reg(adapter, A_SG_INT_CAUSE, reset);

	/* Schedule the next check update if any port is active. */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_chk_task(adapter);
	spin_unlock_irq(&adapter->work_lock);
}

/*
 * Processes external (PHY) interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       ext_intr_handler_task);
	int i;

	/* Disable link fault interrupts */
	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		t3_xgm_intr_disable(adapter, i);
		t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
	}

	/* Re-enable link fault interrupts */
	t3_phy_intr_handler(adapter);

	for_each_port(adapter, i)
		t3_xgm_intr_enable(adapter, i);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask |= F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
	}
	spin_unlock_irq(&adapter->work_lock);
}

/*
 * Interrupt-context handler for external (PHY) interrupts.
 */
void t3_os_ext_intr_handler(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as they may be slow
	 * and we use a mutex to protect MDIO registers.  We disable PHY
	 * interrupts in the meantime and let the task reenable them when
	 * it's done.
	 */
	spin_lock(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask &= ~F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
		queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
	}
	spin_unlock(&adapter->work_lock);
}

void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
{
	struct net_device *netdev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(netdev);

	spin_lock(&adapter->work_lock);
	pi->link_fault = 1;
	spin_unlock(&adapter->work_lock);
}
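
/*
 * Quiesce the adapter after a fatal or PCI error: notify and close the
 * offload layer, bring every running port down, stop the SGE timers and,
 * when requested, reset the chip before disabling the PCI device.
 */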
static int t3_adapter_error(struct adapter *adapter, int reset)
{
	int i, ret = 0;

	if (is_offload(adapter) &&
	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
		cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
		offload_close(&adapter->tdev);
	}

	/* Stop all ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev))
			cxgb_close(netdev);
	}

	/* Stop SGE timers */
	t3_stop_sge_timers(adapter);

	adapter->flags &= ~FULL_INIT_DONE;

	if (reset)
		ret = t3_reset_adapter(adapter);

	pci_disable_device(adapter->pdev);

	return ret;
}
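
/*
 * Bring the PCI function back after a reset: re-enable the device, restore
 * config space, drop the stale SGE resources and re-run the replay
 * preparation so the chip can be reinitialized from scratch.
 */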
static int t3_reenable_adapter(struct adapter *adapter)
{
	if (pci_enable_device(adapter->pdev)) {
		dev_err(&adapter->pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto err;
	}
	pci_set_master(adapter->pdev);
	pci_restore_state(adapter->pdev);
	pci_save_state(adapter->pdev);

	/* Free sge resources */
	t3_free_sge_resources(adapter);

	if (t3_replay_prep_adapter(adapter))
		goto err;

	return 0;
err:
	return -1;
}
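
/*
 * Reopen every port that was running before the reset and, unless offload
 * has been administratively disabled, tell the upper-layer drivers the
 * adapter is back.
 */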
static void t3_resume_ports(struct adapter *adapter)
{
	int i;

	/* Restart the ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev)) {
			if (cxgb_open(netdev)) {
				dev_err(&adapter->pdev->dev,
					"can't bring device back up"
					" after reset\n");
				continue;
			}
		}
	}

	if (is_offload(adapter) && !ofld_disable)
		cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
}

/*
 * Processes a fatal error.
 * Bring the ports down, reset the chip, bring the ports back up.
 */
static void fatal_error_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       fatal_error_handler_task);
	int err = 0;

	rtnl_lock();
	err = t3_adapter_error(adapter, 1);
	if (!err)
		err = t3_reenable_adapter(adapter);
	if (!err)
		t3_resume_ports(adapter);

	CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
	rtnl_unlock();
}

void t3_fatal_err(struct adapter *adapter)
{
	unsigned int fw_status[4];

	if (adapter->flags & FULL_INIT_DONE) {
		t3_sge_stop(adapter);
		t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
		t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);

		spin_lock(&adapter->work_lock);
		t3_intr_disable(adapter);
		queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
		spin_unlock(&adapter->work_lock);
	}
	CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
	if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
		CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
			 fw_status[0], fw_status[1],
			 fw_status[2], fw_status[3]);
}

/**
 * t3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
					     pci_channel_state_t state)
{
	struct adapter *adapter = pci_get_drvdata(pdev);
	int ret;

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	ret = t3_adapter_error(adapter, 0);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * t3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (!t3_reenable_adapter(adapter))
		return PCI_ERS_RESULT_RECOVERED;

	return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * t3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void t3_io_resume(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
		 t3_read_reg(adapter, A_PCIE_PEX_ERR));

	t3_resume_ports(adapter);
}

static struct pci_error_handlers t3_err_handler = {
	.error_detected = t3_io_error_detected,
	.slot_reset = t3_io_slot_reset,
	.resume = t3_io_resume,
};

/*
 * Set the number of qsets based on the number of CPUs and the number of ports,
 * not to exceed the number of available qsets, assuming there are enough qsets
 * per port in HW.
 */
static void set_nqsets(struct adapter *adap)
{
	int i, j = 0;
	int num_cpus = num_online_cpus();
	int hwports = adap->params.nports;
	int nqsets = adap->msix_nvectors - 1;

	if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
		if (hwports == 2 &&
		    (hwports * nqsets > SGE_QSETS ||
		     num_cpus >= nqsets / hwports))
			nqsets /= hwports;
		if (nqsets > num_cpus)
			nqsets = num_cpus;
		if (nqsets < 1 || hwports == 4)
			nqsets = 1;
	} else
		nqsets = 1;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = j;
		pi->nqsets = nqsets;
		j = pi->first_qset + nqsets;

		dev_info(&adap->pdev->dev,
			 "Port %d using %d queue sets.\n", i, nqsets);
	}
}
static int __devinit cxgb_enable_msix(struct adapter *adap)
{
	struct msix_entry entries[SGE_QSETS + 1];
	int vectors;
	int i, err;

	vectors = ARRAY_SIZE(entries);
	for (i = 0; i < vectors; ++i)
		entries[i].entry = i;

	while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
		vectors = err;

	if (err < 0)
		pci_disable_msix(adap->pdev);

	if (!err && vectors < (adap->params.nports + 1)) {
		pci_disable_msix(adap->pdev);
		err = -1;
	}

	if (!err) {
		for (i = 0; i < vectors; ++i)
			adap->msix_info[i].vec = entries[i].vector;
		adap->msix_nvectors = vectors;
	}

	return err;
}

static void __devinit print_port_info(struct adapter *adap,
				      const struct adapter_info *ai)
{
	static const char *pci_variant[] = {
		"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
	};

	int i;
	char buf[80];

	if (is_pcie(adap))
		snprintf(buf, sizeof(buf), "%s x%d",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.width);
	else
		snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.speed, adap->params.pci.width);

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		if (!test_bit(i, &adap->registered_device_map))
			continue;
		printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
		       dev->name, ai->desc, pi->phy.desc,
		       is_offload(adap) ? "R" : "", adap->params.rev, buf,
		       (adap->flags & USING_MSIX) ? " MSI-X" :
		       (adap->flags & USING_MSI) ? " MSI" : "");
		if (adap->name == dev->name && adap->params.vpd.mclk)
			printk(KERN_INFO
			       "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
			       adap->name, t3_mc7_size(&adap->cm) >> 20,
			       t3_mc7_size(&adap->pmtx) >> 20,
			       t3_mc7_size(&adap->pmrx) >> 20,
			       adap->params.vpd.sn);
	}
}

static const struct net_device_ops cxgb_netdev_ops = {
	.ndo_open		= cxgb_open,
	.ndo_stop		= cxgb_close,
	.ndo_start_xmit		= t3_eth_xmit,
	.ndo_get_stats		= cxgb_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= cxgb_set_rxmode,
	.ndo_do_ioctl		= cxgb_ioctl,
	.ndo_change_mtu		= cxgb_change_mtu,
	.ndo_set_mac_address	= cxgb_set_mac_addr,
	.ndo_vlan_rx_register	= vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cxgb_netpoll,
#endif
};

static void __devinit cxgb3_init_iscsi_mac(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	/* Derive the iSCSI MAC from the port's LAN MAC by setting a bit
	 * in octet 3, keeping the two addresses distinct. */
	memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
	pi->iscsic.mac_addr[3] |= 0x80;
}
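
/*
 * PCI probe: one-time global setup (version banner, workqueue), BAR and
 * DMA configuration, per-port net_device allocation, and finally netdev
 * registration.  Registration failures are tolerated per port as long as
 * at least one net device comes up.
 */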
static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	static int version_printed;

	int i, err, pci_using_dac = 0;
	resource_size_t mmio_start, mmio_len;
	const struct adapter_info *ai;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	if (!version_printed) {
		printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
		++version_printed;
	}

	if (!cxgb3_wq) {
		cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
		if (!cxgb3_wq) {
			printk(KERN_ERR DRV_NAME
			       ": cannot initialize work queue\n");
			return -ENOMEM;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto out_disable_device;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	ai = t3_get_adapter_info(ent->driver_data);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter->nofail_skb =
		alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
	if (!adapter->nofail_skb) {
		dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->regs = ioremap_nocache(mmio_start, mmio_len);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	adapter->mmio_len = mmio_len;

	mutex_init(&adapter->mdio_lock);
	spin_lock_init(&adapter->work_lock);
	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->adapter_list);
	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
	INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);

	for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->rx_offload = T3_RX_CSUM | T3_LRO;
		pi->port_id = i;
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		netdev->irq = pdev->irq;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
		netdev->features |= NETIF_F_GRO;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;

		netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		netdev->netdev_ops = &cxgb_netdev_ops;
		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter);
	if (t3_prep_adapter(adapter, ai, 1) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err)
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i]->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}

	for_each_port(adapter, i)
		cxgb3_init_iscsi_mac(adapter->port[i]);

	/* Driver's ready. Reflect it on LEDs */
	t3_led_ready(adapter);

	if (is_offload(adapter)) {
		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
		cxgb3_adapter_ofld(adapter);
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && cxgb_enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	set_nqsets(adapter);

	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
				 &cxgb3_attr_group);

	print_port_info(adapter, ai);
	return 0;

out_free_dev:
	iounmap(adapter->regs);
	for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
		if (adapter->port[i])
			free_netdev(adapter->port[i]);

out_free_adapter:
	kfree(adapter);

out_disable_device:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
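
/*
 * PCI remove: tear down in roughly the reverse order of init_one(),
 * unregistering the netdevs and the offload layer before freeing SGE
 * resources and unmapping the registers.
 */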
static void __devexit remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (adapter) {
		int i;

		t3_sge_stop(adapter);
		sysfs_remove_group(&adapter->port[0]->dev.kobj,
				   &cxgb3_attr_group);

		if (is_offload(adapter)) {
			cxgb3_adapter_unofld(adapter);
			if (test_bit(OFFLOAD_DEVMAP_BIT,
				     &adapter->open_device_map))
				offload_close(&adapter->tdev);
		}

		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);

		t3_stop_sge_timers(adapter);
		t3_free_sge_resources(adapter);
		cxgb_disable_msi(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i])
				free_netdev(adapter->port[i]);

		iounmap(adapter->regs);
		if (adapter->nofail_skb)
			kfree_skb(adapter->nofail_skb);
		kfree(adapter);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = cxgb3_pci_tbl,
	.probe = init_one,
	.remove = __devexit_p(remove_one),
	.err_handler = &t3_err_handler,
};

static int __init cxgb3_init_module(void)
{
	int ret;

	cxgb3_offload_init();

	ret = pci_register_driver(&driver);
	return ret;
}

static void __exit cxgb3_cleanup_module(void)
{
	pci_unregister_driver(&driver);
	if (cxgb3_wq)
		destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);