/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/bonding.h>
#include <linux/uaccess.h>
#include <linux/crash_dump.h>
#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4fw_version.h"
#include "cxgb4_dcb.h"
#include "cxgb4_debugfs.h"
#include "clip_tbl.h"
#include "l2t.h"
#include "sched.h"
#include "cxgb4_tc_u32.h"
#include "cxgb4_ptp.h"
char cxgb4_driver_name[] = KBUILD_MODNAME;

#define DRV_VERSION "2.0.0-ko"
const char cxgb4_driver_version[] = DRV_VERSION;
#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/* Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct pci_device_id cxgb4_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x4

/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
 * called for both.
 */
#define CH_PCI_DEVICE_ID_FUNCTION2 0x0

#define CH_PCI_ID_TABLE_ENTRY(devid) \
		{PCI_VDEVICE(CHELSIO, (devid)), 4}

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
		{ 0, } \
	}

#include "t4_pci_id_tbl.h"
#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW6_FNAME "cxgb4/t6fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"
#define FW6_CFNAME "cxgb4/t6-config.txt"
#define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
#define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
#define PHY_AQ1202_DEVICEID 0x4409
#define PHY_BCM84834_DEVICEID 0x4486

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
MODULE_FIRMWARE(FW6_FNAME);
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 *     msi = 2: choose from among all three options
 *     msi = 1: only consider MSI and INTx interrupts
 *     msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
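
/* Example (illustrative): the parameter can be given at load time, e.g.
 *
 *	modprobe cxgb4 msi=1
 *
 * to rule out MSI-X on a platform with suspect MSI-X support, or changed
 * later through /sys/module/cxgb4/parameters/msi (the 0644 mode above makes
 * it writable); a runtime change only affects adapters probed afterwards.
 */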
/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it, like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;
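
/* Why 2 bytes: the 14-byte Ethernet header is not a multiple of 4, so a
 * frame DMA'd to a 4-byte-aligned buffer leaves the IP header at offset 14,
 * i.e. misaligned.  Shifting the whole frame by 2 fixes that:
 *
 *	buf + 0:  2 bytes of pad (rx_dma_offset)
 *	buf + 2:  14-byte Ethernet header
 *	buf + 16: IP header, now 4-byte aligned
 *
 * This mirrors the generic NET_IP_ALIGN convention used elsewhere in the
 * kernel.
 */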
/* TX Queue select used to determine what algorithm to use for selecting TX
 * queue. Select between the kernel provided function (select_queue=0) or the
 * driver's cxgb_select_queue() function (select_queue=1)
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
		 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");

static struct dentry *cxgb4_debugfs_root;

LIST_HEAD(adapter_list);
DEFINE_MUTEX(uld_mutex);
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

		const char *s;
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		case 100:
			s = "100Mbps";
			break;
		case 1000:
			s = "1Gbps";
			break;
		case 10000:
			s = "10Gbps";
			break;
		case 25000:
			s = "25Gbps";
			break;
		case 40000:
			s = "40Gbps";
			break;
		case 100000:
			s = "100Gbps";
			break;
		default:
			pr_info("%s: unsupported speed: %d\n",
				dev->name, p->link_cfg.speed);
			s = "";
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
			    fc[p->link_cfg.fc]);
	}
}
#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
	int i;

	/* We use a simple mapping of Port TX Queue Index to DCB
	 * Priority when we're enabling DCB.
	 */
	for (i = 0; i < pi->nqsets; i++, txq++) {
		u32 name, value;
		int err;

		name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			FW_PARAMS_PARAM_X_V(
				FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
			FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
		value = enable ? i : 0xffffffff;

		/* Since we can be called while atomic (from "interrupt
		 * level") we need to issue the Set Parameters Command
		 * without sleeping (timeout < 0).
		 */
		err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
					    &name, &value,
					    -FW_CMD_MAX_TIMEOUT);

		if (err)
			dev_err(adap->pdev_dev,
				"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
				enable ? "set" : "unset", pi->port_id, i, -err);
		else
			txq->dcb_prio = value;
	}
}

static int cxgb4_dcb_enabled(const struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	if (!pi->dcb.enabled)
		return 0;

	return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
		(pi->dcb.state == CXGB4_DCB_STATE_HOST));
}
#endif /* CONFIG_CHELSIO_T4_DCB */
void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else {
#ifdef CONFIG_CHELSIO_T4_DCB
			if (cxgb4_dcb_enabled(dev)) {
				cxgb4_dcb_state_init(dev);
				dcb_tx_queue_prio_enable(dev, false);
			}
#endif /* CONFIG_CHELSIO_T4_DCB */
			netif_carrier_off(dev);
		}

		link_report(dev);
	}
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		netdev_info(dev, "%s: unsupported port module inserted\n",
			    dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		netdev_info(dev, "%s: unknown port module inserted\n",
			    dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
		netdev_info(dev, "%s: transceiver module error\n", dev->name);
	else
		netdev_info(dev, "%s: unknown module type %d inserted\n",
			    dev->name, pi->mod_type);
}
int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");
static inline int cxgb4_set_addr_hash(struct port_info *pi)
{
	struct adapter *adap = pi->adapter;
	u64 vec = 0;
	bool ucast = false;
	struct hash_mac_addr *entry;

	/* Calculate the hash vector for the updated list and program it */
	list_for_each_entry(entry, &adap->mac_hlist, list) {
		ucast |= is_unicast_ether_addr(entry->addr);
		vec |= (1ULL << hash_mac_addr(entry->addr));
	}
	return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast,
				vec, false);
}
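
/* Example (illustrative): with two multicast addresses on mac_hlist whose
 * hash_mac_addr() values are 3 and 40, the loop above leaves ucast == false
 * and vec == (1ULL << 3) | (1ULL << 40) -- one bit per bucket of the
 * 64-entry hash filter that t4_set_addr_hash() programs into the hardware.
 */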
static int cxgb4_mac_sync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = false;
	bool ucast = is_unicast_ether_addr(mac_addr);
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *new_entry;

	ret = t4_alloc_mac_filt(adap, adap->mbox, pi->viid, free, 1, maclist,
				NULL, ucast ? &uhash : &mhash, false);
	if (ret < 0)
		goto out;
	/* if hash != 0, then add the addr to hash addr list
	 * so on the end we will calculate the hash for the
	 * list and program it
	 */
	if (uhash || mhash) {
		new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
		if (!new_entry)
			return -ENOMEM;
		ether_addr_copy(new_entry->addr, mac_addr);
		list_add_tail(&new_entry->list, &adap->mac_hlist);
		ret = cxgb4_set_addr_hash(pi);
	}
out:
	return ret < 0 ? ret : 0;
}

static int cxgb4_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *entry, *tmp;

	/* If the MAC address to be removed is in the hash addr
	 * list, delete it from the list and update hash vector
	 */
	list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) {
		if (ether_addr_equal(entry->addr, mac_addr)) {
			list_del(&entry->list);
			kfree(entry);
			return cxgb4_set_addr_hash(pi);
		}
	}

	ret = t4_free_mac_filt(adap, adap->mbox, pi->viid, 1, maclist, false);
	return ret < 0 ? -EINVAL : 0;
}
/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	__dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
	__dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu,
			     (dev->flags & IFF_PROMISC) ? 1 : 0,
			     (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
			     sleep_ok);
}
/**
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->pf;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (ret == 0) {
		ret = t4_change_mac(pi->adapter, mb, pi->viid,
				    pi->xact_addr_filt, dev->dev_addr, true,
				    true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}
	if (ret == 0)
		ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0)
		ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
					  true, CXGB4_DCB_ENABLED);

	return ret;
}
#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
	int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
	struct net_device *dev = adap->port[adap->chan_map[port]];
	int old_dcb_enabled = cxgb4_dcb_enabled(dev);
	int new_dcb_enabled;

	cxgb4_dcb_handle_fw_update(adap, pcmd);
	new_dcb_enabled = cxgb4_dcb_enabled(dev);

	/* If the DCB has become enabled or disabled on the port then we're
	 * going to need to set up/tear down DCB Priority parameters for the
	 * TX Queues associated with the port.
	 */
	if (new_dcb_enabled != old_dcb_enabled)
		dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
}
#endif /* CONFIG_CHELSIO_T4_DCB */
/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;                                          /* skip RSS header */

	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
	 */
	if (unlikely(opcode == CPL_FW4_MSG &&
		     ((const struct cpl_fw4_msg *)rsp)->type ==
		     FW_TYPE_RSSCPL)) {
		rsp++;
		opcode = ((const struct rss_header *)rsp)->opcode;
		rsp++;
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adap->pdev_dev,
				"unexpected FW4/CPL %#x on FW event queue\n",
				opcode);
			goto out;
		}
	}

	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
		struct sge_txq *txq;

		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		txq->restarts++;
		if (txq->q_type == CXGB4_TXQ_ETH) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			netif_tx_wake_queue(eq->txq);
		} else {
			struct sge_uld_txq *oq;

			oq = container_of(txq, struct sge_uld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
		const struct fw_port_cmd *pcmd = (const void *)p->data;
		unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
		unsigned int action =
			FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_GET_PORT_INFO) {
			int port = FW_PORT_CMD_PORTID_G(
					be32_to_cpu(pcmd->op_to_portid));
			struct net_device *dev =
				q->adap->port[q->adap->chan_map[port]];
			int state_input = ((pcmd->u.info.dcbxdis_pkd &
					    FW_PORT_CMD_DCBXDIS_F)
					   ? CXGB4_DCB_INPUT_FW_DISABLED
					   : CXGB4_DCB_INPUT_FW_ENABLED);

			cxgb4_dcb_state_fsm(dev, state_input);
		}

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_L2_DCB_CFG)
			dcb_rpl(q->adap, pcmd);
		else
#endif
			if (p->type == 0)
				t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (void *)rsp;

		filter_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
out:
	return 0;
}
static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;
	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));

	if (v & PFSW_F) {
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
	}
	if (adap->flags & MASTER_PF)
		t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

	/* non-data interrupts */
	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

	/* FW events */
	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
		 adap->port[0]->name);

	/* Ethernet queues */
	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++)
			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
				 d->name, i);
	}
}

static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err, ethqidx;
	int msi_index = 2;

	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
			  adap->msix_info[1].desc, &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	return 0;

unwind:
	while (--ethqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ethrxq[ethqidx].rspq);
	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
	int i, msi_index = 2;
	struct sge *s = &adap->sge;

	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	for_each_ethrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
}
/**
 *	cxgb4_write_rss - write the RSS table for a given port
 *	@pi: the port
 *	@queues: array of queue indices for RSS
 *
 *	Sets up the portion of the HW RSS table for the port's VI to distribute
 *	packets to the Rx queues in @queues.
 *	Should never be called before setting up sge eth rx queues
 */
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
{
	u16 *rss;
	int i, err;
	struct adapter *adapter = pi->adapter;
	const struct sge_eth_rxq *rxq;

	rxq = &adapter->sge.ethrxq[pi->first_qset];
	rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = rxq[*queues].rspq.abs_id;

	err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
				  pi->rss_size, rss, pi->rss_size);
	/* If Tunnel All Lookup isn't specified in the global RSS
	 * Configuration, then we need to specify a default Ingress
	 * Queue for any ingress packets which aren't hashed.  We'll
	 * use our first ingress queue ...
	 */
	if (!err)
		err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
				       FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_UDPEN_F,
				       rss[0]);
	kfree(rss);
	return err;
}

/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
	int i, j, err;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		/* Fill default values with equal distribution */
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = j % pi->nqsets;

		err = cxgb4_write_rss(pi, pi->rss);
		if (err)
			return err;
	}
	return 0;
}
/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
	qid -= p->ingr_start;
	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (q && q->handler)
			napi_disable(&q->napi);
	}
}

/* Disable interrupt and napi handler */
static void disable_interrupts(struct adapter *adap)
{
	if (adap->flags & FULL_INIT_DONE) {
		t4_intr_disable(adap);
		if (adap->flags & USING_MSIX) {
			free_msix_queue_irqs(adap);
			free_irq(adap->msix_info[0].vec, adap);
		} else {
			free_irq(adap->pdev->irq, adap);
		}
		quiesce_rx(adap);
	}
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;
		if (q->handler)
			napi_enable(&q->napi);

		/* 0-increment GTS to start the timer and enable interrupts */
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
			     SEINTARM_V(q->intr_params) |
			     INGRESSQID_V(q->cntxt_id));
	}
}
static int setup_fw_sge_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err = 0;

	bitmap_zero(s->starving_fl, s->egr_sz);
	bitmap_zero(s->txq_maperr, s->egr_sz);

	if (adap->flags & USING_MSIX)
		adap->msi_idx = 1;	/* vector 0 is for non-queue interrupts */
	else {
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL, NULL, -1);
		if (err)
			return err;
		adap->msi_idx = -((int)s->intrq.abs_id + 1);
	}

	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       adap->msi_idx, NULL, fwevtq_handler, NULL, -1);
	if (err)
		t4_free_sge_resources(adap);
	return err;
}
/**
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
	int err, i, j;
	struct sge *s = &adap->sge;
	struct sge_uld_rxq_info *rxq_info = NULL;
	unsigned int cmplqid = 0;

	if (is_uld(adap))
		rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			if (adap->msi_idx > 0)
				adap->msi_idx++;
			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       adap->msi_idx, &q->fl,
					       t4_ethrx_handler, NULL,
					       t4_get_tp_ch_map(adap,
								pi->tx_chan));
			if (err)
				goto freeout;
			q->rspq.idx = j;
			memset(&q->stats, 0, sizeof(q->stats));
		}
		for (j = 0; j < pi->nqsets; j++, t++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
					netdev_get_tx_queue(dev, j),
					s->fw_evtq.cntxt_id);
			if (err)
				goto freeout;
		}
	}

	for_each_port(adap, i) {
		/* Note that cmplqid below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		if (rxq_info)
			cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;

		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id, cmplqid);
		if (err)
			goto freeout;
	}

	if (!is_t4(adap->params.chip)) {
		err = t4_sge_alloc_eth_txq(adap, &s->ptptxq, adap->port[0],
					   netdev_get_tx_queue(adap->port[0], 0),
					   s->fw_evtq.cntxt_id);
		if (err)
			goto freeout;
	}

	t4_write_reg(adap, is_t4(adap->params.chip) ?
				MPS_TRC_RSS_CONTROL_A :
				MPS_T5_TRC_RSS_CONTROL_A,
		     RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
	return 0;
freeout:
	t4_free_sge_resources(adap);
	return err;
}
static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
			     void *accel_priv, select_queue_fallback_t fallback)
{
	int txq;

#ifdef CONFIG_CHELSIO_T4_DCB
	/* If Data Center Bridging has been successfully negotiated on this
	 * link then we'll use the skb's priority to map it to a TX Queue.
	 * The skb's priority is determined via the VLAN Tag Priority Code
	 * Point field.
	 */
	if (cxgb4_dcb_enabled(dev) && !is_kdump_kernel()) {
		u16 vlan_tci;
		int err;

		err = vlan_get_tag(skb, &vlan_tci);
		if (unlikely(err)) {
			if (net_ratelimit())
				netdev_warn(dev,
					    "TX Packet without VLAN Tag on DCB Link\n");
			txq = 0;
		} else {
			txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
#ifdef CONFIG_CHELSIO_T4_FCOE
			if (skb->protocol == htons(ETH_P_FCOE))
				txq = skb->priority & 0x7;
#endif /* CONFIG_CHELSIO_T4_FCOE */
		}
		return txq;
	}
#endif /* CONFIG_CHELSIO_T4_DCB */

	if (select_queue) {
		txq = (skb_rx_queue_recorded(skb)
			? skb_get_rx_queue(skb)
			: smp_processor_id());

		while (unlikely(txq >= dev->real_num_tx_queues))
			txq -= dev->real_num_tx_queues;

		return txq;
	}

	return fallback(dev, skb) % dev->real_num_tx_queues;
}
static int closest_timer(const struct sge *s, int time)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}
/**
 *	cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
 *	@q: the Rx queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *	one of the two needs to be enabled for the queue to generate interrupts.
 */
int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
			       unsigned int us, unsigned int cnt)
{
	struct adapter *adap = q->adap;

	if ((us | cnt) == 0)
		cnt = 1;

	if (cnt) {
		int err;
		u32 v, new_idx;

		new_idx = closest_thres(&adap->sge, cnt);
		if (q->desc && q->pktcnt_idx != new_idx) {
			/* the queue has already been created, update it */
			v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X_V(
					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
			err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
					    &v, &new_idx);
			if (err)
				return err;
		}
		q->pktcnt_idx = new_idx;
	}

	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
	q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
	return 0;
}
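
/* Example (illustrative, q is a hypothetical queue set): ask for an
 * interrupt after at most ~50us or 8 packets, whichever comes first.  Both
 * values are snapped to the nearest entries of the adapter's hold-off
 * timer/counter tables by closest_timer()/closest_thres() above:
 *
 *	err = cxgb4_set_rspq_intr_params(&q->rspq, 50, 8);
 */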
static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
	const struct port_info *pi = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;
	int err;

	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
		return 0;

	err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1,
			    -1, -1, -1,
			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (unlikely(err))
		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
	return err;
}

static int setup_debugfs(struct adapter *adap)
{
	if (IS_ERR_OR_NULL(adap->debugfs_root))
		return -1;

#ifdef CONFIG_DEBUG_FS
	t4_setup_debugfs(adap);
#endif
	return 0;
}
/*
 * upper-layer driver support
 */

/*
 * Allocate an active-open TID and set it to the supplied value.
 */
int cxgb4_alloc_atid(struct tid_info *t, void *data)
{
	int atid = -1;

	spin_lock_bh(&t->atid_lock);
	if (t->afree) {
		union aopen_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->data = data;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}
EXPORT_SYMBOL(cxgb4_alloc_atid);
/*
 * Release an active-open TID.
 */
void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
{
	union aopen_entry *p = &t->atid_tab[atid - t->atid_base];

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);
}
EXPORT_SYMBOL(cxgb4_free_atid);
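
/* Design note / example (illustrative): atid_tab doubles as its own free
 * list -- free entries chain through p->next, so allocation pops the head
 * and release pushes the entry back, both in O(1) with no extra memory:
 *
 *	int atid = cxgb4_alloc_atid(&adap->tids, my_ctx);  // my_ctx: caller data
 *	if (atid >= 0) {
 *		// ... use the active-open TID ...
 *		cxgb4_free_atid(&adap->tids, atid);
 *	}
 */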
/*
 * Allocate a server TID and set it to the supplied value.
 */
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
		if (stid < t->nstids)
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1);
		if (stid < 0)
			stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid += t->stid_base;
		/* IPv6 requires max of 520 bits or 16 cells in TCAM
		 * This is equivalent to 4 TIDs. With CLIP enabled it
		 * needs 2 TIDs.
		 */
		if (family == PF_INET6) {
			t->stids_in_use += 2;
			t->v6_stids_in_use += 2;
		} else {
			t->stids_in_use++;
		}
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_stid);

/* Allocate a server filter TID and set it to the supplied value.
 */
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_next_zero_bit(t->stid_bmap,
					  t->nstids + t->nsftids, t->nstids);
		if (stid < (t->nstids + t->nsftids))
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid -= t->nstids;
		stid += t->sftid_base;
		t->sftids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_sftid);

/* Release a server TID.
 */
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
	/* Is it a server filter TID? */
	if (t->nsftids && (stid >= t->sftid_base)) {
		stid -= t->sftid_base;
		stid += t->nstids;
	} else {
		stid -= t->stid_base;
	}

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET)
		__clear_bit(stid, t->stid_bmap);
	else
		bitmap_release_region(t->stid_bmap, stid, 1);
	t->stid_tab[stid].data = NULL;
	if (stid < t->nstids) {
		if (family == PF_INET6) {
			t->stids_in_use -= 2;
			t->v6_stids_in_use -= 2;
		} else {
			t->stids_in_use--;
		}
	} else {
		t->sftids_in_use--;
	}
	spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);
/*
 * Populate a TID_RELEASE WR.  Caller must properly size the skb.
 */
static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
			   unsigned int tid)
{
	struct cpl_tid_release *req;

	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}
/*
 * Queue a TID release request and if necessary schedule a work queue to
 * process it.
 */
static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
				    unsigned int tid)
{
	void **p = &t->tid_tab[tid];
	struct adapter *adap = container_of(t, struct adapter, tids);

	spin_lock_bh(&adap->tid_release_lock);
	*p = adap->tid_release_head;
	/* Low 2 bits encode the Tx channel number */
	adap->tid_release_head = (void **)((uintptr_t)p | chan);
	if (!adap->tid_release_task_busy) {
		adap->tid_release_task_busy = true;
		queue_work(adap->workq, &adap->tid_release_task);
	}
	spin_unlock_bh(&adap->tid_release_lock);
}
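
/* Design note / example (illustrative): the pending-release list threads
 * through the TID table itself.  Each tid_tab[] slot stores the previous
 * list head, and because those slots are pointer-aligned the low 2 bits of
 * the head pointer are free to carry the Tx channel:
 *
 *	head = (void **)((uintptr_t)&t->tid_tab[tid] | chan);	// tag
 *	chan = (uintptr_t)head & 3;				// untag
 *	slot = (void **)((uintptr_t)head & ~(uintptr_t)3);
 *
 * process_tid_release_list() below performs exactly this untagging.
 */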
/*
 * Process the list of pending TID release requests.
 */
static void process_tid_release_list(struct work_struct *work)
{
	struct sk_buff *skb;
	struct adapter *adap;

	adap = container_of(work, struct adapter, tid_release_task);

	spin_lock_bh(&adap->tid_release_lock);
	while (adap->tid_release_head) {
		void **p = adap->tid_release_head;
		unsigned int chan = (uintptr_t)p & 3;
		p = (void *)p - chan;

		adap->tid_release_head = *p;
		*p = NULL;
		spin_unlock_bh(&adap->tid_release_lock);

		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
					 GFP_KERNEL)))
			schedule_timeout_uninterruptible(1);

		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
		t4_ofld_send(adap, skb);
		spin_lock_bh(&adap->tid_release_lock);
	}
	adap->tid_release_task_busy = false;
	spin_unlock_bh(&adap->tid_release_lock);
}
/*
 * Release a TID and inform HW.  If we are unable to allocate the release
 * message we defer to a work queue.
 */
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
		      unsigned short family)
{
	struct sk_buff *skb;
	struct adapter *adap = container_of(t, struct adapter, tids);

	WARN_ON(tid >= t->ntids);

	if (t->tid_tab[tid]) {
		t->tid_tab[tid] = NULL;
		atomic_dec(&t->conns_in_use);
		if (t->hash_base && (tid >= t->hash_base)) {
			if (family == AF_INET6)
				atomic_sub(2, &t->hash_tids_in_use);
			else
				atomic_dec(&t->hash_tids_in_use);
		} else {
			if (family == AF_INET6)
				atomic_sub(2, &t->tids_in_use);
			else
				atomic_dec(&t->tids_in_use);
		}
	}

	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
	if (likely(skb)) {
		mk_tid_release(skb, chan, tid);
		t4_ofld_send(adap, skb);
	} else
		cxgb4_queue_tid_release(t, chan, tid);
}
EXPORT_SYMBOL(cxgb4_remove_tid);
/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int tid_init(struct tid_info *t)
{
	struct adapter *adap = container_of(t, struct adapter, tids);
	unsigned int max_ftids = t->nftids + t->nsftids;
	unsigned int natids = t->natids;
	unsigned int stid_bmap_size;
	unsigned int ftid_bmap_size;
	size_t size;

	stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
	ftid_bmap_size = BITS_TO_LONGS(t->nftids);
	size = t->ntids * sizeof(*t->tid_tab) +
	       natids * sizeof(*t->atid_tab) +
	       t->nstids * sizeof(*t->stid_tab) +
	       t->nsftids * sizeof(*t->stid_tab) +
	       stid_bmap_size * sizeof(long) +
	       max_ftids * sizeof(*t->ftid_tab) +
	       ftid_bmap_size * sizeof(long);

	t->tid_tab = kvzalloc(size, GFP_KERNEL);
	if (!t->tid_tab)
		return -ENOMEM;

	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
	t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
	t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);
	spin_lock_init(&t->ftid_lock);

	t->stids_in_use = 0;
	t->v6_stids_in_use = 0;
	t->sftids_in_use = 0;
	t->afree = NULL;
	t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);
	atomic_set(&t->conns_in_use, 0);
	atomic_set(&t->hash_tids_in_use, 0);

	/* Setup the free list for atid_tab and clear the stid bitmap. */
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}

	if (is_offload(adap)) {
		bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
		/* Reserve stid 0 for T4/T5 adapters */
		if (!t->stid_base &&
		    CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
			__set_bit(0, t->stid_bmap);
	}

	bitmap_zero(t->ftid_bmap, t->nftids);
	return 0;
}
/**
 *	cxgb4_create_server - create an IP server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IP address to bind server to
 *	@sport: the server's TCP port
 *	@vlan: the VLAN header information
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IP server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
			__be32 sip, __be16 sport, __be16 vlan,
			unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip = sip;
	req->peer_ip = htonl(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server);

/* cxgb4_create_server6 - create an IPv6 server
 * @dev: the device
 * @stid: the server TID
 * @sip: local IPv6 address to bind server to
 * @sport: the server's TCP port
 * @queue: queue to direct messages from this server to
 *
 * Create an IPv6 server for the given port and address.
 * Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
			 const struct in6_addr *sip, __be16 sport,
			 unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req6 *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip_hi = *(__be64 *)(sip->s6_addr);
	req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
	req->peer_ip_hi = cpu_to_be64(0);
	req->peer_ip_lo = cpu_to_be64(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server6);
int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
			unsigned int queue, bool ipv6)
{
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_close_listsvr_req *req;
	int ret;

	adap = netdev2adap(dev);

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
				LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_remove_server);
/**
 *	cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
 *	@mtus: the HW MTU table
 *	@mtu: the target MTU
 *	@idx: index of selected entry in the MTU table
 *
 *	Returns the index and the value in the HW MTU table that is closest to
 *	but does not exceed @mtu, unless @mtu is smaller than any value in the
 *	table, in which case that smallest available value is selected.
 */
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
			    unsigned int *idx)
{
	unsigned int i = 0;

	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
		++i;
	if (idx)
		*idx = i;
	return mtus[i];
}
EXPORT_SYMBOL(cxgb4_best_mtu);
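
/* Example (illustrative, with a hypothetical table {88, 256, 576, 1500,
 * 9000, ...}): a target of 1400 selects index 2 and returns 576, the
 * largest entry not exceeding 1400; a target of 64, smaller than every
 * entry, returns the smallest entry, 88:
 *
 *	unsigned int idx;
 *	unsigned int mtu = cxgb4_best_mtu(adap->params.mtus, 1400, &idx);
 */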
/**
 *	cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
 *	@mtus: the HW MTU table
 *	@header_size: Header Size
 *	@data_size_max: maximum Data Segment Size
 *	@data_size_align: desired Data Segment Size Alignment (2^N)
 *	@mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
 *
 *	Similar to cxgb4_best_mtu() but instead of searching the Hardware
 *	MTU Table based solely on a Maximum MTU parameter, we break that
 *	parameter up into a Header Size and Maximum Data Segment Size, and
 *	provide a desired Data Segment Size Alignment.  If we find an MTU in
 *	the Hardware MTU Table which will result in a Data Segment Size with
 *	the requested alignment _and_ that MTU isn't "too far" from the
 *	closest MTU, then we'll return that rather than the closest MTU.
 */
unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
				    unsigned short header_size,
				    unsigned short data_size_max,
				    unsigned short data_size_align,
				    unsigned int *mtu_idxp)
{
	unsigned short max_mtu = header_size + data_size_max;
	unsigned short data_size_align_mask = data_size_align - 1;
	int mtu_idx, aligned_mtu_idx;

	/* Scan the MTU Table till we find an MTU which is larger than our
	 * Maximum MTU or we reach the end of the table.  Along the way,
	 * record the last MTU found, if any, which will result in a Data
	 * Segment Length matching the requested alignment.
	 */
	for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
		unsigned short data_size = mtus[mtu_idx] - header_size;

		/* If this MTU minus the Header Size would result in a
		 * Data Segment Size of the desired alignment, remember it.
		 */
		if ((data_size & data_size_align_mask) == 0)
			aligned_mtu_idx = mtu_idx;

		/* If we're not at the end of the Hardware MTU Table and the
		 * next element is larger than our Maximum MTU, drop out of
		 * the loop.
		 */
		if (mtu_idx + 1 < NMTUS && mtus[mtu_idx + 1] > max_mtu)
			break;
	}

	/* If we fell out of the loop because we ran to the end of the table,
	 * then we just have to use the last [largest] entry.
	 */
	if (mtu_idx == NMTUS)
		mtu_idx--;

	/* If we found an MTU which resulted in the requested Data Segment
	 * Length alignment and that's "not far" from the largest MTU which is
	 * less than or equal to the maximum MTU, then use that.
	 */
	if (aligned_mtu_idx >= 0 &&
	    mtu_idx - aligned_mtu_idx <= 1)
		mtu_idx = aligned_mtu_idx;

	/* If the caller has passed in an MTU Index pointer, pass the
	 * MTU Index back.  Return the MTU value.
	 */
	if (mtu_idxp)
		*mtu_idxp = mtu_idx;
	return mtus[mtu_idx];
}
EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
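
/* Example (illustrative): for TCP payloads with 40 bytes of IPv4 + TCP
 * headers and a desire for 8-byte-aligned segments, a caller might use:
 *
 *	mtu = cxgb4_best_aligned_mtu(adap->params.mtus,
 *				     40,	// header_size
 *				     16384,	// data_size_max
 *				     8,		// data_size_align
 *				     &mtu_idx);
 *
 * A 1500-byte entry gives a 1460-byte segment (1460 % 8 != 0), while a
 * hypothetical 1488-byte entry gives 1448 = 8 * 181; the 1488 entry would
 * be preferred when it is within one table slot of the closest entry.
 */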
/**
 *	cxgb4_tp_smt_idx - Get the Source Mac Table index for this VI
 *	@chip: chip type
 *	@viid: VI id of the given port
 *
 *	Return the SMT index for this VI.
 */
unsigned int cxgb4_tp_smt_idx(enum chip_type chip, unsigned int viid)
{
	/* In T4/T5, SMT contains 256 SMAC entries organized in
	 * 128 rows of 2 entries each.
	 * In T6, SMT contains 256 SMAC entries in 256 rows.
	 * TODO: The below code needs to be updated when we add support
	 * for 256 VFs.
	 */
	if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
		return ((viid & 0x7f) << 1);
	else
		return (viid & 0x7f);
}
EXPORT_SYMBOL(cxgb4_tp_smt_idx);
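
/* Example (illustrative): on T4/T5 the low 7 bits of the VI id select one
 * of 128 two-entry rows, so viid 0x05 maps to SMT index 0x0a (row 5,
 * entry 0); on T6 the same viid maps directly to index 0x05.
 */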
/**
 *	cxgb4_port_chan - get the HW channel of a port
 *	@dev: the net device for the port
 *
 *	Return the HW Tx channel of the given port.
 */
unsigned int cxgb4_port_chan(const struct net_device *dev)
{
	return netdev2pinfo(dev)->tx_chan;
}
EXPORT_SYMBOL(cxgb4_port_chan);

unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
{
	struct adapter *adap = netdev2adap(dev);
	u32 v1, v2, lp_count, hp_count;

	v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
	v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
	if (is_t4(adap->params.chip)) {
		lp_count = LP_COUNT_G(v1);
		hp_count = HP_COUNT_G(v1);
	} else {
		lp_count = LP_COUNT_T5_G(v1);
		hp_count = HP_COUNT_T5_G(v2);
	}
	return lpfifo ? lp_count : hp_count;
}
EXPORT_SYMBOL(cxgb4_dbfifo_count);

/**
 *	cxgb4_port_viid - get the VI id of a port
 *	@dev: the net device for the port
 *
 *	Return the VI id of the given port.
 */
unsigned int cxgb4_port_viid(const struct net_device *dev)
{
	return netdev2pinfo(dev)->viid;
}
EXPORT_SYMBOL(cxgb4_port_viid);

/**
 *	cxgb4_port_idx - get the index of a port
 *	@dev: the net device for the port
 *
 *	Return the index of the given port.
 */
unsigned int cxgb4_port_idx(const struct net_device *dev)
{
	return netdev2pinfo(dev)->port_id;
}
EXPORT_SYMBOL(cxgb4_port_idx);

void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	struct adapter *adap = pci_get_drvdata(pdev);

	spin_lock(&adap->stats_lock);
	t4_tp_get_tcp_stats(adap, v4, v6);
	spin_unlock(&adap->stats_lock);
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);

void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
		      const unsigned int *pgsz_order)
{
	struct adapter *adap = netdev2adap(dev);

	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
	t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
		     HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
		     HPZ3_V(pgsz_order[3]));
}
EXPORT_SYMBOL(cxgb4_iscsi_init);

int cxgb4_flush_eq_cache(struct net_device *dev)
{
	struct adapter *adap = netdev2adap(dev);

	return t4_sge_ctxt_flush(adap, adap->mbox);
}
EXPORT_SYMBOL(cxgb4_flush_eq_cache);
static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
{
	u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
	__be64 indices;
	int ret;

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
			   sizeof(indices), (__be32 *)&indices,
			   T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	if (!ret) {
		*cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
		*pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
	}
	return ret;
}
int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
			u16 size)
{
	struct adapter *adap = netdev2adap(dev);
	u16 hw_pidx, hw_cidx;
	int ret;

	ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
	if (ret)
		return ret;

	if (pidx != hw_pidx) {
		u16 delta;
		u32 val;

		if (pidx >= hw_pidx)
			delta = pidx - hw_pidx;
		else
			delta = size - hw_pidx + pidx;

		if (is_t4(adap->params.chip))
			val = PIDX_V(delta);
		else
			val = PIDX_T5_V(delta);
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(qid) | val);
	}
	return 0;
}
EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
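
/* Example (illustrative): the producer index wraps at the queue size, so
 * the doorbell increment is computed modulo @size.  With size = 1024,
 * hw_pidx = 1020 and a software pidx of 4, the host is 8 descriptors
 * ahead of the hardware: delta = 1024 - 1020 + 4 = 8.
 */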
int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
{
	struct adapter *adap;
	u32 offset, memtype, memaddr;
	u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
	u32 edc0_end, edc1_end, mc0_end, mc1_end;
	int ret;

	adap = netdev2adap(dev);

	offset = ((stag >> 8) * 32) + adap->vres.stag.start;

	/* Figure out where the offset lands in the Memory Type/Address scheme.
	 * This code assumes that the memory is laid out starting at offset 0
	 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
	 * and EDC1.  Some cards will have neither MC0 nor MC1, most cards have
	 * MC0, and some have both MC0 and MC1.
	 */
	size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
	edc0_size = EDRAM0_SIZE_G(size) << 20;
	size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
	edc1_size = EDRAM1_SIZE_G(size) << 20;
	size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
	mc0_size = EXT_MEM0_SIZE_G(size) << 20;

	edc0_end = edc0_size;
	edc1_end = edc0_end + edc1_size;
	mc0_end = edc1_end + mc0_size;

	if (offset < edc0_end) {
		memtype = MEM_EDC0;
		memaddr = offset;
	} else if (offset < edc1_end) {
		memtype = MEM_EDC1;
		memaddr = offset - edc0_end;
	} else {
		if (offset < mc0_end) {
			memtype = MEM_MC0;
			memaddr = offset - edc1_end;
		} else if (is_t5(adap->params.chip)) {
			size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			mc1_size = EXT_MEM1_SIZE_G(size) << 20;
			mc1_end = mc0_end + mc1_size;
			if (offset < mc1_end) {
				memtype = MEM_MC1;
				memaddr = offset - mc0_end;
			} else {
				/* offset beyond the end of any memory */
				goto err;
			}
		} else {
			/* T4/T6 only has a single memory channel */
			goto err;
		}
	}

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	return ret;

err:
	dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
		stag, offset);
	return -EINVAL;
}
EXPORT_SYMBOL(cxgb4_read_tpte);
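
/* Example (illustrative): with 128 MB of EDC0, 128 MB of EDC1 and 1 GB of
 * MC0, an offset of 192 MB falls past edc0_end (128 MB) but below edc1_end
 * (256 MB), so it decodes to MEM_EDC1 at local address
 * 192 MB - 128 MB = 64 MB.
 */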
u64 cxgb4_read_sge_timestamp(struct net_device *dev)
{
	u32 hi, lo;
	struct adapter *adap;

	adap = netdev2adap(dev);
	lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
	hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));

	return ((u64)hi << 32) | (u64)lo;
}
EXPORT_SYMBOL(cxgb4_read_sge_timestamp);

int cxgb4_bar2_sge_qregs(struct net_device *dev,
			 unsigned int qid,
			 enum cxgb4_bar2_qtype qtype,
			 int user,
			 u64 *pbar2_qoffset,
			 unsigned int *pbar2_qid)
{
	return t4_bar2_sge_qregs(netdev2adap(dev),
				 qid,
				 (qtype == CXGB4_BAR2_QTYPE_EGRESS
				  ? T4_BAR2_QTYPE_EGRESS
				  : T4_BAR2_QTYPE_INGRESS),
				 user,
				 pbar2_qoffset,
				 pbar2_qid);
}
EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);
static struct pci_driver cxgb4_driver;

static void check_neigh_update(struct neighbour *neigh)
{
	const struct device *parent;
	const struct net_device *netdev = neigh->dev;

	if (is_vlan_dev(netdev))
		netdev = vlan_dev_real_dev(netdev);
	parent = netdev->dev.parent;
	if (parent && parent->driver == &cxgb4_driver.driver)
		t4_l2t_update(dev_get_drvdata(parent), neigh);
}

static int netevent_cb(struct notifier_block *nb, unsigned long event,
		       void *data)
{
	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		check_neigh_update(data);
		break;
	case NETEVENT_REDIRECT:
	default:
		break;
	}
	return 0;
}

static bool netevent_registered;
static struct notifier_block cxgb4_netevent_nb = {
	.notifier_call = netevent_cb
};
static void drain_db_fifo(struct adapter *adap, int usecs)
{
	u32 v1, v2, lp_count, hp_count;

	do {
		v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
		v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
		if (is_t4(adap->params.chip)) {
			lp_count = LP_COUNT_G(v1);
			hp_count = HP_COUNT_G(v1);
		} else {
			lp_count = LP_COUNT_T5_G(v1);
			hp_count = HP_COUNT_T5_G(v2);
		}

		if (lp_count == 0 && hp_count == 0)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(usecs));
	} while (1);
}

static void disable_txq_db(struct sge_txq *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->db_lock, flags);
	q->db_disabled = 1;
	spin_unlock_irqrestore(&q->db_lock, flags);
}

static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
{
	spin_lock_irq(&q->db_lock);
	if (q->db_pidx_inc) {
		/* Make sure that all writes to the TX descriptors
		 * are committed before we tell HW about them.
		 */
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
		q->db_pidx_inc = 0;
	}
	q->db_disabled = 0;
	spin_unlock_irq(&q->db_lock);
}
static void disable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ethtxq[i].q);
	if (is_offload(adap)) {
		struct sge_uld_txq_info *txq_info =
			adap->sge.uld_txq_info[CXGB4_TX_OFLD];

		if (txq_info) {
			for_each_ofldtxq(&adap->sge, i) {
				struct sge_uld_txq *txq = &txq_info->uldtxq[i];

				disable_txq_db(&txq->q);
			}
		}
	}
	for_each_port(adap, i)
		disable_txq_db(&adap->sge.ctrlq[i].q);
}

static void enable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		enable_txq_db(adap, &adap->sge.ethtxq[i].q);
	if (is_offload(adap)) {
		struct sge_uld_txq_info *txq_info =
			adap->sge.uld_txq_info[CXGB4_TX_OFLD];

		if (txq_info) {
			for_each_ofldtxq(&adap->sge, i) {
				struct sge_uld_txq *txq = &txq_info->uldtxq[i];

				enable_txq_db(adap, &txq->q);
			}
		}
	}
	for_each_port(adap, i)
		enable_txq_db(adap, &adap->sge.ctrlq[i].q);
}
static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
{
	enum cxgb4_uld type = CXGB4_ULD_RDMA;

	if (adap->uld && adap->uld[type].handle)
		adap->uld[type].control(adap->uld[type].handle, cmd);
}

static void process_db_full(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_full_task);

	drain_db_fifo(adap, dbfifo_drain_delay);
	enable_dbs(adap);
	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
	else
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
}
static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
{
	u16 hw_pidx, hw_cidx;
	int ret;

	spin_lock_irq(&q->db_lock);
	ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;
	if (q->db_pidx != hw_pidx) {
		u16 delta;
		u32 val;

		if (q->db_pidx >= hw_pidx)
			delta = q->db_pidx - hw_pidx;
		else
			delta = q->size - hw_pidx + q->db_pidx;

		if (is_t4(adap->params.chip))
			val = PIDX_V(delta);
		else
			val = PIDX_T5_V(delta);
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | val);
	}
out:
	q->db_disabled = 0;
	q->db_pidx_inc = 0;
	spin_unlock_irq(&q->db_lock);
	if (ret)
		CH_WARN(adap, "DB drop recovery failed.\n");
}

static void recover_all_queues(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
	if (is_offload(adap)) {
		struct sge_uld_txq_info *txq_info =
			adap->sge.uld_txq_info[CXGB4_TX_OFLD];

		if (txq_info) {
			for_each_ofldtxq(&adap->sge, i) {
				struct sge_uld_txq *txq = &txq_info->uldtxq[i];

				sync_txq_pidx(adap, &txq->q);
			}
		}
	}
	for_each_port(adap, i)
		sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
}
static void process_db_drop(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_drop_task);

	if (is_t4(adap->params.chip)) {
		drain_db_fifo(adap, dbfifo_drain_delay);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
		drain_db_fifo(adap, dbfifo_drain_delay);
		recover_all_queues(adap);
		drain_db_fifo(adap, dbfifo_drain_delay);
		enable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	} else if (is_t5(adap->params.chip)) {
		u32 dropped_db = t4_read_reg(adap, 0x010ac);
		u16 qid = (dropped_db >> 15) & 0x1ffff;
		u16 pidx_inc = dropped_db & 0x1fff;
		u64 bar2_qoffset;
		unsigned int bar2_qid;
		int ret;

		ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
					0, &bar2_qoffset, &bar2_qid);
		if (ret)
			dev_err(adap->pdev_dev, "doorbell drop recovery: qid=%d, pidx_inc=%d\n",
				qid, pidx_inc);
		else
			writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
			       adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);

		/* Re-enable BAR2 WC */
		t4_set_reg_field(adap, 0x10b0, 1 << 15, 1 << 15);
	}

	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
}
void t4_db_full(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
		queue_work(adap->workq, &adap->db_full_task);
	}
}

void t4_db_dropped(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
	}
	queue_work(adap->workq, &adap->db_drop_task);
}

void t4_register_netevent_notifier(void)
{
	if (!netevent_registered) {
		register_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = true;
	}
}
static void detach_ulds(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	list_del(&adap->list_node);

	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld && adap->uld[i].handle)
			adap->uld[i].state_change(adap->uld[i].handle,
						  CXGB4_STATE_DETACH);

	if (netevent_registered && list_empty(&adapter_list)) {
		unregister_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = false;
	}
	mutex_unlock(&uld_mutex);
}

static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld && adap->uld[i].handle)
			adap->uld[i].state_change(adap->uld[i].handle,
						  new_state);
	mutex_unlock(&uld_mutex);
}
#if IS_ENABLED(CONFIG_IPV6)
static int cxgb4_inet6addr_handler(struct notifier_block *this,
				   unsigned long event, void *data)
{
	struct inet6_ifaddr *ifa = data;
	struct net_device *event_dev = ifa->idev->dev;
	const struct device *parent = NULL;
#if IS_ENABLED(CONFIG_BONDING)
	struct adapter *adap;
#endif
	if (is_vlan_dev(event_dev))
		event_dev = vlan_dev_real_dev(event_dev);
#if IS_ENABLED(CONFIG_BONDING)
	if (event_dev->flags & IFF_MASTER) {
		list_for_each_entry(adap, &adapter_list, list_node) {
			switch (event) {
			case NETDEV_UP:
				cxgb4_clip_get(adap->port[0],
					       (const u32 *)ifa, 1);
				break;
			case NETDEV_DOWN:
				cxgb4_clip_release(adap->port[0],
						   (const u32 *)ifa, 1);
				break;
			default:
				break;
			}
		}
		return NOTIFY_OK;
	}
#endif

	if (event_dev)
		parent = event_dev->dev.parent;

	if (parent && parent->driver == &cxgb4_driver.driver) {
		switch (event) {
		case NETDEV_UP:
			cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
			break;
		case NETDEV_DOWN:
			cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
			break;
		default:
			break;
		}
	}
	return NOTIFY_OK;
}

static bool inet6addr_registered;
static struct notifier_block cxgb4_inet6addr_notifier = {
	.notifier_call = cxgb4_inet6addr_handler
};

static void update_clip(const struct adapter *adap)
{
	int i;
	struct net_device *dev;
	int ret;

	rcu_read_lock();

	for (i = 0; i < MAX_NPORTS; i++) {
		dev = adap->port[i];
		ret = 0;

		if (dev)
			ret = cxgb4_update_root_dev_clip(dev);

		if (ret < 0)
			break;
	}
	rcu_read_unlock();
}
#endif /* IS_ENABLED(CONFIG_IPV6) */
/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;

	mutex_lock(&uld_mutex);
	err = setup_sge_queues(adap);
	if (err)
		goto rel_lock;
	err = setup_rss(adap);
	if (err)
		goto freeq;

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;
		err = request_msix_queue_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else {
		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
				  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
				  adap->port[0]->name, adap);
		if (err)
			goto irq_err;
	}

	enable_rx(adap);
	t4_sge_start(adap);
	t4_intr_enable(adap);
	adap->flags |= FULL_INIT_DONE;
	mutex_unlock(&uld_mutex);

	notify_ulds(adap, CXGB4_STATE_UP);
#if IS_ENABLED(CONFIG_IPV6)
	update_clip(adap);
#endif
	/* Initialize hash mac addr list*/
	INIT_LIST_HEAD(&adap->mac_hlist);
	return err;

irq_err:
	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
freeq:
	t4_free_sge_resources(adap);
rel_lock:
	mutex_unlock(&uld_mutex);
	return err;
}
static void cxgb_down(struct adapter *adapter)
{
	cancel_work_sync(&adapter->tid_release_task);
	cancel_work_sync(&adapter->db_full_task);
	cancel_work_sync(&adapter->db_drop_task);
	adapter->tid_release_task_busy = false;
	adapter->tid_release_head = NULL;

	t4_sge_stop(adapter);
	t4_free_sge_resources(adapter);
	adapter->flags &= ~FULL_INIT_DONE;
}

/*
 * net_device operations
 */
static int cxgb_open(struct net_device *dev)
{
	int err;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_carrier_off(dev);

	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = cxgb_up(adapter);
		if (err < 0)
			return err;
	}

	/* It's possible that the basic port information could have
	 * changed since we first read it.
	 */
	err = t4_update_port_info(pi);
	if (err < 0)
		return err;

	err = link_start(dev);
	if (!err)
		netif_tx_start_all_queues(dev);
	return err;
}

static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);
	return t4_enable_vi(adapter, adapter->pf, pi->viid, false, false);
}
2305 int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
2306 __be32 sip, __be16 sport, __be16 vlan,
2307 unsigned int queue, unsigned char port, unsigned char mask)
2310 struct filter_entry *f;
2311 struct adapter *adap;
2315 adap = netdev2adap(dev);
2317 /* Adjust stid to correct filter index */
2318 stid -= adap->tids.sftid_base;
2319 stid += adap->tids.nftids;
2321 /* Check to make sure the filter requested is writable ...
2323 f = &adap->tids.ftid_tab[stid];
2324 ret = writable_filter(f);
2328 /* Clear out any old resources being used by the filter before
2329 * we start constructing the new filter.
2332 clear_filter(adap, f);
2334 /* Clear out filter specifications */
2335 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
2336 f->fs.val.lport = cpu_to_be16(sport);
2337 f->fs.mask.lport = ~0;
2339 if ((val[0] | val[1] | val[2] | val[3]) != 0) {
2340 for (i = 0; i < 4; i++) {
2341 f->fs.val.lip[i] = val[i];
2342 f->fs.mask.lip[i] = ~0;
2344 if (adap->params.tp.vlan_pri_map & PORT_F) {
2345 f->fs.val.iport = port;
2346 f->fs.mask.iport = mask;
2350 if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
2351 f->fs.val.proto = IPPROTO_TCP;
2352 f->fs.mask.proto = ~0;
2357 /* Mark filter as locked */
2361 /* Save the actual tid. We need this to get the corresponding
2362 * filter entry structure in filter_rpl.
2364 f->tid = stid + adap->tids.ftid_base;
2365 ret = set_filter_wr(adap, stid);
2367 clear_filter(adap, f);
2373 EXPORT_SYMBOL(cxgb4_create_server_filter);
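/* Illustrative sketch (compiled out): the stid -> ftid_tab[] index mapping
 * performed at the top of cxgb4_create_server_filter() above. The numbers
 * below are made up for the example; the real bases and counts come from
 * firmware parameters.
 *
 *   sftid_base = 332, nftids = 166, stid = 334
 *   index = 334 - 332 + 166 = 168
 *
 * i.e. server filters land in ftid_tab[] just past the nftids entries
 * reserved for regular filters.
 */
#if 0
static unsigned int demo_stid_to_ftid_index(unsigned int stid,
					    unsigned int sftid_base,
					    unsigned int nftids)
{
	return stid - sftid_base + nftids;
}
#endif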
2375 int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
2376 unsigned int queue, bool ipv6)
2378 struct filter_entry *f;
2379 struct adapter *adap;
2381 adap = netdev2adap(dev);
2383 /* Adjust stid to correct filter index */
2384 stid -= adap->tids.sftid_base;
2385 stid += adap->tids.nftids;
2387 f = &adap->tids.ftid_tab[stid];
2388 /* Unlock the filter */
2391 return delete_filter(adap, stid);
2393 EXPORT_SYMBOL(cxgb4_remove_server_filter);
2395 static void cxgb_get_stats(struct net_device *dev,
2396 struct rtnl_link_stats64 *ns)
2398 struct port_stats stats;
2399 struct port_info *p = netdev_priv(dev);
2400 struct adapter *adapter = p->adapter;
2402 /* Block retrieving statistics during EEH error
2403 * recovery. Otherwise, the recovery might fail
2404 * and the PCI device will be removed permanently
2406 spin_lock(&adapter->stats_lock);
2407 if (!netif_device_present(dev)) {
2408 spin_unlock(&adapter->stats_lock);
2411 t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
2413 spin_unlock(&adapter->stats_lock);
2415 ns->tx_bytes = stats.tx_octets;
2416 ns->tx_packets = stats.tx_frames;
2417 ns->rx_bytes = stats.rx_octets;
2418 ns->rx_packets = stats.rx_frames;
2419 ns->multicast = stats.rx_mcast_frames;
2421 /* detailed rx_errors */
2422 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
2424 ns->rx_over_errors = 0;
2425 ns->rx_crc_errors = stats.rx_fcs_err;
2426 ns->rx_frame_errors = stats.rx_symbol_err;
2427 ns->rx_dropped = stats.rx_ovflow0 + stats.rx_ovflow1 +
2428 stats.rx_ovflow2 + stats.rx_ovflow3 +
2429 stats.rx_trunc0 + stats.rx_trunc1 +
2430 stats.rx_trunc2 + stats.rx_trunc3;
2431 ns->rx_missed_errors = 0;
2433 /* detailed tx_errors */
2434 ns->tx_aborted_errors = 0;
2435 ns->tx_carrier_errors = 0;
2436 ns->tx_fifo_errors = 0;
2437 ns->tx_heartbeat_errors = 0;
2438 ns->tx_window_errors = 0;
2440 ns->tx_errors = stats.tx_error_frames;
2441 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
2442 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
2445 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2448 int ret = 0, prtad, devad;
2449 struct port_info *pi = netdev_priv(dev);
2450 struct adapter *adapter = pi->adapter;
2451 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
2455 if (pi->mdio_addr < 0)
2457 data->phy_id = pi->mdio_addr;
2461 if (mdio_phy_id_is_c45(data->phy_id)) {
2462 prtad = mdio_phy_id_prtad(data->phy_id);
2463 devad = mdio_phy_id_devad(data->phy_id);
2464 } else if (data->phy_id < 32) {
2465 prtad = data->phy_id;
2467 data->reg_num &= 0x1f;
2471 mbox = pi->adapter->pf;
2472 if (cmd == SIOCGMIIREG)
2473 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
2474 data->reg_num, &data->val_out);
2476 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
2477 data->reg_num, data->val_in);
2480 return copy_to_user(req->ifr_data, &pi->tstamp_config,
2481 sizeof(pi->tstamp_config)) ?
2484 if (copy_from_user(&pi->tstamp_config, req->ifr_data,
2485 sizeof(pi->tstamp_config)))
2488 if (!is_t4(adapter->params.chip)) {
2489 switch (pi->tstamp_config.tx_type) {
2490 case HWTSTAMP_TX_OFF:
2491 case HWTSTAMP_TX_ON:
2497 switch (pi->tstamp_config.rx_filter) {
2498 case HWTSTAMP_FILTER_NONE:
2499 pi->rxtstamp = false;
2501 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2502 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2503 cxgb4_ptprx_timestamping(pi, pi->port_id,
2506 case HWTSTAMP_FILTER_PTP_V2_EVENT:
2507 cxgb4_ptprx_timestamping(pi, pi->port_id,
2510 case HWTSTAMP_FILTER_ALL:
2511 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2512 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2513 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2514 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2515 pi->rxtstamp = true;
2518 pi->tstamp_config.rx_filter =
2519 HWTSTAMP_FILTER_NONE;
2523 if ((pi->tstamp_config.tx_type == HWTSTAMP_TX_OFF) &&
2524 (pi->tstamp_config.rx_filter ==
2525 HWTSTAMP_FILTER_NONE)) {
2526 if (cxgb4_ptp_txtype(adapter, pi->port_id) >= 0)
2527 pi->ptp_enable = false;
2530 if (pi->tstamp_config.rx_filter !=
2531 HWTSTAMP_FILTER_NONE) {
2532 if (cxgb4_ptp_redirect_rx_packet(adapter,
2534 pi->ptp_enable = true;
2537 /* For T4 Adapters */
2538 switch (pi->tstamp_config.rx_filter) {
2539 case HWTSTAMP_FILTER_NONE:
2540 pi->rxtstamp = false;
2542 case HWTSTAMP_FILTER_ALL:
2543 pi->rxtstamp = true;
2546 pi->tstamp_config.rx_filter =
2547 HWTSTAMP_FILTER_NONE;
2551 return copy_to_user(req->ifr_data, &pi->tstamp_config,
2552 sizeof(pi->tstamp_config)) ?
2560 static void cxgb_set_rxmode(struct net_device *dev)
2562 /* unfortunately we can't return errors to the stack */
2563 set_rxmode(dev, -1, false);
2566 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2569 struct port_info *pi = netdev_priv(dev);
2571 ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1,
2578 #ifdef CONFIG_PCI_IOV
2579 static int dummy_open(struct net_device *dev)
2581 /* Turn carrier off since we don't have to transmit anything on this interface. */
2584 netif_carrier_off(dev);
2588 /* Fill MAC address that will be assigned by the FW */
2589 static void fill_vf_station_mac_addr(struct adapter *adap)
2592 u8 hw_addr[ETH_ALEN], macaddr[ETH_ALEN];
2597 err = t4_get_raw_vpd_params(adap, &adap->params.vpd);
2599 na = adap->params.vpd.na;
2600 for (i = 0; i < ETH_ALEN; i++)
2601 hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
2602 hex2val(na[2 * i + 1]));
2603 a = (hw_addr[0] << 8) | hw_addr[1];
2604 b = (hw_addr[1] << 8) | hw_addr[2];
2606 a |= 0x0200; /* locally assigned Ethernet MAC address */
2607 a &= ~0x0100; /* not a multicast Ethernet MAC address */
2608 macaddr[0] = a >> 8;
2609 macaddr[1] = a & 0xff;
2611 for (i = 2; i < 5; i++)
2612 macaddr[i] = hw_addr[i + 1];
2614 for (i = 0; i < adap->num_vfs; i++) {
2615 macaddr[5] = adap->pf * 16 + i;
2616 ether_addr_copy(adap->vfinfo[i].vf_mac_addr, macaddr);
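/* Illustrative arithmetic for the first-octet fixups above: OR-ing in
 * 0x0200 sets the locally-administered bit of the leading MAC octet and
 * AND-ing out 0x0100 clears the multicast bit, so the synthesized VF
 * address is always local unicast. E.g. with hw_addr[0] = 0x00 and
 * hw_addr[1] = 0x07:
 *
 *   a = 0x0007;  a |= 0x0200;  a &= ~0x0100;   =>  a == 0x0207
 *   macaddr[0] = 0x02, macaddr[1] = 0x07
 */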
2621 static int cxgb_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
2623 struct port_info *pi = netdev_priv(dev);
2624 struct adapter *adap = pi->adapter;
2627 /* verify MAC addr is valid */
2628 if (!is_valid_ether_addr(mac)) {
2629 dev_err(pi->adapter->pdev_dev,
2630 "Invalid Ethernet address %pM for VF %d\n",
2635 dev_info(pi->adapter->pdev_dev,
2636 "Setting MAC %pM on VF %d\n", mac, vf);
2637 ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac);
2639 ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac);
2643 static int cxgb_get_vf_config(struct net_device *dev,
2644 int vf, struct ifla_vf_info *ivi)
2646 struct port_info *pi = netdev_priv(dev);
2647 struct adapter *adap = pi->adapter;
2649 if (vf >= adap->num_vfs)
2652 ivi->max_tx_rate = adap->vfinfo[vf].tx_rate;
2653 ivi->min_tx_rate = 0;
2654 ether_addr_copy(ivi->mac, adap->vfinfo[vf].vf_mac_addr);
2658 static int cxgb_get_phys_port_id(struct net_device *dev,
2659 struct netdev_phys_item_id *ppid)
2661 struct port_info *pi = netdev_priv(dev);
2662 unsigned int phy_port_id;
2664 phy_port_id = pi->adapter->adap_idx * 10 + pi->port_id;
2665 ppid->id_len = sizeof(phy_port_id);
2666 memcpy(ppid->id, &phy_port_id, ppid->id_len);
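/* Illustrative note: the physical port ID above is a simple decimal
 * encoding, adap_idx * 10 + port_id; e.g. adapter 2, port 3 yields 23.
 * It stays unique because each adapter exposes fewer than 10 ports.
 */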
2670 static int cxgb_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
2673 struct port_info *pi = netdev_priv(dev);
2674 struct adapter *adap = pi->adapter;
2675 struct fw_port_cmd port_cmd, port_rpl;
2676 u32 link_status, speed = 0;
2677 u32 fw_pfvf, fw_class;
2682 if (vf >= adap->num_vfs)
2686 dev_err(adap->pdev_dev,
2687 "Min tx rate (%d) (> 0) for VF %d is Invalid.\n",
2691 /* Retrieve link details for VF port */
2692 memset(&port_cmd, 0, sizeof(port_cmd));
2693 port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
2696 FW_PORT_CMD_PORTID_V(pi->port_id));
2697 port_cmd.action_to_len16 =
2698 cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
2699 FW_LEN16(port_cmd));
2700 ret = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd),
2702 if (ret != FW_SUCCESS) {
2703 dev_err(adap->pdev_dev,
2704 "Failed to get link status for VF %d\n", vf);
2707 link_status = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype);
2708 link_ok = (link_status & FW_PORT_CMD_LSTATUS_F) != 0;
2710 dev_err(adap->pdev_dev, "Link down for VF %d\n", vf);
2713 /* Determine link speed */
2714 if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
2716 else if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
2718 else if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
2720 else if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
2722 else if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
2724 else if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
2727 if (max_tx_rate > speed) {
2728 dev_err(adap->pdev_dev,
2729 "Max tx rate %d for VF %d can't be > link-speed %u",
2730 max_tx_rate, vf, speed);
2733 pktsize = be16_to_cpu(port_rpl.u.info.mtu);
2734 /* subtract ethhdr size and 4 bytes CRC since f/w appends them */
2735 pktsize = pktsize - sizeof(struct ethhdr) - 4;
2736 /* subtract ipv4 hdr size, tcp hdr size to get typical IPv4 MSS size */
2737 pktsize = pktsize - sizeof(struct iphdr) - sizeof(struct tcphdr);
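/* Illustrative arithmetic for the pktsize computed above, assuming the
 * firmware reports a full frame size of 1518 bytes:
 *
 *   1518 - 14 (ethhdr) - 4 (CRC)     = 1500   (IP MTU)
 *   1500 - 20 (iphdr) - 20 (tcphdr)  = 1460   (typical IPv4 MSS)
 *
 * so the Traffic Class below is programmed with an MSS-sized estimate of
 * the per-packet payload.
 */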
2738 /* configure Traffic Class for rate-limiting */
2739 ret = t4_sched_params(adap, SCHED_CLASS_TYPE_PACKET,
2740 SCHED_CLASS_LEVEL_CL_RL,
2741 SCHED_CLASS_MODE_CLASS,
2742 SCHED_CLASS_RATEUNIT_BITS,
2743 SCHED_CLASS_RATEMODE_ABS,
2744 pi->port_id, class_id, 0,
2745 max_tx_rate * 1000, 0, pktsize);
2747 dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n",
2751 dev_info(adap->pdev_dev,
2752 "Class %d with MSS %u configured with rate %u\n",
2753 class_id, pktsize, max_tx_rate);
2755 /* bind VF to configured Traffic Class */
2756 fw_pfvf = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
2757 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
2758 fw_class = class_id;
2759 ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, &fw_pfvf,
2762 dev_err(adap->pdev_dev,
2763 "Err %d in binding VF %d to Traffic Class %d\n",
2767 dev_info(adap->pdev_dev, "PF %d VF %d is bound to Class %d\n",
2768 adap->pf, vf, class_id);
2769 adap->vfinfo[vf].tx_rate = max_tx_rate;
2775 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2778 struct sockaddr *addr = p;
2779 struct port_info *pi = netdev_priv(dev);
2781 if (!is_valid_ether_addr(addr->sa_data))
2782 return -EADDRNOTAVAIL;
2784 ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid,
2785 pi->xact_addr_filt, addr->sa_data, true, true);
2789 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2790 pi->xact_addr_filt = ret;
2794 #ifdef CONFIG_NET_POLL_CONTROLLER
2795 static void cxgb_netpoll(struct net_device *dev)
2797 struct port_info *pi = netdev_priv(dev);
2798 struct adapter *adap = pi->adapter;
2800 if (adap->flags & USING_MSIX) {
2802 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
2804 for (i = pi->nqsets; i; i--, rx++)
2805 t4_sge_intr_msix(0, &rx->rspq);
2807 t4_intr_handler(adap)(0, adap);
2811 static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
2813 struct port_info *pi = netdev_priv(dev);
2814 struct adapter *adap = pi->adapter;
2815 struct sched_class *e;
2816 struct ch_sched_params p;
2817 struct ch_sched_queue qe;
2821 if (!can_sched(dev))
2824 if (index < 0 || index > pi->nqsets - 1)
2827 if (!(adap->flags & FULL_INIT_DONE)) {
2828 dev_err(adap->pdev_dev,
2829 "Failed to rate limit on queue %d. Link Down?\n",
2834 /* Convert from Mbps to Kbps (<< 10 multiplies by 1024, a slight over-estimate) */
2835 req_rate = rate << 10;
2837 /* Max rate is 10 Gbps */
2838 if (req_rate >= SCHED_MAX_RATE_KBPS) {
2839 dev_err(adap->pdev_dev,
2840 "Invalid rate %u Mbps, Max rate is %u Gbps\n",
2841 rate, SCHED_MAX_RATE_KBPS);
2845 /* First unbind the queue from any existing class */
2846 memset(&qe, 0, sizeof(qe));
2848 qe.class = SCHED_CLS_NONE;
2850 err = cxgb4_sched_class_unbind(dev, (void *)(&qe), SCHED_QUEUE);
2852 dev_err(adap->pdev_dev,
2853 "Unbinding Queue %d on port %d fail. Err: %d\n",
2854 index, pi->port_id, err);
2858 /* Queue already unbound */
2862 /* Fetch any available unused or matching scheduling class */
2863 memset(&p, 0, sizeof(p));
2864 p.type = SCHED_CLASS_TYPE_PACKET;
2865 p.u.params.level = SCHED_CLASS_LEVEL_CL_RL;
2866 p.u.params.mode = SCHED_CLASS_MODE_CLASS;
2867 p.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS;
2868 p.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS;
2869 p.u.params.channel = pi->tx_chan;
2870 p.u.params.class = SCHED_CLS_NONE;
2871 p.u.params.minrate = 0;
2872 p.u.params.maxrate = req_rate;
2873 p.u.params.weight = 0;
2874 p.u.params.pktsize = dev->mtu;
2876 e = cxgb4_sched_class_alloc(dev, &p);
2880 /* Bind the queue to a scheduling class */
2881 memset(&qe, 0, sizeof(qe));
2885 err = cxgb4_sched_class_bind(dev, (void *)(&qe), SCHED_QUEUE);
2887 dev_err(adap->pdev_dev,
2888 "Queue rate limiting failed. Err: %d\n", err);
2892 static int cxgb_setup_tc(struct net_device *dev, u32 handle, u32 chain_index,
2893 __be16 proto, struct tc_to_netdev *tc)
2895 struct port_info *pi = netdev2pinfo(dev);
2896 struct adapter *adap = netdev2adap(dev);
2901 if (!(adap->flags & FULL_INIT_DONE)) {
2902 dev_err(adap->pdev_dev,
2903 "Failed to setup tc on port %d. Link Down?\n",
2908 if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) &&
2909 tc->type == TC_SETUP_CLSU32) {
2910 switch (tc->cls_u32->command) {
2911 case TC_CLSU32_NEW_KNODE:
2912 case TC_CLSU32_REPLACE_KNODE:
2913 return cxgb4_config_knode(dev, proto, tc->cls_u32);
2914 case TC_CLSU32_DELETE_KNODE:
2915 return cxgb4_delete_knode(dev, proto, tc->cls_u32);
2924 static netdev_features_t cxgb_fix_features(struct net_device *dev,
2925 netdev_features_t features)
2927 /* Disable GRO, if RX_CSUM is disabled */
2928 if (!(features & NETIF_F_RXCSUM))
2929 features &= ~NETIF_F_GRO;
2934 static const struct net_device_ops cxgb4_netdev_ops = {
2935 .ndo_open = cxgb_open,
2936 .ndo_stop = cxgb_close,
2937 .ndo_start_xmit = t4_eth_xmit,
2938 .ndo_select_queue = cxgb_select_queue,
2939 .ndo_get_stats64 = cxgb_get_stats,
2940 .ndo_set_rx_mode = cxgb_set_rxmode,
2941 .ndo_set_mac_address = cxgb_set_mac_addr,
2942 .ndo_set_features = cxgb_set_features,
2943 .ndo_validate_addr = eth_validate_addr,
2944 .ndo_do_ioctl = cxgb_ioctl,
2945 .ndo_change_mtu = cxgb_change_mtu,
2946 #ifdef CONFIG_NET_POLL_CONTROLLER
2947 .ndo_poll_controller = cxgb_netpoll,
2949 #ifdef CONFIG_CHELSIO_T4_FCOE
2950 .ndo_fcoe_enable = cxgb_fcoe_enable,
2951 .ndo_fcoe_disable = cxgb_fcoe_disable,
2952 #endif /* CONFIG_CHELSIO_T4_FCOE */
2953 .ndo_set_tx_maxrate = cxgb_set_tx_maxrate,
2954 .ndo_setup_tc = cxgb_setup_tc,
2955 .ndo_fix_features = cxgb_fix_features,
2958 #ifdef CONFIG_PCI_IOV
2959 static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
2960 .ndo_open = dummy_open,
2961 .ndo_set_vf_mac = cxgb_set_vf_mac,
2962 .ndo_get_vf_config = cxgb_get_vf_config,
2963 .ndo_set_vf_rate = cxgb_set_vf_rate,
2964 .ndo_get_phys_port_id = cxgb_get_phys_port_id,
2968 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2970 struct adapter *adapter = netdev2adap(dev);
2972 strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
2973 strlcpy(info->version, cxgb4_driver_version,
2974 sizeof(info->version));
2975 strlcpy(info->bus_info, pci_name(adapter->pdev),
2976 sizeof(info->bus_info));
2979 static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = {
2980 .get_drvinfo = get_drvinfo,
2983 void t4_fatal_err(struct adapter *adap)
2987 if (pci_channel_offline(adap->pdev))
2990 /* Disable the SGE since ULDs are going to free resources that
2991 * could be exposed to the adapter. RDMA MWs for example...
2993 t4_shutdown_adapter(adap);
2994 for_each_port(adap, port) {
2995 struct net_device *dev = adap->port[port];
2997 /* If we get here in very early initialization the network
2998 * devices may not have been set up yet.
3003 netif_tx_stop_all_queues(dev);
3004 netif_carrier_off(dev);
3006 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
3009 static void setup_memwin(struct adapter *adap)
3011 u32 nic_win_base = t4_get_util_window(adap);
3013 t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC);
3016 static void setup_memwin_rdma(struct adapter *adap)
3018 if (adap->vres.ocq.size) {
3022 start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
3023 start &= PCI_BASE_ADDRESS_MEM_MASK;
3024 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
3025 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
3027 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
3028 start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
3030 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
3031 adap->vres.ocq.start);
3033 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
3037 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
3042 /* get device capabilities */
3043 memset(c, 0, sizeof(*c));
3044 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3045 FW_CMD_REQUEST_F | FW_CMD_READ_F);
3046 c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
3047 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c);
3051 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3052 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
3053 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
3057 ret = t4_config_glbl_rss(adap, adap->pf,
3058 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
3059 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
3060 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
3064 ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64,
3065 MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
3072 /* tweak some settings */
3073 t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
3074 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
3075 t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
3076 v = t4_read_reg(adap, TP_PIO_DATA_A);
3077 t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);
3079 /* first 4 Tx modulation queues point to consecutive Tx channels */
3080 adap->params.tp.tx_modq_map = 0xE4;
3081 t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
3082 TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));
3084 /* associate each Tx modulation queue with consecutive Tx channels */
3086 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3087 &v, 1, TP_TX_SCHED_HDR_A);
3088 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3089 &v, 1, TP_TX_SCHED_FIFO_A);
3090 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3091 &v, 1, TP_TX_SCHED_PCMD_A);
3093 #define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
3094 if (is_offload(adap)) {
3095 t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
3096 TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3097 TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3098 TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3099 TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
3100 t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
3101 TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3102 TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3103 TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3104 TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
3107 /* get basic stuff going */
3108 return t4_early_init(adap, adap->pf);
3112 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
3114 #define MAX_ATIDS 8192U
3117 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
3119 * If the firmware we're dealing with has Configuration File support, then
3120 * we use that to perform all configuration.
3124 * Tweak configuration based on module parameters, etc. Most of these have
3125 * defaults assigned to them by Firmware Configuration Files (if we're using
3126 * them) but need to be explicitly set if we're using hard-coded
3127 * initialization. But even in the case of using Firmware Configuration
3128 * Files, we'd like to expose the ability to change these via module
3129 * parameters so these are essentially common tweaks/settings for
3130 * Configuration Files and hard-coded initialization ...
3132 static int adap_init0_tweaks(struct adapter *adapter)
3135 * Fix up various Host-Dependent Parameters like Page Size, Cache
3136 * Line Size, etc. The firmware default is for a 4KB Page Size and
3137 * 64B Cache Line Size ...
3139 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
3142 * Process module parameters which affect early initialization.
3144 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
3145 dev_err(&adapter->pdev->dev,
3146 "Ignoring illegal rx_dma_offset=%d, using 2\n",
3150 t4_set_reg_field(adapter, SGE_CONTROL_A,
3151 PKTSHIFT_V(PKTSHIFT_M),
3152 PKTSHIFT_V(rx_dma_offset));
3155 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
3156 * adds the pseudo header itself.
3158 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
3159 CSUM_HAS_PSEUDO_HDR_F, 0);
3164 /* 10Gb/s-BT PHY Support. chip-external 10Gb/s-BT PHYs are complex chips
3165 * unto themselves and they contain their own firmware to perform their tasks.
3168 static int phy_aq1202_version(const u8 *phy_fw_data,
3173 /* At offset 0x8 you're looking for the primary image's
3174 * starting offset which is 3 Bytes wide
3176 * At offset 0xa of the primary image, you look for the offset
3177 * of the DRAM segment which is 3 Bytes wide.
3179 * The FW version is at offset 0x27e of the DRAM and is 2 Bytes
3182 #define be16(__p) (((__p)[0] << 8) | (__p)[1])
3183 #define le16(__p) ((__p)[0] | ((__p)[1] << 8))
3184 #define le24(__p) (le16(__p) | ((__p)[2] << 16))
3186 offset = le24(phy_fw_data + 0x8) << 12;
3187 offset = le24(phy_fw_data + offset + 0xa);
3188 return be16(phy_fw_data + offset + 0x27e);
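/* Illustrative walk of the parse above, with made-up bytes. le24() reads a
 * little-endian 3-byte value and be16() a big-endian 2-byte value:
 *
 *   phy_fw_data[0x8..]    = 01 00 00   ->  le24 = 0x000001
 *   primary image offset  = 0x000001 << 12 = 0x1000
 *   phy_fw_data[0x100a..] = 00 20 00   ->  le24 = 0x002000 (DRAM offset)
 *   phy_fw_data[0x227e..] = 03 02      ->  be16 = 0x0302   (FW version)
 */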
3195 static struct info_10gbt_phy_fw {
3196 unsigned int phy_fw_id; /* PCI Device ID */
3197 char *phy_fw_file; /* /lib/firmware/ PHY Firmware file */
3198 int (*phy_fw_version)(const u8 *phy_fw_data, size_t phy_fw_size);
3199 int phy_flash; /* Has FLASH for PHY Firmware */
3200 } phy_info_array[] = {
3202 PHY_AQ1202_DEVICEID,
3203 PHY_AQ1202_FIRMWARE,
3208 PHY_BCM84834_DEVICEID,
3209 PHY_BCM84834_FIRMWARE,
3216 static struct info_10gbt_phy_fw *find_phy_info(int devid)
3220 for (i = 0; i < ARRAY_SIZE(phy_info_array); i++) {
3221 if (phy_info_array[i].phy_fw_id == devid)
3222 return &phy_info_array[i];
3227 /* Handle updating of chip-external 10Gb/s-BT PHY firmware. This needs to
3228 * happen after the FW_RESET_CMD but before the FW_INITIALIZE_CMD. On error
3229 * we return a negative error number. If we transfer new firmware we return 1
3230 * (from t4_load_phy_fw()). If we don't do anything we return 0.
3232 static int adap_init0_phy(struct adapter *adap)
3234 const struct firmware *phyf;
3236 struct info_10gbt_phy_fw *phy_info;
3238 /* Use the device ID to determine which PHY file to flash.
3240 phy_info = find_phy_info(adap->pdev->device);
3242 dev_warn(adap->pdev_dev,
3243 "No PHY Firmware file found for this PHY\n");
3247 /* If we have a T4 PHY firmware file under /lib/firmware/cxgb4/, then
3248 * use that. The adapter firmware provides us with a memory buffer
3249 * where we can load a PHY firmware file from the host if we want to
3250 * override the PHY firmware File in flash.
3252 ret = request_firmware_direct(&phyf, phy_info->phy_fw_file,
3255 /* For adapters without FLASH attached to PHY for their
3256 * firmware, it's obviously a fatal error if we can't get the
3257 * firmware to the adapter. For adapters with PHY firmware
3258 * FLASH storage, it's worth a warning if we can't find the
3259 * PHY Firmware but we'll neuter the error ...
3261 dev_err(adap->pdev_dev, "unable to find PHY Firmware image "
3262 "/lib/firmware/%s, error %d\n",
3263 phy_info->phy_fw_file, -ret);
3264 if (phy_info->phy_flash) {
3265 int cur_phy_fw_ver = 0;
3267 t4_phy_fw_ver(adap, &cur_phy_fw_ver);
3268 dev_warn(adap->pdev_dev, "continuing with on-adapter "
3269 "FLASH copy, version %#x\n", cur_phy_fw_ver);
3276 /* Load PHY Firmware onto adapter.
3278 ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock,
3279 phy_info->phy_fw_version,
3280 (u8 *)phyf->data, phyf->size);
3282 dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
3285 int new_phy_fw_ver = 0;
3287 if (phy_info->phy_fw_version)
3288 new_phy_fw_ver = phy_info->phy_fw_version(phyf->data,
3290 dev_info(adap->pdev_dev, "Successfully transferred PHY "
3291 "Firmware /lib/firmware/%s, version %#x\n",
3292 phy_info->phy_fw_file, new_phy_fw_ver);
3295 release_firmware(phyf);
3301 * Attempt to initialize the adapter via a Firmware Configuration File.
3303 static int adap_init0_config(struct adapter *adapter, int reset)
3305 struct fw_caps_config_cmd caps_cmd;
3306 const struct firmware *cf;
3307 unsigned long mtype = 0, maddr = 0;
3308 u32 finiver, finicsum, cfcsum;
3310 int config_issued = 0;
3311 char *fw_config_file, fw_config_file_path[256];
3312 char *config_name = NULL;
3315 * Reset device if necessary.
3318 ret = t4_fw_reset(adapter, adapter->mbox,
3319 PIORSTMODE_F | PIORST_F);
3324 /* If this is a 10Gb/s-BT adapter make sure the chip-external
3325 * 10Gb/s-BT PHYs have up-to-date firmware. Note that this step needs
3326 * to be performed after any global adapter RESET above since some
3327 * PHYs only have local RAM copies of the PHY firmware.
3329 if (is_10gbt_device(adapter->pdev->device)) {
3330 ret = adap_init0_phy(adapter);
3335 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
3336 * then use that. Otherwise, use the configuration file stored
3337 * in the adapter flash ...
3339 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
3341 fw_config_file = FW4_CFNAME;
3344 fw_config_file = FW5_CFNAME;
3347 fw_config_file = FW6_CFNAME;
3350 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
3351 adapter->pdev->device);
3356 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
3358 config_name = "On FLASH";
3359 mtype = FW_MEMTYPE_CF_FLASH;
3360 maddr = t4_flash_cfg_addr(adapter);
3362 u32 params[7], val[7];
3364 sprintf(fw_config_file_path,
3365 "/lib/firmware/%s", fw_config_file);
3366 config_name = fw_config_file_path;
3368 if (cf->size >= FLASH_CFG_MAX_SIZE)
3371 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3372 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
3373 ret = t4_query_params(adapter, adapter->mbox,
3374 adapter->pf, 0, 1, params, val);
3377 * For t4_memory_rw() below addresses and
3378 * sizes have to be in terms of multiples of 4
3379 * bytes. So, if the Configuration File isn't
3380 * a multiple of 4 bytes in length we'll have
3381 * to write that out separately since we can't
3382 * guarantee that the bytes following the
3383 * residual byte in the buffer returned by
3384 * request_firmware() are zeroed out ...
3386 size_t resid = cf->size & 0x3;
3387 size_t size = cf->size & ~0x3;
3388 __be32 *data = (__be32 *)cf->data;
3390 mtype = FW_PARAMS_PARAM_Y_G(val[0]);
3391 maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;
3393 spin_lock(&adapter->win0_lock);
3394 ret = t4_memory_rw(adapter, 0, mtype, maddr,
3395 size, data, T4_MEMORY_WRITE);
3396 if (ret == 0 && resid != 0) {
3403 last.word = data[size >> 2];
3404 for (i = resid; i < 4; i++)
3406 ret = t4_memory_rw(adapter, 0, mtype,
3411 spin_unlock(&adapter->win0_lock);
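/* Illustrative sketch (compiled out) of the residual-word handling above:
 * t4_memory_rw() moves whole 32-bit words, so a 1029-byte Configuration
 * File is written as 1028 aligned bytes plus one final word whose three
 * trailing bytes are explicitly zeroed before being written at
 * maddr + size. "data" is the firmware image cast to __be32 * as above.
 */
#if 0
union {
	__be32 word;
	char buf[4];
} last;
size_t resid = 1029 & 0x3;	/* = 1 */
size_t size = 1029 & ~0x3;	/* = 1028 */
int i;

last.word = data[size >> 2];	/* the word holding the tail byte */
for (i = resid; i < 4; i++)
	last.buf[i] = 0;	/* zero the bytes beyond the file */
#endif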
3415 release_firmware(cf);
3421 * Issue a Capability Configuration command to the firmware to get it
3422 * to parse the Configuration File. We don't use t4_fw_config_file()
3423 * because we want the ability to modify various features after we've
3424 * processed the configuration file ...
3426 memset(&caps_cmd, 0, sizeof(caps_cmd));
3427 caps_cmd.op_to_write =
3428 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3431 caps_cmd.cfvalid_to_len16 =
3432 htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
3433 FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
3434 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
3435 FW_LEN16(caps_cmd));
3436 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3439 /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
3440 * Configuration File in FLASH), our last gasp effort is to use the
3441 * Firmware Configuration File which is embedded in the firmware. A
3442 * very few early versions of the firmware didn't have one embedded
3443 * but we can ignore those.
3445 if (ret == -ENOENT) {
3446 memset(&caps_cmd, 0, sizeof(caps_cmd));
3447 caps_cmd.op_to_write =
3448 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3451 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
3452 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
3453 sizeof(caps_cmd), &caps_cmd);
3454 config_name = "Firmware Default";
3461 finiver = ntohl(caps_cmd.finiver);
3462 finicsum = ntohl(caps_cmd.finicsum);
3463 cfcsum = ntohl(caps_cmd.cfcsum);
3464 if (finicsum != cfcsum)
3465 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
3466 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
3470 * And now tell the firmware to use the configuration we just loaded.
3472 caps_cmd.op_to_write =
3473 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3476 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
3477 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3483 * Tweak configuration based on system architecture, module
3486 ret = adap_init0_tweaks(adapter);
3491 * And finally tell the firmware to initialize itself using the
3492 * parameters from the Configuration File.
3494 ret = t4_fw_initialize(adapter, adapter->mbox);
3498 /* Emit Firmware Configuration File information and return
3501 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
3502 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
3503 config_name, finiver, cfcsum);
3507 * Something bad happened. Return the error ... (If the "error"
3508 * is that there's no Configuration File on the adapter we don't
3509 * want to issue a warning since this is fairly common.)
3512 if (config_issued && ret != -ENOENT)
3513 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
3518 static struct fw_info fw_info_array[] = {
3521 .fs_name = FW4_CFNAME,
3522 .fw_mod_name = FW4_FNAME,
3524 .chip = FW_HDR_CHIP_T4,
3525 .fw_ver = __cpu_to_be32(FW_VERSION(T4)),
3526 .intfver_nic = FW_INTFVER(T4, NIC),
3527 .intfver_vnic = FW_INTFVER(T4, VNIC),
3528 .intfver_ri = FW_INTFVER(T4, RI),
3529 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
3530 .intfver_fcoe = FW_INTFVER(T4, FCOE),
3534 .fs_name = FW5_CFNAME,
3535 .fw_mod_name = FW5_FNAME,
3537 .chip = FW_HDR_CHIP_T5,
3538 .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
3539 .intfver_nic = FW_INTFVER(T5, NIC),
3540 .intfver_vnic = FW_INTFVER(T5, VNIC),
3541 .intfver_ri = FW_INTFVER(T5, RI),
3542 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
3543 .intfver_fcoe = FW_INTFVER(T5, FCOE),
3547 .fs_name = FW6_CFNAME,
3548 .fw_mod_name = FW6_FNAME,
3550 .chip = FW_HDR_CHIP_T6,
3551 .fw_ver = __cpu_to_be32(FW_VERSION(T6)),
3552 .intfver_nic = FW_INTFVER(T6, NIC),
3553 .intfver_vnic = FW_INTFVER(T6, VNIC),
3554 .intfver_ofld = FW_INTFVER(T6, OFLD),
3555 .intfver_ri = FW_INTFVER(T6, RI),
3556 .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
3557 .intfver_iscsi = FW_INTFVER(T6, ISCSI),
3558 .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
3559 .intfver_fcoe = FW_INTFVER(T6, FCOE),
3565 static struct fw_info *find_fw_info(int chip)
3569 for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
3570 if (fw_info_array[i].chip == chip)
3571 return &fw_info_array[i];
3577 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
3579 static int adap_init0(struct adapter *adap)
3583 enum dev_state state;
3584 u32 params[7], val[7];
3585 struct fw_caps_config_cmd caps_cmd;
3588 /* Grab Firmware Device Log parameters as early as possible so we have
3589 * access to it for debugging, etc.
3591 ret = t4_init_devlog_params(adap);
3595 /* Contact FW, advertising Master capability */
3596 ret = t4_fw_hello(adap, adap->mbox, adap->mbox,
3597 is_kdump_kernel() ? MASTER_MUST : MASTER_MAY, &state);
3599 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
3603 if (ret == adap->mbox)
3604 adap->flags |= MASTER_PF;
3607 * If we're the Master PF Driver and the device is uninitialized,
3608 * then let's consider upgrading the firmware ... (We always want
3609 * to check the firmware version number in order to A. get it for
3610 * later reporting and B. to warn if the currently loaded firmware
3611 * is excessively mismatched relative to the driver.)
3613 t4_get_fw_version(adap, &adap->params.fw_vers);
3614 t4_get_bs_version(adap, &adap->params.bs_vers);
3615 t4_get_tp_version(adap, &adap->params.tp_vers);
3616 t4_get_exprom_version(adap, &adap->params.er_vers);
3618 ret = t4_check_fw_version(adap);
3619 /* If firmware is too old (not supported by driver) force an update. */
3621 state = DEV_STATE_UNINIT;
3622 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
3623 struct fw_info *fw_info;
3624 struct fw_hdr *card_fw;
3625 const struct firmware *fw;
3626 const u8 *fw_data = NULL;
3627 unsigned int fw_size = 0;
3629 /* This is the firmware whose headers the driver was compiled against. */
3632 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
3633 if (fw_info == NULL) {
3634 dev_err(adap->pdev_dev,
3635 "unable to get firmware info for chip %d.\n",
3636 CHELSIO_CHIP_VERSION(adap->params.chip));
3640 /* allocate memory to read the header of the firmware on the adapter */
3643 card_fw = kvzalloc(sizeof(*card_fw), GFP_KERNEL);
3645 /* Get FW from /lib/firmware/ */
3646 ret = request_firmware(&fw, fw_info->fw_mod_name,
3649 dev_err(adap->pdev_dev,
3650 "unable to load firmware image %s, error %d\n",
3651 fw_info->fw_mod_name, ret);
3657 /* upgrade FW logic */
3658 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
3662 release_firmware(fw);
3670 * Grab VPD parameters. This should be done after we establish a
3671 * connection to the firmware since some of the VPD parameters
3672 * (notably the Core Clock frequency) are retrieved via requests to
3673 * the firmware. On the other hand, we need these fairly early on
3674 * so we do this right after getting ahold of the firmware.
3676 ret = t4_get_vpd_params(adap, &adap->params.vpd);
3681 * Find out what ports are available to us. Note that we need to do
3682 * this before calling adap_init0_no_config() since it needs nports
3686 FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3687 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
3688 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
3692 adap->params.nports = hweight32(port_vec);
3693 adap->params.portvec = port_vec;
3695 /* If the firmware is initialized already, emit a simple note to that
3696 * effect. Otherwise, it's time to try initializing the adapter.
3698 if (state == DEV_STATE_INIT) {
3699 dev_info(adap->pdev_dev, "Coming up as %s: "\
3700 "Adapter already initialized\n",
3701 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
3703 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
3704 "Initializing adapter\n");
3706 /* Find out whether we're dealing with a version of the
3707 * firmware which has configuration file support.
3709 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3710 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
3711 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
3714 /* If the firmware doesn't support Configuration Files, return an error. */
3718 dev_err(adap->pdev_dev, "firmware doesn't support "
3719 "Firmware Configuration Files\n");
3723 /* The firmware provides us with a memory buffer where we can
3724 * load a Configuration File from the host if we want to
3725 * override the Configuration File in flash.
3727 ret = adap_init0_config(adap, reset);
3728 if (ret == -ENOENT) {
3729 dev_err(adap->pdev_dev, "no Configuration File "
3730 "present on adapter.\n");
3734 dev_err(adap->pdev_dev, "could not initialize "
3735 "adapter, error %d\n", -ret);
3740 /* Give the SGE code a chance to pull in anything that it needs ...
3741 * Note that this must be called after we retrieve our VPD parameters
3742 * in order to know how to convert core ticks to seconds, etc.
3744 ret = t4_sge_init(adap);
3748 if (is_bypass_device(adap->pdev->device))
3749 adap->params.bypass = 1;
3752 * Grab some of our basic fundamental operating parameters.
3754 #define FW_PARAM_DEV(param) \
3755 (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
3756 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))
3758 #define FW_PARAM_PFVF(param) \
3759 FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
3760 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \
3761 FW_PARAMS_PARAM_Y_V(0) | \
3762 FW_PARAMS_PARAM_Z_V(0)
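/* Illustrative usage (compiled out): each firmware parameter is a packed
 * 32-bit selector built from a mnemonic (device-global vs. per-PF/VF) plus
 * X/Y/Z sub-indices. t4_query_params() ships up to 7 such selectors per
 * mailbox command and the firmware answers positionally in val[]:
 */
#if 0
u32 params[2], val[2];
int ret;

params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);	/* device-global */
params[1] = FW_PARAM_PFVF(FILTER_START);	/* per-function */
ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
#endif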
3764 params[0] = FW_PARAM_PFVF(EQ_START);
3765 params[1] = FW_PARAM_PFVF(L2T_START);
3766 params[2] = FW_PARAM_PFVF(L2T_END);
3767 params[3] = FW_PARAM_PFVF(FILTER_START);
3768 params[4] = FW_PARAM_PFVF(FILTER_END);
3769 params[5] = FW_PARAM_PFVF(IQFLINT_START);
3770 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val);
3773 adap->sge.egr_start = val[0];
3774 adap->l2t_start = val[1];
3775 adap->l2t_end = val[2];
3776 adap->tids.ftid_base = val[3];
3777 adap->tids.nftids = val[4] - val[3] + 1;
3778 adap->sge.ingr_start = val[5];
3780 /* qids (ingress/egress) returned from firmware can be anywhere
3781 * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
3782 * Hence the driver needs to allocate memory for this range to
3783 * store the queue info. Get the highest IQFLINT/EQ index returned
3784 * in FW_EQ_*_CMD.alloc command.
3786 params[0] = FW_PARAM_PFVF(EQ_END);
3787 params[1] = FW_PARAM_PFVF(IQFLINT_END);
3788 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
3791 adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
3792 adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;
3794 adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
3795 sizeof(*adap->sge.egr_map), GFP_KERNEL);
3796 if (!adap->sge.egr_map) {
3801 adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
3802 sizeof(*adap->sge.ingr_map), GFP_KERNEL);
3803 if (!adap->sge.ingr_map) {
3808 /* Allocate the memory for the various egress queue bitmaps,
3809 * i.e. starving_fl, txq_maperr and blocked_fl.
3811 adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
3812 sizeof(long), GFP_KERNEL);
3813 if (!adap->sge.starving_fl) {
3818 adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
3819 sizeof(long), GFP_KERNEL);
3820 if (!adap->sge.txq_maperr) {
3825 #ifdef CONFIG_DEBUG_FS
3826 adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
3827 sizeof(long), GFP_KERNEL);
3828 if (!adap->sge.blocked_fl) {
3834 params[0] = FW_PARAM_PFVF(CLIP_START);
3835 params[1] = FW_PARAM_PFVF(CLIP_END);
3836 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
3839 adap->clipt_start = val[0];
3840 adap->clipt_end = val[1];
3842 /* We don't yet have a PARAMs call to retrieve the number of Traffic
3843 * Classes supported by the hardware/firmware so we hard code it here
3846 adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16;
3848 /* query params related to active filter region */
3849 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
3850 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
3851 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
3852 /* If the Active Filter region is non-zero in size, we enable establishing
3853 * offload connections through firmware work requests.
3855 if ((val[0] != val[1]) && (ret >= 0)) {
3856 adap->flags |= FW_OFLD_CONN;
3857 adap->tids.aftid_base = val[0];
3858 adap->tids.aftid_end = val[1];
3861 /* If we're running on newer firmware, let it know that we're
3862 * prepared to deal with encapsulated CPL messages. Older
3863 * firmware won't understand this and we'll just get
3864 * unencapsulated messages ...
3866 params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
3868 (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
3871 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
3872 * capability. Earlier versions of the firmware didn't have the
3873 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
3874 * permission to use ULPTX MEMWRITE DSGL.
3876 if (is_t4(adap->params.chip)) {
3877 adap->params.ulptx_memwrite_dsgl = false;
3879 params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
3880 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
3882 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
3885 /* See if FW supports FW_RI_FR_NSMR_TPTE_WR work request */
3886 params[0] = FW_PARAM_DEV(RI_FR_NSMR_TPTE_WR);
3887 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
3889 adap->params.fr_nsmr_tpte_wr_support = (ret == 0 && val[0] != 0);
3892 * Get device capabilities so we can determine what resources we need
3895 memset(&caps_cmd, 0, sizeof(caps_cmd));
3896 caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3897 FW_CMD_REQUEST_F | FW_CMD_READ_F);
3898 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
3899 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
3904 if (caps_cmd.ofldcaps) {
3905 /* query offload-related parameters */
3906 params[0] = FW_PARAM_DEV(NTID);
3907 params[1] = FW_PARAM_PFVF(SERVER_START);
3908 params[2] = FW_PARAM_PFVF(SERVER_END);
3909 params[3] = FW_PARAM_PFVF(TDDP_START);
3910 params[4] = FW_PARAM_PFVF(TDDP_END);
3911 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
3912 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
3916 adap->tids.ntids = val[0];
3917 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
3918 adap->tids.stid_base = val[1];
3919 adap->tids.nstids = val[2] - val[1] + 1;
3921 * Setup server filter region. Divide the available filter
3922 * region into two parts. Regular filters get 1/3rd and server
3923 * filters get the remaining 2/3rd. This split is only taken when the FW_OFLD_CONN workaround path is enabled.
3925 * 1. Regular filters.
3926 * 2. Server filters: these are special filters which are used
3927 * to redirect SYN packets to the offload queue.
3929 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
3930 adap->tids.sftid_base = adap->tids.ftid_base +
3931 DIV_ROUND_UP(adap->tids.nftids, 3);
3932 adap->tids.nsftids = adap->tids.nftids -
3933 DIV_ROUND_UP(adap->tids.nftids, 3);
3934 adap->tids.nftids = adap->tids.sftid_base -
3935 adap->tids.ftid_base;
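/* Illustrative arithmetic for the split above, with nftids = 492:
 *
 *   sftid_base = ftid_base + DIV_ROUND_UP(492, 3) = ftid_base + 164
 *   nsftids    = 492 - 164                        = 328  (2/3rd, server)
 *   nftids     = sftid_base - ftid_base           = 164  (1/3rd, regular)
 */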
3937 adap->vres.ddp.start = val[3];
3938 adap->vres.ddp.size = val[4] - val[3] + 1;
3939 adap->params.ofldq_wr_cred = val[5];
3941 adap->params.offload = 1;
3942 adap->num_ofld_uld += 1;
3944 if (caps_cmd.rdmacaps) {
3945 params[0] = FW_PARAM_PFVF(STAG_START);
3946 params[1] = FW_PARAM_PFVF(STAG_END);
3947 params[2] = FW_PARAM_PFVF(RQ_START);
3948 params[3] = FW_PARAM_PFVF(RQ_END);
3949 params[4] = FW_PARAM_PFVF(PBL_START);
3950 params[5] = FW_PARAM_PFVF(PBL_END);
3951 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
3955 adap->vres.stag.start = val[0];
3956 adap->vres.stag.size = val[1] - val[0] + 1;
3957 adap->vres.rq.start = val[2];
3958 adap->vres.rq.size = val[3] - val[2] + 1;
3959 adap->vres.pbl.start = val[4];
3960 adap->vres.pbl.size = val[5] - val[4] + 1;
3962 params[0] = FW_PARAM_PFVF(SQRQ_START);
3963 params[1] = FW_PARAM_PFVF(SQRQ_END);
3964 params[2] = FW_PARAM_PFVF(CQ_START);
3965 params[3] = FW_PARAM_PFVF(CQ_END);
3966 params[4] = FW_PARAM_PFVF(OCQ_START);
3967 params[5] = FW_PARAM_PFVF(OCQ_END);
3968 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params,
3972 adap->vres.qp.start = val[0];
3973 adap->vres.qp.size = val[1] - val[0] + 1;
3974 adap->vres.cq.start = val[2];
3975 adap->vres.cq.size = val[3] - val[2] + 1;
3976 adap->vres.ocq.start = val[4];
3977 adap->vres.ocq.size = val[5] - val[4] + 1;
3979 params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
3980 params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
3981 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params,
3984 adap->params.max_ordird_qp = 8;
3985 adap->params.max_ird_adapter = 32 * adap->tids.ntids;
3988 adap->params.max_ordird_qp = val[0];
3989 adap->params.max_ird_adapter = val[1];
3991 dev_info(adap->pdev_dev,
3992 "max_ordird_qp %d max_ird_adapter %d\n",
3993 adap->params.max_ordird_qp,
3994 adap->params.max_ird_adapter);
3995 adap->num_ofld_uld += 2;
3997 if (caps_cmd.iscsicaps) {
3998 params[0] = FW_PARAM_PFVF(ISCSI_START);
3999 params[1] = FW_PARAM_PFVF(ISCSI_END);
4000 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
4004 adap->vres.iscsi.start = val[0];
4005 adap->vres.iscsi.size = val[1] - val[0] + 1;
4006 /* LIO target and cxgb4i initiator */
4007 adap->num_ofld_uld += 2;
4009 if (caps_cmd.cryptocaps) {
4010 /* Should query params here...TODO */
4011 params[0] = FW_PARAM_PFVF(NCRYPTO_LOOKASIDE);
4012 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
4018 adap->vres.ncrypto_fc = val[0];
4020 adap->params.crypto |= ULP_CRYPTO_LOOKASIDE;
4023 #undef FW_PARAM_PFVF
4026 /* The MTU/MSS Table is initialized by now, so load its values. If
4027 * we're initializing the adapter, then we'll make any modifications
4028 * we want to the MTU/MSS Table and also initialize the congestion
4031 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
4032 if (state != DEV_STATE_INIT) {
4035 /* The default MTU Table contains values 1492 and 1500.
4036 * However, for TCP, it's better to have two values which are
4037 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
4038 * This allows us to have a TCP Data Payload which is a
4039 * multiple of 8 regardless of what combination of TCP Options
4040 * are in use (always a multiple of 4 bytes) which is
4041 * important for performance reasons. For instance, if no
4042 * options are in use, then we have a 20-byte IP header and a
4043 * 20-byte TCP header. In this case, a 1500-byte MSS would
4044 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
4045 * which is not a multiple of 8. So using an MSS of 1488 in
4046 * this case results in a TCP Data Payload of 1448 bytes which
4047 * is a multiple of 8. On the other hand, if 12-byte TCP Time
4048 * Stamps have been negotiated, then an MTU of 1500 bytes
4049 * results in a TCP Data Payload of 1448 bytes which, as
4050 * above, is a multiple of 8 bytes ...
4052 for (i = 0; i < NMTUS; i++)
4053 if (adap->params.mtus[i] == 1492) {
4054 adap->params.mtus[i] = 1488;
4058 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
4059 adap->params.b_wnd);
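/* Illustrative arithmetic for the 1492 -> 1488 substitution above:
 *
 *   no TCP options:      1500 - 40 = 1460   (not a multiple of 8)
 *                        1488 - 40 = 1448   (= 8 * 181)
 *   12B TCP timestamps:  1500 - 52 = 1448   (already a multiple of 8)
 *
 * With both 1488 and 1500 in the MTU table, one of them always yields a
 * TCP payload that is a multiple of 8 for the common option combinations.
 */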
4061 t4_init_sge_params(adap);
4062 adap->flags |= FW_OK;
4063 t4_init_tp_params(adap);
4067 * Something bad happened. If a command timed out or failed with EIO,
4068 * the FW is not operating within its spec or something catastrophic
4069 * happened to the HW/FW; stop issuing commands.
4072 kfree(adap->sge.egr_map);
4073 kfree(adap->sge.ingr_map);
4074 kfree(adap->sge.starving_fl);
4075 kfree(adap->sge.txq_maperr);
4076 #ifdef CONFIG_DEBUG_FS
4077 kfree(adap->sge.blocked_fl);
4079 if (ret != -ETIMEDOUT && ret != -EIO)
4080 t4_fw_bye(adap, adap->mbox);
4086 static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
4087 pci_channel_state_t state)
4090 struct adapter *adap = pci_get_drvdata(pdev);
4096 adap->flags &= ~FW_OK;
4097 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
4098 spin_lock(&adap->stats_lock);
4099 for_each_port(adap, i) {
4100 struct net_device *dev = adap->port[i];
4102 netif_device_detach(dev);
4103 netif_carrier_off(dev);
4106 spin_unlock(&adap->stats_lock);
4107 disable_interrupts(adap);
4108 if (adap->flags & FULL_INIT_DONE)
4111 if ((adap->flags & DEV_ENABLED)) {
4112 pci_disable_device(pdev);
4113 adap->flags &= ~DEV_ENABLED;
4115 out: return state == pci_channel_io_perm_failure ?
4116 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
4119 static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
4122 struct fw_caps_config_cmd c;
4123 struct adapter *adap = pci_get_drvdata(pdev);
4126 pci_restore_state(pdev);
4127 pci_save_state(pdev);
4128 return PCI_ERS_RESULT_RECOVERED;
4131 if (!(adap->flags & DEV_ENABLED)) {
4132 if (pci_enable_device(pdev)) {
4133 dev_err(&pdev->dev, "Cannot reenable PCI "
4134 "device after reset\n");
4135 return PCI_ERS_RESULT_DISCONNECT;
4137 adap->flags |= DEV_ENABLED;
4140 pci_set_master(pdev);
4141 pci_restore_state(pdev);
4142 pci_save_state(pdev);
4143 pci_cleanup_aer_uncorrect_error_status(pdev);
4145 if (t4_wait_dev_ready(adap->regs) < 0)
4146 return PCI_ERS_RESULT_DISCONNECT;
4147 if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
4148 return PCI_ERS_RESULT_DISCONNECT;
4149 adap->flags |= FW_OK;
4150 if (adap_init1(adap, &c))
4151 return PCI_ERS_RESULT_DISCONNECT;
4153 for_each_port(adap, i) {
4154 struct port_info *p = adap2pinfo(adap, i);
4156 ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1,
4159 return PCI_ERS_RESULT_DISCONNECT;
4161 p->xact_addr_filt = -1;
4164 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
4165 adap->params.b_wnd);
4168 return PCI_ERS_RESULT_DISCONNECT;
4169 return PCI_ERS_RESULT_RECOVERED;
4172 static void eeh_resume(struct pci_dev *pdev)
4175 struct adapter *adap = pci_get_drvdata(pdev);
4181 for_each_port(adap, i) {
4182 struct net_device *dev = adap->port[i];
4184 if (netif_running(dev)) {
4186 cxgb_set_rxmode(dev);
4188 netif_device_attach(dev);
4194 static const struct pci_error_handlers cxgb4_eeh = {
4195 .error_detected = eeh_err_detected,
4196 .slot_reset = eeh_slot_reset,
4197 .resume = eeh_resume,
4200 /* Return true if the Link Configuration supports "High Speeds" (those greater
4203 static inline bool is_x_10g_port(const struct link_config *lc)
4205 unsigned int speeds, high_speeds;
4207 speeds = FW_PORT_CAP_SPEED_V(FW_PORT_CAP_SPEED_G(lc->supported));
4208 high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G);
4210 return high_speeds != 0;
4214 * Perform default configuration of DMA queues depending on the number and type
4215 * of ports we found and the number of available CPUs. Most settings can be
4216 * modified by the admin prior to actual use.
4218 static void cfg_queues(struct adapter *adap)
4220 struct sge *s = &adap->sge;
4221 int i = 0, n10g = 0, qidx = 0;
4222 #ifndef CONFIG_CHELSIO_T4_DCB
4226 /* Reduce memory usage in kdump environment by disabling all offload.
4228 if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) {
4229 adap->params.offload = 0;
4230 adap->params.crypto = 0;
4233 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
4234 #ifdef CONFIG_CHELSIO_T4_DCB
4235 /* For Data Center Bridging support we need to be able to support up
4236 * to 8 Traffic Priorities; each of which will be assigned to its
4237 * own TX Queue in order to prevent Head-Of-Line Blocking.
4239 if (adap->params.nports * 8 > MAX_ETH_QSETS) {
4240 dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
4241 MAX_ETH_QSETS, adap->params.nports * 8);
4245 for_each_port(adap, i) {
4246 struct port_info *pi = adap2pinfo(adap, i);
4248 pi->first_qset = qidx;
4249 pi->nqsets = is_kdump_kernel() ? 1 : 8;
4252 #else /* !CONFIG_CHELSIO_T4_DCB */
4254 * We default to 1 queue per non-10G port and up to # of cores queues per 10G port.
4258 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
4259 if (q10g > netif_get_num_default_rss_queues())
4260 q10g = netif_get_num_default_rss_queues();
4262 if (is_kdump_kernel())
4265 for_each_port(adap, i) {
4266 struct port_info *pi = adap2pinfo(adap, i);
4268 pi->first_qset = qidx;
4269 pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
4272 #endif /* !CONFIG_CHELSIO_T4_DCB */
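/* Illustrative queue accounting for the non-DCB path above, assuming
 * MAX_ETH_QSETS = 32 and a 4-port adapter with two >1G ports (n10g = 2):
 *
 *   q10g = (32 - (4 - 2)) / 2 = 15, then capped at
 *          netif_get_num_default_rss_queues() (at most 8)
 *
 * so each high-speed port gets 8 queue sets, each 1G port gets 1, and
 * qidx ends up as 8 + 8 + 1 + 1 = 18 Ethernet queue sets.
 */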
4275 s->max_ethqsets = qidx; /* MSI-X may lower it later */
4279 * For offload we use 1 queue/channel if all ports are up to 1G,
4280 * otherwise we divide all available queues amongst the channels
4281 * capped by the number of available cores.
4284 i = min_t(int, MAX_OFLD_QSETS, num_online_cpus());
4285 s->ofldqsets = roundup(i, adap->params.nports);
4287 s->ofldqsets = adap->params.nports;
4291 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
4292 struct sge_eth_rxq *r = &s->ethrxq[i];
4294 init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
4298 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
4299 s->ethtxq[i].q.size = 1024;
4301 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
4302 s->ctrlq[i].q.size = 512;
4304 if (!is_t4(adap->params.chip))
4305 s->ptptxq.q.size = 8;
4307 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
4308 init_rspq(adap, &s->intrq, 0, 1, 512, 64);
4312 * Reduce the number of Ethernet queues across all ports to at most n.
4313 * n provides at least one queue per port.
4315 static void reduce_ethqs(struct adapter *adap, int n)
4318 struct port_info *pi;
4320 while (n < adap->sge.ethqsets)
4321 for_each_port(adap, i) {
4322 pi = adap2pinfo(adap, i);
4323 if (pi->nqsets > 1) {
4325 adap->sge.ethqsets--;
4326 if (adap->sge.ethqsets <= n)
4332 for_each_port(adap, i) {
4333 pi = adap2pinfo(adap, i);
4339 static int get_msix_info(struct adapter *adap)
4341 struct uld_msix_info *msix_info;
4342 unsigned int max_ingq = 0;
4344 if (is_offload(adap))
4345 max_ingq += MAX_OFLD_QSETS * adap->num_ofld_uld;
4346 if (is_pci_uld(adap))
4347 max_ingq += MAX_OFLD_QSETS * adap->num_uld;
4352 msix_info = kcalloc(max_ingq, sizeof(*msix_info), GFP_KERNEL);
4356 adap->msix_bmap_ulds.msix_bmap = kcalloc(BITS_TO_LONGS(max_ingq),
4357 sizeof(long), GFP_KERNEL);
4358 if (!adap->msix_bmap_ulds.msix_bmap) {
4362 spin_lock_init(&adap->msix_bmap_ulds.lock);
4363 adap->msix_info_ulds = msix_info;
4368 static void free_msix_info(struct adapter *adap)
4370 if (!(adap->num_uld && adap->num_ofld_uld))
4373 kfree(adap->msix_info_ulds);
4374 kfree(adap->msix_bmap_ulds.msix_bmap);
4377 /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
4378 #define EXTRA_VECS 2
4380 static int enable_msix(struct adapter *adap)
4382 int ofld_need = 0, uld_need = 0;
4383 int i, j, want, need, allocated;
4384 struct sge *s = &adap->sge;
4385 unsigned int nchan = adap->params.nports;
4386 struct msix_entry *entries;
4387 int max_ingq = MAX_INGQ;
4389 if (is_pci_uld(adap))
4390 max_ingq += (MAX_OFLD_QSETS * adap->num_uld);
4391 if (is_offload(adap))
4392 max_ingq += (MAX_OFLD_QSETS * adap->num_ofld_uld);
4393 entries = kmalloc(sizeof(*entries) * (max_ingq + 1),
4399 if (get_msix_info(adap)) {
4400 adap->params.offload = 0;
4401 adap->params.crypto = 0;
4404 for (i = 0; i < max_ingq + 1; ++i)
4405 entries[i].entry = i;
4407 want = s->max_ethqsets + EXTRA_VECS;
4408 if (is_offload(adap)) {
4409 want += adap->num_ofld_uld * s->ofldqsets;
4410 ofld_need = adap->num_ofld_uld * nchan;
4412 if (is_pci_uld(adap)) {
4413 want += adap->num_uld * s->ofldqsets;
4414 uld_need = adap->num_uld * nchan;
4416 #ifdef CONFIG_CHELSIO_T4_DCB
4417 /* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
4420 need = 8 * adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
4422 need = adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
4424 allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
4425 if (allocated < 0) {
4426 dev_info(adap->pdev_dev, "not enough MSI-X vectors left,"
4427 " not using MSI-X\n");
4432 /* Distribute available vectors to the various queue groups.
4433 * Every group gets its minimum requirement and NIC gets top
4434 * priority for leftovers.
4436 i = allocated - EXTRA_VECS - ofld_need - uld_need;
4437 if (i < s->max_ethqsets) {
4438 s->max_ethqsets = i;
4439 if (i < s->ethqsets)
4440 reduce_ethqs(adap, i);
4443 if (allocated < want)
4444 s->nqs_per_uld = nchan;
4446 s->nqs_per_uld = s->ofldqsets;
4449 for (i = 0; i < (s->max_ethqsets + EXTRA_VECS); ++i)
4450 adap->msix_info[i].vec = entries[i].vector;
4452 for (j = 0 ; i < allocated; ++i, j++) {
4453 adap->msix_info_ulds[j].vec = entries[i].vector;
4454 adap->msix_info_ulds[j].idx = i;
4456 adap->msix_bmap_ulds.mapsize = j;
4458 dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, "
4459 "nic %d per uld %d\n",
4460 allocated, s->max_ethqsets, s->nqs_per_uld);
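/* Illustrative vector accounting for the allocation above on a 2-port
 * NIC-only adapter (no ULDs) with max_ethqsets = 16, non-DCB build:
 *
 *   want = 16 + EXTRA_VECS(2)           = 18
 *   need =  2 (1/port) + EXTRA_VECS(2)  =  4
 *
 * pci_enable_msix_range() may return anything in [need, want]; after the
 * two extra vectors are set aside, the remainder caps max_ethqsets.
 */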
4468 static int init_rss(struct adapter *adap)
4473 err = t4_init_rss_mode(adap, adap->mbox);
4477 for_each_port(adap, i) {
4478 struct port_info *pi = adap2pinfo(adap, i);
4480 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
4487 static int cxgb4_get_pcie_dev_link_caps(struct adapter *adap,
4488 enum pci_bus_speed *speed,
4489 enum pcie_link_width *width)
4491 u32 lnkcap1, lnkcap2;
4494 #define PCIE_MLW_CAP_SHIFT 4 /* start of MLW mask in link capabilities */
4496 *speed = PCI_SPEED_UNKNOWN;
4497 *width = PCIE_LNK_WIDTH_UNKNOWN;
	err1 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP,
					  &lnkcap1);
	err2 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP2,
					  &lnkcap2);
4503 if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
4504 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
4505 *speed = PCIE_SPEED_8_0GT;
4506 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
4507 *speed = PCIE_SPEED_5_0GT;
4508 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
4509 *speed = PCIE_SPEED_2_5GT;
4512 *width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
4513 if (!lnkcap2) { /* pre-r3.0 */
4514 if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
4515 *speed = PCIE_SPEED_5_0GT;
4516 else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
4517 *speed = PCIE_SPEED_2_5GT;
4521 if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
4522 return err1 ? err1 : err2 ? err2 : -EINVAL;
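/* A worked example of the decode above (register values are illustrative):
 * a LNKCAP2 of 0xe has the 2.5, 5.0 and 8.0 GT/s SLS bits all set, so *speed
 * ends up as PCIE_SPEED_8_0GT; a LNKCAP of 0x83 carries 0x8 in the MLW field
 * (bits 9:4), giving *width = 8 lanes.  Pre-r3.0 devices report a zero
 * LNKCAP2, so the older LNKCAP SLS encoding is used instead.
 */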
4526 static void cxgb4_check_pcie_caps(struct adapter *adap)
4528 enum pcie_link_width width, width_cap;
4529 enum pci_bus_speed speed, speed_cap;
4531 #define PCIE_SPEED_STR(speed) \
4532 (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
4533 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
	 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
	 "Unknown")
4537 if (cxgb4_get_pcie_dev_link_caps(adap, &speed_cap, &width_cap)) {
4538 dev_warn(adap->pdev_dev,
4539 "Unable to determine PCIe device BW capabilities\n");
4543 if (pcie_get_minimum_link(adap->pdev, &speed, &width) ||
4544 speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
4545 dev_warn(adap->pdev_dev,
4546 "Unable to determine PCI Express bandwidth.\n");
4550 dev_info(adap->pdev_dev, "PCIe link speed is %s, device supports %s\n",
4551 PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
	dev_info(adap->pdev_dev, "PCIe link width is x%d, device supports x%d\n",
		 width, width_cap);
4554 if (speed < speed_cap || width < width_cap)
4555 dev_info(adap->pdev_dev,
4556 "A slot with more lanes and/or higher speed is "
4557 "suggested for optimal performance.\n");
4560 /* Dump basic information about the adapter */
4561 static void print_adapter_info(struct adapter *adapter)
4563 /* Device information */
4564 dev_info(adapter->pdev_dev, "Chelsio %s rev %d\n",
4565 adapter->params.vpd.id,
4566 CHELSIO_CHIP_RELEASE(adapter->params.chip));
4567 dev_info(adapter->pdev_dev, "S/N: %s, P/N: %s\n",
4568 adapter->params.vpd.sn, adapter->params.vpd.pn);
4570 /* Firmware Version */
4571 if (!adapter->params.fw_vers)
4572 dev_warn(adapter->pdev_dev, "No firmware loaded\n");
	else
		dev_info(adapter->pdev_dev, "Firmware version: %u.%u.%u.%u\n",
			 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
			 FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers));
4580 /* Bootstrap Firmware Version. (Some adapters don't have Bootstrap
4581 * Firmware, so dev_info() is more appropriate here.)
4583 if (!adapter->params.bs_vers)
4584 dev_info(adapter->pdev_dev, "No bootstrap loaded\n");
	else
		dev_info(adapter->pdev_dev, "Bootstrap version: %u.%u.%u.%u\n",
			 FW_HDR_FW_VER_MAJOR_G(adapter->params.bs_vers),
			 FW_HDR_FW_VER_MINOR_G(adapter->params.bs_vers),
			 FW_HDR_FW_VER_MICRO_G(adapter->params.bs_vers),
			 FW_HDR_FW_VER_BUILD_G(adapter->params.bs_vers));
4592 /* TP Microcode Version */
4593 if (!adapter->params.tp_vers)
4594 dev_warn(adapter->pdev_dev, "No TP Microcode loaded\n");
	else
		dev_info(adapter->pdev_dev,
			 "TP Microcode version: %u.%u.%u.%u\n",
			 FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
			 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
			 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
			 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
4603 /* Expansion ROM version */
4604 if (!adapter->params.er_vers)
4605 dev_info(adapter->pdev_dev, "No Expansion ROM loaded\n");
	else
		dev_info(adapter->pdev_dev,
			 "Expansion ROM version: %u.%u.%u.%u\n",
			 FW_HDR_FW_VER_MAJOR_G(adapter->params.er_vers),
			 FW_HDR_FW_VER_MINOR_G(adapter->params.er_vers),
			 FW_HDR_FW_VER_MICRO_G(adapter->params.er_vers),
			 FW_HDR_FW_VER_BUILD_G(adapter->params.er_vers));
4614 /* Software/Hardware configuration */
4615 dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n",
4616 is_offload(adapter) ? "R" : "",
4617 ((adapter->flags & USING_MSIX) ? "MSI-X" :
4618 (adapter->flags & USING_MSI) ? "MSI" : ""),
4619 is_offload(adapter) ? "Offload" : "non-Offload");
4622 static void print_port_info(const struct net_device *dev)
4626 const char *spd = "";
4627 const struct port_info *pi = netdev_priv(dev);
4628 const struct adapter *adap = pi->adapter;
	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
		spd = " 2.5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
		spd = " 5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
		spd = " 8 GT/s";
4637 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
4638 bufp += sprintf(bufp, "100M/");
4639 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
4640 bufp += sprintf(bufp, "1G/");
4641 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
4642 bufp += sprintf(bufp, "10G/");
4643 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_25G)
4644 bufp += sprintf(bufp, "25G/");
4645 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
4646 bufp += sprintf(bufp, "40G/");
4647 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100G)
4648 bufp += sprintf(bufp, "100G/");
4651 sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
4653 netdev_info(dev, "%s: Chelsio %s (%s) %s\n",
4654 dev->name, adap->params.vpd.id, adap->name, buf);
4658 * Free the following resources:
4659 * - memory used for tables
4662 * - resources FW is holding for us
4664 static void free_some_resources(struct adapter *adapter)
4668 kvfree(adapter->l2t);
4669 t4_cleanup_sched(adapter);
4670 kvfree(adapter->tids.tid_tab);
4671 cxgb4_cleanup_tc_u32(adapter);
4672 kfree(adapter->sge.egr_map);
4673 kfree(adapter->sge.ingr_map);
4674 kfree(adapter->sge.starving_fl);
4675 kfree(adapter->sge.txq_maperr);
4676 #ifdef CONFIG_DEBUG_FS
4677 kfree(adapter->sge.blocked_fl);
4679 disable_msi(adapter);
4681 for_each_port(adapter, i)
4682 if (adapter->port[i]) {
4683 struct port_info *pi = adap2pinfo(adapter, i);
4686 t4_free_vi(adapter, adapter->mbox, adapter->pf,
4688 kfree(adap2pinfo(adapter, i)->rss);
4689 free_netdev(adapter->port[i]);
4691 if (adapter->flags & FW_OK)
4692 t4_fw_bye(adapter, adapter->pf);
4695 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
4696 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
4697 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
4698 #define SEGMENT_SIZE 128
4700 static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
4704 /* Retrieve adapter's device ID */
4705 pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
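	/* The chip family is encoded in the top nibble of the PCI device ID:
	 * 0x4xxx = T4, 0x5xxx = T5, 0x6xxx = T6; hence the switch on
	 * device_id >> 12 below.
	 */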
4707 switch (device_id >> 12) {
4709 return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
4711 return CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
4713 return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
		dev_err(&pdev->dev, "Device %d is not supported\n", device_id);
4721 #ifdef CONFIG_PCI_IOV
4722 static void dummy_setup(struct net_device *dev)
4724 dev->type = ARPHRD_NONE;
4726 dev->hard_header_len = 0;
4728 dev->tx_queue_len = 0;
4729 dev->flags |= IFF_NOARP;
4730 dev->priv_flags |= IFF_NO_QUEUE;
4732 /* Initialize the device structure. */
4733 dev->netdev_ops = &cxgb4_mgmt_netdev_ops;
4734 dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops;
4735 dev->needs_free_netdev = true;
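/* The management netdev set up above never carries traffic (ARPHRD_NONE,
 * IFF_NO_QUEUE, IFF_NOARP); it exists solely to give the PF a netdev handle
 * for the VF-management ndo and ethtool operations.
 */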
4738 static int config_mgmt_dev(struct pci_dev *pdev)
4740 struct adapter *adap = pci_get_drvdata(pdev);
4741 struct net_device *netdev;
4742 struct port_info *pi;
4743 char name[IFNAMSIZ];
4746 snprintf(name, IFNAMSIZ, "mgmtpf%d%d", adap->adap_idx, adap->pf);
4747 netdev = alloc_netdev(sizeof(struct port_info), name, NET_NAME_UNKNOWN,
4752 pi = netdev_priv(netdev);
4754 pi->port_id = adap->pf % adap->params.nports;
4755 SET_NETDEV_DEV(netdev, &pdev->dev);
4757 adap->port[0] = netdev;
4759 err = register_netdev(adap->port[0]);
4761 pr_info("Unable to register VF mgmt netdev %s\n", name);
4762 free_netdev(adap->port[0]);
4763 adap->port[0] = NULL;
4769 static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
4771 struct adapter *adap = pci_get_drvdata(pdev);
4773 int current_vfs = pci_num_vf(pdev);
4776 pcie_fw = readl(adap->regs + PCIE_FW_A);
4777 /* Check if cxgb4 is the MASTER and fw is initialized */
4778 if (!(pcie_fw & PCIE_FW_INIT_F) ||
4779 !(pcie_fw & PCIE_FW_MASTER_VLD_F) ||
4780 PCIE_FW_MASTER_G(pcie_fw) != 4) {
4781 dev_warn(&pdev->dev,
4782 "cxgb4 driver needs to be MASTER to support SRIOV\n");
	/* If any of the VFs is already assigned to a guest OS, then
	 * SR-IOV cannot be modified for this device.
	 */
4789 if (current_vfs && pci_vfs_assigned(pdev)) {
4791 "Cannot modify SR-IOV while VFs are assigned\n");
4792 num_vfs = current_vfs;
	/* Disable SR-IOV when zero is passed.  SR-IOV must be disabled
	 * before being modified, otherwise the PCI core warns:
	 * "'n' VFs already enabled. Disable before enabling 'm' VFs."
	 */
4802 pci_disable_sriov(pdev);
4803 if (adap->port[0]) {
4804 unregister_netdev(adap->port[0]);
4805 adap->port[0] = NULL;
4807 /* free VF resources */
4808 kfree(adap->vfinfo);
4809 adap->vfinfo = NULL;
4814 if (num_vfs != current_vfs) {
4815 err = pci_enable_sriov(pdev, num_vfs);
4819 adap->num_vfs = num_vfs;
4820 err = config_mgmt_dev(pdev);
4825 adap->vfinfo = kcalloc(adap->num_vfs,
4826 sizeof(struct vf_info), GFP_KERNEL);
4828 fill_vf_station_mac_addr(adap);
4833 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
4835 int func, i, err, s_qpp, qpp, num_seg;
4836 struct port_info *pi;
4837 bool highdma = false;
4838 struct adapter *adapter = NULL;
4839 struct net_device *netdev;
4842 enum chip_type chip;
4843 static int adap_idx = 1;
4844 #ifdef CONFIG_PCI_IOV
4848 printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
4850 err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}
4857 err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}
4863 regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_disable_device;
	}
4870 err = t4_wait_dev_ready(regs);
	if (err)
		goto out_unmap_bar0;
4874 /* We control everything through one PF */
4875 whoami = readl(regs + PL_WHOAMI_A);
4876 pl_rev = REV_G(readl(regs + PL_REV_A));
4877 chip = get_chip_type(pdev, pl_rev);
4878 func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
4879 SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
4880 if (func != ent->driver_data) {
4881 #ifndef CONFIG_PCI_IOV
4884 pci_disable_device(pdev);
4885 pci_save_state(pdev); /* to restore SR-IOV later */
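		/* This PF is not the one the PCI ID table binds full probing
		 * to, so normal initialization is skipped here; with
		 * CONFIG_PCI_IOV the probe continues at the minimal
		 * SR-IOV-only adapter setup near the end of init_one().
		 */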
4889 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4891 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev,
				"unable to obtain 64-bit DMA for coherent allocations\n");
4895 goto out_unmap_bar0;
4898 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4900 dev_err(&pdev->dev, "no usable DMA configuration\n");
4901 goto out_unmap_bar0;
4905 pci_enable_pcie_error_reporting(pdev);
4906 pci_set_master(pdev);
4907 pci_save_state(pdev);
4909 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
4912 goto out_unmap_bar0;
4916 adapter->workq = create_singlethread_workqueue("cxgb4");
4917 if (!adapter->workq) {
		err = -ENOMEM;
		goto out_free_adapter;
	}
4922 adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
4923 (sizeof(struct mbox_cmd) *
4924 T4_OS_LOG_MBOX_CMDS),
4926 if (!adapter->mbox_log) {
		err = -ENOMEM;
		goto out_free_adapter;
	}
4930 adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;
4932 /* PCI device has been enabled */
4933 adapter->flags |= DEV_ENABLED;
4935 adapter->regs = regs;
4936 adapter->pdev = pdev;
4937 adapter->pdev_dev = &pdev->dev;
4938 adapter->name = pci_name(pdev);
4939 adapter->mbox = func;
4941 adapter->msg_enable = DFLT_MSG_ENABLE;
4942 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
4944 /* If possible, we use PCIe Relaxed Ordering Attribute to deliver
4945 * Ingress Packet Data to Free List Buffers in order to allow for
4946 * chipset performance optimizations between the Root Complex and
4947 * Memory Controllers. (Messages to the associated Ingress Queue
4948 * notifying new Packet Placement in the Free Lists Buffers will be
 * sent without the Relaxed Ordering Attribute, thus guaranteeing that
4950 * all preceding PCIe Transaction Layer Packets will be processed
4951 * first.) But some Root Complexes have various issues with Upstream
4952 * Transaction Layer Packets with the Relaxed Ordering Attribute set.
 * For PCIe devices under such Root Complexes the kernel clears the Relaxed
 * Ordering bit in the configuration space, so we check our own PCIe
 * configuration space to see if it's flagged with advice against using
 * Relaxed Ordering.
4958 if (!pcie_relaxed_ordering_enabled(pdev))
4959 adapter->flags |= ROOT_NO_RELAXED_ORDERING;
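	/* The flag set here is consulted later, when the SGE ingress queues
	 * are created, so that Free List fetches avoid the Relaxed Ordering
	 * attribute on such systems (see the RX queue setup in sge.c).
	 */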
4961 spin_lock_init(&adapter->stats_lock);
4962 spin_lock_init(&adapter->tid_release_lock);
4963 spin_lock_init(&adapter->win0_lock);
4964 spin_lock_init(&adapter->mbox_lock);
4966 INIT_LIST_HEAD(&adapter->mlist.list);
4968 INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
4969 INIT_WORK(&adapter->db_full_task, process_db_full);
4970 INIT_WORK(&adapter->db_drop_task, process_db_drop);
4972 err = t4_prep_adapter(adapter);
	if (err)
		goto out_free_adapter;
4977 if (!is_t4(adapter->params.chip)) {
4978 s_qpp = (QUEUESPERPAGEPF0_S +
4979 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
4981 qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
4982 SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
4983 num_seg = PAGE_SIZE / SEGMENT_SIZE;
		/* Each segment is 128 B in size.  Write coalescing is enabled
		 * only when the SGE_EGRESS_QUEUES_PER_PAGE_PF register value
		 * for the queue is no more than the number of segments that
		 * fit in a page.
		 */
4990 if (qpp > num_seg) {
4992 "Incorrect number of egress queues per page\n");
4994 goto out_free_adapter;
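		/* Illustrative sizing: with the common 4 KiB PAGE_SIZE and
		 * 128 B segments, num_seg = 32, so any setting of up to 32
		 * egress queues per page permits the write-combined BAR2
		 * mapping set up just below.
		 */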
4996 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
4997 pci_resource_len(pdev, 2));
4998 if (!adapter->bar2) {
4999 dev_err(&pdev->dev, "cannot map device bar2 region\n");
			err = -ENOMEM;
			goto out_free_adapter;
		}
5005 setup_memwin(adapter);
5006 err = adap_init0(adapter);
5007 #ifdef CONFIG_DEBUG_FS
5008 bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
5010 setup_memwin_rdma(adapter);
5014 /* configure SGE_STAT_CFG_A to read WC stats */
5015 if (!is_t4(adapter->params.chip))
5016 t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) |
5017 (is_t5(adapter->params.chip) ? STATMODE_V(0) :
5020 for_each_port(adapter, i) {
5021 netdev = alloc_etherdev_mq(sizeof(struct port_info),
5028 SET_NETDEV_DEV(netdev, &pdev->dev);
5030 adapter->port[i] = netdev;
5031 pi = netdev_priv(netdev);
5032 pi->adapter = adapter;
5033 pi->xact_addr_filt = -1;
5035 netdev->irq = pdev->irq;
5037 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
5038 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
5039 NETIF_F_RXCSUM | NETIF_F_RXHASH |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
			NETIF_F_HW_TC;
		if (highdma)
5043 netdev->hw_features |= NETIF_F_HIGHDMA;
5044 netdev->features |= netdev->hw_features;
5045 netdev->vlan_features = netdev->features & VLAN_FEAT;
5047 netdev->priv_flags |= IFF_UNICAST_FLT;
5049 /* MTU range: 81 - 9600 */
5050 netdev->min_mtu = 81;
5051 netdev->max_mtu = MAX_MTU;
5053 netdev->netdev_ops = &cxgb4_netdev_ops;
5054 #ifdef CONFIG_CHELSIO_T4_DCB
5055 netdev->dcbnl_ops = &cxgb4_dcb_ops;
5056 cxgb4_dcb_state_init(netdev);
5058 cxgb4_set_ethtool_ops(netdev);
5061 pci_set_drvdata(pdev, adapter);
5063 if (adapter->flags & FW_OK) {
5064 err = t4_port_init(adapter, func, func, 0);
5067 } else if (adapter->params.nports == 1) {
5068 /* If we don't have a connection to the firmware -- possibly
5069 * because of an error -- grab the raw VPD parameters so we
5070 * can set the proper MAC Address on the debug network
5071 * interface that we've created.
5073 u8 hw_addr[ETH_ALEN];
5074 u8 *na = adapter->params.vpd.na;
5076 err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
		if (!err) {
			for (i = 0; i < ETH_ALEN; i++)
5079 hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
5080 hex2val(na[2 * i + 1]));
5081 t4_set_hw_addr(adapter, 0, hw_addr);
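		/* The VPD "na" field holds the node address as ASCII hex, so a
		 * hypothetical "0007430A1B2C" becomes 00:07:43:0a:1b:2c;
		 * hex2val() converts one nibble character per call, and the
		 * "* 16 +" above merges each pair into a byte.
		 */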
5085 /* Configure queues and allocate tables now, they can be needed as
5086 * soon as the first register_netdev completes.
5088 cfg_queues(adapter);
5090 adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
5091 if (!adapter->l2t) {
5092 /* We tolerate a lack of L2T, giving up some functionality */
5093 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
5094 adapter->params.offload = 0;
5097 #if IS_ENABLED(CONFIG_IPV6)
5098 if ((CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) &&
5099 (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) {
5100 /* CLIP functionality is not present in hardware,
5101 * hence disable all offload features
5103 dev_warn(&pdev->dev,
5104 "CLIP not enabled in hardware, continuing\n");
5105 adapter->params.offload = 0;
5107 adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
5108 adapter->clipt_end);
5109 if (!adapter->clipt) {
5110 /* We tolerate a lack of clip_table, giving up
5111 * some functionality
5113 dev_warn(&pdev->dev,
5114 "could not allocate Clip table, continuing\n");
5115 adapter->params.offload = 0;
5120 for_each_port(adapter, i) {
5121 pi = adap2pinfo(adapter, i);
5122 pi->sched_tbl = t4_init_sched(adapter->params.nsched_cls);
		if (!pi->sched_tbl)
			dev_warn(&pdev->dev,
				 "could not activate scheduling on port %d\n",
				 i);
5129 if (tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, continuing\n");
5132 adapter->params.offload = 0;
5134 adapter->tc_u32 = cxgb4_init_tc_u32(adapter);
5135 if (!adapter->tc_u32)
5136 dev_warn(&pdev->dev,
5137 "could not offload tc u32, continuing\n");
5140 if (is_offload(adapter)) {
5141 if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
5142 u32 hash_base, hash_reg;
			/* chip encodes both version and revision, so compare
			 * only the version field (as done for "func" above).
			 */
			if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5) {
5145 hash_reg = LE_DB_TID_HASHBASE_A;
5146 hash_base = t4_read_reg(adapter, hash_reg);
5147 adapter->tids.hash_base = hash_base / 4;
5149 hash_reg = T6_LE_DB_HASH_TID_BASE_A;
5150 hash_base = t4_read_reg(adapter, hash_reg);
5151 adapter->tids.hash_base = hash_base;
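			/* Note on the divide-by-4 in the T5 branch above: it
			 * appears to convert a byte-addressed register value
			 * into an entry index, while T6 reports the hash TID
			 * base directly; this is an inference from the code,
			 * not from documented register semantics.
			 */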
5156 /* See what interrupts we'll be using */
5157 if (msi > 1 && enable_msix(adapter) == 0)
5158 adapter->flags |= USING_MSIX;
5159 else if (msi > 0 && pci_enable_msi(pdev) == 0) {
5160 adapter->flags |= USING_MSI;
		if (msi > 1)
			free_msix_info(adapter);
	}
	/* check for PCI Express bandwidth capabilities */
5166 cxgb4_check_pcie_caps(adapter);
5168 err = init_rss(adapter);
5173 * The card is now ready to go. If any errors occur during device
5174 * registration we do not fail the whole card but rather proceed only
 * with the ports we manage to register successfully. However, we must
5176 * register at least one net device.
5178 for_each_port(adapter, i) {
5179 pi = adap2pinfo(adapter, i);
5180 adapter->port[i]->dev_port = pi->lport;
5181 netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
5182 netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
5184 netif_carrier_off(adapter->port[i]);
5186 err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
5190 print_port_info(adapter->port[i]);
5193 dev_err(&pdev->dev, "could not register any net devices\n");
5197 dev_warn(&pdev->dev, "only %d net devices registered\n", i);
5201 if (cxgb4_debugfs_root) {
5202 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
5203 cxgb4_debugfs_root);
5204 setup_debugfs(adapter);
5207 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
5208 pdev->needs_freset = 1;
5210 if (is_uld(adapter)) {
5211 mutex_lock(&uld_mutex);
5212 list_add_tail(&adapter->list_node, &adapter_list);
5213 mutex_unlock(&uld_mutex);
5216 if (!is_t4(adapter->params.chip))
5217 cxgb4_ptp_init(adapter);
5219 print_adapter_info(adapter);
5220 setup_fw_sge_queues(adapter);
5224 #ifdef CONFIG_PCI_IOV
5225 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto free_pci_region;
	}
5231 adapter->pdev = pdev;
5232 adapter->pdev_dev = &pdev->dev;
5233 adapter->name = pci_name(pdev);
5234 adapter->mbox = func;
5236 adapter->regs = regs;
5237 adapter->adap_idx = adap_idx;
5238 adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
5239 (sizeof(struct mbox_cmd) *
5240 T4_OS_LOG_MBOX_CMDS),
5242 if (!adapter->mbox_log) {
5246 spin_lock_init(&adapter->mbox_lock);
5247 INIT_LIST_HEAD(&adapter->mlist.list);
5249 v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
5250 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
	err = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, 1,
			      &v, &port_vec);
5254 dev_err(adapter->pdev_dev, "Could not fetch port params\n");
5258 adapter->params.nports = hweight32(port_vec);
5259 pci_set_drvdata(pdev, adapter);
5263 kfree(adapter->mbox_log);
5268 pci_disable_sriov(pdev);
5269 pci_release_regions(pdev);
5276 free_some_resources(adapter);
5277 if (adapter->flags & USING_MSIX)
5278 free_msix_info(adapter);
5279 if (adapter->num_uld || adapter->num_ofld_uld)
5280 t4_uld_mem_free(adapter);
5282 if (!is_t4(adapter->params.chip))
5283 iounmap(adapter->bar2);
5286 destroy_workqueue(adapter->workq);
5288 kfree(adapter->mbox_log);
5293 pci_disable_pcie_error_reporting(pdev);
5294 pci_disable_device(pdev);
5295 out_release_regions:
5296 pci_release_regions(pdev);
5300 static void remove_one(struct pci_dev *pdev)
5302 struct adapter *adapter = pci_get_drvdata(pdev);
	if (!adapter) {
		pci_release_regions(pdev);
		return;
	}
5309 if (adapter->pf == 4) {
5312 /* Tear down per-adapter Work Queue first since it can contain
5313 * references to our adapter data structure.
5315 destroy_workqueue(adapter->workq);
5317 if (is_uld(adapter)) {
5318 detach_ulds(adapter);
5319 t4_uld_clean_up(adapter);
5322 disable_interrupts(adapter);
5324 for_each_port(adapter, i)
5325 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
5326 unregister_netdev(adapter->port[i]);
5328 debugfs_remove_recursive(adapter->debugfs_root);
5330 if (!is_t4(adapter->params.chip))
5331 cxgb4_ptp_stop(adapter);
	/* If we allocated filters, free up state associated with any
	 * of them before tearing the adapter down.
	 */
5336 clear_all_filters(adapter);
5338 if (adapter->flags & FULL_INIT_DONE)
5341 if (adapter->flags & USING_MSIX)
5342 free_msix_info(adapter);
5343 if (adapter->num_uld || adapter->num_ofld_uld)
5344 t4_uld_mem_free(adapter);
5345 free_some_resources(adapter);
5346 #if IS_ENABLED(CONFIG_IPV6)
5347 t4_cleanup_clip_tbl(adapter);
5349 iounmap(adapter->regs);
5350 if (!is_t4(adapter->params.chip))
5351 iounmap(adapter->bar2);
5352 pci_disable_pcie_error_reporting(pdev);
	if (adapter->flags & DEV_ENABLED) {
5354 pci_disable_device(pdev);
5355 adapter->flags &= ~DEV_ENABLED;
5357 pci_release_regions(pdev);
5358 kfree(adapter->mbox_log);
5362 #ifdef CONFIG_PCI_IOV
5364 if (adapter->port[0])
5365 unregister_netdev(adapter->port[0]);
5366 iounmap(adapter->regs);
5367 kfree(adapter->vfinfo);
5368 kfree(adapter->mbox_log);
5370 pci_disable_sriov(pdev);
5371 pci_release_regions(pdev);
5376 /* "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt
5377 * delivery. This is essentially a stripped down version of the PCI remove()
 * function where we do the minimal amount of work necessary to shut down any
 * active devices.
 */
5381 static void shutdown_one(struct pci_dev *pdev)
5383 struct adapter *adapter = pci_get_drvdata(pdev);
	/* As with remove_one() above (see extended comment), we only want to
	 * do cleanup on PCI Devices which went all the way through init_one()
	 */
	if (!adapter) {
		pci_release_regions(pdev);
		return;
	}
5394 if (adapter->pf == 4) {
5397 for_each_port(adapter, i)
5398 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
5399 cxgb_close(adapter->port[i]);
5401 if (is_uld(adapter)) {
5402 detach_ulds(adapter);
5403 t4_uld_clean_up(adapter);
5406 disable_interrupts(adapter);
5407 disable_msi(adapter);
5409 t4_sge_stop(adapter);
5410 if (adapter->flags & FW_OK)
5411 t4_fw_bye(adapter, adapter->mbox);
5413 #ifdef CONFIG_PCI_IOV
5415 if (adapter->port[0])
5416 unregister_netdev(adapter->port[0]);
5417 iounmap(adapter->regs);
5418 kfree(adapter->vfinfo);
5419 kfree(adapter->mbox_log);
5421 pci_disable_sriov(pdev);
5422 pci_release_regions(pdev);
5427 static struct pci_driver cxgb4_driver = {
5428 .name = KBUILD_MODNAME,
5429 .id_table = cxgb4_pci_tbl,
	.probe = init_one,
	.remove = remove_one,
5432 .shutdown = shutdown_one,
5433 #ifdef CONFIG_PCI_IOV
5434 .sriov_configure = cxgb4_iov_configure,
5436 .err_handler = &cxgb4_eeh,
5439 static int __init cxgb4_init_module(void)
5443 /* Debugfs support is optional, just warn if this fails */
5444 cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
5445 if (!cxgb4_debugfs_root)
5446 pr_warn("could not create debugfs entry, continuing\n");
5448 ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0)
		debugfs_remove(cxgb4_debugfs_root);
5452 #if IS_ENABLED(CONFIG_IPV6)
5453 if (!inet6addr_registered) {
5454 register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
5455 inet6addr_registered = true;
5462 static void __exit cxgb4_cleanup_module(void)
5464 #if IS_ENABLED(CONFIG_IPV6)
5465 if (inet6addr_registered) {
5466 unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
5467 inet6addr_registered = false;
5470 pci_unregister_driver(&cxgb4_driver);
5471 debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
5474 module_init(cxgb4_init_module);
5475 module_exit(cxgb4_cleanup_module);