/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/bonding.h>
#include <linux/uaccess.h>
#include <linux/crash_dump.h>
#include <net/udp_tunnel.h>
#if defined(CONFIG_CHELSIO_TLS_DEVICE)
#include <net/tls.h>
#endif
#include "cxgb4_filter.h"
#include "t4_values.h"
#include "t4fw_version.h"
#include "cxgb4_dcb.h"
#include "cxgb4_debugfs.h"
#include "cxgb4_tc_u32.h"
#include "cxgb4_tc_flower.h"
#include "cxgb4_tc_mqprio.h"
#include "cxgb4_tc_matchall.h"
#include "cxgb4_ptp.h"
#include "cxgb4_cudbg.h"
char cxgb4_driver_name[] = KBUILD_MODNAME;

#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
/* Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct pci_device_id cxgb4_pci_tbl[] = {
#define CXGB4_UNIFIED_PF 0x4

#define CH_PCI_DEVICE_ID_FUNCTION CXGB4_UNIFIED_PF

/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
 * called for both.
 */
#define CH_PCI_DEVICE_ID_FUNCTION2 0x0

#define CH_PCI_ID_TABLE_ENTRY(devid) \
	{PCI_VDEVICE(CHELSIO, (devid)), CXGB4_UNIFIED_PF}

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
		{ 0, } \
	}

#include "t4_pci_id_tbl.h"
#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW6_FNAME "cxgb4/t6fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"
#define FW6_CFNAME "cxgb4/t6-config.txt"
#define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
#define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
#define PHY_AQ1202_DEVICEID 0x4409
#define PHY_BCM84834_DEVICEID 0x4486

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
MODULE_FIRMWARE(FW6_FNAME);
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
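/* Illustrative usage (not driver code): because the parameter is declared
 * with mode 0644, it can be given at load time, e.g. "modprobe cxgb4 msi=1"
 * to restrict the driver to MSI/INTx, or inspected and changed later via
 * /sys/module/cxgb4/parameters/msi.
 */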
158 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
159 * offset by 2 bytes in order to have the IP headers line up on 4-byte
160 * boundaries. This is a requirement for many architectures which will throw
161 * a machine check fault if an attempt is made to access one of the 4-byte IP
162 * header fields on a non-4-byte boundary. And it's a major performance issue
163 * even on some architectures which allow it like some implementations of the
164 * x86 ISA. However, some architectures don't mind this and for some very
165 * edge-case performance sensitive applications (like forwarding large volumes
166 * of small packets), setting this DMA offset to 0 will decrease the number of
167 * PCI-E Bus transfers enough to measurably affect performance.
169 static int rx_dma_offset = 2;
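/* Illustrative layout (assumes a 4-byte-aligned DMA buffer and an untagged
 * Ethernet frame): with rx_dma_offset == 2, the 14-byte Ethernet header ends
 * at offset 16, so the IP header starts on a 4-byte boundary.
 *
 *	buf + 0  .. buf + 1  : 2-byte pad (rx_dma_offset)
 *	buf + 2  .. buf + 15 : Ethernet header (14 bytes)
 *	buf + 16 .. ...      : IP header, 4-byte aligned
 */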
/* TX Queue select used to determine what algorithm to use for selecting TX
 * queue.  Select between the kernel-provided function (select_queue=0) or the
 * driver's cxgb_select_queue function (select_queue=1).
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
		 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
static struct dentry *cxgb4_debugfs_root;

LIST_HEAD(adapter_list);
DEFINE_MUTEX(uld_mutex);

static int cfg_queues(struct adapter *adap);
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

		const char *s;
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		default:
			pr_info("%s: unsupported speed: %d\n",
				dev->name, p->link_cfg.speed);
			return;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
			    fc[p->link_cfg.fc]);
	}
}
#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
	int i;

	/* We use a simple mapping of Port TX Queue Index to DCB
	 * Priority when we're enabling DCB.
	 */
	for (i = 0; i < pi->nqsets; i++, txq++) {
		u32 name, value;
		int err;

		name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			FW_PARAMS_PARAM_X_V(
				FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
			FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
		value = enable ? i : 0xffffffff;

		/* Since we can be called while atomic (from "interrupt
		 * level") we need to issue the Set Parameters Command
		 * without sleeping (timeout < 0).
		 */
		err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
					    &name, &value,
					    -FW_CMD_MAX_TIMEOUT);
		if (err)
			dev_err(adap->pdev_dev,
				"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
				enable ? "set" : "unset", pi->port_id, i, -err);
		else
			txq->dcb_prio = enable ? value : 0;
	}
}

int cxgb4_dcb_enabled(const struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	if (!pi->dcb.enabled)
		return 0;

	return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
		(pi->dcb.state == CXGB4_DCB_STATE_HOST));
}
#endif /* CONFIG_CHELSIO_T4_DCB */
void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else {
#ifdef CONFIG_CHELSIO_T4_DCB
			if (cxgb4_dcb_enabled(dev)) {
				cxgb4_dcb_reset(dev);
				dcb_tx_queue_prio_enable(dev, false);
			}
#endif /* CONFIG_CHELSIO_T4_DCB */
			netif_carrier_off(dev);
		}

		link_report(dev);
	}
}
void t4_os_portmod_changed(struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	struct net_device *dev = adap->port[port_id];
	struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		netdev_info(dev, "%s: unsupported port module inserted\n",
			    dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		netdev_info(dev, "%s: unknown port module inserted\n",
			    dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
		netdev_info(dev, "%s: transceiver module error\n", dev->name);
	else
		netdev_info(dev, "%s: unknown module type %d inserted\n",
			    dev->name, pi->mod_type);

	/* If the interface is running, then we'll need any "sticky" Link
	 * Parameters redone with a new Transceiver Module.
	 */
	pi->link_cfg.redo_l1cfg = netif_running(dev);
}
int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");
static inline int cxgb4_set_addr_hash(struct port_info *pi)
{
	struct adapter *adap = pi->adapter;
	u64 vec = 0;
	bool ucast = false;
	struct hash_mac_addr *entry;

	/* Calculate the hash vector for the updated list and program it */
	list_for_each_entry(entry, &adap->mac_hlist, list) {
		ucast |= is_unicast_ether_addr(entry->addr);
		vec |= (1ULL << hash_mac_addr(entry->addr));
	}
	return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast,
				vec, false);
}
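/* Illustrative example (not driver code): hash_mac_addr() reduces a MAC
 * address to a small index into the 64-bit hash vector, and the loop above
 * collects one bit per address.  E.g. if two addresses hash to 3 and 17,
 * the computed vector is
 *
 *	vec = (1ULL << 3) | (1ULL << 17);
 *
 * which is then programmed via t4_set_addr_hash().
 */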
static int cxgb4_mac_sync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	u64 mhash = 0;
	u64 uhash = 0;
	/* idx stores the index of allocated filters,
	 * its size should be modified based on the number of
	 * MAC addresses that we allocate filters for
	 */

	u16 idx[1] = {};
	bool free = false;
	bool ucast = is_unicast_ether_addr(mac_addr);
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *new_entry;

	ret = cxgb4_alloc_mac_filt(adap, pi->viid, free, 1, maclist,
				   idx, ucast ? &uhash : &mhash, false);
	if (ret < 0)
		goto out;
	/* if hash != 0, then add the addr to the hash addr list
	 * so at the end we will calculate the hash for the
	 * list and program it
	 */
	if (uhash || mhash) {
		new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
		if (!new_entry)
			return -ENOMEM;
		ether_addr_copy(new_entry->addr, mac_addr);
		list_add_tail(&new_entry->list, &adap->mac_hlist);
		ret = cxgb4_set_addr_hash(pi);
	}
out:
	return ret < 0 ? ret : 0;
}
static int cxgb4_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *entry, *tmp;

	/* If the MAC address to be removed is in the hash addr
	 * list, delete it from the list and update hash vector
	 */
	list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) {
		if (ether_addr_equal(entry->addr, mac_addr)) {
			list_del(&entry->list);
			kfree(entry);
			return cxgb4_set_addr_hash(pi);
		}
	}

	ret = cxgb4_free_mac_filt(adap, pi->viid, 1, maclist, false);
	return ret < 0 ? -EINVAL : 0;
}
/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	__dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
	__dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu,
			     (dev->flags & IFF_PROMISC) ? 1 : 0,
			     (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
			     sleep_ok);
}
/*
 *	cxgb4_change_mac - Update match filter for a MAC address.
 *	@pi: the port_info
 *	@viid: the VI id
 *	@tcam_idx: TCAM index of existing filter for old value of MAC address,
 *		   or -1
 *	@addr: the new MAC address value
 *	@persist: whether a new MAC allocation should be persistent
 *	@smt_idx: the destination to store the new SMT index
 *
 *	Modifies an MPS filter and sets it to the new MAC address if
 *	@tcam_idx >= 0, or adds the MAC address to a new filter if
 *	@tcam_idx < 0.  In the latter case the address is added persistently
 *	if @persist is %true.
 *	Addresses are programmed into the hash region if the TCAM runs out
 *	of entries.
 */
int cxgb4_change_mac(struct port_info *pi, unsigned int viid,
		     int *tcam_idx, const u8 *addr, bool persist,
		     u8 *smt_idx)
{
	struct adapter *adapter = pi->adapter;
	struct hash_mac_addr *entry, *new_entry;
	int ret;

	ret = t4_change_mac(adapter, adapter->mbox, viid,
			    *tcam_idx, addr, persist, smt_idx);
	/* We ran out of TCAM entries.  Try programming the hash region. */
	if (ret == -ENOMEM) {
		/* If the MAC address to be updated is in the hash addr
		 * list, update it from the list
		 */
		list_for_each_entry(entry, &adapter->mac_hlist, list) {
			if (entry->iface_mac) {
				ether_addr_copy(entry->addr, addr);
				goto set_hash;
			}
		}
		new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
		if (!new_entry)
			return -ENOMEM;
		ether_addr_copy(new_entry->addr, addr);
		new_entry->iface_mac = true;
		list_add_tail(&new_entry->list, &adapter->mac_hlist);
set_hash:
		ret = cxgb4_set_addr_hash(pi);
	} else if (ret >= 0) {
		*tcam_idx = ret;
		ret = 0;
	}

	return ret;
}
/*
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->pf;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (ret == 0)
		ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt,
					    dev->dev_addr, true, &pi->smt_idx);
	if (ret == 0)
		ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0)
		ret = t4_enable_pi_params(pi->adapter, mb, pi, true,
					  true, CXGB4_DCB_ENABLED);

	return ret;
}
#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
	int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
	struct net_device *dev = adap->port[adap->chan_map[port]];
	int old_dcb_enabled = cxgb4_dcb_enabled(dev);
	int new_dcb_enabled;

	cxgb4_dcb_handle_fw_update(adap, pcmd);
	new_dcb_enabled = cxgb4_dcb_enabled(dev);

	/* If the DCB has become enabled or disabled on the port then we're
	 * going to need to set up/tear down DCB Priority parameters for the
	 * TX Queues associated with the port.
	 */
	if (new_dcb_enabled != old_dcb_enabled)
		dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
}
#endif /* CONFIG_CHELSIO_T4_DCB */
/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;                                          /* skip RSS header */

	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
	 */
	if (unlikely(opcode == CPL_FW4_MSG &&
		     ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
		rsp++;
		opcode = ((const struct rss_header *)rsp)->opcode;
		rsp++;
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n",
				opcode);
			goto out;
		}
	}

	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
		struct sge_txq *txq;

		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		txq->restarts++;
		if (txq->q_type == CXGB4_TXQ_ETH) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			t4_sge_eth_txq_egress_update(q->adap, eq, -1);
		} else {
			struct sge_uld_txq *oq;

			oq = container_of(txq, struct sge_uld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
		const struct fw_port_cmd *pcmd = (const void *)p->data;
		unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
		unsigned int action =
			FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));

		if (cmd == FW_PORT_CMD &&
		    (action == FW_PORT_ACTION_GET_PORT_INFO ||
		     action == FW_PORT_ACTION_GET_PORT_INFO32)) {
			int port = FW_PORT_CMD_PORTID_G(
					be32_to_cpu(pcmd->op_to_portid));
			struct net_device *dev;
			int dcbxdis, state_input;

			dev = q->adap->port[q->adap->chan_map[port]];
			dcbxdis = (action == FW_PORT_ACTION_GET_PORT_INFO
				   ? !!(pcmd->u.info.dcbxdis_pkd & FW_PORT_CMD_DCBXDIS_F)
				   : !!(be32_to_cpu(pcmd->u.info32.lstatus32_to_cbllen32)
				      & FW_PORT_CMD_DCBXDIS32_F));
			state_input = (dcbxdis
				       ? CXGB4_DCB_INPUT_FW_DISABLED
				       : CXGB4_DCB_INPUT_FW_ENABLED);

			cxgb4_dcb_state_fsm(dev, state_input);
		}

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_L2_DCB_CFG)
			dcb_rpl(q->adap, pcmd);
		else
#endif
			if (p->type == 0)
				t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else if (opcode == CPL_SMT_WRITE_RPL) {
		const struct cpl_smt_write_rpl *p = (void *)rsp;

		do_smt_write_rpl(q->adap, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (void *)rsp;

		filter_rpl(q->adap, p);
	} else if (opcode == CPL_ACT_OPEN_RPL) {
		const struct cpl_act_open_rpl *p = (void *)rsp;

		hash_filter_rpl(q->adap, p);
	} else if (opcode == CPL_ABORT_RPL_RSS) {
		const struct cpl_abort_rpl_rss *p = (void *)rsp;

		hash_del_filter_rpl(q->adap, p);
	} else if (opcode == CPL_SRQ_TABLE_RPL) {
		const struct cpl_srq_table_rpl *p = (void *)rsp;

		do_srq_table_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
out:
	return 0;
}
static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & CXGB4_USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~CXGB4_USING_MSIX;
	} else if (adapter->flags & CXGB4_USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~CXGB4_USING_MSI;
	}
}
/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;
	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));

	if (v & PFSW_F) {
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
	}
	if (adap->flags & CXGB4_MASTER_PF)
		t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}
int cxgb4_set_msix_aff(struct adapter *adap, unsigned short vec,
		       cpumask_var_t *aff_mask, int idx)
{
	int rv;

	if (!zalloc_cpumask_var(aff_mask, GFP_KERNEL)) {
		dev_err(adap->pdev_dev, "alloc_cpumask_var failed\n");
		return -ENOMEM;
	}

	cpumask_set_cpu(cpumask_local_spread(idx, dev_to_node(adap->pdev_dev)),
			*aff_mask);

	rv = irq_set_affinity_hint(vec, *aff_mask);
	if (rv)
		dev_warn(adap->pdev_dev,
			 "irq_set_affinity_hint %u failed %d\n",
			 vec, rv);

	return 0;
}

void cxgb4_clear_msix_aff(unsigned short vec, cpumask_var_t aff_mask)
{
	irq_set_affinity_hint(vec, NULL);
	free_cpumask_var(aff_mask);
}
static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	struct msix_info *minfo;
	int err, ethqidx;

	if (s->fwevtq_msix_idx < 0)
		return -ENOMEM;

	err = request_irq(adap->msix_info[s->fwevtq_msix_idx].vec,
			  t4_sge_intr_msix, 0,
			  adap->msix_info[s->fwevtq_msix_idx].desc,
			  &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		minfo = s->ethrxq[ethqidx].msix;
		err = request_irq(minfo->vec,
				  t4_sge_intr_msix, 0,
				  minfo->desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;

		cxgb4_set_msix_aff(adap, minfo->vec,
				   &minfo->aff_mask, ethqidx);
	}
	return 0;

unwind:
	while (--ethqidx >= 0) {
		minfo = s->ethrxq[ethqidx].msix;
		cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
		free_irq(minfo->vec, &s->ethrxq[ethqidx].rspq);
	}
	free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq);
	return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	struct msix_info *minfo;
	int i;

	free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq);
	for_each_ethrxq(s, i) {
		minfo = s->ethrxq[i].msix;
		cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
		free_irq(minfo->vec, &s->ethrxq[i].rspq);
	}
}
static int setup_ppod_edram(struct adapter *adap)
{
	unsigned int param, val;
	int ret;

	/* Driver sends FW_PARAMS_PARAM_DEV_PPOD_EDRAM read command to check
	 * if firmware supports ppod edram feature or not. If firmware
	 * returns 1, then driver can enable this feature by sending
	 * FW_PARAMS_PARAM_DEV_PPOD_EDRAM write command with value 1 to
	 * enable ppod edram feature.
	 */
	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PPOD_EDRAM));

	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
	if (ret < 0) {
		dev_warn(adap->pdev_dev,
			 "querying PPOD_EDRAM support failed: %d\n",
			 ret);
		return -1;
	}

	if (val != 1)
		return -1;

	ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
	if (ret < 0) {
		dev_err(adap->pdev_dev,
			"setting PPOD_EDRAM failed: %d\n", ret);
		return -1;
	}
	return 0;
}
static void adap_config_hpfilter(struct adapter *adapter)
{
	u32 param, val = 0;
	int ret;

	/* Enable HP filter region. Older fw will fail this request and
	 * it is fine.
	 */
	param = FW_PARAM_DEV(HPFILTER_REGION_SUPPORT);
	ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0,
			    1, &param, &val);

	/* An error means FW doesn't know about HP filter support,
	 * it's not a problem, don't return an error.
	 */
	if (ret < 0)
		dev_err(adapter->pdev_dev,
			"HP filter region isn't supported by FW\n");
}
/*
 *	cxgb4_write_rss - write the RSS table for a given port
 *	@pi: the port
 *	@queues: array of queue indices for RSS
 *
 *	Sets up the portion of the HW RSS table for the port's VI to distribute
 *	packets to the Rx queues in @queues.
 *	Should never be called before setting up sge eth rx queues
 */
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
{
	u16 *rss;
	int i, err;
	struct adapter *adapter = pi->adapter;
	const struct sge_eth_rxq *rxq;

	rxq = &adapter->sge.ethrxq[pi->first_qset];
	rss = kmalloc_array(pi->rss_size, sizeof(u16), GFP_KERNEL);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = rxq[*queues].rspq.abs_id;

	err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
				  pi->rss_size, rss, pi->rss_size);
	/* If Tunnel All Lookup isn't specified in the global RSS
	 * Configuration, then we need to specify a default Ingress
	 * Queue for any ingress packets which aren't hashed.  We'll
	 * use our first ingress queue ...
	 */
	if (!err)
		err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
				       FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_UDPEN_F,
				       rss[0]);
	kfree(rss);
	return err;
}
/*
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
	int i, j, err;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		/* Fill default values with equal distribution */
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = j % pi->nqsets;

		err = cxgb4_write_rss(pi, pi->rss);
		if (err)
			return err;
	}
	return 0;
}
/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
	qid -= p->ingr_start;
	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

void cxgb4_quiesce_rx(struct sge_rspq *q)
{
	if (q->handler)
		napi_disable(&q->napi);
}
/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;

		cxgb4_quiesce_rx(q);
	}
}
/* Disable interrupt and napi handler */
static void disable_interrupts(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	if (adap->flags & CXGB4_FULL_INIT_DONE) {
		t4_intr_disable(adap);
		if (adap->flags & CXGB4_USING_MSIX) {
			free_msix_queue_irqs(adap);
			free_irq(adap->msix_info[s->nd_msix_idx].vec,
				 adap);
		} else {
			free_irq(adap->pdev->irq, adap);
		}
		quiesce_rx(adap);
	}
}
void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q)
{
	if (q->handler)
		napi_enable(&q->napi);

	/* 0-increment GTS to start the timer and enable interrupts */
	t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
		     SEINTARM_V(q->intr_params) |
		     INGRESSQID_V(q->cntxt_id));
}
/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;

		cxgb4_enable_rx(adap, q);
	}
}
static int setup_non_data_intr(struct adapter *adap)
{
	int msix;

	adap->sge.nd_msix_idx = -1;
	if (!(adap->flags & CXGB4_USING_MSIX))
		return 0;

	/* Request MSI-X vector for non-data interrupt */
	msix = cxgb4_get_msix_idx_from_bmap(adap);
	if (msix < 0)
		return -ENOMEM;

	snprintf(adap->msix_info[msix].desc,
		 sizeof(adap->msix_info[msix].desc),
		 "%s", adap->port[0]->name);

	adap->sge.nd_msix_idx = msix;
	return 0;
}
static int setup_fw_sge_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int msix, err = 0;

	bitmap_zero(s->starving_fl, s->egr_sz);
	bitmap_zero(s->txq_maperr, s->egr_sz);

	if (adap->flags & CXGB4_USING_MSIX) {
		s->fwevtq_msix_idx = -1;
		msix = cxgb4_get_msix_idx_from_bmap(adap);
		if (msix < 0)
			return -ENOMEM;

		snprintf(adap->msix_info[msix].desc,
			 sizeof(adap->msix_info[msix].desc),
			 "%s-FWeventq", adap->port[0]->name);
	} else {
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL, NULL, -1);
		if (err)
			return err;
		msix = -((int)s->intrq.abs_id + 1);
	}

	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       msix, NULL, fwevtq_handler, NULL, -1);
	if (err && msix >= 0)
		cxgb4_free_msix_idx_in_bmap(adap, msix);

	s->fwevtq_msix_idx = msix;
	return err;
}
/*
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
	struct sge_uld_rxq_info *rxq_info = NULL;
	struct sge *s = &adap->sge;
	unsigned int cmplqid = 0;
	int err, i, j, msix = 0;

	if (is_uld(adap))
		rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];

	if (!(adap->flags & CXGB4_USING_MSIX))
		msix = -((int)s->intrq.abs_id + 1);

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			if (msix >= 0) {
				msix = cxgb4_get_msix_idx_from_bmap(adap);
				if (msix < 0) {
					err = msix;
					goto freeout;
				}

				snprintf(adap->msix_info[msix].desc,
					 sizeof(adap->msix_info[msix].desc),
					 "%s-Rx%d", dev->name, j);
				q->msix = &adap->msix_info[msix];
			}

			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       msix, &q->fl,
					       t4_ethrx_handler, NULL,
					       t4_get_tp_ch_map(adap,
								pi->tx_chan));
			if (err)
				goto freeout;
			q->rspq.idx = j;
			memset(&q->stats, 0, sizeof(q->stats));
		}

		q = &s->ethrxq[pi->first_qset];
		for (j = 0; j < pi->nqsets; j++, t++, q++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
						   netdev_get_tx_queue(dev, j),
						   q->rspq.cntxt_id,
						   !!(adap->flags & CXGB4_SGE_DBQ_TIMER));
			if (err)
				goto freeout;
		}
	}

	for_each_port(adap, i) {
		/* Note that cmplqid below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		if (rxq_info)
			cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;

		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id, cmplqid);
		if (err)
			goto freeout;
	}

	if (!is_t4(adap->params.chip)) {
		err = t4_sge_alloc_eth_txq(adap, &s->ptptxq, adap->port[0],
					   netdev_get_tx_queue(adap->port[0], 0),
					   s->fw_evtq.cntxt_id, false);
		if (err)
			goto freeout;
	}

	t4_write_reg(adap, is_t4(adap->params.chip) ?
				MPS_TRC_RSS_CONTROL_A :
				MPS_T5_TRC_RSS_CONTROL_A,
		     RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
	return 0;
freeout:
	dev_err(adap->pdev_dev, "Can't allocate queues, err=%d\n", -err);
	t4_free_sge_resources(adap);
	return err;
}
static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
			     struct net_device *sb_dev)
{
	int txq;

#ifdef CONFIG_CHELSIO_T4_DCB
	/* If Data Center Bridging has been successfully negotiated on this
	 * link then we'll use the skb's priority to map it to a TX Queue.
	 * The skb's priority is determined via the VLAN Tag Priority Code
	 * Point field.
	 */
	if (cxgb4_dcb_enabled(dev) && !is_kdump_kernel()) {
		u16 vlan_tci;
		int err;

		err = vlan_get_tag(skb, &vlan_tci);
		if (unlikely(err)) {
			if (net_ratelimit())
				netdev_warn(dev,
					    "TX Packet without VLAN Tag on DCB Link\n");
			txq = 0;
		} else {
			txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
#ifdef CONFIG_CHELSIO_T4_FCOE
			if (skb->protocol == htons(ETH_P_FCOE))
				txq = skb->priority & 0x7;
#endif /* CONFIG_CHELSIO_T4_FCOE */
		}
		return txq;
	}
#endif /* CONFIG_CHELSIO_T4_DCB */

	if (dev->num_tc) {
		struct port_info *pi = netdev2pinfo(dev);
		u8 ver, proto;

		ver = ip_hdr(skb)->version;
		proto = (ver == 6) ? ipv6_hdr(skb)->nexthdr :
				     ip_hdr(skb)->protocol;

		/* Send unsupported traffic pattern to normal NIC queues. */
		txq = netdev_pick_tx(dev, skb, sb_dev);
		if (xfrm_offload(skb) || is_ptp_enabled(skb, dev) ||
		    skb->encapsulation ||
		    (proto != IPPROTO_TCP && proto != IPPROTO_UDP))
			txq = txq % pi->nqsets;

		return txq;
	}

	if (select_queue) {
		txq = (skb_rx_queue_recorded(skb)
			? skb_get_rx_queue(skb)
			: smp_processor_id());

		while (unlikely(txq >= dev->real_num_tx_queues))
			txq -= dev->real_num_tx_queues;

		return txq;
	}

	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
}
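/* Worked example for the DCB path above (illustrative): a VLAN TCI of
 * 0xA005 has Priority Code Point
 *
 *	(0xA005 & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT = 0xA000 >> 13 = 5
 *
 * so the skb is steered to TX queue 5.
 */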
static int closest_timer(const struct sge *s, int time)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}
/*
 *	cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
 *	@q: the Rx queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *	one of the two needs to be enabled for the queue to generate interrupts.
 */
int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
			       unsigned int us, unsigned int cnt)
{
	struct adapter *adap = q->adap;

	if ((us | cnt) == 0)
		cnt = 1;

	if (cnt) {
		int err;
		u32 v, new_idx;

		new_idx = closest_thres(&adap->sge, cnt);
		if (q->desc && q->pktcnt_idx != new_idx) {
			/* the queue has already been created, update it */
			v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X_V(
					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
			err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
					    &v, &new_idx);
			if (err)
				return err;
		}
		q->pktcnt_idx = new_idx;
	}

	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
	q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
	return 0;
}
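/* Usage sketch (illustrative): a caller that wants an interrupt after at
 * most 5us or 8 packets, whichever comes first, would do
 *
 *	err = cxgb4_set_rspq_intr_params(&q->rspq, 5, 8);
 *
 * where both values are snapped to the nearest entries of the adapter's
 * sge.timer_val[] and sge.counter_val[] tables by the helpers above.
 */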
static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
	const struct port_info *pi = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;
	int err;

	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
		return 0;

	err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1,
			    -1, -1, -1,
			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (unlikely(err))
		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
	return err;
}
static int setup_debugfs(struct adapter *adap)
{
	if (IS_ERR_OR_NULL(adap->debugfs_root))
		return -1;

#ifdef CONFIG_DEBUG_FS
	t4_setup_debugfs(adap);
#endif
	return 0;
}
/*
 * upper-layer driver support
 */

/*
 * Allocate an active-open TID and set it to the supplied value.
 */
int cxgb4_alloc_atid(struct tid_info *t, void *data)
{
	int atid = -1;

	spin_lock_bh(&t->atid_lock);
	if (t->afree) {
		union aopen_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->data = data;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}
EXPORT_SYMBOL(cxgb4_alloc_atid);
/*
 * Release an active-open TID.
 */
void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
{
	union aopen_entry *p = &t->atid_tab[atid - t->atid_base];

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);
}
EXPORT_SYMBOL(cxgb4_free_atid);
/*
 * Allocate a server TID and set it to the supplied value.
 */
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
		if (stid < t->nstids)
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1);
		if (stid < 0)
			stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid += t->stid_base;
		/* IPv6 requires max of 520 bits or 16 cells in TCAM
		 * This is equivalent to 4 TIDs. With CLIP enabled it
		 * needs 2 TIDs.
		 */
		if (family == PF_INET6) {
			t->stids_in_use += 2;
			t->v6_stids_in_use += 2;
		} else {
			t->stids_in_use++;
		}
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_stid);
/* Allocate a server filter TID and set it to the supplied value.
 */
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_next_zero_bit(t->stid_bmap,
					  t->nstids + t->nsftids, t->nstids);
		if (stid < (t->nstids + t->nsftids))
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid -= t->nstids;
		stid += t->sftid_base;
		t->sftids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_sftid);
/* Release a server TID.
 */
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
	/* Is it a server filter TID? */
	if (t->nsftids && (stid >= t->sftid_base)) {
		stid -= t->sftid_base;
		stid += t->nstids;
	} else {
		stid -= t->stid_base;
	}

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET)
		__clear_bit(stid, t->stid_bmap);
	else
		bitmap_release_region(t->stid_bmap, stid, 1);
	t->stid_tab[stid].data = NULL;
	if (stid < t->nstids) {
		if (family == PF_INET6) {
			t->stids_in_use -= 2;
			t->v6_stids_in_use -= 2;
		} else {
			t->stids_in_use--;
		}
	} else {
		t->sftids_in_use--;
	}

	spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);
/*
 * Populate a TID_RELEASE WR.  Caller must properly size the skb.
 */
static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
			   unsigned int tid)
{
	struct cpl_tid_release *req;

	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}
/*
 * Queue a TID release request and if necessary schedule a work queue to
 * process it.
 */
static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
				    unsigned int tid)
{
	struct adapter *adap = container_of(t, struct adapter, tids);
	void **p = &t->tid_tab[tid - t->tid_base];

	spin_lock_bh(&adap->tid_release_lock);
	*p = adap->tid_release_head;
	/* Low 2 bits encode the Tx channel number */
	adap->tid_release_head = (void **)((uintptr_t)p | chan);
	if (!adap->tid_release_task_busy) {
		adap->tid_release_task_busy = true;
		queue_work(adap->workq, &adap->tid_release_task);
	}
	spin_unlock_bh(&adap->tid_release_lock);
}
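/* Illustrative decoding of the tagged list head (see
 * process_tid_release_list() below): because tid_tab entries are
 * pointer-aligned, the low 2 bits of the stored address are free to carry
 * the Tx channel, e.g.
 *
 *	head = adap->tid_release_head;
 *	chan = (uintptr_t)head & 3;		// recover the channel
 *	p    = (void *)head - chan;		// recover the entry address
 */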
/*
 * Process the list of pending TID release requests.
 */
static void process_tid_release_list(struct work_struct *work)
{
	struct sk_buff *skb;
	struct adapter *adap;

	adap = container_of(work, struct adapter, tid_release_task);

	spin_lock_bh(&adap->tid_release_lock);
	while (adap->tid_release_head) {
		void **p = adap->tid_release_head;
		unsigned int chan = (uintptr_t)p & 3;
		p = (void *)p - chan;

		adap->tid_release_head = *p;
		*p = NULL;
		spin_unlock_bh(&adap->tid_release_lock);

		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
					 GFP_KERNEL)))
			schedule_timeout_uninterruptible(1);

		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
		t4_ofld_send(adap, skb);
		spin_lock_bh(&adap->tid_release_lock);
	}
	adap->tid_release_task_busy = false;
	spin_unlock_bh(&adap->tid_release_lock);
}
/*
 * Release a TID and inform HW.  If we are unable to allocate the release
 * message we defer to a work queue.
 */
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
		      unsigned short family)
{
	struct adapter *adap = container_of(t, struct adapter, tids);
	struct sk_buff *skb;

	WARN_ON(tid_out_of_range(&adap->tids, tid));

	if (t->tid_tab[tid - adap->tids.tid_base]) {
		t->tid_tab[tid - adap->tids.tid_base] = NULL;
		atomic_dec(&t->conns_in_use);
		if (t->hash_base && (tid >= t->hash_base)) {
			if (family == AF_INET6)
				atomic_sub(2, &t->hash_tids_in_use);
			else
				atomic_dec(&t->hash_tids_in_use);
		} else {
			if (family == AF_INET6)
				atomic_sub(2, &t->tids_in_use);
			else
				atomic_dec(&t->tids_in_use);
		}
	}

	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
	if (likely(skb)) {
		mk_tid_release(skb, chan, tid);
		t4_ofld_send(adap, skb);
	} else {
		cxgb4_queue_tid_release(t, chan, tid);
	}
}
EXPORT_SYMBOL(cxgb4_remove_tid);
/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int tid_init(struct tid_info *t)
{
	struct adapter *adap = container_of(t, struct adapter, tids);
	unsigned int max_ftids = t->nftids + t->nsftids;
	unsigned int natids = t->natids;
	unsigned int hpftid_bmap_size;
	unsigned int eotid_bmap_size;
	unsigned int stid_bmap_size;
	unsigned int ftid_bmap_size;
	size_t size;

	stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
	ftid_bmap_size = BITS_TO_LONGS(t->nftids);
	hpftid_bmap_size = BITS_TO_LONGS(t->nhpftids);
	eotid_bmap_size = BITS_TO_LONGS(t->neotids);
	size = t->ntids * sizeof(*t->tid_tab) +
	       natids * sizeof(*t->atid_tab) +
	       t->nstids * sizeof(*t->stid_tab) +
	       t->nsftids * sizeof(*t->stid_tab) +
	       stid_bmap_size * sizeof(long) +
	       t->nhpftids * sizeof(*t->hpftid_tab) +
	       hpftid_bmap_size * sizeof(long) +
	       max_ftids * sizeof(*t->ftid_tab) +
	       ftid_bmap_size * sizeof(long) +
	       t->neotids * sizeof(*t->eotid_tab) +
	       eotid_bmap_size * sizeof(long);

	t->tid_tab = kvzalloc(size, GFP_KERNEL);
	if (!t->tid_tab)
		return -ENOMEM;

	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
	t->hpftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
	t->hpftid_bmap = (unsigned long *)&t->hpftid_tab[t->nhpftids];
	t->ftid_tab = (struct filter_entry *)&t->hpftid_bmap[hpftid_bmap_size];
	t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
	t->eotid_tab = (struct eotid_entry *)&t->ftid_bmap[ftid_bmap_size];
	t->eotid_bmap = (unsigned long *)&t->eotid_tab[t->neotids];
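	/* The single kvzalloc'ed block above is carved up in this order
	 * (each cast points just past the previous region):
	 *
	 *   tid_tab | atid_tab | stid_tab (+sftids) | stid_bmap |
	 *   hpftid_tab | hpftid_bmap | ftid_tab | ftid_bmap |
	 *   eotid_tab | eotid_bmap
	 */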
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);
	spin_lock_init(&t->ftid_lock);

	t->stids_in_use = 0;
	t->v6_stids_in_use = 0;
	t->sftids_in_use = 0;
	t->afree = NULL;
	t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);
	atomic_set(&t->conns_in_use, 0);
	atomic_set(&t->hash_tids_in_use, 0);
	atomic_set(&t->eotids_in_use, 0);

	/* Setup the free list for atid_tab and clear the stid bitmap. */
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}

	if (is_offload(adap)) {
		bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
		/* Reserve stid 0 for T4/T5 adapters */
		if (!t->stid_base &&
		    CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
			__set_bit(0, t->stid_bmap);

		if (t->neotids)
			bitmap_zero(t->eotid_bmap, t->neotids);
	}

	if (t->nhpftids)
		bitmap_zero(t->hpftid_bmap, t->nhpftids);
	bitmap_zero(t->ftid_bmap, t->nftids);
	return 0;
}
/*
 *	cxgb4_create_server - create an IP server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IP address to bind server to
 *	@sport: the server's TCP port
 *	@vlan: the VLAN header information
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IP server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
			__be32 sip, __be16 sport, __be16 vlan,
			unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip = sip;
	req->peer_ip = htonl(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server);
/*	cxgb4_create_server6 - create an IPv6 server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IPv6 address to bind server to
 *	@sport: the server's TCP port
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IPv6 server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
			 const struct in6_addr *sip, __be16 sport,
			 unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req6 *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip_hi = *(__be64 *)(sip->s6_addr);
	req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
	req->peer_ip_hi = cpu_to_be64(0);
	req->peer_ip_lo = cpu_to_be64(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server6);
int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
			unsigned int queue, bool ipv6)
{
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_close_listsvr_req *req;
	int ret;

	adap = netdev2adap(dev);

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
				LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_remove_server);
/*
 *	cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
 *	@mtus: the HW MTU table
 *	@mtu: the target MTU
 *	@idx: index of selected entry in the MTU table
 *
 *	Returns the index and the value in the HW MTU table that is closest to
 *	but does not exceed @mtu, unless @mtu is smaller than any value in the
 *	table, in which case that smallest available value is selected.
 */
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
			    unsigned int *idx)
{
	unsigned int i = 0;

	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
		++i;
	if (idx)
		*idx = i;
	return mtus[i];
}
EXPORT_SYMBOL(cxgb4_best_mtu);
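/* Worked example (illustrative, hypothetical table): with
 * mtus[] = { 576, 1492, 1500, 9000 },
 *
 *	cxgb4_best_mtu(mtus, 9000, &idx)  -> 9000, idx == 3
 *	cxgb4_best_mtu(mtus, 1400, &idx)  ->  576, idx == 0  (largest <= 1400)
 *	cxgb4_best_mtu(mtus,  100, &idx)  ->  576, idx == 0  (smallest entry)
 */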
/*
 *	cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
 *	@mtus: the HW MTU table
 *	@header_size: Header Size
 *	@data_size_max: maximum Data Segment Size
 *	@data_size_align: desired Data Segment Size Alignment (2^N)
 *	@mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
 *
 *	Similar to cxgb4_best_mtu() but instead of searching the Hardware
 *	MTU Table based solely on a Maximum MTU parameter, we break that
 *	parameter up into a Header Size and Maximum Data Segment Size, and
 *	provide a desired Data Segment Size Alignment.  If we find an MTU in
 *	the Hardware MTU Table which will result in a Data Segment Size with
 *	the requested alignment _and_ that MTU isn't "too far" from the
 *	closest MTU, then we'll return that rather than the closest MTU.
 */
unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
				    unsigned short header_size,
				    unsigned short data_size_max,
				    unsigned short data_size_align,
				    unsigned int *mtu_idxp)
{
	unsigned short max_mtu = header_size + data_size_max;
	unsigned short data_size_align_mask = data_size_align - 1;
	int mtu_idx, aligned_mtu_idx;

	/* Scan the MTU Table till we find an MTU which is larger than our
	 * Maximum MTU or we reach the end of the table.  Along the way,
	 * record the last MTU found, if any, which will result in a Data
	 * Segment Length matching the requested alignment.
	 */
	for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
		unsigned short data_size = mtus[mtu_idx] - header_size;

		/* If this MTU minus the Header Size would result in a
		 * Data Segment Size of the desired alignment, remember it.
		 */
		if ((data_size & data_size_align_mask) == 0)
			aligned_mtu_idx = mtu_idx;

		/* If we're not at the end of the Hardware MTU Table and the
		 * next element is larger than our Maximum MTU, drop out of
		 * the loop.
		 */
		if (mtu_idx + 1 < NMTUS && mtus[mtu_idx + 1] > max_mtu)
			break;
	}

	/* If we fell out of the loop because we ran to the end of the table,
	 * then we just have to use the last [largest] entry.
	 */
	if (mtu_idx == NMTUS)
		mtu_idx--;

	/* If we found an MTU which resulted in the requested Data Segment
	 * Length alignment and that's "not far" from the largest MTU which is
	 * less than or equal to the maximum MTU, then use that.
	 */
	if (aligned_mtu_idx >= 0 &&
	    mtu_idx - aligned_mtu_idx <= 1)
		mtu_idx = aligned_mtu_idx;

	/* If the caller has passed in an MTU Index pointer, pass the
	 * MTU Index back.  Return the MTU value.
	 */
	if (mtu_idxp)
		*mtu_idxp = mtu_idx;
	return mtus[mtu_idx];
}
EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
/*
 *	cxgb4_port_chan - get the HW channel of a port
 *	@dev: the net device for the port
 *
 *	Return the HW Tx channel of the given port.
 */
unsigned int cxgb4_port_chan(const struct net_device *dev)
{
	return netdev2pinfo(dev)->tx_chan;
}
EXPORT_SYMBOL(cxgb4_port_chan);

/*
 *	cxgb4_port_e2cchan - get the HW c-channel of a port
 *	@dev: the net device for the port
 *
 *	Return the HW RX c-channel of the given port.
 */
unsigned int cxgb4_port_e2cchan(const struct net_device *dev)
{
	return netdev2pinfo(dev)->rx_cchan;
}
EXPORT_SYMBOL(cxgb4_port_e2cchan);
unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
{
	struct adapter *adap = netdev2adap(dev);
	u32 v1, v2, lp_count, hp_count;

	v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
	v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
	if (is_t4(adap->params.chip)) {
		lp_count = LP_COUNT_G(v1);
		hp_count = HP_COUNT_G(v1);
	} else {
		lp_count = LP_COUNT_T5_G(v1);
		hp_count = HP_COUNT_T5_G(v2);
	}
	return lpfifo ? lp_count : hp_count;
}
EXPORT_SYMBOL(cxgb4_dbfifo_count);
/*
 *	cxgb4_port_viid - get the VI id of a port
 *	@dev: the net device for the port
 *
 *	Return the VI id of the given port.
 */
unsigned int cxgb4_port_viid(const struct net_device *dev)
{
	return netdev2pinfo(dev)->viid;
}
EXPORT_SYMBOL(cxgb4_port_viid);

/*
 *	cxgb4_port_idx - get the index of a port
 *	@dev: the net device for the port
 *
 *	Return the index of the given port.
 */
unsigned int cxgb4_port_idx(const struct net_device *dev)
{
	return netdev2pinfo(dev)->port_id;
}
EXPORT_SYMBOL(cxgb4_port_idx);
void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	struct adapter *adap = pci_get_drvdata(pdev);

	spin_lock(&adap->stats_lock);
	t4_tp_get_tcp_stats(adap, v4, v6, false);
	spin_unlock(&adap->stats_lock);
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);
void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
		      const unsigned int *pgsz_order)
{
	struct adapter *adap = netdev2adap(dev);

	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
	t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
		     HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
		     HPZ3_V(pgsz_order[3]));
}
EXPORT_SYMBOL(cxgb4_iscsi_init);
int cxgb4_flush_eq_cache(struct net_device *dev)
{
	struct adapter *adap = netdev2adap(dev);

	return t4_sge_ctxt_flush(adap, adap->mbox, CTXT_EGRESS);
}
EXPORT_SYMBOL(cxgb4_flush_eq_cache);
static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
{
	u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
	__be64 indices;
	int ret;

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
			   sizeof(indices), (__be32 *)&indices,
			   T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	if (!ret) {
		*cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
		*pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
	}
	return ret;
}
int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
			u16 size)
{
	struct adapter *adap = netdev2adap(dev);
	u16 hw_pidx, hw_cidx;
	int ret;

	ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
	if (ret)
		return ret;

	if (pidx != hw_pidx) {
		u16 delta;
		u32 val;

		if (pidx >= hw_pidx)
			delta = pidx - hw_pidx;
		else
			delta = size - hw_pidx + pidx;

		if (is_t4(adap->params.chip))
			val = PIDX_V(delta);
		else
			val = PIDX_T5_V(delta);
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(qid) | val);
	}
	return 0;
}
EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
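/* Worked example for the delta computation above (illustrative): for a
 * queue of size 1024 with hw_pidx == 1000 and a driver pidx of 10, the
 * producer index has wrapped, so delta = 1024 - 1000 + 10 = 34 doorbell
 * increments bring the hardware copy back in sync.
 */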
int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
{
	u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
	u32 edc0_end, edc1_end, mc0_end, mc1_end;
	u32 offset, memtype, memaddr;
	struct adapter *adap;
	u32 hma_size = 0;
	int ret;

	adap = netdev2adap(dev);

	offset = ((stag >> 8) * 32) + adap->vres.stag.start;

	/* Figure out where the offset lands in the Memory Type/Address scheme.
	 * This code assumes that the memory is laid out starting at offset 0
	 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
	 * and EDC1.  Some cards will have neither MC0 nor MC1, most cards have
	 * MC0, and some have both MC0 and MC1.
	 */
	size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
	edc0_size = EDRAM0_SIZE_G(size) << 20;
	size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
	edc1_size = EDRAM1_SIZE_G(size) << 20;
	size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
	mc0_size = EXT_MEM0_SIZE_G(size) << 20;

	if (t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A) & HMA_MUX_F) {
		size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
		hma_size = EXT_MEM1_SIZE_G(size) << 20;
	}
	edc0_end = edc0_size;
	edc1_end = edc0_end + edc1_size;
	mc0_end = edc1_end + mc0_size;

	if (offset < edc0_end) {
		memtype = MEM_EDC0;
		memaddr = offset;
	} else if (offset < edc1_end) {
		memtype = MEM_EDC1;
		memaddr = offset - edc0_end;
	} else {
		if (hma_size && (offset < (edc1_end + hma_size))) {
			memtype = MEM_HMA;
			memaddr = offset - edc1_end;
		} else if (offset < mc0_end) {
			memtype = MEM_MC0;
			memaddr = offset - edc1_end;
		} else if (is_t5(adap->params.chip)) {
			size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			mc1_size = EXT_MEM1_SIZE_G(size) << 20;
			mc1_end = mc0_end + mc1_size;
			if (offset < mc1_end) {
				memtype = MEM_MC1;
				memaddr = offset - mc0_end;
			} else {
				/* offset beyond the end of any memory */
				goto err;
			}
		} else {
			/* T4/T6 only has a single memory channel */
			goto err;
		}
	}

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	return ret;

err:
	dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
		stag, offset);
	return -EINVAL;
}
EXPORT_SYMBOL(cxgb4_read_tpte);
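/* Illustrative address map for the lookup above (sizes are per-card): the
 * flat offset is resolved against consecutive regions
 *
 *	[0, edc0_end)        -> MEM_EDC0
 *	[edc0_end, edc1_end) -> MEM_EDC1
 *	[edc1_end, ...)      -> MEM_HMA (if HMA_MUX is set) or MEM_MC0,
 *	                        then MEM_MC1 on T5
 *
 * with memaddr rebased to the start of the matching region.
 */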
u64 cxgb4_read_sge_timestamp(struct net_device *dev)
{
	u32 lo, hi;
	struct adapter *adap;

	adap = netdev2adap(dev);
	lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
	hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));

	return ((u64)hi << 32) | (u64)lo;
}
EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
int cxgb4_bar2_sge_qregs(struct net_device *dev,
			 unsigned int qid,
			 enum cxgb4_bar2_qtype qtype,
			 int user,
			 u64 *pbar2_qoffset,
			 unsigned int *pbar2_qid)
{
	return t4_bar2_sge_qregs(netdev2adap(dev),
				 qid,
				 (qtype == CXGB4_BAR2_QTYPE_EGRESS
				  ? T4_BAR2_QTYPE_EGRESS
				  : T4_BAR2_QTYPE_INGRESS),
				 user,
				 pbar2_qoffset,
				 pbar2_qid);
}
EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);
static struct pci_driver cxgb4_driver;

static void check_neigh_update(struct neighbour *neigh)
{
	const struct device *parent;
	const struct net_device *netdev = neigh->dev;

	if (is_vlan_dev(netdev))
		netdev = vlan_dev_real_dev(netdev);
	parent = netdev->dev.parent;
	if (parent && parent->driver == &cxgb4_driver.driver)
		t4_l2t_update(dev_get_drvdata(parent), neigh);
}
static int netevent_cb(struct notifier_block *nb, unsigned long event,
		       void *data)
{
	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		check_neigh_update(data);
		break;
	case NETEVENT_REDIRECT:
	default:
		break;
	}
	return 0;
}

static bool netevent_registered;
static struct notifier_block cxgb4_netevent_nb = {
	.notifier_call = netevent_cb
};
static void drain_db_fifo(struct adapter *adap, int usecs)
{
	u32 v1, v2, lp_count, hp_count;

	do {
		v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
		v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
		if (is_t4(adap->params.chip)) {
			lp_count = LP_COUNT_G(v1);
			hp_count = HP_COUNT_G(v1);
		} else {
			lp_count = LP_COUNT_T5_G(v1);
			hp_count = HP_COUNT_T5_G(v2);
		}

		if (lp_count == 0 && hp_count == 0)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(usecs));
	} while (1);
}
static void disable_txq_db(struct sge_txq *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->db_lock, flags);
	q->db_disabled = 1;
	spin_unlock_irqrestore(&q->db_lock, flags);
}
static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
{
	spin_lock_irq(&q->db_lock);
	if (q->db_pidx_inc) {
		/* Make sure that all writes to the TX descriptors
		 * are committed before we tell HW about them.
		 */
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
		q->db_pidx_inc = 0;
	}
	q->db_disabled = 0;
	spin_unlock_irq(&q->db_lock);
}
static void disable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ethtxq[i].q);
	if (is_offload(adap)) {
		struct sge_uld_txq_info *txq_info =
			adap->sge.uld_txq_info[CXGB4_TX_OFLD];

		if (txq_info) {
			for_each_ofldtxq(&adap->sge, i) {
				struct sge_uld_txq *txq = &txq_info->uldtxq[i];

				disable_txq_db(&txq->q);
			}
		}
	}
	for_each_port(adap, i)
		disable_txq_db(&adap->sge.ctrlq[i].q);
}
static void enable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		enable_txq_db(adap, &adap->sge.ethtxq[i].q);
	if (is_offload(adap)) {
		struct sge_uld_txq_info *txq_info =
			adap->sge.uld_txq_info[CXGB4_TX_OFLD];

		if (txq_info) {
			for_each_ofldtxq(&adap->sge, i) {
				struct sge_uld_txq *txq = &txq_info->uldtxq[i];

				enable_txq_db(adap, &txq->q);
			}
		}
	}
	for_each_port(adap, i)
		enable_txq_db(adap, &adap->sge.ctrlq[i].q);
}
static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
{
	enum cxgb4_uld type = CXGB4_ULD_RDMA;

	if (adap->uld && adap->uld[type].handle)
		adap->uld[type].control(adap->uld[type].handle, cmd);
}
static void process_db_full(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_full_task);

	drain_db_fifo(adap, dbfifo_drain_delay);
	enable_dbs(adap);
	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
	else
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
}
static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
{
	u16 hw_pidx, hw_cidx;
	int ret;

	spin_lock_irq(&q->db_lock);
	ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;
	if (q->db_pidx != hw_pidx) {
		u16 delta;
		u32 val;

		if (q->db_pidx >= hw_pidx)
			delta = q->db_pidx - hw_pidx;
		else
			delta = q->size - hw_pidx + q->db_pidx;

		if (is_t4(adap->params.chip))
			val = PIDX_V(delta);
		else
			val = PIDX_T5_V(delta);
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | val);
	}
out:
	q->db_disabled = 0;
	q->db_pidx_inc = 0;
	spin_unlock_irq(&q->db_lock);
	if (ret)
		CH_WARN(adap, "DB drop recovery failed.\n");
}
static void recover_all_queues(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
	if (is_offload(adap)) {
		struct sge_uld_txq_info *txq_info =
			adap->sge.uld_txq_info[CXGB4_TX_OFLD];

		if (txq_info) {
			for_each_ofldtxq(&adap->sge, i) {
				struct sge_uld_txq *txq = &txq_info->uldtxq[i];

				sync_txq_pidx(adap, &txq->q);
			}
		}
	}
	for_each_port(adap, i)
		sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
}
static void process_db_drop(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_drop_task);

	if (is_t4(adap->params.chip)) {
		drain_db_fifo(adap, dbfifo_drain_delay);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
		drain_db_fifo(adap, dbfifo_drain_delay);
		recover_all_queues(adap);
		drain_db_fifo(adap, dbfifo_drain_delay);
		enable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	} else if (is_t5(adap->params.chip)) {
		u32 dropped_db = t4_read_reg(adap, 0x010ac);
		u16 qid = (dropped_db >> 15) & 0x1ffff;
		u16 pidx_inc = dropped_db & 0x1fff;
		u64 bar2_qoffset;
		unsigned int bar2_qid;
		int ret;

		ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
					0, &bar2_qoffset, &bar2_qid);
		if (ret)
			dev_err(adap->pdev_dev, "doorbell drop recovery: "
				"qid=%d, pidx_inc=%d\n", qid, pidx_inc);
		else
			writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
			       adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);

		/* Re-enable BAR2 WC */
		t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
	}

	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
}
void t4_db_full(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
		queue_work(adap->workq, &adap->db_full_task);
	}
}

void t4_db_dropped(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
	}
	queue_work(adap->workq, &adap->db_drop_task);
}

void t4_register_netevent_notifier(void)
{
	if (!netevent_registered) {
		register_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = true;
	}
}
static void detach_ulds(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	list_del(&adap->list_node);

	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld && adap->uld[i].handle)
			adap->uld[i].state_change(adap->uld[i].handle,
						  CXGB4_STATE_DETACH);

	if (netevent_registered && list_empty(&adapter_list)) {
		unregister_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = false;
	}
	mutex_unlock(&uld_mutex);
}
static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld && adap->uld[i].handle)
			adap->uld[i].state_change(adap->uld[i].handle,
						  new_state);
	mutex_unlock(&uld_mutex);
}
#if IS_ENABLED(CONFIG_IPV6)
static int cxgb4_inet6addr_handler(struct notifier_block *this,
				   unsigned long event, void *data)
{
	struct inet6_ifaddr *ifa = data;
	struct net_device *event_dev = ifa->idev->dev;
	const struct device *parent = NULL;
#if IS_ENABLED(CONFIG_BONDING)
	struct adapter *adap;
#endif
	if (is_vlan_dev(event_dev))
		event_dev = vlan_dev_real_dev(event_dev);
#if IS_ENABLED(CONFIG_BONDING)
	if (event_dev->flags & IFF_MASTER) {
		list_for_each_entry(adap, &adapter_list, list_node) {
			switch (event) {
			case NETDEV_UP:
				cxgb4_clip_get(adap->port[0],
					       (const u32 *)ifa, 1);
				break;
			case NETDEV_DOWN:
				cxgb4_clip_release(adap->port[0],
						   (const u32 *)ifa, 1);
				break;
			default:
				break;
			}
		}
		return NOTIFY_OK;
	}
#endif

	if (event_dev)
		parent = event_dev->dev.parent;

	if (parent && parent->driver == &cxgb4_driver.driver) {
		switch (event) {
		case NETDEV_UP:
			cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
			break;
		case NETDEV_DOWN:
			cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
			break;
		default:
			break;
		}
	}
	return NOTIFY_OK;
}
static bool inet6addr_registered;
static struct notifier_block cxgb4_inet6addr_notifier = {
	.notifier_call = cxgb4_inet6addr_handler
};

static void update_clip(const struct adapter *adap)
{
	int i;
	struct net_device *dev;
	int ret;

	rcu_read_lock();

	for (i = 0; i < MAX_NPORTS; i++) {
		dev = adap->port[i];
		ret = 0;

		if (dev)
			ret = cxgb4_update_root_dev_clip(dev);

		if (ret < 0)
			break;
	}
	rcu_read_unlock();
}
#endif /* IS_ENABLED(CONFIG_IPV6) */
/*
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err;

	mutex_lock(&uld_mutex);
	err = setup_sge_queues(adap);
	if (err)
		goto rel_lock;
	err = setup_rss(adap);
	if (err)
		goto freeq;

	if (adap->flags & CXGB4_USING_MSIX) {
		if (s->nd_msix_idx < 0) {
			err = -ENOMEM;
			goto irq_err;
		}

		err = request_irq(adap->msix_info[s->nd_msix_idx].vec,
				  t4_nondata_intr, 0,
				  adap->msix_info[s->nd_msix_idx].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_queue_irqs(adap);
		if (err)
			goto irq_err_free_nd_msix;
	} else {
		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
				  (adap->flags & CXGB4_USING_MSI) ? 0
								  : IRQF_SHARED,
				  adap->port[0]->name, adap);
		if (err)
			goto irq_err;
	}

	enable_rx(adap);
	t4_sge_start(adap);
	t4_intr_enable(adap);
	adap->flags |= CXGB4_FULL_INIT_DONE;
	mutex_unlock(&uld_mutex);

	notify_ulds(adap, CXGB4_STATE_UP);
#if IS_ENABLED(CONFIG_IPV6)
	update_clip(adap);
#endif
	return err;

irq_err_free_nd_msix:
	free_irq(adap->msix_info[s->nd_msix_idx].vec, adap);
irq_err:
	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
freeq:
	t4_free_sge_resources(adap);
rel_lock:
	mutex_unlock(&uld_mutex);
	return err;
}
static void cxgb_down(struct adapter *adapter)
{
	cancel_work_sync(&adapter->tid_release_task);
	cancel_work_sync(&adapter->db_full_task);
	cancel_work_sync(&adapter->db_drop_task);
	adapter->tid_release_task_busy = false;
	adapter->tid_release_head = NULL;

	t4_sge_stop(adapter);
	t4_free_sge_resources(adapter);

	adapter->flags &= ~CXGB4_FULL_INIT_DONE;
}
/*
 * net_device operations
 */
int cxgb_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int err;

	netif_carrier_off(dev);

	if (!(adapter->flags & CXGB4_FULL_INIT_DONE)) {
		err = cxgb_up(adapter);
		if (err < 0)
			return err;
	}

	/* It's possible that the basic port information could have
	 * changed since we first read it.
	 */
	err = t4_update_port_info(pi);
	if (err < 0)
		return err;

	err = link_start(dev);
	if (!err)
		netif_tx_start_all_queues(dev);
	return err;
}
int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);
	ret = t4_enable_pi_params(adapter, adapter->pf, pi,
				  false, false, false);
#ifdef CONFIG_CHELSIO_T4_DCB
	cxgb4_dcb_reset(dev);
	dcb_tx_queue_prio_enable(dev, false);
#endif
	return ret;
}
int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
			       __be32 sip, __be16 sport, __be16 vlan,
			       unsigned int queue, unsigned char port, unsigned char mask)
{
	int ret;
	struct filter_entry *f;
	struct adapter *adap;
	int i;
	u8 *val;

	adap = netdev2adap(dev);

	/* Adjust stid to correct filter index */
	stid -= adap->tids.sftid_base;
	stid += adap->tids.nftids;

	/* Check to make sure the filter requested is writable ...
	 */
	f = &adap->tids.ftid_tab[stid];
	ret = writable_filter(f);
	if (ret)
		return ret;

	/* Clear out any old resources being used by the filter before
	 * we start constructing the new filter.
	 */
	if (f->valid)
		clear_filter(adap, f);

	/* Clear out filter specifications */
	memset(&f->fs, 0, sizeof(struct ch_filter_specification));
	f->fs.val.lport = cpu_to_be16(sport);
	f->fs.mask.lport = ~0;
	val = (u8 *)&sip;
	if ((val[0] | val[1] | val[2] | val[3]) != 0) {
		for (i = 0; i < 4; i++) {
			f->fs.val.lip[i] = val[i];
			f->fs.mask.lip[i] = ~0;
		}
		if (adap->params.tp.vlan_pri_map & PORT_F) {
			f->fs.val.iport = port;
			f->fs.mask.iport = mask;
		}
	}

	if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
		f->fs.val.proto = IPPROTO_TCP;
		f->fs.mask.proto = ~0;
	}

	f->fs.dirsteer = 1;
	f->fs.iq = queue;
	/* Mark filter as locked */
	f->locked = 1;
	f->fs.rpttid = 1;

	/* Save the actual tid. We need this to get the corresponding
	 * filter entry structure in filter_rpl.
	 */
	f->tid = stid + adap->tids.ftid_base;
	ret = set_filter_wr(adap, stid);
	if (ret) {
		clear_filter(adap, f);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(cxgb4_create_server_filter);
int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
			       unsigned int queue, bool ipv6)
{
	struct filter_entry *f;
	struct adapter *adap;

	adap = netdev2adap(dev);

	/* Adjust stid to correct filter index */
	stid -= adap->tids.sftid_base;
	stid += adap->tids.nftids;

	f = &adap->tids.ftid_tab[stid];
	/* Unlock the filter */
	f->locked = 0;

	return delete_filter(adap, stid);
}
EXPORT_SYMBOL(cxgb4_remove_server_filter);
static void cxgb_get_stats(struct net_device *dev,
			   struct rtnl_link_stats64 *ns)
{
	struct port_stats stats;
	struct port_info *p = netdev_priv(dev);
	struct adapter *adapter = p->adapter;

	/* Block retrieving statistics during EEH error
	 * recovery. Otherwise, the recovery might fail
	 * and the PCI device will be removed permanently
	 */
	spin_lock(&adapter->stats_lock);
	if (!netif_device_present(dev)) {
		spin_unlock(&adapter->stats_lock);
		return;
	}
	t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
				 &p->stats_base);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes   = stats.tx_octets;
	ns->tx_packets = stats.tx_frames;
	ns->rx_bytes   = stats.rx_octets;
	ns->rx_packets = stats.rx_frames;
	ns->multicast  = stats.rx_mcast_frames;

	/* detailed rx_errors */
	ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
			       stats.rx_runt;
	ns->rx_over_errors   = 0;
	ns->rx_crc_errors    = stats.rx_fcs_err;
	ns->rx_frame_errors  = stats.rx_symbol_err;
	ns->rx_dropped	     = stats.rx_ovflow0 + stats.rx_ovflow1 +
			       stats.rx_ovflow2 + stats.rx_ovflow3 +
			       stats.rx_trunc0 + stats.rx_trunc1 +
			       stats.rx_trunc2 + stats.rx_trunc3;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors */
	ns->tx_aborted_errors   = 0;
	ns->tx_carrier_errors   = 0;
	ns->tx_fifo_errors      = 0;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors    = 0;

	ns->tx_errors = stats.tx_error_frames;
	ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
		ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
}
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	unsigned int mbox;
	int ret = 0, prtad, devad;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;

	switch (cmd) {
	case SIOCGMIIPHY:
		if (pi->mdio_addr < 0)
			return -EOPNOTSUPP;
		data->phy_id = pi->mdio_addr;
		break;
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (mdio_phy_id_is_c45(data->phy_id)) {
			prtad = mdio_phy_id_prtad(data->phy_id);
			devad = mdio_phy_id_devad(data->phy_id);
		} else if (data->phy_id < 32) {
			prtad = data->phy_id;
			devad = 0;
			data->reg_num &= 0x1f;
		} else
			return -EINVAL;

		mbox = pi->adapter->pf;
		if (cmd == SIOCGMIIREG)
			ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
					 data->reg_num, &data->val_out);
		else
			ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
					 data->reg_num, data->val_in);
		break;
	case SIOCGHWTSTAMP:
		return copy_to_user(req->ifr_data, &pi->tstamp_config,
				    sizeof(pi->tstamp_config)) ?
			-EFAULT : 0;
	case SIOCSHWTSTAMP:
		if (copy_from_user(&pi->tstamp_config, req->ifr_data,
				   sizeof(pi->tstamp_config)))
			return -EFAULT;

		if (!is_t4(adapter->params.chip)) {
			switch (pi->tstamp_config.tx_type) {
			case HWTSTAMP_TX_OFF:
			case HWTSTAMP_TX_ON:
				break;
			default:
				return -ERANGE;
			}

			switch (pi->tstamp_config.rx_filter) {
			case HWTSTAMP_FILTER_NONE:
				pi->rxtstamp = false;
				break;
			case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
				cxgb4_ptprx_timestamping(pi, pi->port_id,
							 PTP_TS_L4);
				break;
			case HWTSTAMP_FILTER_PTP_V2_EVENT:
				cxgb4_ptprx_timestamping(pi, pi->port_id,
							 PTP_TS_L2_L4);
				break;
			case HWTSTAMP_FILTER_ALL:
			case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
				pi->rxtstamp = true;
				break;
			default:
				pi->tstamp_config.rx_filter =
					HWTSTAMP_FILTER_NONE;
				return -ERANGE;
			}

			if ((pi->tstamp_config.tx_type == HWTSTAMP_TX_OFF) &&
			    (pi->tstamp_config.rx_filter ==
				HWTSTAMP_FILTER_NONE)) {
				if (cxgb4_ptp_txtype(adapter, pi->port_id) >= 0)
					pi->ptp_enable = false;
			}

			if (pi->tstamp_config.rx_filter !=
				HWTSTAMP_FILTER_NONE) {
				if (cxgb4_ptp_redirect_rx_packet(adapter,
								 pi) >= 0)
					pi->ptp_enable = true;
			}
		} else {
			/* For T4 Adapters */
			switch (pi->tstamp_config.rx_filter) {
			case HWTSTAMP_FILTER_NONE:
				pi->rxtstamp = false;
				break;
			case HWTSTAMP_FILTER_ALL:
				pi->rxtstamp = true;
				break;
			default:
				pi->tstamp_config.rx_filter =
					HWTSTAMP_FILTER_NONE;
				return -ERANGE;
			}
		}
		return copy_to_user(req->ifr_data, &pi->tstamp_config,
				    sizeof(pi->tstamp_config)) ?
			-EFAULT : 0;
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}
static void cxgb_set_rxmode(struct net_device *dev)
{
	/* unfortunately we can't return errors to the stack */
	set_rxmode(dev, -1, false);
}

static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1,
			    -1, -1, -1, true);
	if (!ret)
		dev->mtu = new_mtu;
	return ret;
}

#ifdef CONFIG_PCI_IOV
static int cxgb4_mgmt_open(struct net_device *dev)
{
	/* Turn carrier off since we don't have to transmit anything on this
	 * interface.
	 */
	netif_carrier_off(dev);
	return 0;
}
/* Fill MAC address that will be assigned by the FW */
static void cxgb4_mgmt_fill_vf_station_mac_addr(struct adapter *adap)
{
	u8 hw_addr[ETH_ALEN], macaddr[ETH_ALEN];
	unsigned int i, vf, nvfs;
	u16 a, b;
	int err;
	u8 *na;

	adap->params.pci.vpd_cap_addr = pci_find_capability(adap->pdev,
							    PCI_CAP_ID_VPD);
	err = t4_get_raw_vpd_params(adap, &adap->params.vpd);
	if (err)
		return;

	na = adap->params.vpd.na;
	for (i = 0; i < ETH_ALEN; i++)
		hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
			      hex2val(na[2 * i + 1]));

	a = (hw_addr[0] << 8) | hw_addr[1];
	b = (hw_addr[1] << 8) | hw_addr[2];
	a ^= b;
	a |= 0x0200;	/* locally assigned Ethernet MAC address */
	a &= ~0x0100;	/* not a multicast Ethernet MAC address */
	macaddr[0] = a >> 8;
	macaddr[1] = a & 0xff;

	for (i = 2; i < 5; i++)
		macaddr[i] = hw_addr[i + 1];

	for (vf = 0, nvfs = pci_sriov_get_totalvfs(adap->pdev);
		vf < nvfs; vf++) {
		macaddr[5] = adap->pf * nvfs + vf;
		ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, macaddr);
	}
}
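/* Illustrative example for the derivation above (made-up values): with
 * PF 4 and 16 VFs per PF, VF 3 receives macaddr[5] = 4 * 16 + 3 = 0x43,
 * while the OR/AND-NOT pair on 'a' forces a locally-administered (0x02)
 * unicast (clears 0x01) first octet in the synthesized station address.
 */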
static int cxgb4_mgmt_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int ret;

	/* verify MAC addr is valid */
	if (!is_valid_ether_addr(mac)) {
		dev_err(pi->adapter->pdev_dev,
			"Invalid Ethernet address %pM for VF %d\n",
			mac, vf);
		return -EINVAL;
	}

	dev_info(pi->adapter->pdev_dev,
		 "Setting MAC %pM on VF %d\n", mac, vf);
	ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac);
	if (!ret)
		ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac);
	return ret;
}
static int cxgb4_mgmt_get_vf_config(struct net_device *dev,
				    int vf, struct ifla_vf_info *ivi)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct vf_info *vfinfo;

	if (vf >= adap->num_vfs)
		return -EINVAL;
	vfinfo = &adap->vfinfo[vf];

	ivi->vf = vf;
	ivi->max_tx_rate = vfinfo->tx_rate;
	ivi->min_tx_rate = 0;
	ether_addr_copy(ivi->mac, vfinfo->vf_mac_addr);
	ivi->vlan = vfinfo->vlan;
	ivi->linkstate = vfinfo->link_state;
	return 0;
}
static int cxgb4_mgmt_get_phys_port_id(struct net_device *dev,
				       struct netdev_phys_item_id *ppid)
{
	struct port_info *pi = netdev_priv(dev);
	unsigned int phy_port_id;

	phy_port_id = pi->adapter->adap_idx * 10 + pi->port_id;
	ppid->id_len = sizeof(phy_port_id);
	memcpy(ppid->id, &phy_port_id, ppid->id_len);
	return 0;
}
2956 static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf,
2957 int min_tx_rate, int max_tx_rate)
2959 struct port_info *pi = netdev_priv(dev);
2960 struct adapter *adap = pi->adapter;
2961 unsigned int link_ok, speed, mtu;
2962 u32 fw_pfvf, fw_class;
2967 if (vf >= adap->num_vfs)
2971 dev_err(adap->pdev_dev,
2972 "Min tx rate (%d) (> 0) for VF %d is Invalid.\n",
2977 if (max_tx_rate == 0) {
2978 /* unbind VF from any Traffic Class */
2979 fw_pfvf =
2980 (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
2981 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
2982 fw_class = 0xffffffff;
2983 ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1,
2984 &fw_pfvf, &fw_class);
2986 dev_err(adap->pdev_dev,
2987 "Err %d in unbinding PF %d VF %d from TX Rate Limiting\n",
2991 dev_info(adap->pdev_dev,
2992 "PF %d VF %d is unbound from TX Rate Limiting\n",
2994 adap->vfinfo[vf].tx_rate = 0;
2998 ret = t4_get_link_params(pi, &link_ok, &speed, &mtu);
2999 if (ret != FW_SUCCESS) {
3000 dev_err(adap->pdev_dev,
3001 "Failed to get link information for VF %d\n", vf);
3006 dev_err(adap->pdev_dev, "Link down for VF %d\n", vf);
3010 if (max_tx_rate > speed) {
3011 dev_err(adap->pdev_dev,
3012 "Max tx rate %d for VF %d can't be > link-speed %u",
3013 max_tx_rate, vf, speed);
3018 /* subtract ethhdr size and 4 bytes crc since, f/w appends it */
3019 pktsize = pktsize - sizeof(struct ethhdr) - 4;
3020 /* subtract ipv4 hdr size, tcp hdr size to get typical IPv4 MSS size */
3021 pktsize = pktsize - sizeof(struct iphdr) - sizeof(struct tcphdr);
3022 /* configure Traffic Class for rate-limiting */
3023 ret = t4_sched_params(adap, SCHED_CLASS_TYPE_PACKET,
3024 SCHED_CLASS_LEVEL_CL_RL,
3025 SCHED_CLASS_MODE_CLASS,
3026 SCHED_CLASS_RATEUNIT_BITS,
3027 SCHED_CLASS_RATEMODE_ABS,
3028 pi->tx_chan, class_id, 0,
3029 max_tx_rate * 1000, 0, pktsize, 0);
3031 dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n",
3035 dev_info(adap->pdev_dev,
3036 "Class %d with MSS %u configured with rate %u\n",
3037 class_id, pktsize, max_tx_rate);
3039 /* bind VF to configured Traffic Class */
3040 fw_pfvf = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
3041 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
3042 fw_class = class_id;
3043 ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, &fw_pfvf,
3046 dev_err(adap->pdev_dev,
3047 "Err %d in binding PF %d VF %d to Traffic Class %d\n",
3048 ret, adap->pf, vf, class_id);
3051 dev_info(adap->pdev_dev, "PF %d VF %d is bound to Class %d\n",
3052 adap->pf, vf, class_id);
3053 adap->vfinfo[vf].tx_rate = max_tx_rate;
static int cxgb4_mgmt_set_vf_vlan(struct net_device *dev, int vf,
				  u16 vlan, u8 qos, __be16 vlan_proto)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int ret;

	if (vf >= adap->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan_proto != htons(ETH_P_8021Q) || qos != 0)
		return -EPROTONOSUPPORT;

	ret = t4_set_vlan_acl(adap, adap->mbox, vf + 1, vlan);
	if (!ret) {
		adap->vfinfo[vf].vlan = vlan;
		return 0;
	}

	dev_err(adap->pdev_dev, "Err %d %s VLAN ACL for PF/VF %d/%d\n",
		ret, (vlan ? "setting" : "clearing"), adap->pf, vf);
	return ret;
}
3081 static int cxgb4_mgmt_set_vf_link_state(struct net_device *dev, int vf,
3084 struct port_info *pi = netdev_priv(dev);
3085 struct adapter *adap = pi->adapter;
3089 if (vf >= adap->num_vfs)
3093 case IFLA_VF_LINK_STATE_AUTO:
3094 val = FW_VF_LINK_STATE_AUTO;
3097 case IFLA_VF_LINK_STATE_ENABLE:
3098 val = FW_VF_LINK_STATE_ENABLE;
3101 case IFLA_VF_LINK_STATE_DISABLE:
3102 val = FW_VF_LINK_STATE_DISABLE;
3109 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
3110 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_LINK_STATE));
3111 ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1,
3114 dev_err(adap->pdev_dev,
3115 "Error %d in setting PF %d VF %d link state\n",
3120 adap->vfinfo[vf].link_state = link;
3123 #endif /* CONFIG_PCI_IOV */
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	int ret;
	struct sockaddr *addr = p;
	struct port_info *pi = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt,
				    addr->sa_data, true, &pi->smt_idx);
	if (ret < 0)
		return ret;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & CXGB4_USING_MSIX) {
		int i;
		struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];

		for (i = pi->nqsets; i; i--, rx++)
			t4_sge_intr_msix(0, &rx->rspq);
	} else
		t4_intr_handler(adap)(0, adap);
}
#endif
static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct ch_sched_queue qe = { 0 };
	struct ch_sched_params p = { 0 };
	struct sched_class *e;
	u32 req_rate;
	int err = 0;

	if (!can_sched(dev))
		return -ENOTSUPP;

	if (index < 0 || index > pi->nqsets - 1)
		return -EINVAL;

	if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
		dev_err(adap->pdev_dev,
			"Failed to rate limit on queue %d. Link Down?\n",
			index);
		return -EINVAL;
	}

	qe.queue = index;
	e = cxgb4_sched_queue_lookup(dev, &qe);
	if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CL_RL) {
		dev_err(adap->pdev_dev,
			"Queue %u already bound to class %u of type: %u\n",
			index, e->idx, e->info.u.params.level);
		return -EBUSY;
	}

	/* Convert from Mbps to Kbps */
	req_rate = rate * 1000;

	/* Max rate is 100 Gbps */
	if (req_rate > SCHED_MAX_RATE_KBPS) {
		dev_err(adap->pdev_dev,
			"Invalid rate %u Mbps, Max rate is %u Mbps\n",
			rate, SCHED_MAX_RATE_KBPS / 1000);
		return -ERANGE;
	}

	/* First unbind the queue from any existing class */
	memset(&qe, 0, sizeof(qe));
	qe.queue = index;
	qe.class = SCHED_CLS_NONE;

	err = cxgb4_sched_class_unbind(dev, (void *)(&qe), SCHED_QUEUE);
	if (err) {
		dev_err(adap->pdev_dev,
			"Unbinding Queue %d on port %d fail. Err: %d\n",
			index, pi->port_id, err);
		return err;
	}

	/* Queue already unbound */
	if (!rate)
		return 0;

	/* Fetch any available unused or matching scheduling class */
	p.type = SCHED_CLASS_TYPE_PACKET;
	p.u.params.level    = SCHED_CLASS_LEVEL_CL_RL;
	p.u.params.mode     = SCHED_CLASS_MODE_CLASS;
	p.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS;
	p.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS;
	p.u.params.channel  = pi->tx_chan;
	p.u.params.class    = SCHED_CLS_NONE;
	p.u.params.minrate  = 0;
	p.u.params.maxrate  = req_rate;
	p.u.params.weight   = 0;
	p.u.params.pktsize  = dev->mtu;

	e = cxgb4_sched_class_alloc(dev, &p);
	if (!e)
		return -ENOMEM;

	/* Bind the queue to a scheduling class */
	memset(&qe, 0, sizeof(qe));
	qe.queue = index;
	qe.class = e->idx;

	err = cxgb4_sched_class_bind(dev, (void *)(&qe), SCHED_QUEUE);
	if (err)
		dev_err(adap->pdev_dev,
			"Queue rate limiting failed. Err: %d\n", err);
	return err;
}
static int cxgb_setup_tc_flower(struct net_device *dev,
				struct flow_cls_offload *cls_flower)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return cxgb4_tc_flower_replace(dev, cls_flower);
	case FLOW_CLS_DESTROY:
		return cxgb4_tc_flower_destroy(dev, cls_flower);
	case FLOW_CLS_STATS:
		return cxgb4_tc_flower_stats(dev, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}
static int cxgb_setup_tc_cls_u32(struct net_device *dev,
				 struct tc_cls_u32_offload *cls_u32)
{
	switch (cls_u32->command) {
	case TC_CLSU32_NEW_KNODE:
	case TC_CLSU32_REPLACE_KNODE:
		return cxgb4_config_knode(dev, cls_u32);
	case TC_CLSU32_DELETE_KNODE:
		return cxgb4_delete_knode(dev, cls_u32);
	default:
		return -EOPNOTSUPP;
	}
}
static int cxgb_setup_tc_matchall(struct net_device *dev,
				  struct tc_cls_matchall_offload *cls_matchall,
				  bool ingress)
{
	struct adapter *adap = netdev2adap(dev);

	if (!adap->tc_matchall)
		return -ENOMEM;

	switch (cls_matchall->command) {
	case TC_CLSMATCHALL_REPLACE:
		return cxgb4_tc_matchall_replace(dev, cls_matchall, ingress);
	case TC_CLSMATCHALL_DESTROY:
		return cxgb4_tc_matchall_destroy(dev, cls_matchall, ingress);
	case TC_CLSMATCHALL_STATS:
		if (ingress)
			return cxgb4_tc_matchall_stats(dev, cls_matchall);
		break;
	default:
		break;
	}

	return -EOPNOTSUPP;
}
static int cxgb_setup_tc_block_ingress_cb(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	struct net_device *dev = cb_priv;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
		dev_err(adap->pdev_dev,
			"Failed to setup tc on port %d. Link Down?\n",
			pi->port_id);
		return -EINVAL;
	}

	if (!tc_cls_can_offload_and_chain0(dev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSU32:
		return cxgb_setup_tc_cls_u32(dev, type_data);
	case TC_SETUP_CLSFLOWER:
		return cxgb_setup_tc_flower(dev, type_data);
	case TC_SETUP_CLSMATCHALL:
		return cxgb_setup_tc_matchall(dev, type_data, true);
	default:
		return -EOPNOTSUPP;
	}
}
static int cxgb_setup_tc_block_egress_cb(enum tc_setup_type type,
					 void *type_data, void *cb_priv)
{
	struct net_device *dev = cb_priv;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
		dev_err(adap->pdev_dev,
			"Failed to setup tc on port %d. Link Down?\n",
			pi->port_id);
		return -EINVAL;
	}

	if (!tc_cls_can_offload_and_chain0(dev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return cxgb_setup_tc_matchall(dev, type_data, false);
	default:
		break;
	}

	return -EOPNOTSUPP;
}
static int cxgb_setup_tc_mqprio(struct net_device *dev,
				struct tc_mqprio_qopt_offload *mqprio)
{
	struct adapter *adap = netdev2adap(dev);

	if (!is_ethofld(adap) || !adap->tc_mqprio)
		return -ENOMEM;

	return cxgb4_setup_tc_mqprio(dev, mqprio);
}

static LIST_HEAD(cxgb_block_cb_list);

static int cxgb_setup_tc_block(struct net_device *dev,
			       struct flow_block_offload *f)
{
	struct port_info *pi = netdev_priv(dev);
	flow_setup_cb_t *cb;
	bool ingress_only;

	pi->tc_block_shared = f->block_shared;
	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
		cb = cxgb_setup_tc_block_egress_cb;
		ingress_only = false;
	} else {
		cb = cxgb_setup_tc_block_ingress_cb;
		ingress_only = true;
	}

	return flow_block_cb_setup_simple(f, &cxgb_block_cb_list,
					  cb, pi, dev, ingress_only);
}
static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
			 void *type_data)
{
	switch (type) {
	case TC_SETUP_QDISC_MQPRIO:
		return cxgb_setup_tc_mqprio(dev, type_data);
	case TC_SETUP_BLOCK:
		return cxgb_setup_tc_block(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
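/* Illustrative user-space commands that reach this entry point via the
 * kernel's TC offload framework (assuming a cxgb4 port <ifname>):
 *
 *	tc qdisc add dev <ifname> ingress
 *	tc filter add dev <ifname> ingress protocol ip flower \
 *		dst_ip 192.0.2.1 action drop
 *
 * The flower filter arrives through the ingress block callback and is
 * dispatched to cxgb_setup_tc_flower() above.
 */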
static void cxgb_del_udp_tunnel(struct net_device *netdev,
				struct udp_tunnel_info *ti)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adapter = pi->adapter;
	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
	u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
	int ret = 0, i;

	if (chip_ver < CHELSIO_T6)
		return;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (!adapter->vxlan_port_cnt ||
		    adapter->vxlan_port != ti->port)
			return; /* Invalid VxLAN destination port */

		adapter->vxlan_port_cnt--;
		if (adapter->vxlan_port_cnt)
			return;

		adapter->vxlan_port = 0;
		t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, 0);
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (!adapter->geneve_port_cnt ||
		    adapter->geneve_port != ti->port)
			return; /* Invalid GENEVE destination port */

		adapter->geneve_port_cnt--;
		if (adapter->geneve_port_cnt)
			return;

		adapter->geneve_port = 0;
		t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0);
		break;
	default:
		return;
	}

	/* Matchall mac entries can be deleted only after all tunnel ports
	 * are brought down or removed.
	 */
	if (!adapter->rawf_cnt)
		return;
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		ret = t4_free_raw_mac_filt(adapter, pi->viid,
					   match_all_mac, match_all_mac,
					   adapter->rawf_start +
					   pi->port_id,
					   1, pi->port_id, false);
		if (ret < 0) {
			netdev_info(netdev, "Failed to free mac filter entry, for port %d\n",
				    i);
			return;
		}
	}
}
static void cxgb_add_udp_tunnel(struct net_device *netdev,
				struct udp_tunnel_info *ti)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adapter = pi->adapter;
	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
	u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
	int i, ret;

	if (chip_ver < CHELSIO_T6 || !adapter->rawf_cnt)
		return;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		/* Callback for adding vxlan port can be called with the same
		 * port for both IPv4 and IPv6. We should not disable the
		 * offloading when the same port for both protocols is added
		 * and later one of them is removed.
		 */
		if (adapter->vxlan_port_cnt &&
		    adapter->vxlan_port == ti->port) {
			adapter->vxlan_port_cnt++;
			return;
		}

		/* We will support only one VxLAN port */
		if (adapter->vxlan_port_cnt) {
			netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
				    be16_to_cpu(adapter->vxlan_port),
				    be16_to_cpu(ti->port));
			return;
		}

		adapter->vxlan_port = ti->port;
		adapter->vxlan_port_cnt = 1;

		t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A,
			     VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F);
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (adapter->geneve_port_cnt &&
		    adapter->geneve_port == ti->port) {
			adapter->geneve_port_cnt++;
			return;
		}

		/* We will support only one GENEVE port */
		if (adapter->geneve_port_cnt) {
			netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
				    be16_to_cpu(adapter->geneve_port),
				    be16_to_cpu(ti->port));
			return;
		}

		adapter->geneve_port = ti->port;
		adapter->geneve_port_cnt = 1;

		t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A,
			     GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F);
		break;
	default:
		return;
	}

	/* Create a 'match all' mac filter entry for inner mac,
	 * if raw mac interface is supported. Once the linux kernel provides
	 * driver entry points for adding/deleting the inner mac addresses,
	 * we will remove this 'match all' entry and fallback to adding
	 * exact match filters.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);

		ret = t4_alloc_raw_mac_filt(adapter, pi->viid,
					    match_all_mac,
					    match_all_mac,
					    adapter->rawf_start +
					    pi->port_id,
					    1, pi->port_id, false);
		if (ret < 0) {
			netdev_info(netdev, "Failed to allocate a mac filter entry, not adding port %d\n",
				    be16_to_cpu(ti->port));
			cxgb_del_udp_tunnel(netdev, ti);
			return;
		}
	}
}
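/* These tunnel callbacks are driven by the kernel's UDP tunnel
 * infrastructure; for instance (illustrative), creating a VXLAN device
 *
 *	ip link add vxlan0 type vxlan id 42 dstport 4789 \
 *		dev <ifname> remote 198.51.100.1
 *
 * results in a UDP_TUNNEL_TYPE_VXLAN add for port 4789, which programs
 * MPS_RX_VXLAN_TYPE_A above so the hardware can parse inner headers.
 */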
static netdev_features_t cxgb_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
		return features;

	/* Check if hw supports offload for this packet */
	if (!skb->encapsulation || cxgb_encap_offload_supported(skb))
		return features;

	/* Offload is not supported for this encapsulated packet */
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}

static netdev_features_t cxgb_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	/* Disable GRO, if RX_CSUM is disabled */
	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_GRO;

	return features;
}
static const struct net_device_ops cxgb4_netdev_ops = {
	.ndo_open             = cxgb_open,
	.ndo_stop             = cxgb_close,
	.ndo_start_xmit       = t4_start_xmit,
	.ndo_select_queue     = cxgb_select_queue,
	.ndo_get_stats64      = cxgb_get_stats,
	.ndo_set_rx_mode      = cxgb_set_rxmode,
	.ndo_set_mac_address  = cxgb_set_mac_addr,
	.ndo_set_features     = cxgb_set_features,
	.ndo_validate_addr    = eth_validate_addr,
	.ndo_do_ioctl         = cxgb_ioctl,
	.ndo_change_mtu       = cxgb_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller  = cxgb_netpoll,
#endif
#ifdef CONFIG_CHELSIO_T4_FCOE
	.ndo_fcoe_enable      = cxgb_fcoe_enable,
	.ndo_fcoe_disable     = cxgb_fcoe_disable,
#endif /* CONFIG_CHELSIO_T4_FCOE */
	.ndo_set_tx_maxrate   = cxgb_set_tx_maxrate,
	.ndo_setup_tc         = cxgb_setup_tc,
	.ndo_udp_tunnel_add   = cxgb_add_udp_tunnel,
	.ndo_udp_tunnel_del   = cxgb_del_udp_tunnel,
	.ndo_features_check   = cxgb_features_check,
	.ndo_fix_features     = cxgb_fix_features,
};
#ifdef CONFIG_PCI_IOV
static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
	.ndo_open               = cxgb4_mgmt_open,
	.ndo_set_vf_mac         = cxgb4_mgmt_set_vf_mac,
	.ndo_get_vf_config      = cxgb4_mgmt_get_vf_config,
	.ndo_set_vf_rate        = cxgb4_mgmt_set_vf_rate,
	.ndo_get_phys_port_id   = cxgb4_mgmt_get_phys_port_id,
	.ndo_set_vf_vlan        = cxgb4_mgmt_set_vf_vlan,
	.ndo_set_vf_link_state  = cxgb4_mgmt_set_vf_link_state,
};
#endif
static void cxgb4_mgmt_get_drvinfo(struct net_device *dev,
				   struct ethtool_drvinfo *info)
{
	struct adapter *adapter = netdev2adap(dev);

	strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));
}

static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = {
	.get_drvinfo       = cxgb4_mgmt_get_drvinfo,
};
static void notify_fatal_err(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, fatal_err_notify_task);
	notify_ulds(adap, CXGB4_STATE_FATAL_ERROR);
}

void t4_fatal_err(struct adapter *adap)
{
	int port;

	if (pci_channel_offline(adap->pdev))
		return;

	/* Disable the SGE since ULDs are going to free resources that
	 * could be exposed to the adapter.  RDMA MWs for example...
	 */
	t4_shutdown_adapter(adap);
	for_each_port(adap, port) {
		struct net_device *dev = adap->port[port];

		/* If we get here in very early initialization the network
		 * devices may not have been set up yet.
		 */
		if (!dev)
			continue;

		netif_tx_stop_all_queues(dev);
		netif_carrier_off(dev);
	}
	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
	queue_work(adap->workq, &adap->fatal_err_notify_task);
}
static void setup_memwin(struct adapter *adap)
{
	u32 nic_win_base = t4_get_util_window(adap);

	t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC);
}

static void setup_memwin_rdma(struct adapter *adap)
{
	if (adap->vres.ocq.size) {
		u32 start;
		unsigned int sz_kb;

		start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
		start &= PCI_BASE_ADDRESS_MEM_MASK;
		start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
		sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
			     start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
			     adap->vres.ocq.start);
		t4_read_reg(adap,
			    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
	}
}
/* HMA Definitions */

/* The maximum number of addresses that can be sent in a single FW cmd */
#define HMA_MAX_ADDR_IN_CMD	5

#define HMA_PAGE_SIZE		PAGE_SIZE

#define HMA_MAX_NO_FW_ADDRESS	(16 << 10)  /* FW supports 16K addresses */

#define HMA_PAGE_ORDER					\
	((HMA_PAGE_SIZE < HMA_MAX_NO_FW_ADDRESS) ?	\
	ilog2(HMA_MAX_NO_FW_ADDRESS / HMA_PAGE_SIZE) : 0)

/* The minimum and maximum possible HMA sizes that can be specified in the FW
 * configuration (in units of MB).
 */
#define HMA_MIN_TOTAL_SIZE	1
#define HMA_MAX_TOTAL_SIZE				\
	(((HMA_PAGE_SIZE << HMA_PAGE_ORDER) *		\
	  HMA_MAX_NO_FW_ADDRESS) >> 20)
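/* Worked example of the bounds above, assuming a 4KB PAGE_SIZE:
 * HMA_PAGE_ORDER = ilog2(16384 / 4096) = 2, so each scatterlist entry
 * covers a 16KB chunk, and HMA_MAX_TOTAL_SIZE = (16KB * 16K addresses)
 * >> 20 = 256MB of host memory addressable by the firmware.
 */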
static void adap_free_hma_mem(struct adapter *adapter)
{
	struct scatterlist *iter;
	struct page *page;
	int i;

	if (!adapter->hma.sgt)
		return;

	if (adapter->hma.flags & HMA_DMA_MAPPED_FLAG) {
		dma_unmap_sg(adapter->pdev_dev, adapter->hma.sgt->sgl,
			     adapter->hma.sgt->nents, PCI_DMA_BIDIRECTIONAL);
		adapter->hma.flags &= ~HMA_DMA_MAPPED_FLAG;
	}

	for_each_sg(adapter->hma.sgt->sgl, iter,
		    adapter->hma.sgt->orig_nents, i) {
		page = sg_page(iter);
		if (page)
			__free_pages(page, HMA_PAGE_ORDER);
	}

	kfree(adapter->hma.phy_addr);
	sg_free_table(adapter->hma.sgt);
	kfree(adapter->hma.sgt);
	adapter->hma.sgt = NULL;
}
3747 static int adap_config_hma(struct adapter *adapter)
3749 struct scatterlist *sgl, *iter;
3750 struct sg_table *sgt;
3751 struct page *newpage;
3752 unsigned int i, j, k;
3753 u32 param, hma_size;
3759 /* HMA is supported only for T6+ cards.
3760 * Avoid initializing HMA in kdump kernels.
3762 if (is_kdump_kernel() ||
3763 CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
3766 /* Get the HMA region size required by fw */
3767 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3768 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_HMA_SIZE));
3769 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3770 1, &param, &hma_size);
3771 /* An error means card has its own memory or HMA is not supported by
3772 * the firmware. Return without any errors.
3774 if (ret || !hma_size)
3777 if (hma_size < HMA_MIN_TOTAL_SIZE ||
3778 hma_size > HMA_MAX_TOTAL_SIZE) {
3779 dev_err(adapter->pdev_dev,
3780 "HMA size %uMB beyond bounds(%u-%lu)MB\n",
3781 hma_size, HMA_MIN_TOTAL_SIZE, HMA_MAX_TOTAL_SIZE);
3785 page_size = HMA_PAGE_SIZE;
3786 page_order = HMA_PAGE_ORDER;
3787 adapter->hma.sgt = kzalloc(sizeof(*adapter->hma.sgt), GFP_KERNEL);
3788 if (unlikely(!adapter->hma.sgt)) {
3789 dev_err(adapter->pdev_dev, "HMA SG table allocation failed\n");
3792 sgt = adapter->hma.sgt;
3793 /* FW returned value will be in MB's
3795 sgt->orig_nents = (hma_size << 20) / (page_size << page_order);
3796 if (sg_alloc_table(sgt, sgt->orig_nents, GFP_KERNEL)) {
3797 dev_err(adapter->pdev_dev, "HMA SGL allocation failed\n");
3798 kfree(adapter->hma.sgt);
3799 adapter->hma.sgt = NULL;
3803 sgl = adapter->hma.sgt->sgl;
3804 node = dev_to_node(adapter->pdev_dev);
3805 for_each_sg(sgl, iter, sgt->orig_nents, i) {
3806 newpage = alloc_pages_node(node, __GFP_NOWARN | GFP_KERNEL |
3807 __GFP_ZERO, page_order);
3809 dev_err(adapter->pdev_dev,
3810 "Not enough memory for HMA page allocation\n");
3814 sg_set_page(iter, newpage, page_size << page_order, 0);
3817 sgt->nents = dma_map_sg(adapter->pdev_dev, sgl, sgt->orig_nents,
3820 dev_err(adapter->pdev_dev,
3821 "Not enough memory for HMA DMA mapping");
3825 adapter->hma.flags |= HMA_DMA_MAPPED_FLAG;
3827 adapter->hma.phy_addr = kcalloc(sgt->nents, sizeof(dma_addr_t),
3829 if (unlikely(!adapter->hma.phy_addr))
3832 for_each_sg(sgl, iter, sgt->nents, i) {
3833 newpage = sg_page(iter);
3834 adapter->hma.phy_addr[i] = sg_dma_address(iter);
3837 ncmds = DIV_ROUND_UP(sgt->nents, HMA_MAX_ADDR_IN_CMD);
3838 /* Pass on the addresses to firmware */
3839 for (i = 0, k = 0; i < ncmds; i++, k += HMA_MAX_ADDR_IN_CMD) {
3840 struct fw_hma_cmd hma_cmd;
3841 u8 naddr = HMA_MAX_ADDR_IN_CMD;
3842 u8 soc = 0, eoc = 0;
3843 u8 hma_mode = 1; /* Presently we support only Page table mode */
3845 soc = (i == 0) ? 1 : 0;
3846 eoc = (i == ncmds - 1) ? 1 : 0;
3848 /* For last cmd, set naddr corresponding to remaining
3851 if (i == ncmds - 1) {
3852 naddr = sgt->nents % HMA_MAX_ADDR_IN_CMD;
3853 naddr = naddr ? naddr : HMA_MAX_ADDR_IN_CMD;
3855 memset(&hma_cmd, 0, sizeof(hma_cmd));
3856 hma_cmd.op_pkd = htonl(FW_CMD_OP_V(FW_HMA_CMD) |
3857 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
3858 hma_cmd.retval_len16 = htonl(FW_LEN16(hma_cmd));
3860 hma_cmd.mode_to_pcie_params =
3861 htonl(FW_HMA_CMD_MODE_V(hma_mode) |
3862 FW_HMA_CMD_SOC_V(soc) | FW_HMA_CMD_EOC_V(eoc));
3864 /* HMA cmd size specified in MB's */
3865 hma_cmd.naddr_size =
3866 htonl(FW_HMA_CMD_SIZE_V(hma_size) |
3867 FW_HMA_CMD_NADDR_V(naddr));
3869 /* Total Page size specified in units of 4K */
3870 hma_cmd.addr_size_pkd =
3871 htonl(FW_HMA_CMD_ADDR_SIZE_V
3872 ((page_size << page_order) >> 12));
3874 /* Fill the 5 addresses */
3875 for (j = 0; j < naddr; j++) {
3876 hma_cmd.phy_address[j] =
3877 cpu_to_be64(adapter->hma.phy_addr[j + k]);
3879 ret = t4_wr_mbox(adapter, adapter->mbox, &hma_cmd,
3880 sizeof(hma_cmd), &hma_cmd);
3882 dev_err(adapter->pdev_dev,
3883 "HMA FW command failed with err %d\n", ret);
3889 dev_info(adapter->pdev_dev,
3890 "Reserved %uMB host memory for HMA\n", hma_size);
3894 adap_free_hma_mem(adapter);
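/* Sizing sketch for the FW command loop above (illustrative numbers):
 * if the firmware requests hma_size = 92MB and each scatterlist entry
 * covers a 16KB chunk (4KB pages, order 2), then orig_nents =
 * (92 << 20) / 16384 = 5888 entries, passed to the firmware in
 * DIV_ROUND_UP(5888, 5) = 1178 FW_HMA_CMD mailbox commands.
 */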
3898 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
3903 /* Now that we've successfully configured and initialized the adapter
3904 * can ask the Firmware what resources it has provisioned for us.
3906 ret = t4_get_pfres(adap);
3908 dev_err(adap->pdev_dev,
3909 "Unable to retrieve resource provisioning information\n");
3913 /* get device capabilities */
3914 memset(c, 0, sizeof(*c));
3915 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3916 FW_CMD_REQUEST_F | FW_CMD_READ_F);
3917 c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
3918 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c);
3922 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3923 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
3924 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
3928 ret = t4_config_glbl_rss(adap, adap->pf,
3929 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
3930 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
3931 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
3935 ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64,
3936 MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
3943 /* tweak some settings */
3944 t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
3945 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
3946 t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
3947 v = t4_read_reg(adap, TP_PIO_DATA_A);
3948 t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);
3950 /* first 4 Tx modulation queues point to consecutive Tx channels */
3951 adap->params.tp.tx_modq_map = 0xE4;
3952 t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
3953 TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));
3955 /* associate each Tx modulation queue with consecutive Tx channels */
3957 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3958 &v, 1, TP_TX_SCHED_HDR_A);
3959 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3960 &v, 1, TP_TX_SCHED_FIFO_A);
3961 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
3962 &v, 1, TP_TX_SCHED_PCMD_A);
3964 #define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
3965 if (is_offload(adap)) {
3966 t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
3967 TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3968 TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3969 TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3970 TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
3971 t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
3972 TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3973 TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3974 TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3975 TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
3978 /* get basic stuff going */
3979 return t4_early_init(adap, adap->pf);
/*
 * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
 */
#define MAX_ATIDS 8192U

/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 *
 * If the firmware we're dealing with has Configuration File support, then
 * we use that to perform all configuration.
 */

/*
 * Tweak configuration based on module parameters, etc.  Most of these have
 * defaults assigned to them by Firmware Configuration Files (if we're using
 * them) but need to be explicitly set if we're using hard-coded
 * initialization.  But even in the case of using Firmware Configuration
 * Files, we'd like to expose the ability to change these via module
 * parameters so these are essentially common tweaks/settings for
 * Configuration Files and hard-coded initialization ...
 */
static int adap_init0_tweaks(struct adapter *adapter)
{
	/* Fix up various Host-Dependent Parameters like Page Size, Cache
	 * Line Size, etc.  The firmware default is for a 4KB Page Size and
	 * 64B Cache Line Size ...
	 */
	t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);

	/*
	 * Process module parameters which affect early initialization.
	 */
	if (rx_dma_offset != 2 && rx_dma_offset != 0) {
		dev_err(&adapter->pdev->dev,
			"Ignoring illegal rx_dma_offset=%d, using 2\n",
			rx_dma_offset);
		rx_dma_offset = 2;
	}
	t4_set_reg_field(adapter, SGE_CONTROL_A,
			 PKTSHIFT_V(PKTSHIFT_M),
			 PKTSHIFT_V(rx_dma_offset));

	/*
	 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
	 * adds the pseudo header itself.
	 */
	t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
			       CSUM_HAS_PSEUDO_HDR_F, 0);

	return 0;
}
/* 10Gb/s-BT PHY Support. Chip-external 10Gb/s-BT PHYs are complex chips
 * unto themselves and they contain their own firmware to perform their
 * tasks ...
 */
static int phy_aq1202_version(const u8 *phy_fw_data,
			      size_t phy_fw_size)
{
	int offset;

	/* At offset 0x8 you're looking for the primary image's
	 * starting offset which is 3 Bytes wide
	 *
	 * At offset 0xa of the primary image, you look for the offset
	 * of the DRAM segment which is 3 Bytes wide.
	 *
	 * The FW version is at offset 0x27e of the DRAM and is 2 Bytes
	 * wide
	 */
#define be16(__p) (((__p)[0] << 8) | (__p)[1])
#define le16(__p) ((__p)[0] | ((__p)[1] << 8))
#define le24(__p) (le16(__p) | ((__p)[2] << 16))

	offset = le24(phy_fw_data + 0x8) << 12;
	offset = le24(phy_fw_data + offset + 0xa);
	return be16(phy_fw_data + offset + 0x27e);

#undef be16
#undef le16
#undef le24
}
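/* Walking the lookup above with made-up file contents: if the 3-byte
 * little-endian value at offset 0x8 is 0x10, the primary image starts
 * at 0x10 << 12 = 0x10000; if the 3-byte value at 0x1000a is 0x52000,
 * the version returned is the big-endian 16-bit word at 0x5227e.
 */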
static struct info_10gbt_phy_fw {
	unsigned int phy_fw_id;		/* PCI Device ID */
	char *phy_fw_file;		/* /lib/firmware/ PHY Firmware file */
	int (*phy_fw_version)(const u8 *phy_fw_data, size_t phy_fw_size);
	int phy_flash;			/* Has FLASH for PHY Firmware */
} phy_info_array[] = {
	{
		PHY_AQ1202_DEVICEID,
		PHY_AQ1202_FIRMWARE,
		phy_aq1202_version,
		1,
	},
	{
		PHY_BCM84834_DEVICEID,
		PHY_BCM84834_FIRMWARE,
		NULL,
		0,
	},
};

static struct info_10gbt_phy_fw *find_phy_info(int devid)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(phy_info_array); i++) {
		if (phy_info_array[i].phy_fw_id == devid)
			return &phy_info_array[i];
	}
	return NULL;
}
4098 /* Handle updating of chip-external 10Gb/s-BT PHY firmware. This needs to
4099 * happen after the FW_RESET_CMD but before the FW_INITIALIZE_CMD. On error
4100 * we return a negative error number. If we transfer new firmware we return 1
4101 * (from t4_load_phy_fw()). If we don't do anything we return 0.
4103 static int adap_init0_phy(struct adapter *adap)
4105 const struct firmware *phyf;
4107 struct info_10gbt_phy_fw *phy_info;
4109 /* Use the device ID to determine which PHY file to flash.
4111 phy_info = find_phy_info(adap->pdev->device);
4113 dev_warn(adap->pdev_dev,
4114 "No PHY Firmware file found for this PHY\n");
4118 /* If we have a T4 PHY firmware file under /lib/firmware/cxgb4/, then
4119 * use that. The adapter firmware provides us with a memory buffer
4120 * where we can load a PHY firmware file from the host if we want to
4121 * override the PHY firmware File in flash.
4123 ret = request_firmware_direct(&phyf, phy_info->phy_fw_file,
4126 /* For adapters without FLASH attached to PHY for their
4127 * firmware, it's obviously a fatal error if we can't get the
4128 * firmware to the adapter. For adapters with PHY firmware
4129 * FLASH storage, it's worth a warning if we can't find the
4130 * PHY Firmware but we'll neuter the error ...
4132 dev_err(adap->pdev_dev, "unable to find PHY Firmware image "
4133 "/lib/firmware/%s, error %d\n",
4134 phy_info->phy_fw_file, -ret);
4135 if (phy_info->phy_flash) {
4136 int cur_phy_fw_ver = 0;
4138 t4_phy_fw_ver(adap, &cur_phy_fw_ver);
4139 dev_warn(adap->pdev_dev, "continuing with, on-adapter "
4140 "FLASH copy, version %#x\n", cur_phy_fw_ver);
4147 /* Load PHY Firmware onto adapter.
4149 ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock,
4150 phy_info->phy_fw_version,
4151 (u8 *)phyf->data, phyf->size);
4153 dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
4156 int new_phy_fw_ver = 0;
4158 if (phy_info->phy_fw_version)
4159 new_phy_fw_ver = phy_info->phy_fw_version(phyf->data,
4161 dev_info(adap->pdev_dev, "Successfully transferred PHY "
4162 "Firmware /lib/firmware/%s, version %#x\n",
4163 phy_info->phy_fw_file, new_phy_fw_ver);
4166 release_firmware(phyf);
4172 * Attempt to initialize the adapter via a Firmware Configuration File.
4174 static int adap_init0_config(struct adapter *adapter, int reset)
4176 char *fw_config_file, fw_config_file_path[256];
4177 u32 finiver, finicsum, cfcsum, param, val;
4178 struct fw_caps_config_cmd caps_cmd;
4179 unsigned long mtype = 0, maddr = 0;
4180 const struct firmware *cf;
4181 char *config_name = NULL;
4182 int config_issued = 0;
4186 * Reset device if necessary.
4189 ret = t4_fw_reset(adapter, adapter->mbox,
4190 PIORSTMODE_F | PIORST_F);
4195 /* If this is a 10Gb/s-BT adapter make sure the chip-external
4196 * 10Gb/s-BT PHYs have up-to-date firmware. Note that this step needs
4197 * to be performed after any global adapter RESET above since some
4198 * PHYs only have local RAM copies of the PHY firmware.
4200 if (is_10gbt_device(adapter->pdev->device)) {
4201 ret = adap_init0_phy(adapter);
4206 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
4207 * then use that. Otherwise, use the configuration file stored
4208 * in the adapter flash ...
4210 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
4212 fw_config_file = FW4_CFNAME;
4215 fw_config_file = FW5_CFNAME;
4218 fw_config_file = FW6_CFNAME;
4221 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
4222 adapter->pdev->device);
4227 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
4229 config_name = "On FLASH";
4230 mtype = FW_MEMTYPE_CF_FLASH;
4231 maddr = t4_flash_cfg_addr(adapter);
4233 u32 params[7], val[7];
4235 sprintf(fw_config_file_path,
4236 "/lib/firmware/%s", fw_config_file);
4237 config_name = fw_config_file_path;
4239 if (cf->size >= FLASH_CFG_MAX_SIZE)
4242 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4243 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
4244 ret = t4_query_params(adapter, adapter->mbox,
4245 adapter->pf, 0, 1, params, val);
4248 * For t4_memory_rw() below addresses and
4249 * sizes have to be in terms of multiples of 4
4250 * bytes. So, if the Configuration File isn't
4251 * a multiple of 4 bytes in length we'll have
4252 * to write that out separately since we can't
4253 * guarantee that the bytes following the
4254 * residual byte in the buffer returned by
4255 * request_firmware() are zeroed out ...
4257 size_t resid = cf->size & 0x3;
4258 size_t size = cf->size & ~0x3;
4259 __be32 *data = (__be32 *)cf->data;
4261 mtype = FW_PARAMS_PARAM_Y_G(val[0]);
4262 maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;
4264 spin_lock(&adapter->win0_lock);
4265 ret = t4_memory_rw(adapter, 0, mtype, maddr,
4266 size, data, T4_MEMORY_WRITE);
4267 if (ret == 0 && resid != 0) {
4274 last.word = data[size >> 2];
4275 for (i = resid; i < 4; i++)
4277 ret = t4_memory_rw(adapter, 0, mtype,
4282 spin_unlock(&adapter->win0_lock);
4286 release_firmware(cf);
4293 /* Ofld + Hash filter is supported. Older fw will fail this request and
4294 * it is fine.
4295 */
4296 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4297 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_HASHFILTER_WITH_OFLD));
4298 ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0,
4301 /* FW doesn't know about Hash filter + ofld support,
4302 * it's not a problem, don't return an error.
4305 dev_warn(adapter->pdev_dev,
4306 "Hash filter with ofld is not supported by FW\n");
4310 * Issue a Capability Configuration command to the firmware to get it
4311 * to parse the Configuration File. We don't use t4_fw_config_file()
4312 * because we want the ability to modify various features after we've
4313 * processed the configuration file ...
4315 memset(&caps_cmd, 0, sizeof(caps_cmd));
4316 caps_cmd.op_to_write =
4317 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4320 caps_cmd.cfvalid_to_len16 =
4321 htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
4322 FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
4323 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
4324 FW_LEN16(caps_cmd));
4325 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4328 /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
4329 * Configuration File in FLASH), our last gasp effort is to use the
4330 * Firmware Configuration File which is embedded in the firmware. A
4331 * very few early versions of the firmware didn't have one embedded
4332 * but we can ignore those.
4334 if (ret == -ENOENT) {
4335 memset(&caps_cmd, 0, sizeof(caps_cmd));
4336 caps_cmd.op_to_write =
4337 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4340 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4341 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
4342 sizeof(caps_cmd), &caps_cmd);
4343 config_name = "Firmware Default";
4350 finiver = ntohl(caps_cmd.finiver);
4351 finicsum = ntohl(caps_cmd.finicsum);
4352 cfcsum = ntohl(caps_cmd.cfcsum);
4353 if (finicsum != cfcsum)
4354 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
4355 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
4359 * And now tell the firmware to use the configuration we just loaded.
4361 caps_cmd.op_to_write =
4362 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4365 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4366 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4372 * Tweak configuration based on system architecture, module
4375 ret = adap_init0_tweaks(adapter);
4379 /* We will proceed even if HMA init fails. */
4380 ret = adap_config_hma(adapter);
4382 dev_err(adapter->pdev_dev,
4383 "HMA configuration failed with error %d\n", ret);
4385 if (is_t6(adapter->params.chip)) {
4386 adap_config_hpfilter(adapter);
4387 ret = setup_ppod_edram(adapter);
4389 dev_info(adapter->pdev_dev, "Successfully enabled "
4390 "ppod edram feature\n");
4394 * And finally tell the firmware to initialize itself using the
4395 * parameters from the Configuration File.
4397 ret = t4_fw_initialize(adapter, adapter->mbox);
4401 /* Emit Firmware Configuration File information and return
4404 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
4405 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
4406 config_name, finiver, cfcsum);
4410 * Something bad happened. Return the error ... (If the "error"
4411 * is that there's no Configuration File on the adapter we don't
4412 * want to issue a warning since this is fairly common.)
4415 if (config_issued && ret != -ENOENT)
4416 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
static struct fw_info fw_info_array[] = {
	{
		.chip = CHELSIO_T4,
		.fs_name = FW4_CFNAME,
		.fw_mod_name = FW4_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T4,
			.fw_ver = __cpu_to_be32(FW_VERSION(T4)),
			.intfver_nic = FW_INTFVER(T4, NIC),
			.intfver_vnic = FW_INTFVER(T4, VNIC),
			.intfver_ri = FW_INTFVER(T4, RI),
			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
			.intfver_fcoe = FW_INTFVER(T4, FCOE),
		},
	}, {
		.chip = CHELSIO_T5,
		.fs_name = FW5_CFNAME,
		.fw_mod_name = FW5_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = __cpu_to_be32(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}, {
		.chip = CHELSIO_T6,
		.fs_name = FW6_CFNAME,
		.fw_mod_name = FW6_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T6,
			.fw_ver = __cpu_to_be32(FW_VERSION(T6)),
			.intfver_nic = FW_INTFVER(T6, NIC),
			.intfver_vnic = FW_INTFVER(T6, VNIC),
			.intfver_ofld = FW_INTFVER(T6, OFLD),
			.intfver_ri = FW_INTFVER(T6, RI),
			.intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
			.intfver_iscsi = FW_INTFVER(T6, ISCSI),
			.intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
			.intfver_fcoe = FW_INTFVER(T6, FCOE),
		},
	}
};

static struct fw_info *find_fw_info(int chip)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
		if (fw_info_array[i].chip == chip)
			return &fw_info_array[i];
	}
	return NULL;
}
4480 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4482 static int adap_init0(struct adapter *adap, int vpd_skip)
4484 struct fw_caps_config_cmd caps_cmd;
4485 u32 params[7], val[7];
4486 enum dev_state state;
4491 /* Grab Firmware Device Log parameters as early as possible so we have
4492 * access to it for debugging, etc.
4494 ret = t4_init_devlog_params(adap);
4498 /* Contact FW, advertising Master capability */
4499 ret = t4_fw_hello(adap, adap->mbox, adap->mbox,
4500 is_kdump_kernel() ? MASTER_MUST : MASTER_MAY, &state);
4502 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
4506 if (ret == adap->mbox)
4507 adap->flags |= CXGB4_MASTER_PF;
4510 * If we're the Master PF Driver and the device is uninitialized,
4511 * then let's consider upgrading the firmware ... (We always want
4512 * to check the firmware version number in order to A. get it for
4513 * later reporting and B. to warn if the currently loaded firmware
4514 * is excessively mismatched relative to the driver.)
4517 t4_get_version_info(adap);
4518 ret = t4_check_fw_version(adap);
4519 /* If firmware is too old (not supported by driver) force an update. */
4521 state = DEV_STATE_UNINIT;
4522 if ((adap->flags & CXGB4_MASTER_PF) && state != DEV_STATE_INIT) {
4523 struct fw_info *fw_info;
4524 struct fw_hdr *card_fw;
4525 const struct firmware *fw;
4526 const u8 *fw_data = NULL;
4527 unsigned int fw_size = 0;
4529 /* This is the firmware whose headers the driver was compiled
4532 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
4533 if (fw_info == NULL) {
4534 dev_err(adap->pdev_dev,
4535 "unable to get firmware info for chip %d.\n",
4536 CHELSIO_CHIP_VERSION(adap->params.chip));
4540 /* allocate memory to read the header of the firmware on the
4543 card_fw = kvzalloc(sizeof(*card_fw), GFP_KERNEL);
4549 /* Get FW from /lib/firmware/ */
4550 ret = request_firmware(&fw, fw_info->fw_mod_name,
4553 dev_err(adap->pdev_dev,
4554 "unable to load firmware image %s, error %d\n",
4555 fw_info->fw_mod_name, ret);
4561 /* upgrade FW logic */
4562 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
4566 release_firmware(fw);
4573 /* If the firmware is initialized already, emit a simple note to that
4574 * effect. Otherwise, it's time to try initializing the adapter.
4576 if (state == DEV_STATE_INIT) {
4577 ret = adap_config_hma(adap);
4579 dev_err(adap->pdev_dev,
4580 "HMA configuration failed with error %d\n",
4582 dev_info(adap->pdev_dev, "Coming up as %s: "\
4583 "Adapter already initialized\n",
4584 adap->flags & CXGB4_MASTER_PF ? "MASTER" : "SLAVE");
4586 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
4587 "Initializing adapter\n");
4589 /* Find out whether we're dealing with a version of the
4590 * firmware which has configuration file support.
4592 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4593 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
4594 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
4597 /* If the firmware doesn't support Configuration Files,
4601 dev_err(adap->pdev_dev, "firmware doesn't support "
4602 "Firmware Configuration Files\n");
4606 /* The firmware provides us with a memory buffer where we can
4607 * load a Configuration File from the host if we want to
4608 * override the Configuration File in flash.
4610 ret = adap_init0_config(adap, reset);
4611 if (ret == -ENOENT) {
4612 dev_err(adap->pdev_dev, "no Configuration File "
4613 "present on adapter.\n");
4617 dev_err(adap->pdev_dev, "could not initialize "
4618 "adapter, error %d\n", -ret);
4623 /* Now that we've successfully configured and initialized the adapter
4624 * (or found it already initialized), we can ask the Firmware what
4625 * resources it has provisioned for us.
4627 ret = t4_get_pfres(adap);
4629 dev_err(adap->pdev_dev,
4630 "Unable to retrieve resource provisioning information\n");
4634 /* Grab VPD parameters. This should be done after we establish a
4635 * connection to the firmware since some of the VPD parameters
4636 * (notably the Core Clock frequency) are retrieved via requests to
4637 * the firmware. On the other hand, we need these fairly early on
4638 * so we do this right after getting ahold of the firmware.
4640 * We need to do this after initializing the adapter because someone
4641 * could have FLASHed a new VPD which won't be read by the firmware
4642 * until we do the RESET ...
4645 ret = t4_get_vpd_params(adap, &adap->params.vpd);
4650 /* Find out what ports are available to us. Note that we need to do
4651 * this before calling adap_init0_no_config() since it needs nports
 * and portvec ...
 */
4655 FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4656 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
4657 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
4661 adap->params.nports = hweight32(port_vec);
4662 adap->params.portvec = port_vec;
4664 /* Give the SGE code a chance to pull in anything that it needs ...
4665 * Note that this must be called after we retrieve our VPD parameters
4666 * in order to know how to convert core ticks to seconds, etc.
4668 ret = t4_sge_init(adap);
4672 /* Grab the SGE Doorbell Queue Timer values. If successful, that
4673 * indicates that the Firmware and Hardware support this.
4675 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4676 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK));
4677 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
4681 adap->sge.dbqtimer_tick = val[0];
4682 ret = t4_read_sge_dbqtimers(adap,
4683 ARRAY_SIZE(adap->sge.dbqtimer_val),
4684 adap->sge.dbqtimer_val);
4688 adap->flags |= CXGB4_SGE_DBQ_TIMER;
4690 if (is_bypass_device(adap->pdev->device))
4691 adap->params.bypass = 1;
4694 * Grab some of our fundamental operating parameters.
4696 params[0] = FW_PARAM_PFVF(EQ_START);
4697 params[1] = FW_PARAM_PFVF(L2T_START);
4698 params[2] = FW_PARAM_PFVF(L2T_END);
4699 params[3] = FW_PARAM_PFVF(FILTER_START);
4700 params[4] = FW_PARAM_PFVF(FILTER_END);
4701 params[5] = FW_PARAM_PFVF(IQFLINT_START);
4702 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val);
4705 adap->sge.egr_start = val[0];
4706 adap->l2t_start = val[1];
4707 adap->l2t_end = val[2];
4708 adap->tids.ftid_base = val[3];
4709 adap->tids.nftids = val[4] - val[3] + 1;
4710 adap->sge.ingr_start = val[5];
4712 if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
4713 params[0] = FW_PARAM_PFVF(HPFILTER_START);
4714 params[1] = FW_PARAM_PFVF(HPFILTER_END);
4715 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
4720 adap->tids.hpftid_base = val[0];
4721 adap->tids.nhpftids = val[1] - val[0] + 1;
4723 /* Read the raw mps entries. In T6, the last 2 tcam entries
4724 * are reserved for raw mac addresses (rawf = 2, one per port).
4726 params[0] = FW_PARAM_PFVF(RAWF_START);
4727 params[1] = FW_PARAM_PFVF(RAWF_END);
4728 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
4731 adap->rawf_start = val[0];
4732 adap->rawf_cnt = val[1] - val[0] + 1;
4735 adap->tids.tid_base =
4736 t4_read_reg(adap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
4739 /* qids (ingress/egress) returned from firmware can be anywhere
4740 * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
4741 * Hence the driver needs to allocate memory for this range to
4742 * store the queue info. Get the highest IQFLINT/EQ index returned
4743 * in FW_EQ_*_CMD.alloc command.
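/* Illustrative sizing (hypothetical values): if EQ_START = 64 and the
 * highest EQ index returned is 319, then egr_sz = 319 - 64 + 1 = 256
 * entries, which is exactly the range the egr_map allocation below
 * must cover.
 */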
4745 params[0] = FW_PARAM_PFVF(EQ_END);
4746 params[1] = FW_PARAM_PFVF(IQFLINT_END);
4747 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
4750 adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
4751 adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;
4753 adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
4754 sizeof(*adap->sge.egr_map), GFP_KERNEL);
4755 if (!adap->sge.egr_map) {
4760 adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
4761 sizeof(*adap->sge.ingr_map), GFP_KERNEL);
4762 if (!adap->sge.ingr_map) {
4767 /* Allocate the memory for the various egress queue bitmaps,
4768 * i.e. starving_fl, txq_maperr and blocked_fl.
 */
4770 adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
4771 sizeof(long), GFP_KERNEL);
4772 if (!adap->sge.starving_fl) {
4777 adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
4778 sizeof(long), GFP_KERNEL);
4779 if (!adap->sge.txq_maperr) {
4784 #ifdef CONFIG_DEBUG_FS
4785 adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
4786 sizeof(long), GFP_KERNEL);
4787 if (!adap->sge.blocked_fl) {
4793 params[0] = FW_PARAM_PFVF(CLIP_START);
4794 params[1] = FW_PARAM_PFVF(CLIP_END);
4795 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
4798 adap->clipt_start = val[0];
4799 adap->clipt_end = val[1];
4801 /* Get the supported number of traffic classes */
4802 params[0] = FW_PARAM_DEV(NUM_TM_CLASS);
4803 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
4805 /* We couldn't retrieve the number of Traffic Classes
4806 * supported by the hardware/firmware, so we hard-code it here.
 */
4809 adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16;
4811 adap->params.nsched_cls = val[0];
4814 /* query params related to active filter region */
4815 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
4816 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
4817 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
4818 /* If Active filter size is set we enable establishing
4819 * offload connection through firmware work request
4821 if ((val[0] != val[1]) && (ret >= 0)) {
4822 adap->flags |= CXGB4_FW_OFLD_CONN;
4823 adap->tids.aftid_base = val[0];
4824 adap->tids.aftid_end = val[1];
4827 /* If we're running on newer firmware, let it know that we're
4828 * prepared to deal with encapsulated CPL messages. Older
4829 * firmware won't understand this and we'll just get
4830 * unencapsulated messages ...
4832 params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
4834 (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
4837 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
4838 * capability. Earlier versions of the firmware didn't have the
4839 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
4840 * permission to use ULPTX MEMWRITE DSGL.
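/* A note on the recurring query pattern below (a sketch of the
 * convention, not new logic): optional device parameters are probed
 * with t4_query_params() and any failure is treated as "feature not
 * supported":
 *
 *	params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
 *	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
 *	supported = (ret == 0 && val[0] != 0);
 *
 * so firmware old enough to reject the query simply leaves the
 * capability flag false.
 */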
4842 if (is_t4(adap->params.chip)) {
4843 adap->params.ulptx_memwrite_dsgl = false;
4845 params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
4846 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
4848 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
4851 /* See if FW supports FW_RI_FR_NSMR_TPTE_WR work request */
4852 params[0] = FW_PARAM_DEV(RI_FR_NSMR_TPTE_WR);
4853 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
4855 adap->params.fr_nsmr_tpte_wr_support = (ret == 0 && val[0] != 0);
4857 /* See if FW supports FW_FILTER2 work request */
4858 if (is_t4(adap->params.chip)) {
4859 adap->params.filter2_wr_support = 0;
4861 params[0] = FW_PARAM_DEV(FILTER2_WR);
4862 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
4864 adap->params.filter2_wr_support = (ret == 0 && val[0] != 0);
4867 /* Check if FW supports returning the vin and smt index.
4868 * If this is not supported, the driver will derive
4869 * these values from the viid.
 */
4871 params[0] = FW_PARAM_DEV(OPAQUE_VIID_SMT_EXTN);
4872 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
4874 adap->params.viid_smt_extn_support = (ret == 0 && val[0] != 0);
4877 * Get device capabilities so we can determine what resources we need
 * to manage.
 */
4880 memset(&caps_cmd, 0, sizeof(caps_cmd));
4881 caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4882 FW_CMD_REQUEST_F | FW_CMD_READ_F);
4883 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4884 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
4889 /* Hash filter support has some mandatory register settings that must
4890 * be tested, and that test needs to know whether offload is enabled,
4891 * hence we check and set it here.
 */
4893 if (caps_cmd.ofldcaps)
4894 adap->params.offload = 1;
4896 if (caps_cmd.ofldcaps ||
4897 (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) ||
4898 (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_ETHOFLD))) {
4899 /* query offload-related parameters */
4900 params[0] = FW_PARAM_DEV(NTID);
4901 params[1] = FW_PARAM_PFVF(SERVER_START);
4902 params[2] = FW_PARAM_PFVF(SERVER_END);
4903 params[3] = FW_PARAM_PFVF(TDDP_START);
4904 params[4] = FW_PARAM_PFVF(TDDP_END);
4905 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
4906 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
4910 adap->tids.ntids = val[0];
4911 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
4912 adap->tids.stid_base = val[1];
4913 adap->tids.nstids = val[2] - val[1] + 1;
4915 * Set up the server filter region. Divide the available filter
4916 * region into two parts: regular filters get 1/3rd and server
4917 * filters get 2/3rd. This is only enabled if the workaround
 * path is enabled.
4919 * 1. Regular filters.
4920 * 2. Server filters: these are special filters used to
4921 * redirect SYN packets to the offload queue.
 */
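/* Worked example with hypothetical numbers: with nftids = 300,
 * DIV_ROUND_UP(300, 3) = 100, so sftid_base = ftid_base + 100,
 * nsftids = 300 - 100 = 200 server filters, and the regular filter
 * count shrinks to sftid_base - ftid_base = 100.
 */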
4923 if (adap->flags & CXGB4_FW_OFLD_CONN && !is_bypass(adap)) {
4924 adap->tids.sftid_base = adap->tids.ftid_base +
4925 DIV_ROUND_UP(adap->tids.nftids, 3);
4926 adap->tids.nsftids = adap->tids.nftids -
4927 DIV_ROUND_UP(adap->tids.nftids, 3);
4928 adap->tids.nftids = adap->tids.sftid_base -
4929 adap->tids.ftid_base;
4931 adap->vres.ddp.start = val[3];
4932 adap->vres.ddp.size = val[4] - val[3] + 1;
4933 adap->params.ofldq_wr_cred = val[5];
4935 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) {
4936 init_hash_filter(adap);
4938 adap->num_ofld_uld += 1;
4941 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_ETHOFLD)) {
4942 params[0] = FW_PARAM_PFVF(ETHOFLD_START);
4943 params[1] = FW_PARAM_PFVF(ETHOFLD_END);
4944 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
4947 adap->tids.eotid_base = val[0];
4948 adap->tids.neotids = min_t(u32, MAX_ATIDS,
4949 val[1] - val[0] + 1);
4950 adap->params.ethofld = 1;
4954 if (caps_cmd.rdmacaps) {
4955 params[0] = FW_PARAM_PFVF(STAG_START);
4956 params[1] = FW_PARAM_PFVF(STAG_END);
4957 params[2] = FW_PARAM_PFVF(RQ_START);
4958 params[3] = FW_PARAM_PFVF(RQ_END);
4959 params[4] = FW_PARAM_PFVF(PBL_START);
4960 params[5] = FW_PARAM_PFVF(PBL_END);
4961 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
4965 adap->vres.stag.start = val[0];
4966 adap->vres.stag.size = val[1] - val[0] + 1;
4967 adap->vres.rq.start = val[2];
4968 adap->vres.rq.size = val[3] - val[2] + 1;
4969 adap->vres.pbl.start = val[4];
4970 adap->vres.pbl.size = val[5] - val[4] + 1;
4972 params[0] = FW_PARAM_PFVF(SRQ_START);
4973 params[1] = FW_PARAM_PFVF(SRQ_END);
4974 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
4977 adap->vres.srq.start = val[0];
4978 adap->vres.srq.size = val[1] - val[0] + 1;
4980 if (adap->vres.srq.size) {
4981 adap->srq = t4_init_srq(adap->vres.srq.size);
4983 dev_warn(&adap->pdev->dev, "could not allocate SRQ, continuing\n");
4986 params[0] = FW_PARAM_PFVF(SQRQ_START);
4987 params[1] = FW_PARAM_PFVF(SQRQ_END);
4988 params[2] = FW_PARAM_PFVF(CQ_START);
4989 params[3] = FW_PARAM_PFVF(CQ_END);
4990 params[4] = FW_PARAM_PFVF(OCQ_START);
4991 params[5] = FW_PARAM_PFVF(OCQ_END);
4992 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params,
4996 adap->vres.qp.start = val[0];
4997 adap->vres.qp.size = val[1] - val[0] + 1;
4998 adap->vres.cq.start = val[2];
4999 adap->vres.cq.size = val[3] - val[2] + 1;
5000 adap->vres.ocq.start = val[4];
5001 adap->vres.ocq.size = val[5] - val[4] + 1;
5003 params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
5004 params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
5005 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params,
5008 adap->params.max_ordird_qp = 8;
5009 adap->params.max_ird_adapter = 32 * adap->tids.ntids;
5012 adap->params.max_ordird_qp = val[0];
5013 adap->params.max_ird_adapter = val[1];
5015 dev_info(adap->pdev_dev,
5016 "max_ordird_qp %d max_ird_adapter %d\n",
5017 adap->params.max_ordird_qp,
5018 adap->params.max_ird_adapter);
5020 /* Enable write_with_immediate if FW supports it */
5021 params[0] = FW_PARAM_DEV(RDMA_WRITE_WITH_IMM);
5022 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params,
5024 adap->params.write_w_imm_support = (ret == 0 && val[0] != 0);
5026 /* Enable write_cmpl if FW supports it */
5027 params[0] = FW_PARAM_DEV(RI_WRITE_CMPL_WR);
5028 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params,
5030 adap->params.write_cmpl_support = (ret == 0 && val[0] != 0);
5031 adap->num_ofld_uld += 2;
5033 if (caps_cmd.iscsicaps) {
5034 params[0] = FW_PARAM_PFVF(ISCSI_START);
5035 params[1] = FW_PARAM_PFVF(ISCSI_END);
5036 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
5040 adap->vres.iscsi.start = val[0];
5041 adap->vres.iscsi.size = val[1] - val[0] + 1;
5042 if (is_t6(adap->params.chip)) {
5043 params[0] = FW_PARAM_PFVF(PPOD_EDRAM_START);
5044 params[1] = FW_PARAM_PFVF(PPOD_EDRAM_END);
5045 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
5048 adap->vres.ppod_edram.start = val[0];
5049 adap->vres.ppod_edram.size =
5050 val[1] - val[0] + 1;
5052 dev_info(adap->pdev_dev,
5053 "ppod edram start 0x%x end 0x%x size 0x%x\n",
5055 adap->vres.ppod_edram.size);
5058 /* LIO target and cxgb4i initiator */
5059 adap->num_ofld_uld += 2;
5061 if (caps_cmd.cryptocaps) {
5062 if (ntohs(caps_cmd.cryptocaps) &
5063 FW_CAPS_CONFIG_CRYPTO_LOOKASIDE) {
5064 params[0] = FW_PARAM_PFVF(NCRYPTO_LOOKASIDE);
5065 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5071 adap->vres.ncrypto_fc = val[0];
5073 adap->num_ofld_uld += 1;
5075 if (ntohs(caps_cmd.cryptocaps) &
5076 FW_CAPS_CONFIG_TLS_INLINE) {
5077 params[0] = FW_PARAM_PFVF(TLS_START);
5078 params[1] = FW_PARAM_PFVF(TLS_END);
5079 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5083 adap->vres.key.start = val[0];
5084 adap->vres.key.size = val[1] - val[0] + 1;
5087 adap->params.crypto = ntohs(caps_cmd.cryptocaps);
5090 /* The MTU/MSS Table is initialized by now, so load their values. If
5091 * we're initializing the adapter, then we'll make any modifications
5092 * we want to the MTU/MSS Table and also initialize the congestion
 * parameters.
 */
5095 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
5096 if (state != DEV_STATE_INIT) {
5099 /* The default MTU Table contains values 1492 and 1500.
5100 * However, for TCP, it's better to have two values which are
5101 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
5102 * This allows us to have a TCP Data Payload which is a
5103 * multiple of 8 regardless of what combination of TCP Options
5104 * are in use (always a multiple of 4 bytes) which is
5105 * important for performance reasons. For instance, if no
5106 * options are in use, then we have a 20-byte IP header and a
5107 * 20-byte TCP header. In this case, a 1500-byte MSS would
5108 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
5109 * which is not a multiple of 8. So using an MSS of 1488 in
5110 * this case results in a TCP Data Payload of 1448 bytes which
5111 * is a multiple of 8. On the other hand, if 12-byte TCP Time
5112 * Stamps have been negotiated, then an MTU of 1500 bytes
5113 * results in a TCP Data Payload of 1448 bytes which, as
5114 * above, is a multiple of 8 bytes ...
5116 for (i = 0; i < NMTUS; i++)
5117 if (adap->params.mtus[i] == 1492) {
5118 adap->params.mtus[i] = 1488;
5122 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5123 adap->params.b_wnd);
5125 t4_init_sge_params(adap);
5126 adap->flags |= CXGB4_FW_OK;
5127 t4_init_tp_params(adap, true);
5131 * Something bad happened. If a command timed out or failed with EIO,
5132 * the FW is not operating within its spec or something catastrophic
5133 * happened to the HW/FW; stop issuing commands.
 */
bye:
5136 adap_free_hma_mem(adap);
5137 kfree(adap->sge.egr_map);
5138 kfree(adap->sge.ingr_map);
5139 kfree(adap->sge.starving_fl);
5140 kfree(adap->sge.txq_maperr);
5141 #ifdef CONFIG_DEBUG_FS
5142 kfree(adap->sge.blocked_fl);
5144 if (ret != -ETIMEDOUT && ret != -EIO)
5145 t4_fw_bye(adap, adap->mbox);
5151 static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
5152 pci_channel_state_t state)
5155 struct adapter *adap = pci_get_drvdata(pdev);
5161 adap->flags &= ~CXGB4_FW_OK;
5162 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
5163 spin_lock(&adap->stats_lock);
5164 for_each_port(adap, i) {
5165 struct net_device *dev = adap->port[i];
5167 netif_device_detach(dev);
5168 netif_carrier_off(dev);
5171 spin_unlock(&adap->stats_lock);
5172 disable_interrupts(adap);
5173 if (adap->flags & CXGB4_FULL_INIT_DONE)
5176 if ((adap->flags & CXGB4_DEV_ENABLED)) {
5177 pci_disable_device(pdev);
5178 adap->flags &= ~CXGB4_DEV_ENABLED;
5180 out: return state == pci_channel_io_perm_failure ?
5181 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
5184 static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
5187 struct fw_caps_config_cmd c;
5188 struct adapter *adap = pci_get_drvdata(pdev);
5191 pci_restore_state(pdev);
5192 pci_save_state(pdev);
5193 return PCI_ERS_RESULT_RECOVERED;
5196 if (!(adap->flags & CXGB4_DEV_ENABLED)) {
5197 if (pci_enable_device(pdev)) {
5198 dev_err(&pdev->dev, "Cannot reenable PCI "
5199 "device after reset\n");
5200 return PCI_ERS_RESULT_DISCONNECT;
5202 adap->flags |= CXGB4_DEV_ENABLED;
5205 pci_set_master(pdev);
5206 pci_restore_state(pdev);
5207 pci_save_state(pdev);
5209 if (t4_wait_dev_ready(adap->regs) < 0)
5210 return PCI_ERS_RESULT_DISCONNECT;
5211 if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
5212 return PCI_ERS_RESULT_DISCONNECT;
5213 adap->flags |= CXGB4_FW_OK;
5214 if (adap_init1(adap, &c))
5215 return PCI_ERS_RESULT_DISCONNECT;
5217 for_each_port(adap, i) {
5218 struct port_info *pi = adap2pinfo(adap, i);
5219 u8 vivld = 0, vin = 0;
5221 ret = t4_alloc_vi(adap, adap->mbox, pi->tx_chan, adap->pf, 0, 1,
5222 NULL, NULL, &vivld, &vin);
5224 return PCI_ERS_RESULT_DISCONNECT;
5226 pi->xact_addr_filt = -1;
5227 /* If fw supports returning the VIN as part of FW_VI_CMD,
5228 * save the returned values.
5230 if (adap->params.viid_smt_extn_support) {
5234 /* Retrieve the values from VIID */
5235 pi->vivld = FW_VIID_VIVLD_G(pi->viid);
5236 pi->vin = FW_VIID_VIN_G(pi->viid);
5240 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5241 adap->params.b_wnd);
5244 return PCI_ERS_RESULT_DISCONNECT;
5245 return PCI_ERS_RESULT_RECOVERED;
5248 static void eeh_resume(struct pci_dev *pdev)
5251 struct adapter *adap = pci_get_drvdata(pdev);
5257 for_each_port(adap, i) {
5258 struct net_device *dev = adap->port[i];
5260 if (netif_running(dev)) {
5262 cxgb_set_rxmode(dev);
5264 netif_device_attach(dev);
5270 static void eeh_reset_prepare(struct pci_dev *pdev)
5272 struct adapter *adapter = pci_get_drvdata(pdev);
5275 if (adapter->pf != 4)
5278 adapter->flags &= ~CXGB4_FW_OK;
5280 notify_ulds(adapter, CXGB4_STATE_DOWN);
5282 for_each_port(adapter, i)
5283 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
5284 cxgb_close(adapter->port[i]);
5286 disable_interrupts(adapter);
5287 cxgb4_free_mps_ref_entries(adapter);
5289 adap_free_hma_mem(adapter);
5291 if (adapter->flags & CXGB4_FULL_INIT_DONE)
5295 static void eeh_reset_done(struct pci_dev *pdev)
5297 struct adapter *adapter = pci_get_drvdata(pdev);
5300 if (adapter->pf != 4)
5303 err = t4_wait_dev_ready(adapter->regs);
5305 dev_err(adapter->pdev_dev,
5306 "Device not ready, err %d", err);
5310 setup_memwin(adapter);
5312 err = adap_init0(adapter, 1);
5314 dev_err(adapter->pdev_dev,
5315 "Adapter init failed, err %d", err);
5319 setup_memwin_rdma(adapter);
5321 if (adapter->flags & CXGB4_FW_OK) {
5322 err = t4_port_init(adapter, adapter->pf, adapter->pf, 0);
5324 dev_err(adapter->pdev_dev,
5325 "Port init failed, err %d", err);
5330 err = cfg_queues(adapter);
5332 dev_err(adapter->pdev_dev,
5333 "Config queues failed, err %d", err);
5337 cxgb4_init_mps_ref_entries(adapter);
5339 err = setup_fw_sge_queues(adapter);
5341 dev_err(adapter->pdev_dev,
5342 "FW sge queue allocation failed, err %d", err);
5346 for_each_port(adapter, i)
5347 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
5348 cxgb_open(adapter->port[i]);
5351 static const struct pci_error_handlers cxgb4_eeh = {
5352 .error_detected = eeh_err_detected,
5353 .slot_reset = eeh_slot_reset,
5354 .resume = eeh_resume,
5355 .reset_prepare = eeh_reset_prepare,
5356 .reset_done = eeh_reset_done,
5359 /* Return true if the Link Configuration supports "High Speeds" (those
 * greater than 1Gb/s).
 */
5362 static inline bool is_x_10g_port(const struct link_config *lc)
5364 unsigned int speeds, high_speeds;
5366 speeds = FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_G(lc->pcaps));
5367 high_speeds = speeds &
5368 ~(FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G);
5370 return high_speeds != 0;
5373 /* Perform default configuration of DMA queues depending on the number and type
5374 * of ports we found and the number of available CPUs. Most settings can be
5375 * modified by the admin prior to actual use.
5377 static int cfg_queues(struct adapter *adap)
5379 u32 avail_qsets, avail_eth_qsets, avail_uld_qsets;
5380 u32 i, n10g = 0, qidx = 0, n1g = 0;
5381 u32 ncpus = num_online_cpus();
5382 u32 niqflint, neq, num_ulds;
5383 struct sge *s = &adap->sge;
5386 /* Reduce memory usage in kdump environment, disable all offload. */
5387 if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) {
5388 adap->params.offload = 0;
5389 adap->params.crypto = 0;
5390 adap->params.ethofld = 0;
5393 /* Calculate the number of Ethernet Queue Sets available based on
5394 * resources provisioned for us. We always have an Asynchronous
5395 * Firmware Event Ingress Queue. If we're operating in MSI or Legacy
5396 * IRQ Pin Interrupt mode, then we'll also have a Forwarded Interrupt
5397 * Ingress Queue. Meanwhile, we need two Egress Queues for each
5398 * Queue Set: one for the Free List and one for the Ethernet TX Queue.
5400 * Note that we should also take into account all of the various
5401 * Offload Queues. But, in any situation where we're operating in
5402 * a Resource Constrained Provisioning environment, doing any Offload
5403 * at all is problematic ...
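/* Worked example with hypothetical provisioning: given
 * pfres.niqflint = 65 and pfres.neq = 128 under MSI-X, we get
 * niqflint = 65 - 1 = 64 (one Ingress Queue reserved for the
 * asynchronous Firmware Event Queue; MSI/INTx would cost one more for
 * the Forwarded Interrupt Queue) and neq = 128 / 2 = 64 (two Egress
 * Queues, Free List plus TX, per Queue Set), so
 * avail_qsets = min(64, 64) = 64.
 */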
5405 niqflint = adap->params.pfres.niqflint - 1;
5406 if (!(adap->flags & CXGB4_USING_MSIX))
5408 neq = adap->params.pfres.neq / 2;
5409 avail_qsets = min(niqflint, neq);
5411 if (avail_qsets < adap->params.nports) {
5412 dev_err(adap->pdev_dev, "avail_eth_qsets=%d < nports=%d\n",
5413 avail_qsets, adap->params.nports);
5417 /* Count the number of 10Gb/s or better ports */
5418 for_each_port(adap, i)
5419 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
5421 avail_eth_qsets = min_t(u32, avail_qsets, MAX_ETH_QSETS);
5423 /* We default to 1 queue per non-10G port and up to # of cores queues
 * per 10G port.
 */
5427 q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
5429 n1g = adap->params.nports - n10g;
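/* Worked example with hypothetical numbers: 4 ports of which
 * n10g = 2 run at 10Gb/s or better and avail_eth_qsets = 32. The two
 * slower ports keep one Queue Set each, leaving
 * q10g = (32 - 2) / 2 = 15 per fast port before any capping below.
 */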
5430 #ifdef CONFIG_CHELSIO_T4_DCB
5431 /* For Data Center Bridging support we need to be able to support up
5432 * to 8 Traffic Priorities; each of which will be assigned to its
5433 * own TX Queue in order to prevent Head-Of-Line Blocking.
5436 if (adap->params.nports * 8 > avail_eth_qsets) {
5437 dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n",
5438 avail_eth_qsets, adap->params.nports * 8);
5442 if (adap->params.nports * ncpus < avail_eth_qsets)
5443 q10g = max(8U, ncpus);
5445 q10g = max(8U, q10g);
5447 while ((q10g * n10g) > (avail_eth_qsets - n1g * q1g))
5450 #else /* !CONFIG_CHELSIO_T4_DCB */
5452 q10g = min(q10g, ncpus);
5453 #endif /* !CONFIG_CHELSIO_T4_DCB */
5454 if (is_kdump_kernel()) {
5459 for_each_port(adap, i) {
5460 struct port_info *pi = adap2pinfo(adap, i);
5462 pi->first_qset = qidx;
5463 pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : q1g;
5468 s->max_ethqsets = qidx; /* MSI-X may lower it later */
5469 avail_qsets -= qidx;
5472 /* For offload we use 1 queue/channel if all ports are up to 1G,
5473 * otherwise we divide all available queues amongst the channels
5474 * capped by the number of available cores.
5476 num_ulds = adap->num_uld + adap->num_ofld_uld;
5477 i = min_t(u32, MAX_OFLD_QSETS, ncpus);
5478 avail_uld_qsets = roundup(i, adap->params.nports);
5479 if (avail_qsets < num_ulds * adap->params.nports) {
5480 adap->params.offload = 0;
5481 adap->params.crypto = 0;
5483 } else if (avail_qsets < num_ulds * avail_uld_qsets || !n10g) {
5484 s->ofldqsets = adap->params.nports;
5486 s->ofldqsets = avail_uld_qsets;
5489 avail_qsets -= num_ulds * s->ofldqsets;
5492 /* ETHOFLD Queues used for QoS offload should follow the same
5493 * allocation scheme as normal Ethernet Queues.
 */
5495 if (is_ethofld(adap)) {
5496 if (avail_qsets < s->max_ethqsets) {
5497 adap->params.ethofld = 0;
5500 s->eoqsets = s->max_ethqsets;
5502 avail_qsets -= s->eoqsets;
5505 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
5506 struct sge_eth_rxq *r = &s->ethrxq[i];
5508 init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
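		/* The literal arguments above assume the usual
		 * init_rspq(adap, q, us, cnt, size, iqe_size) signature (the
		 * function body is outside this excerpt): a 5 us interrupt
		 * holdoff, a 10-packet count threshold, and 1024
		 * response-queue entries of 64 bytes each.
		 */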
5512 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
5513 s->ethtxq[i].q.size = 1024;
5515 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
5516 s->ctrlq[i].q.size = 512;
5518 if (!is_t4(adap->params.chip))
5519 s->ptptxq.q.size = 8;
5521 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
5522 init_rspq(adap, &s->intrq, 0, 1, 512, 64);
5528 * Reduce the number of Ethernet queues across all ports to at most n.
5529 * n provides at least one queue per port.
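/* Worked example with hypothetical counts: four ports with nqsets
 * {4, 4, 4, 4} (16 total) reduced to n = 10 lose one Queue Set per
 * port per round-robin pass, ending at {2, 2, 3, 3}.
 */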
5531 static void reduce_ethqs(struct adapter *adap, int n)
5534 struct port_info *pi;
5536 while (n < adap->sge.ethqsets)
5537 for_each_port(adap, i) {
5538 pi = adap2pinfo(adap, i);
5539 if (pi->nqsets > 1) {
5541 adap->sge.ethqsets--;
5542 if (adap->sge.ethqsets <= n)
5548 for_each_port(adap, i) {
5549 pi = adap2pinfo(adap, i);
5555 static int alloc_msix_info(struct adapter *adap, u32 num_vec)
5557 struct msix_info *msix_info;
5559 msix_info = kcalloc(num_vec, sizeof(*msix_info), GFP_KERNEL);
5563 adap->msix_bmap.msix_bmap = kcalloc(BITS_TO_LONGS(num_vec),
5564 sizeof(long), GFP_KERNEL);
5565 if (!adap->msix_bmap.msix_bmap) {
5570 spin_lock_init(&adap->msix_bmap.lock);
5571 adap->msix_bmap.mapsize = num_vec;
5573 adap->msix_info = msix_info;
5577 static void free_msix_info(struct adapter *adap)
5579 kfree(adap->msix_bmap.msix_bmap);
5580 kfree(adap->msix_info);
5583 int cxgb4_get_msix_idx_from_bmap(struct adapter *adap)
5585 struct msix_bmap *bmap = &adap->msix_bmap;
5586 unsigned int msix_idx;
5587 unsigned long flags;
5589 spin_lock_irqsave(&bmap->lock, flags);
5590 msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
5591 if (msix_idx < bmap->mapsize) {
5592 __set_bit(msix_idx, bmap->msix_bmap);
5594 spin_unlock_irqrestore(&bmap->lock, flags);
5598 spin_unlock_irqrestore(&bmap->lock, flags);
5602 void cxgb4_free_msix_idx_in_bmap(struct adapter *adap,
5603 unsigned int msix_idx)
5605 struct msix_bmap *bmap = &adap->msix_bmap;
5606 unsigned long flags;
5608 spin_lock_irqsave(&bmap->lock, flags);
5609 __clear_bit(msix_idx, bmap->msix_bmap);
5610 spin_unlock_irqrestore(&bmap->lock, flags);
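/* Illustrative alloc/free pairing (hypothetical caller, not code in
 * this driver): grab a free vector index, use its IRQ, and return the
 * index to the bitmap on teardown. On exhaustion the allocator is
 * assumed to return a negative errno (its failure branch is elided
 * above).
 *
 *	idx = cxgb4_get_msix_idx_from_bmap(adap);
 *	if (idx < 0)
 *		return idx;
 *	err = request_irq(adap->msix_info[idx].vec, handler, 0, name, q);
 *	...
 *	cxgb4_free_msix_idx_in_bmap(adap, idx);
 */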
5613 /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
5614 #define EXTRA_VECS 2
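/* Illustrative vector budget (hypothetical 2-port, NIC-only adapter
 * with max_ethqsets = 16): want = 16 + EXTRA_VECS = 18, with a floor
 * of need = eth_need + EXTRA_VECS; the ULD and ETHOFLD queue groups
 * below add their own contributions to both totals.
 */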
5616 static int enable_msix(struct adapter *adap)
5618 u32 eth_need, uld_need = 0, ethofld_need = 0;
5619 u32 ethqsets = 0, ofldqsets = 0, eoqsets = 0;
5620 u8 num_uld = 0, nchan = adap->params.nports;
5621 u32 i, want, need, num_vec;
5622 struct sge *s = &adap->sge;
5623 struct msix_entry *entries;
5624 struct port_info *pi;
5627 want = s->max_ethqsets;
5628 #ifdef CONFIG_CHELSIO_T4_DCB
5629 /* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
 * each port.
 */
5638 num_uld = adap->num_ofld_uld + adap->num_uld;
5639 want += num_uld * s->ofldqsets;
5640 uld_need = num_uld * nchan;
5644 if (is_ethofld(adap)) {
5646 ethofld_need = eth_need;
5647 need += ethofld_need;
5653 entries = kmalloc_array(want, sizeof(*entries), GFP_KERNEL);
5657 for (i = 0; i < want; i++)
5658 entries[i].entry = i;
5660 allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
5661 if (allocated < 0) {
5662 /* Disable offload and attempt to get vectors for NIC
 * only mode.
 */
5665 want = s->max_ethqsets + EXTRA_VECS;
5666 need = eth_need + EXTRA_VECS;
5667 allocated = pci_enable_msix_range(adap->pdev, entries,
5669 if (allocated < 0) {
5670 dev_info(adap->pdev_dev,
5671 "Disabling MSI-X due to insufficient MSI-X vectors\n");
5676 dev_info(adap->pdev_dev,
5677 "Disabling offload due to insufficient MSI-X vectors\n");
5678 adap->params.offload = 0;
5679 adap->params.crypto = 0;
5680 adap->params.ethofld = 0;
5687 num_vec = allocated;
5688 if (num_vec < want) {
5689 /* Distribute available vectors to the various queue groups.
5690 * Every group gets its minimum requirement and NIC gets top
5691 * priority for leftovers.
5693 ethqsets = eth_need;
5696 if (is_ethofld(adap))
5697 eoqsets = ethofld_need;
5701 if (num_vec < eth_need + ethofld_need ||
5702 ethqsets > s->max_ethqsets)
5705 for_each_port(adap, i) {
5706 pi = adap2pinfo(adap, i);
5721 if (num_vec < uld_need ||
5722 ofldqsets > s->ofldqsets)
5726 num_vec -= uld_need;
5730 ethqsets = s->max_ethqsets;
5732 ofldqsets = s->ofldqsets;
5733 if (is_ethofld(adap))
5734 eoqsets = s->eoqsets;
5737 if (ethqsets < s->max_ethqsets) {
5738 s->max_ethqsets = ethqsets;
5739 reduce_ethqs(adap, ethqsets);
5743 s->ofldqsets = ofldqsets;
5744 s->nqs_per_uld = s->ofldqsets;
5747 if (is_ethofld(adap))
5748 s->eoqsets = eoqsets;
5751 ret = alloc_msix_info(adap, allocated);
5753 goto out_disable_msix;
5755 for (i = 0; i < allocated; i++) {
5756 adap->msix_info[i].vec = entries[i].vector;
5757 adap->msix_info[i].idx = i;
5760 dev_info(adap->pdev_dev,
5761 "%d MSI-X vectors allocated, nic %d eoqsets %d per uld %d\n",
5762 allocated, s->max_ethqsets, s->eoqsets, s->nqs_per_uld);
5768 pci_disable_msix(adap->pdev);
5777 static int init_rss(struct adapter *adap)
5782 err = t4_init_rss_mode(adap, adap->mbox);
5786 for_each_port(adap, i) {
5787 struct port_info *pi = adap2pinfo(adap, i);
5789 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
5796 /* Dump basic information about the adapter */
5797 static void print_adapter_info(struct adapter *adapter)
5799 /* Hardware/Firmware/etc. Version/Revision IDs */
5800 t4_dump_version_info(adapter);
5802 /* Software/Hardware configuration */
5803 dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n",
5804 is_offload(adapter) ? "R" : "",
5805 ((adapter->flags & CXGB4_USING_MSIX) ? "MSI-X" :
5806 (adapter->flags & CXGB4_USING_MSI) ? "MSI" : ""),
5807 is_offload(adapter) ? "Offload" : "non-Offload");
5810 static void print_port_info(const struct net_device *dev)
5814 const struct port_info *pi = netdev_priv(dev);
5815 const struct adapter *adap = pi->adapter;
5817 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M)
5818 bufp += sprintf(bufp, "100M/");
5819 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G)
5820 bufp += sprintf(bufp, "1G/");
5821 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G)
5822 bufp += sprintf(bufp, "10G/");
5823 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G)
5824 bufp += sprintf(bufp, "25G/");
5825 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G)
5826 bufp += sprintf(bufp, "40G/");
5827 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G)
5828 bufp += sprintf(bufp, "50G/");
5829 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G)
5830 bufp += sprintf(bufp, "100G/");
5831 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_200G)
5832 bufp += sprintf(bufp, "200G/");
5833 if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_400G)
5834 bufp += sprintf(bufp, "400G/");
5837 sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
5839 netdev_info(dev, "%s: Chelsio %s (%s) %s\n",
5840 dev->name, adap->params.vpd.id, adap->name, buf);
5844 * Free the following resources:
5845 * - memory used for tables
5848 * - resources FW is holding for us
5850 static void free_some_resources(struct adapter *adapter)
5854 kvfree(adapter->smt);
5855 kvfree(adapter->l2t);
5856 kvfree(adapter->srq);
5857 t4_cleanup_sched(adapter);
5858 kvfree(adapter->tids.tid_tab);
5859 cxgb4_cleanup_tc_matchall(adapter);
5860 cxgb4_cleanup_tc_mqprio(adapter);
5861 cxgb4_cleanup_tc_flower(adapter);
5862 cxgb4_cleanup_tc_u32(adapter);
5863 kfree(adapter->sge.egr_map);
5864 kfree(adapter->sge.ingr_map);
5865 kfree(adapter->sge.starving_fl);
5866 kfree(adapter->sge.txq_maperr);
5867 #ifdef CONFIG_DEBUG_FS
5868 kfree(adapter->sge.blocked_fl);
5870 disable_msi(adapter);
5872 for_each_port(adapter, i)
5873 if (adapter->port[i]) {
5874 struct port_info *pi = adap2pinfo(adapter, i);
5877 t4_free_vi(adapter, adapter->mbox, adapter->pf,
5879 kfree(adap2pinfo(adapter, i)->rss);
5880 free_netdev(adapter->port[i]);
5882 if (adapter->flags & CXGB4_FW_OK)
5883 t4_fw_bye(adapter, adapter->pf);
5886 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN | \
5888 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
5889 NETIF_F_GRO | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
5890 #define SEGMENT_SIZE 128
5892 static int t4_get_chip_type(struct adapter *adap, int ver)
5894 u32 pl_rev = REV_G(t4_read_reg(adap, PL_REV_A));
5898 return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
5900 return CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
5902 return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
5909 #ifdef CONFIG_PCI_IOV
5910 static void cxgb4_mgmt_setup(struct net_device *dev)
5912 dev->type = ARPHRD_NONE;
5914 dev->hard_header_len = 0;
5916 dev->tx_queue_len = 0;
5917 dev->flags |= IFF_NOARP;
5918 dev->priv_flags |= IFF_NO_QUEUE;
5920 /* Initialize the device structure. */
5921 dev->netdev_ops = &cxgb4_mgmt_netdev_ops;
5922 dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops;
5925 static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
5927 struct adapter *adap = pci_get_drvdata(pdev);
5929 int current_vfs = pci_num_vf(pdev);
5932 pcie_fw = readl(adap->regs + PCIE_FW_A);
5933 /* Check if fw is initialized */
5934 if (!(pcie_fw & PCIE_FW_INIT_F)) {
5935 dev_warn(&pdev->dev, "Device not initialized\n");
5939 /* If any of the VFs is already assigned to a Guest OS, then
5940 * SR-IOV cannot be modified.
 */
5942 if (current_vfs && pci_vfs_assigned(pdev)) {
5944 "Cannot modify SR-IOV while VFs are assigned\n");
5947 /* Note that the upper-level code ensures that we're never called with
5948 * a non-zero "num_vfs" when we already have VFs instantiated. But
5949 * it never hurts to code defensively.
5951 if (num_vfs != 0 && current_vfs != 0)
5954 /* Nothing to do for no change. */
5955 if (num_vfs == current_vfs)
5958 /* Disable SRIOV when zero is passed. */
5960 pci_disable_sriov(pdev);
5961 /* free VF Management Interface */
5962 unregister_netdev(adap->port[0]);
5963 free_netdev(adap->port[0]);
5964 adap->port[0] = NULL;
5966 /* free VF resources */
5968 kfree(adap->vfinfo);
5969 adap->vfinfo = NULL;
5974 struct fw_pfvf_cmd port_cmd, port_rpl;
5975 struct net_device *netdev;
5976 unsigned int pmask, port;
5977 struct pci_dev *pbridge;
5978 struct port_info *pi;
5979 char name[IFNAMSIZ];
5983 /* If we want to instantiate Virtual Functions, then our
5984 * parent bridge's PCI-E needs to support Alternative Routing
5985 * ID (ARI) because our VFs will show up at function offset 8
 * and above.
 */
5988 pbridge = pdev->bus->self;
5989 pcie_capability_read_word(pbridge, PCI_EXP_FLAGS, &flags);
5990 pcie_capability_read_dword(pbridge, PCI_EXP_DEVCAP2, &devcap2);
5992 if ((flags & PCI_EXP_FLAGS_VERS) < 2 ||
5993 !(devcap2 & PCI_EXP_DEVCAP2_ARI)) {
5994 /* Our parent bridge does not support ARI so issue a
5995 * warning and skip instantiating the VFs. They
5996 * won't be reachable.
5998 dev_warn(&pdev->dev, "Parent bridge %02x:%02x.%x doesn't support ARI; can't instantiate Virtual Functions\n",
5999 pbridge->bus->number, PCI_SLOT(pbridge->devfn),
6000 PCI_FUNC(pbridge->devfn));
6003 memset(&port_cmd, 0, sizeof(port_cmd));
6004 port_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
6007 FW_PFVF_CMD_PFN_V(adap->pf) |
6008 FW_PFVF_CMD_VFN_V(0));
6009 port_cmd.retval_len16 = cpu_to_be32(FW_LEN16(port_cmd));
6010 err = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd),
6014 pmask = FW_PFVF_CMD_PMASK_G(be32_to_cpu(port_rpl.type_to_neq));
6015 port = ffs(pmask) - 1;
6016 /* Allocate VF Management Interface. */
6017 snprintf(name, IFNAMSIZ, "mgmtpf%d,%d", adap->adap_idx,
6019 netdev = alloc_netdev(sizeof(struct port_info),
6020 name, NET_NAME_UNKNOWN, cxgb4_mgmt_setup);
6024 pi = netdev_priv(netdev);
6028 SET_NETDEV_DEV(netdev, &pdev->dev);
6030 adap->port[0] = netdev;
6033 err = register_netdev(adap->port[0]);
6035 pr_info("Unable to register VF mgmt netdev %s\n", name);
6036 free_netdev(adap->port[0]);
6037 adap->port[0] = NULL;
6040 /* Allocate and set up VF Information. */
6041 adap->vfinfo = kcalloc(pci_sriov_get_totalvfs(pdev),
6042 sizeof(struct vf_info), GFP_KERNEL);
6043 if (!adap->vfinfo) {
6044 unregister_netdev(adap->port[0]);
6045 free_netdev(adap->port[0]);
6046 adap->port[0] = NULL;
6049 cxgb4_mgmt_fill_vf_station_mac_addr(adap);
6051 /* Instantiate the requested number of VFs. */
6052 err = pci_enable_sriov(pdev, num_vfs);
6054 pr_info("Unable to instantiate %d VFs\n", num_vfs);
6056 unregister_netdev(adap->port[0]);
6057 free_netdev(adap->port[0]);
6058 adap->port[0] = NULL;
6059 kfree(adap->vfinfo);
6060 adap->vfinfo = NULL;
6065 adap->num_vfs = num_vfs;
6068 #endif /* CONFIG_PCI_IOV */
6070 #if defined(CONFIG_CHELSIO_TLS_DEVICE)
6072 static int cxgb4_ktls_dev_add(struct net_device *netdev, struct sock *sk,
6073 enum tls_offload_ctx_dir direction,
6074 struct tls_crypto_info *crypto_info,
6077 struct adapter *adap = netdev2adap(netdev);
6080 mutex_lock(&uld_mutex);
6081 if (!adap->uld[CXGB4_ULD_CRYPTO].handle) {
6082 dev_err(adap->pdev_dev, "chcr driver is not loaded\n");
6087 if (!adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops) {
6088 dev_err(adap->pdev_dev,
6089 "chcr driver has no registered tlsdev_ops()\n");
6094 ret = cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_ENABLE);
6098 ret = adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops->tls_dev_add(netdev, sk,
6102 /* if there is a failure, clear the refcount */
6104 cxgb4_set_ktls_feature(adap,
6105 FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
6107 mutex_unlock(&uld_mutex);
6111 static void cxgb4_ktls_dev_del(struct net_device *netdev,
6112 struct tls_context *tls_ctx,
6113 enum tls_offload_ctx_dir direction)
6115 struct adapter *adap = netdev2adap(netdev);
6117 mutex_lock(&uld_mutex);
6118 if (!adap->uld[CXGB4_ULD_CRYPTO].handle) {
6119 dev_err(adap->pdev_dev, "chcr driver is not loaded\n");
6123 if (!adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops) {
6124 dev_err(adap->pdev_dev,
6125 "chcr driver has no registered tlsdev_ops\n");
6129 adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops->tls_dev_del(netdev, tls_ctx,
6131 cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
6134 mutex_unlock(&uld_mutex);
6137 static const struct tlsdev_ops cxgb4_ktls_ops = {
6138 .tls_dev_add = cxgb4_ktls_dev_add,
6139 .tls_dev_del = cxgb4_ktls_dev_del,
6141 #endif /* CONFIG_CHELSIO_TLS_DEVICE */
6143 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6145 struct net_device *netdev;
6146 struct adapter *adapter;
6147 static int adap_idx = 1;
6148 int s_qpp, qpp, num_seg;
6149 struct port_info *pi;
6150 bool highdma = false;
6151 enum chip_type chip;
6158 err = pci_request_regions(pdev, KBUILD_MODNAME);
6160 /* Just info, some other driver may have claimed the device. */
6161 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
6165 err = pci_enable_device(pdev);
6167 dev_err(&pdev->dev, "cannot enable PCI device\n");
6168 goto out_release_regions;
6171 regs = pci_ioremap_bar(pdev, 0);
6173 dev_err(&pdev->dev, "cannot map device registers\n");
6175 goto out_disable_device;
6178 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
6181 goto out_unmap_bar0;
6184 adapter->regs = regs;
6185 err = t4_wait_dev_ready(regs);
6187 goto out_free_adapter;
6189 /* We control everything through one PF */
6190 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
6191 pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
6192 chip = t4_get_chip_type(adapter, CHELSIO_PCI_ID_VER(device_id));
6193 if ((int)chip < 0) {
6194 dev_err(&pdev->dev, "Device %d is not supported\n", device_id);
6196 goto out_free_adapter;
6198 chip_ver = CHELSIO_CHIP_VERSION(chip);
6199 func = chip_ver <= CHELSIO_T5 ?
6200 SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
6202 adapter->pdev = pdev;
6203 adapter->pdev_dev = &pdev->dev;
6204 adapter->name = pci_name(pdev);
6205 adapter->mbox = func;
6207 adapter->params.chip = chip;
6208 adapter->adap_idx = adap_idx;
6209 adapter->msg_enable = DFLT_MSG_ENABLE;
6210 adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
6211 (sizeof(struct mbox_cmd) *
6212 T4_OS_LOG_MBOX_CMDS),
6214 if (!adapter->mbox_log) {
6216 goto out_free_adapter;
6218 spin_lock_init(&adapter->mbox_lock);
6219 INIT_LIST_HEAD(&adapter->mlist.list);
6220 adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;
6221 pci_set_drvdata(pdev, adapter);
6223 if (func != ent->driver_data) {
6224 pci_disable_device(pdev);
6225 pci_save_state(pdev); /* to restore SR-IOV later */
6229 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
6231 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
6233 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
6234 "coherent allocations\n");
6235 goto out_free_adapter;
6238 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6240 dev_err(&pdev->dev, "no usable DMA configuration\n");
6241 goto out_free_adapter;
6245 pci_enable_pcie_error_reporting(pdev);
6246 pci_set_master(pdev);
6247 pci_save_state(pdev);
6249 adapter->workq = create_singlethread_workqueue("cxgb4");
6250 if (!adapter->workq) {
6252 goto out_free_adapter;
6255 /* PCI device has been enabled */
6256 adapter->flags |= CXGB4_DEV_ENABLED;
6257 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
6259 /* If possible, we use PCIe Relaxed Ordering Attribute to deliver
6260 * Ingress Packet Data to Free List Buffers in order to allow for
6261 * chipset performance optimizations between the Root Complex and
6262 * Memory Controllers. (Messages to the associated Ingress Queue
6263 * notifying new Packet Placement in the Free Lists Buffers will be
6264 * sent without the Relaxed Ordering Attribute, thus guaranteeing that
6265 * all preceding PCIe Transaction Layer Packets will be processed
6266 * first.) But some Root Complexes have various issues with Upstream
6267 * Transaction Layer Packets with the Relaxed Ordering Attribute set.
6268 * PCIe devices under such Root Complexes will have the Relaxed
6269 * Ordering bit cleared in their configuration space, so we check our
6270 * PCIe configuration space to see if it's flagged with advice against
6271 * using Relaxed Ordering.
 */
6273 if (!pcie_relaxed_ordering_enabled(pdev))
6274 adapter->flags |= CXGB4_ROOT_NO_RELAXED_ORDERING;
6276 spin_lock_init(&adapter->stats_lock);
6277 spin_lock_init(&adapter->tid_release_lock);
6278 spin_lock_init(&adapter->win0_lock);
6280 INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
6281 INIT_WORK(&adapter->db_full_task, process_db_full);
6282 INIT_WORK(&adapter->db_drop_task, process_db_drop);
6283 INIT_WORK(&adapter->fatal_err_notify_task, notify_fatal_err);
6285 err = t4_prep_adapter(adapter);
6287 goto out_free_adapter;
6289 if (is_kdump_kernel()) {
6290 /* Collect hardware state and append to /proc/vmcore */
6291 err = cxgb4_cudbg_vmcore_add_dump(adapter);
6293 dev_warn(adapter->pdev_dev,
6294 "Fail collecting vmcore device dump, err: %d. Continuing\n",
6300 if (!is_t4(adapter->params.chip)) {
6301 s_qpp = (QUEUESPERPAGEPF0_S +
6302 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
6304 qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
6305 SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
6306 num_seg = PAGE_SIZE / SEGMENT_SIZE;
6308 /* Each segment is 128B in size. Write coalescing is enabled only
6309 * when the SGE_EGRESS_QUEUES_PER_PAGE_PF register value for the
6310 * queue is less than the number of segments that can be
 * accommodated in a page.
 */
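		/* Illustrative check assuming a 4KB page:
		 * num_seg = 4096 / 128 = 32 segments, so any qpp value above
		 * 32 trips the error below.
		 */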
6313 if (qpp > num_seg) {
6315 "Incorrect number of egress queues per page\n");
6317 goto out_free_adapter;
6319 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
6320 pci_resource_len(pdev, 2));
6321 if (!adapter->bar2) {
6322 dev_err(&pdev->dev, "cannot map device bar2 region\n");
6324 goto out_free_adapter;
6328 setup_memwin(adapter);
6329 err = adap_init0(adapter, 0);
6330 #ifdef CONFIG_DEBUG_FS
6331 bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
6333 setup_memwin_rdma(adapter);
6337 /* configure SGE_STAT_CFG_A to read WC stats */
6338 if (!is_t4(adapter->params.chip))
6339 t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) |
6340 (is_t5(adapter->params.chip) ? STATMODE_V(0) :
6343 /* Initialize hash mac addr list */
6344 INIT_LIST_HEAD(&adapter->mac_hlist);
6346 for_each_port(adapter, i) {
6347 /* To support MQPRIO Offload, we need some extra
6348 * queues for the ETHOFLD TIDs. Keep it equal to
6349 * MAX_ATIDS for now. Once we connect to the firmware
6350 * later and query the EOTID parameters, we'll learn
6351 * the actual number of EOTIDs supported.
 */
6353 netdev = alloc_etherdev_mq(sizeof(struct port_info),
6354 MAX_ETH_QSETS + MAX_ATIDS);
6360 SET_NETDEV_DEV(netdev, &pdev->dev);
6362 adapter->port[i] = netdev;
6363 pi = netdev_priv(netdev);
6364 pi->adapter = adapter;
6365 pi->xact_addr_filt = -1;
6367 netdev->irq = pdev->irq;
6369 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
6370 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
6371 NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_GRO |
6372 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
6375 if (chip_ver > CHELSIO_T5) {
6376 netdev->hw_enc_features |= NETIF_F_IP_CSUM |
6379 NETIF_F_GSO_UDP_TUNNEL |
6380 NETIF_F_GSO_UDP_TUNNEL_CSUM |
6381 NETIF_F_TSO | NETIF_F_TSO6;
6383 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
6384 NETIF_F_GSO_UDP_TUNNEL_CSUM |
6385 NETIF_F_HW_TLS_RECORD;
6389 netdev->hw_features |= NETIF_F_HIGHDMA;
6390 netdev->features |= netdev->hw_features;
6391 netdev->vlan_features = netdev->features & VLAN_FEAT;
6392 #if defined(CONFIG_CHELSIO_TLS_DEVICE)
6393 if (pi->adapter->params.crypto & FW_CAPS_CONFIG_TLS_HW) {
6394 netdev->hw_features |= NETIF_F_HW_TLS_TX;
6395 netdev->tlsdev_ops = &cxgb4_ktls_ops;
6396 /* initialize the refcount */
6397 refcount_set(&pi->adapter->chcr_ktls.ktls_refcount, 0);
6400 netdev->priv_flags |= IFF_UNICAST_FLT;
6402 /* MTU range: 81 - 9600 */
6403 netdev->min_mtu = 81; /* accommodate SACK */
6404 netdev->max_mtu = MAX_MTU;
6406 netdev->netdev_ops = &cxgb4_netdev_ops;
6407 #ifdef CONFIG_CHELSIO_T4_DCB
6408 netdev->dcbnl_ops = &cxgb4_dcb_ops;
6409 cxgb4_dcb_state_init(netdev);
6410 cxgb4_dcb_version_init(netdev);
6412 cxgb4_set_ethtool_ops(netdev);
6415 cxgb4_init_ethtool_dump(adapter);
6417 pci_set_drvdata(pdev, adapter);
6419 if (adapter->flags & CXGB4_FW_OK) {
6420 err = t4_port_init(adapter, func, func, 0);
6423 } else if (adapter->params.nports == 1) {
6424 /* If we don't have a connection to the firmware -- possibly
6425 * because of an error -- grab the raw VPD parameters so we
6426 * can set the proper MAC Address on the debug network
6427 * interface that we've created.
6429 u8 hw_addr[ETH_ALEN];
6430 u8 *na = adapter->params.vpd.na;
6432 err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
6434 for (i = 0; i < ETH_ALEN; i++)
6435 hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
6436 hex2val(na[2 * i + 1]));
6437 t4_set_hw_addr(adapter, 0, hw_addr);
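		/* Illustrative decode (hypothetical VPD contents):
		 * na = "0007431B2C3D" yields hw_addr = 00:07:43:1b:2c:3d,
		 * i.e. each address byte is
		 * hex2val(high nibble) * 16 + hex2val(low nibble).
		 */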
6441 if (!(adapter->flags & CXGB4_FW_OK))
6442 goto fw_attach_fail;
6444 /* Configure queues and allocate tables now, they can be needed as
6445 * soon as the first register_netdev completes.
6447 err = cfg_queues(adapter);
6451 adapter->smt = t4_init_smt();
6452 if (!adapter->smt) {
6453 /* We tolerate a lack of SMT, giving up some functionality */
6454 dev_warn(&pdev->dev, "could not allocate SMT, continuing\n");
6457 adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
6458 if (!adapter->l2t) {
6459 /* We tolerate a lack of L2T, giving up some functionality */
6460 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
6461 adapter->params.offload = 0;
6464 #if IS_ENABLED(CONFIG_IPV6)
6465 if (chip_ver <= CHELSIO_T5 &&
6466 (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) {
6467 /* CLIP functionality is not present in hardware,
6468 * hence disable all offload features
6470 dev_warn(&pdev->dev,
6471 "CLIP not enabled in hardware, continuing\n");
6472 adapter->params.offload = 0;
6474 adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
6475 adapter->clipt_end);
6476 if (!adapter->clipt) {
6477 /* We tolerate a lack of clip_table, giving up
6478 * some functionality
6480 dev_warn(&pdev->dev,
6481 "could not allocate Clip table, continuing\n");
6482 adapter->params.offload = 0;
6487 for_each_port(adapter, i) {
6488 pi = adap2pinfo(adapter, i);
6489 pi->sched_tbl = t4_init_sched(adapter->params.nsched_cls);
6491 dev_warn(&pdev->dev,
6492 "could not activate scheduling on port %d\n",
6496 if (tid_init(&adapter->tids) < 0) {
6497 dev_warn(&pdev->dev, "could not allocate TID table, "
6499 adapter->params.offload = 0;
6501 adapter->tc_u32 = cxgb4_init_tc_u32(adapter);
6502 if (!adapter->tc_u32)
6503 dev_warn(&pdev->dev,
6504 "could not offload tc u32, continuing\n");
6506 if (cxgb4_init_tc_flower(adapter))
6507 dev_warn(&pdev->dev,
6508 "could not offload tc flower, continuing\n");
6510 if (cxgb4_init_tc_mqprio(adapter))
6511 dev_warn(&pdev->dev,
6512 "could not offload tc mqprio, continuing\n");
6514 if (cxgb4_init_tc_matchall(adapter))
6515 dev_warn(&pdev->dev,
6516 "could not offload tc matchall, continuing\n");
6519 if (is_offload(adapter) || is_hashfilter(adapter)) {
6520 if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
6521 u32 hash_base, hash_reg;
6523 if (chip_ver <= CHELSIO_T5) {
6524 hash_reg = LE_DB_TID_HASHBASE_A;
6525 hash_base = t4_read_reg(adapter, hash_reg);
6526 adapter->tids.hash_base = hash_base / 4;
6528 hash_reg = T6_LE_DB_HASH_TID_BASE_A;
6529 hash_base = t4_read_reg(adapter, hash_reg);
6530 adapter->tids.hash_base = hash_base;
6535 /* See what interrupts we'll be using */
6536 if (msi > 1 && enable_msix(adapter) == 0)
6537 adapter->flags |= CXGB4_USING_MSIX;
6538 else if (msi > 0 && pci_enable_msi(pdev) == 0) {
6539 adapter->flags |= CXGB4_USING_MSI;
6541 free_msix_info(adapter);
6544 /* check for PCI Express bandwidth capabilities */
6545 pcie_print_link_status(pdev);
6547 cxgb4_init_mps_ref_entries(adapter);
6549 err = init_rss(adapter);
6553 err = setup_non_data_intr(adapter);
6555 dev_err(adapter->pdev_dev,
6556 "Non Data interrupt allocation failed, err: %d\n", err);
6560 err = setup_fw_sge_queues(adapter);
6562 dev_err(adapter->pdev_dev,
6563 "FW sge queue allocation failed, err %d", err);
6569 * The card is now ready to go. If any errors occur during device
6570 * registration we do not fail the whole card but rather proceed only
6571 * with the ports we manage to register successfully. However we must
6572 * register at least one net device.
6574 for_each_port(adapter, i) {
6575 pi = adap2pinfo(adapter, i);
6576 adapter->port[i]->dev_port = pi->lport;
6577 netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
6578 netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
6580 netif_carrier_off(adapter->port[i]);
6582 err = register_netdev(adapter->port[i]);
6585 adapter->chan_map[pi->tx_chan] = i;
6586 print_port_info(adapter->port[i]);
6589 dev_err(&pdev->dev, "could not register any net devices\n");
6593 dev_warn(&pdev->dev, "only %d net devices registered\n", i);
6597 if (cxgb4_debugfs_root) {
6598 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
6599 cxgb4_debugfs_root);
6600 setup_debugfs(adapter);
6603 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
6604 pdev->needs_freset = 1;
6606 if (is_uld(adapter))
6607 cxgb4_uld_enable(adapter);
6609 if (!is_t4(adapter->params.chip))
6610 cxgb4_ptp_init(adapter);
6612 if (IS_REACHABLE(CONFIG_THERMAL) &&
6613 !is_t4(adapter->params.chip) && (adapter->flags & CXGB4_FW_OK))
6614 cxgb4_thermal_init(adapter);
6616 print_adapter_info(adapter);
6620 t4_free_sge_resources(adapter);
6621 free_some_resources(adapter);
6622 if (adapter->flags & CXGB4_USING_MSIX)
6623 free_msix_info(adapter);
6624 if (adapter->num_uld || adapter->num_ofld_uld)
6625 t4_uld_mem_free(adapter);
6627 if (!is_t4(adapter->params.chip))
6628 iounmap(adapter->bar2);
6631 destroy_workqueue(adapter->workq);
6633 kfree(adapter->mbox_log);
6638 pci_disable_pcie_error_reporting(pdev);
6639 pci_disable_device(pdev);
6640 out_release_regions:
6641 pci_release_regions(pdev);
6645 static void remove_one(struct pci_dev *pdev)
6647 struct adapter *adapter = pci_get_drvdata(pdev);
6648 struct hash_mac_addr *entry, *tmp;
6651 pci_release_regions(pdev);
6655 /* If we allocated filters, free up state associated with any
 * valid filters ...
 */
6658 clear_all_filters(adapter);
6660 adapter->flags |= CXGB4_SHUTTING_DOWN;
6662 if (adapter->pf == 4) {
6665 /* Tear down per-adapter Work Queue first since it can contain
6666 * references to our adapter data structure.
6668 destroy_workqueue(adapter->workq);
6670 if (is_uld(adapter)) {
6671 detach_ulds(adapter);
6672 t4_uld_clean_up(adapter);
6675 adap_free_hma_mem(adapter);
6677 disable_interrupts(adapter);
6679 cxgb4_free_mps_ref_entries(adapter);
6681 for_each_port(adapter, i)
6682 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
6683 unregister_netdev(adapter->port[i]);
6685 debugfs_remove_recursive(adapter->debugfs_root);
6687 if (!is_t4(adapter->params.chip))
6688 cxgb4_ptp_stop(adapter);
6689 if (IS_REACHABLE(CONFIG_THERMAL))
6690 cxgb4_thermal_remove(adapter);
6692 if (adapter->flags & CXGB4_FULL_INIT_DONE)
6695 if (adapter->flags & CXGB4_USING_MSIX)
6696 free_msix_info(adapter);
6697 if (adapter->num_uld || adapter->num_ofld_uld)
6698 t4_uld_mem_free(adapter);
6699 free_some_resources(adapter);
6700 list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist,
6702 list_del(&entry->list);
6706 #if IS_ENABLED(CONFIG_IPV6)
6707 t4_cleanup_clip_tbl(adapter);
6709 if (!is_t4(adapter->params.chip))
6710 iounmap(adapter->bar2);
6712 #ifdef CONFIG_PCI_IOV
6714 cxgb4_iov_configure(adapter->pdev, 0);
6717 iounmap(adapter->regs);
6718 pci_disable_pcie_error_reporting(pdev);
6719 if ((adapter->flags & CXGB4_DEV_ENABLED)) {
6720 pci_disable_device(pdev);
6721 adapter->flags &= ~CXGB4_DEV_ENABLED;
6723 pci_release_regions(pdev);
6724 kfree(adapter->mbox_log);
6729 /* "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt
6730 * delivery. This is essentially a stripped down version of the PCI remove()
6731 * function where we do the minimal amount of work necessary to shut down
 * any ongoing work.
 */
6734 static void shutdown_one(struct pci_dev *pdev)
6736 struct adapter *adapter = pci_get_drvdata(pdev);
6738 /* As with remove_one() above (see extended comment), we only want to
6739 * do cleanup on PCI Devices which went all the way through init_one()
6743 pci_release_regions(pdev);
6747 adapter->flags |= CXGB4_SHUTTING_DOWN;
6749 if (adapter->pf == 4) {
6752 for_each_port(adapter, i)
6753 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
6754 cxgb_close(adapter->port[i]);
6757 cxgb4_mqprio_stop_offload(adapter);
6760 if (is_uld(adapter)) {
6761 detach_ulds(adapter);
6762 t4_uld_clean_up(adapter);
6765 disable_interrupts(adapter);
6766 disable_msi(adapter);
6768 t4_sge_stop(adapter);
6769 if (adapter->flags & CXGB4_FW_OK)
6770 t4_fw_bye(adapter, adapter->mbox);
6774 static struct pci_driver cxgb4_driver = {
6775 .name = KBUILD_MODNAME,
6776 .id_table = cxgb4_pci_tbl,
6778 .remove = remove_one,
6779 .shutdown = shutdown_one,
6780 #ifdef CONFIG_PCI_IOV
6781 .sriov_configure = cxgb4_iov_configure,
6783 .err_handler = &cxgb4_eeh,
6786 static int __init cxgb4_init_module(void)
6790 cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
6792 ret = pci_register_driver(&cxgb4_driver);
6796 #if IS_ENABLED(CONFIG_IPV6)
6797 if (!inet6addr_registered) {
6798 ret = register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6800 pci_unregister_driver(&cxgb4_driver);
6802 inet6addr_registered = true;
6810 debugfs_remove(cxgb4_debugfs_root);
6815 static void __exit cxgb4_cleanup_module(void)
6817 #if IS_ENABLED(CONFIG_IPV6)
6818 if (inet6addr_registered) {
6819 unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6820 inet6addr_registered = false;
6823 pci_unregister_driver(&cxgb4_driver);
6824 debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
6827 module_init(cxgb4_init_module);
6828 module_exit(cxgb4_cleanup_module);