/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/version.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/io.h>
#include <linux/netdev_features.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <net/udp_tunnel.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/pkt_sched.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/random.h>
#include <net/ip6_checksum.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include "qede.h"
#include "qede_ptp.h"

static char version[] =
        "QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static uint debug;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");
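
/* Usage sketch (illustrative, not part of the original source): 'debug' is a
 * plain module parameter whose bits are decoded by qede_config_debug() below,
 * e.g.
 *
 *   modprobe qede debug=0x40000000    # INFO-level prints
 *   modprobe qede debug=0x00000003    # VERBOSE prints for module bits 0-1
 */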

static const struct qed_eth_ops *qed_ops;

#define CHIP_NUM_57980S_40              0x1634
#define CHIP_NUM_57980S_10              0x1666
#define CHIP_NUM_57980S_MF              0x1636
#define CHIP_NUM_57980S_100             0x1644
#define CHIP_NUM_57980S_50              0x1654
#define CHIP_NUM_57980S_25              0x1656
#define CHIP_NUM_57980S_IOV             0x1664
#define CHIP_NUM_AH                     0x8070
#define CHIP_NUM_AH_IOV                 0x8090

#ifndef PCI_DEVICE_ID_NX2_57980E
#define PCI_DEVICE_ID_57980S_40         CHIP_NUM_57980S_40
#define PCI_DEVICE_ID_57980S_10         CHIP_NUM_57980S_10
#define PCI_DEVICE_ID_57980S_MF         CHIP_NUM_57980S_MF
#define PCI_DEVICE_ID_57980S_100        CHIP_NUM_57980S_100
#define PCI_DEVICE_ID_57980S_50         CHIP_NUM_57980S_50
#define PCI_DEVICE_ID_57980S_25         CHIP_NUM_57980S_25
#define PCI_DEVICE_ID_57980S_IOV        CHIP_NUM_57980S_IOV
#define PCI_DEVICE_ID_AH                CHIP_NUM_AH
#define PCI_DEVICE_ID_AH_IOV            CHIP_NUM_AH_IOV

#endif

enum qede_pci_private {
        QEDE_PRIVATE_PF,
        QEDE_PRIVATE_VF
};

static const struct pci_device_id qede_pci_tbl[] = {
        {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
        {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
        {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
        {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
        {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
        {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
#ifdef CONFIG_QED_SRIOV
        {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
#endif
        {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH), QEDE_PRIVATE_PF},
#ifdef CONFIG_QED_SRIOV
        {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH_IOV), QEDE_PRIVATE_VF},
#endif
        { 0 }
};

MODULE_DEVICE_TABLE(pci, qede_pci_tbl);

static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);

#define TX_TIMEOUT              (5 * HZ)

/* Utilize last protocol index for XDP */
#define XDP_PI  11

static void qede_remove(struct pci_dev *pdev);
static void qede_shutdown(struct pci_dev *pdev);
static void qede_link_update(void *dev, struct qed_link_output *link);

/* The qede lock is used to protect driver state change and driver flows that
 * are not reentrant.
 */
void __qede_lock(struct qede_dev *edev)
{
        mutex_lock(&edev->qede_lock);
}

void __qede_unlock(struct qede_dev *edev)
{
        mutex_unlock(&edev->qede_lock);
}

#ifdef CONFIG_QED_SRIOV
static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
                            __be16 vlan_proto)
{
        struct qede_dev *edev = netdev_priv(ndev);

        if (vlan > 4095) {
                DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
                return -EINVAL;
        }

        if (vlan_proto != htons(ETH_P_8021Q))
                return -EPROTONOSUPPORT;

        DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
                   vlan, vf);

        return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
}

static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
{
        struct qede_dev *edev = netdev_priv(ndev);

        DP_VERBOSE(edev, QED_MSG_IOV,
                   "Setting MAC %02x:%02x:%02x:%02x:%02x:%02x to VF [%d]\n",
                   mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vfidx);

        if (!is_valid_ether_addr(mac)) {
                DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
                return -EINVAL;
        }

        return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
}

static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
{
        struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
        struct qed_dev_info *qed_info = &edev->dev_info.common;
        struct qed_update_vport_params *vport_params;
        int rc;

        vport_params = vzalloc(sizeof(*vport_params));
        if (!vport_params)
                return -ENOMEM;
        DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);

        rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);

        /* Enable/Disable Tx switching for PF */
        if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
            !qed_info->b_inter_pf_switch && qed_info->tx_switching) {
                vport_params->vport_id = 0;
                vport_params->update_tx_switching_flg = 1;
                vport_params->tx_switching_flg = num_vfs_param ? 1 : 0;
                edev->ops->vport_update(edev->cdev, vport_params);
        }

        vfree(vport_params);
        return rc;
}
#endif

static struct pci_driver qede_pci_driver = {
        .name = "qede",
        .id_table = qede_pci_tbl,
        .probe = qede_probe,
        .remove = qede_remove,
        .shutdown = qede_shutdown,
#ifdef CONFIG_QED_SRIOV
        .sriov_configure = qede_sriov_configure,
#endif
};

static struct qed_eth_cb_ops qede_ll_ops = {
        {
#ifdef CONFIG_RFS_ACCEL
                .arfs_filter_op = qede_arfs_filter_op,
#endif
                .link_update = qede_link_update,
        },
        .force_mac = qede_force_mac,
        .ports_update = qede_udp_ports_update,
};

static int qede_netdev_event(struct notifier_block *this, unsigned long event,
                             void *ptr)
{
        struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
        struct ethtool_drvinfo drvinfo;
        struct qede_dev *edev;

        if (event != NETDEV_CHANGENAME && event != NETDEV_CHANGEADDR)
                goto done;

        /* Check whether this is a qede device */
        if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
                goto done;

        memset(&drvinfo, 0, sizeof(drvinfo));
        ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
        if (strcmp(drvinfo.driver, "qede"))
                goto done;
        edev = netdev_priv(ndev);

        switch (event) {
        case NETDEV_CHANGENAME:
                /* Notify qed of the name change */
                if (!edev->ops || !edev->ops->common)
                        goto done;
                edev->ops->common->set_name(edev->cdev, edev->ndev->name);
                break;
        case NETDEV_CHANGEADDR:
                edev = netdev_priv(ndev);
                qede_rdma_event_changeaddr(edev);
                break;
        }

done:
        return NOTIFY_DONE;
}

static struct notifier_block qede_netdev_notifier = {
        .notifier_call = qede_netdev_event,
};

static int __init qede_init(void)
{
        int ret;

        pr_info("qede_init: %s\n", version);

        qed_ops = qed_get_eth_ops();
        if (!qed_ops) {
                pr_notice("Failed to get qed ethtool operations\n");
                return -EINVAL;
        }

        /* Must register the notifier before pci ops, since we might otherwise
         * miss an interface rename after pci probe and netdev registration.
         */
        ret = register_netdevice_notifier(&qede_netdev_notifier);
        if (ret) {
                pr_notice("Failed to register netdevice_notifier\n");
                qed_put_eth_ops();
                return -EINVAL;
        }

        ret = pci_register_driver(&qede_pci_driver);
        if (ret) {
                pr_notice("Failed to register driver\n");
                unregister_netdevice_notifier(&qede_netdev_notifier);
                qed_put_eth_ops();
                return -EINVAL;
        }

        return 0;
}

static void __exit qede_cleanup(void)
{
        if (debug & QED_LOG_INFO_MASK)
                pr_info("qede_cleanup called\n");

        unregister_netdevice_notifier(&qede_netdev_notifier);
        pci_unregister_driver(&qede_pci_driver);
        qed_put_eth_ops();
}

module_init(qede_init);
module_exit(qede_cleanup);

static int qede_open(struct net_device *ndev);
static int qede_close(struct net_device *ndev);

void qede_fill_by_demand_stats(struct qede_dev *edev)
{
        struct qede_stats_common *p_common = &edev->stats.common;
        struct qed_eth_stats stats;

        edev->ops->get_vport_stats(edev->cdev, &stats);

        p_common->no_buff_discards = stats.common.no_buff_discards;
        p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
        p_common->ttl0_discard = stats.common.ttl0_discard;
        p_common->rx_ucast_bytes = stats.common.rx_ucast_bytes;
        p_common->rx_mcast_bytes = stats.common.rx_mcast_bytes;
        p_common->rx_bcast_bytes = stats.common.rx_bcast_bytes;
        p_common->rx_ucast_pkts = stats.common.rx_ucast_pkts;
        p_common->rx_mcast_pkts = stats.common.rx_mcast_pkts;
        p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts;
        p_common->mftag_filter_discards = stats.common.mftag_filter_discards;
        p_common->mac_filter_discards = stats.common.mac_filter_discards;

        p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes;
        p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes;
        p_common->tx_bcast_bytes = stats.common.tx_bcast_bytes;
        p_common->tx_ucast_pkts = stats.common.tx_ucast_pkts;
        p_common->tx_mcast_pkts = stats.common.tx_mcast_pkts;
        p_common->tx_bcast_pkts = stats.common.tx_bcast_pkts;
        p_common->tx_err_drop_pkts = stats.common.tx_err_drop_pkts;
        p_common->coalesced_pkts = stats.common.tpa_coalesced_pkts;
        p_common->coalesced_events = stats.common.tpa_coalesced_events;
        p_common->coalesced_aborts_num = stats.common.tpa_aborts_num;
        p_common->non_coalesced_pkts = stats.common.tpa_not_coalesced_pkts;
        p_common->coalesced_bytes = stats.common.tpa_coalesced_bytes;

        p_common->rx_64_byte_packets = stats.common.rx_64_byte_packets;
        p_common->rx_65_to_127_byte_packets =
            stats.common.rx_65_to_127_byte_packets;
        p_common->rx_128_to_255_byte_packets =
            stats.common.rx_128_to_255_byte_packets;
        p_common->rx_256_to_511_byte_packets =
            stats.common.rx_256_to_511_byte_packets;
        p_common->rx_512_to_1023_byte_packets =
            stats.common.rx_512_to_1023_byte_packets;
        p_common->rx_1024_to_1518_byte_packets =
            stats.common.rx_1024_to_1518_byte_packets;
        p_common->rx_crc_errors = stats.common.rx_crc_errors;
        p_common->rx_mac_crtl_frames = stats.common.rx_mac_crtl_frames;
        p_common->rx_pause_frames = stats.common.rx_pause_frames;
        p_common->rx_pfc_frames = stats.common.rx_pfc_frames;
        p_common->rx_align_errors = stats.common.rx_align_errors;
        p_common->rx_carrier_errors = stats.common.rx_carrier_errors;
        p_common->rx_oversize_packets = stats.common.rx_oversize_packets;
        p_common->rx_jabbers = stats.common.rx_jabbers;
        p_common->rx_undersize_packets = stats.common.rx_undersize_packets;
        p_common->rx_fragments = stats.common.rx_fragments;
        p_common->tx_64_byte_packets = stats.common.tx_64_byte_packets;
        p_common->tx_65_to_127_byte_packets =
            stats.common.tx_65_to_127_byte_packets;
        p_common->tx_128_to_255_byte_packets =
            stats.common.tx_128_to_255_byte_packets;
        p_common->tx_256_to_511_byte_packets =
            stats.common.tx_256_to_511_byte_packets;
        p_common->tx_512_to_1023_byte_packets =
            stats.common.tx_512_to_1023_byte_packets;
        p_common->tx_1024_to_1518_byte_packets =
            stats.common.tx_1024_to_1518_byte_packets;
        p_common->tx_pause_frames = stats.common.tx_pause_frames;
        p_common->tx_pfc_frames = stats.common.tx_pfc_frames;
        p_common->brb_truncates = stats.common.brb_truncates;
        p_common->brb_discards = stats.common.brb_discards;
        p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames;

        if (QEDE_IS_BB(edev)) {
                struct qede_stats_bb *p_bb = &edev->stats.bb;

                p_bb->rx_1519_to_1522_byte_packets =
                    stats.bb.rx_1519_to_1522_byte_packets;
                p_bb->rx_1519_to_2047_byte_packets =
                    stats.bb.rx_1519_to_2047_byte_packets;
                p_bb->rx_2048_to_4095_byte_packets =
                    stats.bb.rx_2048_to_4095_byte_packets;
                p_bb->rx_4096_to_9216_byte_packets =
                    stats.bb.rx_4096_to_9216_byte_packets;
                p_bb->rx_9217_to_16383_byte_packets =
                    stats.bb.rx_9217_to_16383_byte_packets;
                p_bb->tx_1519_to_2047_byte_packets =
                    stats.bb.tx_1519_to_2047_byte_packets;
                p_bb->tx_2048_to_4095_byte_packets =
                    stats.bb.tx_2048_to_4095_byte_packets;
                p_bb->tx_4096_to_9216_byte_packets =
                    stats.bb.tx_4096_to_9216_byte_packets;
                p_bb->tx_9217_to_16383_byte_packets =
                    stats.bb.tx_9217_to_16383_byte_packets;
                p_bb->tx_lpi_entry_count = stats.bb.tx_lpi_entry_count;
                p_bb->tx_total_collisions = stats.bb.tx_total_collisions;
        } else {
                struct qede_stats_ah *p_ah = &edev->stats.ah;

                p_ah->rx_1519_to_max_byte_packets =
                    stats.ah.rx_1519_to_max_byte_packets;
                p_ah->tx_1519_to_max_byte_packets =
                    stats.ah.tx_1519_to_max_byte_packets;
        }
}

static void qede_get_stats64(struct net_device *dev,
                             struct rtnl_link_stats64 *stats)
{
        struct qede_dev *edev = netdev_priv(dev);
        struct qede_stats_common *p_common;

        qede_fill_by_demand_stats(edev);
        p_common = &edev->stats.common;

        stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
                            p_common->rx_bcast_pkts;
        stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
                            p_common->tx_bcast_pkts;

        stats->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
                          p_common->rx_bcast_bytes;
        stats->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
                          p_common->tx_bcast_bytes;

        stats->tx_errors = p_common->tx_err_drop_pkts;
        stats->multicast = p_common->rx_mcast_pkts + p_common->rx_bcast_pkts;

        stats->rx_fifo_errors = p_common->no_buff_discards;

        if (QEDE_IS_BB(edev))
                stats->collisions = edev->stats.bb.tx_total_collisions;
        stats->rx_crc_errors = p_common->rx_crc_errors;
        stats->rx_frame_errors = p_common->rx_align_errors;
}

#ifdef CONFIG_QED_SRIOV
static int qede_get_vf_config(struct net_device *dev, int vfidx,
                              struct ifla_vf_info *ivi)
{
        struct qede_dev *edev = netdev_priv(dev);

        if (!edev->ops)
                return -EINVAL;

        return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
}

static int qede_set_vf_rate(struct net_device *dev, int vfidx,
                            int min_tx_rate, int max_tx_rate)
{
        struct qede_dev *edev = netdev_priv(dev);

        return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
                                        max_tx_rate);
}

static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
{
        struct qede_dev *edev = netdev_priv(dev);

        if (!edev->ops)
                return -EINVAL;

        return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
}

static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
                                  int link_state)
{
        struct qede_dev *edev = netdev_priv(dev);

        if (!edev->ops)
                return -EINVAL;

        return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
}

static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting)
{
        struct qede_dev *edev = netdev_priv(dev);

        if (!edev->ops)
                return -EINVAL;

        return edev->ops->iov->set_trust(edev->cdev, vfidx, setting);
}
#endif

static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct qede_dev *edev = netdev_priv(dev);

        if (!netif_running(dev))
                return -EAGAIN;

        switch (cmd) {
        case SIOCSHWTSTAMP:
                return qede_ptp_hw_ts(edev, ifr);
        default:
                DP_VERBOSE(edev, QED_MSG_DEBUG,
                           "default IOCTL cmd 0x%x\n", cmd);
                return -EOPNOTSUPP;
        }

        return 0;
}

static const struct net_device_ops qede_netdev_ops = {
        .ndo_open = qede_open,
        .ndo_stop = qede_close,
        .ndo_start_xmit = qede_start_xmit,
        .ndo_set_rx_mode = qede_set_rx_mode,
        .ndo_set_mac_address = qede_set_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_change_mtu = qede_change_mtu,
        .ndo_do_ioctl = qede_ioctl,
#ifdef CONFIG_QED_SRIOV
        .ndo_set_vf_mac = qede_set_vf_mac,
        .ndo_set_vf_vlan = qede_set_vf_vlan,
        .ndo_set_vf_trust = qede_set_vf_trust,
#endif
        .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
        .ndo_fix_features = qede_fix_features,
        .ndo_set_features = qede_set_features,
        .ndo_get_stats64 = qede_get_stats64,
#ifdef CONFIG_QED_SRIOV
        .ndo_set_vf_link_state = qede_set_vf_link_state,
        .ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
        .ndo_get_vf_config = qede_get_vf_config,
        .ndo_set_vf_rate = qede_set_vf_rate,
#endif
        .ndo_udp_tunnel_add = qede_udp_tunnel_add,
        .ndo_udp_tunnel_del = qede_udp_tunnel_del,
        .ndo_features_check = qede_features_check,
        .ndo_bpf = qede_xdp,
#ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer = qede_rx_flow_steer,
#endif
};

static const struct net_device_ops qede_netdev_vf_ops = {
        .ndo_open = qede_open,
        .ndo_stop = qede_close,
        .ndo_start_xmit = qede_start_xmit,
        .ndo_set_rx_mode = qede_set_rx_mode,
        .ndo_set_mac_address = qede_set_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_change_mtu = qede_change_mtu,
        .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
        .ndo_fix_features = qede_fix_features,
        .ndo_set_features = qede_set_features,
        .ndo_get_stats64 = qede_get_stats64,
        .ndo_udp_tunnel_add = qede_udp_tunnel_add,
        .ndo_udp_tunnel_del = qede_udp_tunnel_del,
        .ndo_features_check = qede_features_check,
};

static const struct net_device_ops qede_netdev_vf_xdp_ops = {
        .ndo_open = qede_open,
        .ndo_stop = qede_close,
        .ndo_start_xmit = qede_start_xmit,
        .ndo_set_rx_mode = qede_set_rx_mode,
        .ndo_set_mac_address = qede_set_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_change_mtu = qede_change_mtu,
        .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
        .ndo_fix_features = qede_fix_features,
        .ndo_set_features = qede_set_features,
        .ndo_get_stats64 = qede_get_stats64,
        .ndo_udp_tunnel_add = qede_udp_tunnel_add,
        .ndo_udp_tunnel_del = qede_udp_tunnel_del,
        .ndo_features_check = qede_features_check,
        .ndo_bpf = qede_xdp,
};

/* -------------------------------------------------------------------------
 * START OF PROBE / REMOVE
 * -------------------------------------------------------------------------
 */

static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
                                            struct pci_dev *pdev,
                                            struct qed_dev_eth_info *info,
                                            u32 dp_module, u8 dp_level)
{
        struct net_device *ndev;
        struct qede_dev *edev;

        ndev = alloc_etherdev_mqs(sizeof(*edev),
                                  info->num_queues, info->num_queues);
        if (!ndev) {
                pr_err("etherdev allocation failed\n");
                return NULL;
        }

        edev = netdev_priv(ndev);
        edev->ndev = ndev;
        edev->cdev = cdev;
        edev->pdev = pdev;
        edev->dp_module = dp_module;
        edev->dp_level = dp_level;
        edev->ops = qed_ops;
        edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
        edev->q_num_tx_buffers = NUM_TX_BDS_DEF;

        DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
                info->num_queues, info->num_queues);

        SET_NETDEV_DEV(ndev, &pdev->dev);

        memset(&edev->stats, 0, sizeof(edev->stats));
        memcpy(&edev->dev_info, info, sizeof(*info));

        /* As ethtool doesn't have the ability to show WoL behavior as
         * 'default', declare WoL enabled if the device supports it.
         */
        if (edev->dev_info.common.wol_support)
                edev->wol_enabled = true;

        INIT_LIST_HEAD(&edev->vlan_list);

        return edev;
}

static void qede_init_ndev(struct qede_dev *edev)
{
        struct net_device *ndev = edev->ndev;
        struct pci_dev *pdev = edev->pdev;
        bool udp_tunnel_enable = false;
        netdev_features_t hw_features;

        pci_set_drvdata(pdev, ndev);

        ndev->mem_start = edev->dev_info.common.pci_mem_start;
        ndev->base_addr = ndev->mem_start;
        ndev->mem_end = edev->dev_info.common.pci_mem_end;
        ndev->irq = edev->dev_info.common.pci_irq;

        ndev->watchdog_timeo = TX_TIMEOUT;

        if (IS_VF(edev)) {
                if (edev->dev_info.xdp_supported)
                        ndev->netdev_ops = &qede_netdev_vf_xdp_ops;
                else
                        ndev->netdev_ops = &qede_netdev_vf_ops;
        } else {
                ndev->netdev_ops = &qede_netdev_ops;
        }

        qede_set_ethtool_ops(ndev);

        ndev->priv_flags |= IFF_UNICAST_FLT;

        /* user-changeable features */
        hw_features = NETIF_F_GRO | NETIF_F_GRO_HW | NETIF_F_SG |
                      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                      NETIF_F_TSO | NETIF_F_TSO6;

        if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1)
                hw_features |= NETIF_F_NTUPLE;

        if (edev->dev_info.common.vxlan_enable ||
            edev->dev_info.common.geneve_enable)
                udp_tunnel_enable = true;

        if (udp_tunnel_enable || edev->dev_info.common.gre_enable) {
                hw_features |= NETIF_F_TSO_ECN;
                ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                                        NETIF_F_SG | NETIF_F_TSO |
                                        NETIF_F_TSO_ECN | NETIF_F_TSO6 |
                                        NETIF_F_RXCSUM;
        }

        if (udp_tunnel_enable) {
                hw_features |= (NETIF_F_GSO_UDP_TUNNEL |
                                NETIF_F_GSO_UDP_TUNNEL_CSUM);
                ndev->hw_enc_features |= (NETIF_F_GSO_UDP_TUNNEL |
                                          NETIF_F_GSO_UDP_TUNNEL_CSUM);
        }

        if (edev->dev_info.common.gre_enable) {
                hw_features |= (NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM);
                ndev->hw_enc_features |= (NETIF_F_GSO_GRE |
                                          NETIF_F_GSO_GRE_CSUM);
        }

        ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
                              NETIF_F_HIGHDMA;
        ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
                         NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
                         NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;

        ndev->hw_features = hw_features;

        /* MTU range: 46 - 9600 */
        ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
        ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;

        /* Set network device HW mac */
        ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);

        ndev->mtu = edev->dev_info.common.mtu;
}

/* This function decodes the 32-bit 'debug' parameter into two values: a debug
 * level and a per-module bitmap.
 * Input 32b decoding:
 * b31 - enable all NOTICE prints. NOTICE prints are for deviations from the
 * 'happy' flow, e.g. memory allocation failures.
 * b30 - enable all INFO prints. INFO prints are for major steps in the flow
 * and provide important parameters.
 * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
 * module. VERBOSE prints track a specific flow at a low level.
 *
 * Note that the level should be that of the lowest required logs.
 */
void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
{
        *p_dp_level = QED_LEVEL_NOTICE;
        *p_dp_module = 0;

        if (debug & QED_LOG_VERBOSE_MASK) {
                *p_dp_level = QED_LEVEL_VERBOSE;
                *p_dp_module = (debug & 0x3FFFFFFF);
        } else if (debug & QED_LOG_INFO_MASK) {
                *p_dp_level = QED_LEVEL_INFO;
        } else if (debug & QED_LOG_NOTICE_MASK) {
                *p_dp_level = QED_LEVEL_NOTICE;
        }
}
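
/* Worked example (illustrative, assuming the bit layout documented above):
 * debug = 0x00000005 has bits set inside the per-module verbose bitmap, so
 * *p_dp_level becomes QED_LEVEL_VERBOSE and *p_dp_module becomes 0x5
 * (VERBOSE prints for the modules mapped to bits 0 and 2);
 * debug = 0x40000000 sets only b30, yielding QED_LEVEL_INFO and a zero
 * module bitmap.
 */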

static void qede_free_fp_array(struct qede_dev *edev)
{
        if (edev->fp_array) {
                struct qede_fastpath *fp;
                int i;

                for_each_queue(i) {
                        fp = &edev->fp_array[i];

                        kfree(fp->sb_info);
                        /* Handle the mem-alloc-failure case where qede_init_fp
                         * didn't register xdp_rxq_info yet.
                         * Checking fp->rxq implicitly covers
                         * (fp->type & QEDE_FASTPATH_RX).
                         */
                        if (fp->rxq && xdp_rxq_info_is_reg(&fp->rxq->xdp_rxq))
                                xdp_rxq_info_unreg(&fp->rxq->xdp_rxq);
                        kfree(fp->rxq);
                        kfree(fp->xdp_tx);
                        kfree(fp->txq);
                }
                kfree(edev->fp_array);
        }

        edev->num_queues = 0;
        edev->fp_num_tx = 0;
        edev->fp_num_rx = 0;
}

static int qede_alloc_fp_array(struct qede_dev *edev)
{
        u8 fp_combined, fp_rx = edev->fp_num_rx;
        struct qede_fastpath *fp;
        int i;

        edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
                                 sizeof(*edev->fp_array), GFP_KERNEL);
        if (!edev->fp_array) {
                DP_NOTICE(edev, "fp array allocation failed\n");
                goto err;
        }

        fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;

        /* Allocate the FP elements for Rx queues followed by combined and then
         * the Tx. This ordering should be maintained so that the respective
         * queues (Rx or Tx) will be together in the fastpath array and the
         * associated ids will be sequential.
         */
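        /* Illustrative layout (not in the original source): with fp_num_rx = 2,
         * fp_num_tx = 2 and QEDE_QUEUE_CNT() = 6, fp_combined = 2, so the loop
         * below marks fp_array[0..1] RX, fp_array[2..3] COMBINED and
         * fp_array[4..5] TX; entries of each capability end up contiguous with
         * sequential ids.
         */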
        for_each_queue(i) {
                fp = &edev->fp_array[i];

                fp->sb_info = kzalloc(sizeof(*fp->sb_info), GFP_KERNEL);
                if (!fp->sb_info) {
                        DP_NOTICE(edev, "sb info struct allocation failed\n");
                        goto err;
                }

                if (fp_rx) {
                        fp->type = QEDE_FASTPATH_RX;
                        fp_rx--;
                } else if (fp_combined) {
                        fp->type = QEDE_FASTPATH_COMBINED;
                        fp_combined--;
                } else {
                        fp->type = QEDE_FASTPATH_TX;
                }

                if (fp->type & QEDE_FASTPATH_TX) {
                        fp->txq = kzalloc(sizeof(*fp->txq), GFP_KERNEL);
                        if (!fp->txq)
                                goto err;
                }

                if (fp->type & QEDE_FASTPATH_RX) {
                        fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL);
                        if (!fp->rxq)
                                goto err;

                        if (edev->xdp_prog) {
                                fp->xdp_tx = kzalloc(sizeof(*fp->xdp_tx),
                                                     GFP_KERNEL);
                                if (!fp->xdp_tx)
                                        goto err;
                                fp->type |= QEDE_FASTPATH_XDP;
                        }
                }
        }

        return 0;
err:
        qede_free_fp_array(edev);
        return -ENOMEM;
}

static void qede_sp_task(struct work_struct *work)
{
        struct qede_dev *edev = container_of(work, struct qede_dev,
                                             sp_task.work);

        __qede_lock(edev);

        if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
                if (edev->state == QEDE_STATE_OPEN)
                        qede_config_rx_mode(edev->ndev);

#ifdef CONFIG_RFS_ACCEL
        if (test_and_clear_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags)) {
                if (edev->state == QEDE_STATE_OPEN)
                        qede_process_arfs_filters(edev, false);
        }
#endif
        __qede_unlock(edev);
}

static void qede_update_pf_params(struct qed_dev *cdev)
{
        struct qed_pf_params pf_params;

        /* 64 rx + 64 tx + 64 XDP */
        memset(&pf_params, 0, sizeof(struct qed_pf_params));
        pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * 3;

        /* Same for VFs - make sure they'll have sufficient connections
         * to support XDP Tx queues.
         */
        pf_params.eth_pf_params.num_vf_cons = 48;

        pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
        qed_ops->common->update_pf_params(cdev, &pf_params);
}
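
/* Sizing sketch (assumption, not from the original source): reserving three
 * connections per usable status block matches the "64 rx + 64 tx + 64 XDP"
 * note above when 64 SBs are usable (64 * 3 = 192 connections); the 48 VF
 * connections similarly allow rx + tx + XDP triplets (48 = 16 * 3).
 */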

#define QEDE_FW_VER_STR_SIZE    80

static void qede_log_probe(struct qede_dev *edev)
{
        struct qed_dev_info *p_dev_info = &edev->dev_info.common;
        u8 buf[QEDE_FW_VER_STR_SIZE];
        size_t left_size;

        snprintf(buf, QEDE_FW_VER_STR_SIZE,
                 "Storm FW %d.%d.%d.%d, Management FW %d.%d.%d.%d",
                 p_dev_info->fw_major, p_dev_info->fw_minor, p_dev_info->fw_rev,
                 p_dev_info->fw_eng,
                 (p_dev_info->mfw_rev & QED_MFW_VERSION_3_MASK) >>
                 QED_MFW_VERSION_3_OFFSET,
                 (p_dev_info->mfw_rev & QED_MFW_VERSION_2_MASK) >>
                 QED_MFW_VERSION_2_OFFSET,
                 (p_dev_info->mfw_rev & QED_MFW_VERSION_1_MASK) >>
                 QED_MFW_VERSION_1_OFFSET,
                 (p_dev_info->mfw_rev & QED_MFW_VERSION_0_MASK) >>
                 QED_MFW_VERSION_0_OFFSET);

        left_size = QEDE_FW_VER_STR_SIZE - strlen(buf);
        if (p_dev_info->mbi_version && left_size)
                snprintf(buf + strlen(buf), left_size,
                         " [MBI %d.%d.%d]",
                         (p_dev_info->mbi_version & QED_MBI_VERSION_2_MASK) >>
                         QED_MBI_VERSION_2_OFFSET,
                         (p_dev_info->mbi_version & QED_MBI_VERSION_1_MASK) >>
                         QED_MBI_VERSION_1_OFFSET,
                         (p_dev_info->mbi_version & QED_MBI_VERSION_0_MASK) >>
                         QED_MBI_VERSION_0_OFFSET);

        pr_info("qede %02x:%02x.%02x: %s [%s]\n", edev->pdev->bus->number,
                PCI_SLOT(edev->pdev->devfn), PCI_FUNC(edev->pdev->devfn),
                buf, edev->ndev->name);
}

enum qede_probe_mode {
        QEDE_PROBE_NORMAL,
};

static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
                        bool is_vf, enum qede_probe_mode mode)
{
        struct qed_probe_params probe_params;
        struct qed_slowpath_params sp_params;
        struct qed_dev_eth_info dev_info;
        struct qede_dev *edev;
        struct qed_dev *cdev;
        int rc;

        if (unlikely(dp_level & QED_LEVEL_INFO))
                pr_notice("Starting qede probe\n");

        memset(&probe_params, 0, sizeof(probe_params));
        probe_params.protocol = QED_PROTOCOL_ETH;
        probe_params.dp_module = dp_module;
        probe_params.dp_level = dp_level;
        probe_params.is_vf = is_vf;
        cdev = qed_ops->common->probe(pdev, &probe_params);
        if (!cdev) {
                rc = -ENODEV;
                goto err0;
        }

        qede_update_pf_params(cdev);

        /* Start the Slowpath-process */
        memset(&sp_params, 0, sizeof(sp_params));
        sp_params.int_mode = QED_INT_MODE_MSIX;
        sp_params.drv_major = QEDE_MAJOR_VERSION;
        sp_params.drv_minor = QEDE_MINOR_VERSION;
        sp_params.drv_rev = QEDE_REVISION_VERSION;
        sp_params.drv_eng = QEDE_ENGINEERING_VERSION;
        strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
        rc = qed_ops->common->slowpath_start(cdev, &sp_params);
        if (rc) {
                pr_notice("Cannot start slowpath\n");
                goto err1;
        }

        /* Learn information crucial for qede to progress */
        rc = qed_ops->fill_dev_info(cdev, &dev_info);
        if (rc)
                goto err2;

        edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
                                   dp_level);
        if (!edev) {
                rc = -ENOMEM;
                goto err2;
        }

        if (is_vf)
                edev->flags |= QEDE_FLAG_IS_VF;

        qede_init_ndev(edev);

        rc = qede_rdma_dev_add(edev);
        if (rc)
                goto err3;

        /* Prepare the lock prior to the registration of the netdev,
         * as once it's registered we might reach flows requiring it
         * [it's even possible to reach a flow needing it directly
         * from there, although it's unlikely].
         */
        INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
        mutex_init(&edev->qede_lock);
        rc = register_netdev(edev->ndev);
        if (rc) {
                DP_NOTICE(edev, "Cannot register net-device\n");
                goto err4;
        }

        edev->ops->common->set_name(cdev, edev->ndev->name);

        /* PTP not supported on VFs */
        if (!is_vf)
                qede_ptp_enable(edev, true);

        edev->ops->register_ops(cdev, &qede_ll_ops, edev);

#ifdef CONFIG_DCB
        if (!IS_VF(edev))
                qede_set_dcbnl_ops(edev->ndev);
#endif

        edev->rx_copybreak = QEDE_RX_HDR_SIZE;

        qede_log_probe(edev);
        return 0;

err4:
        qede_rdma_dev_remove(edev);
err3:
        free_netdev(edev->ndev);
err2:
        qed_ops->common->slowpath_stop(cdev);
err1:
        qed_ops->common->remove(cdev);
err0:
        return rc;
}

static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        bool is_vf = false;
        u32 dp_module = 0;
        u8 dp_level = 0;

        switch ((enum qede_pci_private)id->driver_data) {
        case QEDE_PRIVATE_VF:
                if (debug & QED_LOG_VERBOSE_MASK)
                        dev_err(&pdev->dev, "Probing a VF\n");
                is_vf = true;
                break;
        default:
                if (debug & QED_LOG_VERBOSE_MASK)
                        dev_err(&pdev->dev, "Probing a PF\n");
        }

        qede_config_debug(debug, &dp_module, &dp_level);

        return __qede_probe(pdev, dp_module, dp_level, is_vf,
                            QEDE_PROBE_NORMAL);
}

enum qede_remove_mode {
        QEDE_REMOVE_NORMAL,
};

static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
{
        struct net_device *ndev = pci_get_drvdata(pdev);
        struct qede_dev *edev = netdev_priv(ndev);
        struct qed_dev *cdev = edev->cdev;

        DP_INFO(edev, "Starting qede_remove\n");

        qede_rdma_dev_remove(edev);
        unregister_netdev(ndev);
        cancel_delayed_work_sync(&edev->sp_task);

        qede_ptp_disable(edev);

        edev->ops->common->set_power_state(cdev, PCI_D0);

        pci_set_drvdata(pdev, NULL);

        /* Use global ops since we've freed edev */
        qed_ops->common->slowpath_stop(cdev);
        if (system_state == SYSTEM_POWER_OFF)
                return;
        qed_ops->common->remove(cdev);

        /* Since this can happen out-of-sync with other flows,
         * don't release the netdevice until after slowpath stop
         * has been called to guarantee various other contexts
         * [e.g., QED register callbacks] won't break anything when
         * accessing the netdevice.
         */
        free_netdev(ndev);

        dev_info(&pdev->dev, "Ending qede_remove successfully\n");
}

static void qede_remove(struct pci_dev *pdev)
{
        __qede_remove(pdev, QEDE_REMOVE_NORMAL);
}

static void qede_shutdown(struct pci_dev *pdev)
{
        __qede_remove(pdev, QEDE_REMOVE_NORMAL);
}

/* -------------------------------------------------------------------------
 * START OF LOAD / UNLOAD
 * -------------------------------------------------------------------------
 */

static int qede_set_num_queues(struct qede_dev *edev)
{
        int rc;
        u16 rss_num;

        /* Set up queues according to available resources */
        if (edev->req_queues)
                rss_num = edev->req_queues;
        else
                rss_num = netif_get_num_default_rss_queues() *
                          edev->dev_info.common.num_hwfns;

        rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);

        rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
        if (rc > 0) {
                /* Managed to request interrupts for our queues */
                edev->num_queues = rc;
                DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
                        QEDE_QUEUE_CNT(edev), rss_num);
                rc = 0;
        }

        edev->fp_num_tx = edev->req_num_tx;
        edev->fp_num_rx = edev->req_num_rx;

        return rc;
}
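
/* Illustrative sizing (assumption): if netif_get_num_default_rss_queues()
 * returns 8 and the device exposes two hw-functions, rss_num starts at 16,
 * is clamped to QEDE_MAX_RSS_CNT(edev), and the final queue count is however
 * many interrupts set_fp_int() actually managed to reserve.
 */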

static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
                             u16 sb_id)
{
        if (sb_info->sb_virt) {
                edev->ops->common->sb_release(edev->cdev, sb_info, sb_id);
                dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
                                  (void *)sb_info->sb_virt, sb_info->sb_phys);
                memset(sb_info, 0, sizeof(*sb_info));
        }
}

/* This function allocates fast-path status block memory */
static int qede_alloc_mem_sb(struct qede_dev *edev,
                             struct qed_sb_info *sb_info, u16 sb_id)
{
        struct status_block_e4 *sb_virt;
        dma_addr_t sb_phys;
        int rc;

        sb_virt = dma_alloc_coherent(&edev->pdev->dev,
                                     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
        if (!sb_virt) {
                DP_ERR(edev, "Status block allocation failed\n");
                return -ENOMEM;
        }

        rc = edev->ops->common->sb_init(edev->cdev, sb_info,
                                        sb_virt, sb_phys, sb_id,
                                        QED_SB_TYPE_L2_QUEUE);
        if (rc) {
                DP_ERR(edev, "Status block initialization failed\n");
                dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
                                  sb_virt, sb_phys);
                return rc;
        }

        return 0;
}

static void qede_free_rx_buffers(struct qede_dev *edev,
                                 struct qede_rx_queue *rxq)
{
        u16 i;

        for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
                struct sw_rx_data *rx_buf;
                struct page *data;

                rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
                data = rx_buf->data;

                dma_unmap_page(&edev->pdev->dev,
                               rx_buf->mapping, PAGE_SIZE, rxq->data_direction);

                rx_buf->data = NULL;
                __free_page(data);
        }
}

static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
        /* Free rx buffers */
        qede_free_rx_buffers(edev, rxq);

        /* Free the parallel SW ring */
        kfree(rxq->sw_rx_ring);

        /* Free the real RQ ring used by FW */
        edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
        edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
}

static void qede_set_tpa_param(struct qede_rx_queue *rxq)
{
        int i;

        for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
                struct qede_agg_info *tpa_info = &rxq->tpa_info[i];

                tpa_info->state = QEDE_AGG_STATE_NONE;
        }
}

/* This function allocates all memory needed per Rx queue */
static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
        int i, rc, size;

        rxq->num_rx_buffers = edev->q_num_rx_buffers;

        rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;

        rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : NET_SKB_PAD;
        size = rxq->rx_headroom +
               SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

        /* Make sure that the headroom and payload fit in a single page */
        if (rxq->rx_buf_size + size > PAGE_SIZE)
                rxq->rx_buf_size = PAGE_SIZE - size;

        /* Segment size to split a page into multiple equal parts,
         * unless XDP is used, in which case we use the entire page.
         */
        if (!edev->xdp_prog) {
                size = size + rxq->rx_buf_size;
                rxq->rx_buf_seg_size = roundup_pow_of_two(size);
        } else {
                rxq->rx_buf_seg_size = PAGE_SIZE;
        }
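
        /* Worked example (illustrative numbers only): on a 4K page, with
         * headroom plus shared_info overhead of 384 bytes and a clamped
         * rx_buf_size of 1536, the non-XDP path computes
         * roundup_pow_of_two(384 + 1536) = 2048, so each page is split into
         * two equal 2K segments; with an XDP program attached, each buffer
         * instead consumes the whole page.
         */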

        /* Allocate the parallel driver ring for Rx buffers */
        size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
        rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
        if (!rxq->sw_rx_ring) {
                DP_ERR(edev, "Rx buffers ring allocation failed\n");
                rc = -ENOMEM;
                goto err;
        }

        /* Allocate FW Rx ring */
        rc = edev->ops->common->chain_alloc(edev->cdev,
                                            QED_CHAIN_USE_TO_CONSUME_PRODUCE,
                                            QED_CHAIN_MODE_NEXT_PTR,
                                            QED_CHAIN_CNT_TYPE_U16,
                                            RX_RING_SIZE,
                                            sizeof(struct eth_rx_bd),
                                            &rxq->rx_bd_ring, NULL);
        if (rc)
                goto err;

        /* Allocate FW completion ring */
        rc = edev->ops->common->chain_alloc(edev->cdev,
                                            QED_CHAIN_USE_TO_CONSUME,
                                            QED_CHAIN_MODE_PBL,
                                            QED_CHAIN_CNT_TYPE_U16,
                                            RX_RING_SIZE,
                                            sizeof(union eth_rx_cqe),
                                            &rxq->rx_comp_ring, NULL);
        if (rc)
                goto err;

        /* Allocate buffers for the Rx ring */
        rxq->filled_buffers = 0;
        for (i = 0; i < rxq->num_rx_buffers; i++) {
                rc = qede_alloc_rx_buffer(rxq, false);
                if (rc) {
                        DP_ERR(edev,
                               "Rx buffers allocation failed at index %d\n", i);
                        goto err;
                }
        }

        if (!edev->gro_disable)
                qede_set_tpa_param(rxq);
err:
        return rc;
}

static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
        /* Free the parallel SW ring */
        if (txq->is_xdp)
                kfree(txq->sw_tx_ring.xdp);
        else
                kfree(txq->sw_tx_ring.skbs);

        /* Free the real RQ ring used by FW */
        edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
}

/* This function allocates all memory needed per Tx queue */
static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
        union eth_tx_bd_types *p_virt;
        int size, rc;

        txq->num_tx_buffers = edev->q_num_tx_buffers;

        /* Allocate the parallel driver ring for Tx buffers */
        if (txq->is_xdp) {
                size = sizeof(*txq->sw_tx_ring.xdp) * txq->num_tx_buffers;
                txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL);
                if (!txq->sw_tx_ring.xdp)
                        goto err;
        } else {
                size = sizeof(*txq->sw_tx_ring.skbs) * txq->num_tx_buffers;
                txq->sw_tx_ring.skbs = kzalloc(size, GFP_KERNEL);
                if (!txq->sw_tx_ring.skbs)
                        goto err;
        }

        rc = edev->ops->common->chain_alloc(edev->cdev,
                                            QED_CHAIN_USE_TO_CONSUME_PRODUCE,
                                            QED_CHAIN_MODE_PBL,
                                            QED_CHAIN_CNT_TYPE_U16,
                                            txq->num_tx_buffers,
                                            sizeof(*p_virt),
                                            &txq->tx_pbl, NULL);
        if (rc)
                goto err;

        return 0;

err:
        qede_free_mem_txq(edev, txq);
        return -ENOMEM;
}

/* This function frees all memory of a single fp */
static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
        qede_free_mem_sb(edev, fp->sb_info, fp->id);

        if (fp->type & QEDE_FASTPATH_RX)
                qede_free_mem_rxq(edev, fp->rxq);

        if (fp->type & QEDE_FASTPATH_XDP)
                qede_free_mem_txq(edev, fp->xdp_tx);

        if (fp->type & QEDE_FASTPATH_TX)
                qede_free_mem_txq(edev, fp->txq);
}

/* This function allocates all memory needed for a single fp (i.e. an entity
 * which contains a status block, one rx queue and/or multiple per-TC tx
 * queues).
 */
static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
        int rc = 0;

        rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
        if (rc)
                goto out;

        if (fp->type & QEDE_FASTPATH_RX) {
                rc = qede_alloc_mem_rxq(edev, fp->rxq);
                if (rc)
                        goto out;
        }

        if (fp->type & QEDE_FASTPATH_XDP) {
                rc = qede_alloc_mem_txq(edev, fp->xdp_tx);
                if (rc)
                        goto out;
        }

        if (fp->type & QEDE_FASTPATH_TX) {
                rc = qede_alloc_mem_txq(edev, fp->txq);
                if (rc)
                        goto out;
        }

out:
        return rc;
}
1395
1396 static void qede_free_mem_load(struct qede_dev *edev)
1397 {
1398         int i;
1399
1400         for_each_queue(i) {
1401                 struct qede_fastpath *fp = &edev->fp_array[i];
1402
1403                 qede_free_mem_fp(edev, fp);
1404         }
1405 }
1406
1407 /* This function allocates all qede memory at NIC load. */
1408 static int qede_alloc_mem_load(struct qede_dev *edev)
1409 {
1410         int rc = 0, queue_id;
1411
1412         for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) {
1413                 struct qede_fastpath *fp = &edev->fp_array[queue_id];
1414
1415                 rc = qede_alloc_mem_fp(edev, fp);
1416                 if (rc) {
1417                         DP_ERR(edev,
1418                                "Failed to allocate memory for fastpath - rss id = %d\n",
1419                                queue_id);
1420                         qede_free_mem_load(edev);
1421                         return rc;
1422                 }
1423         }
1424
1425         return 0;
1426 }
1427
1428 /* This function inits fp content and resets the SB, RXQ and TXQ structures */
1429 static void qede_init_fp(struct qede_dev *edev)
1430 {
1431         int queue_id, rxq_index = 0, txq_index = 0;
1432         struct qede_fastpath *fp;
1433
1434         for_each_queue(queue_id) {
1435                 fp = &edev->fp_array[queue_id];
1436
1437                 fp->edev = edev;
1438                 fp->id = queue_id;
1439
1440                 if (fp->type & QEDE_FASTPATH_XDP) {
1441                         fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev,
1442                                                                 rxq_index);
1443                         fp->xdp_tx->is_xdp = 1;
1444                 }
1445
1446                 if (fp->type & QEDE_FASTPATH_RX) {
1447                         fp->rxq->rxq_id = rxq_index++;
1448
1449                         /* Determine how to map buffers for this queue; XDP may transmit a received buffer back out, hence the bidirectional mapping */
1450                         if (fp->type & QEDE_FASTPATH_XDP)
1451                                 fp->rxq->data_direction = DMA_BIDIRECTIONAL;
1452                         else
1453                                 fp->rxq->data_direction = DMA_FROM_DEVICE;
1454                         fp->rxq->dev = &edev->pdev->dev;
1455
1456                         /* The driver has no error path from here on */
1457                         WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev,
1458                                                  fp->rxq->rxq_id) < 0);
1459                 }
1460
1461                 if (fp->type & QEDE_FASTPATH_TX) {
1462                         fp->txq->index = txq_index++;
1463                         if (edev->dev_info.is_legacy)
1464                                 fp->txq->is_legacy = 1;
1465                         fp->txq->dev = &edev->pdev->dev;
1466                 }
1467
1468                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1469                          edev->ndev->name, queue_id);
1470         }
1471
1472         edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW);
1473 }
1474
1475 static int qede_set_real_num_queues(struct qede_dev *edev)
1476 {
1477         int rc = 0;
1478
1479         rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_COUNT(edev));
1480         if (rc) {
1481                 DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
1482                 return rc;
1483         }
1484
1485         rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev));
1486         if (rc) {
1487                 DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
1488                 return rc;
1489         }
1490
1491         return 0;
1492 }
1493
1494 static void qede_napi_disable_remove(struct qede_dev *edev)
1495 {
1496         int i;
1497
1498         for_each_queue(i) {
1499                 napi_disable(&edev->fp_array[i].napi);
1500
1501                 netif_napi_del(&edev->fp_array[i].napi);
1502         }
1503 }
1504
1505 static void qede_napi_add_enable(struct qede_dev *edev)
1506 {
1507         int i;
1508
1509         /* Add NAPI objects */
1510         for_each_queue(i) {
1511                 netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
1512                                qede_poll, NAPI_POLL_WEIGHT);
1513                 napi_enable(&edev->fp_array[i].napi);
1514         }
1515 }
1516
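/* Quiesce and release the fastpath interrupts. For MSI-X, synchronize_irq()
 * guarantees no handler is still running when the vector is freed; in the
 * non-MSI-X (simd) mode, the handlers registered with qed are cleaned instead.
 */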
1517 static void qede_sync_free_irqs(struct qede_dev *edev)
1518 {
1519         int i;
1520
1521         for (i = 0; i < edev->int_info.used_cnt; i++) {
1522                 if (edev->int_info.msix_cnt) {
1523                         synchronize_irq(edev->int_info.msix[i].vector);
1524                         free_irq(edev->int_info.msix[i].vector,
1525                                  &edev->fp_array[i]);
1526                 } else {
1527                         edev->ops->common->simd_handler_clean(edev->cdev, i);
1528                 }
1529         }
1530
1531         edev->int_info.used_cnt = 0;
1532 }
1533
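/* Request one MSI-X vector per fastpath. With CONFIG_RFS_ACCEL, each Rx
 * vector is also added to the CPU reverse-map so accelerated RFS can steer a
 * flow to the queue whose interrupt is affined to the flow's CPU.
 */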
1534 static int qede_req_msix_irqs(struct qede_dev *edev)
1535 {
1536         int i, rc;
1537
1538         /* Sanity check: we need at least as many MSI-X vectors as prepared RSS queues */
1539         if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) {
1540                 DP_ERR(edev,
1541                        "Interrupt mismatch: %d RSS queues > %d MSI-X vectors\n",
1542                        QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt);
1543                 return -EINVAL;
1544         }
1545
1546         for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
1547 #ifdef CONFIG_RFS_ACCEL
1548                 struct qede_fastpath *fp = &edev->fp_array[i];
1549
1550                 if (edev->ndev->rx_cpu_rmap && (fp->type & QEDE_FASTPATH_RX)) {
1551                         rc = irq_cpu_rmap_add(edev->ndev->rx_cpu_rmap,
1552                                               edev->int_info.msix[i].vector);
1553                         if (rc) {
1554                                 DP_ERR(edev, "Failed to add CPU rmap\n");
1555                                 qede_free_arfs(edev);
1556                         }
1557                 }
1558 #endif
1559                 rc = request_irq(edev->int_info.msix[i].vector,
1560                                  qede_msix_fp_int, 0, edev->fp_array[i].name,
1561                                  &edev->fp_array[i]);
1562                 if (rc) {
1563                         DP_ERR(edev, "Request fp %d irq failed\n", i);
1564                         qede_sync_free_irqs(edev);
1565                         return rc;
1566                 }
1567                 DP_VERBOSE(edev, NETIF_MSG_INTR,
1568                            "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
1569                            edev->fp_array[i].name, i,
1570                            &edev->fp_array[i]);
1571                 edev->int_info.used_cnt++;
1572         }
1573
1574         return 0;
1575 }
1576
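/* Polling-mode callback used instead of a hard IRQ handler when MSI-X is not
 * available; qed invokes it with the fastpath as cookie and we schedule NAPI.
 */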
1577 static void qede_simd_fp_handler(void *cookie)
1578 {
1579         struct qede_fastpath *fp = (struct qede_fastpath *)cookie;
1580
1581         napi_schedule_irqoff(&fp->napi);
1582 }
1583
1584 static int qede_setup_irqs(struct qede_dev *edev)
1585 {
1586         int i, rc = 0;
1587
1588         /* Learn Interrupt configuration */
1589         rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
1590         if (rc)
1591                 return rc;
1592
1593         if (edev->int_info.msix_cnt) {
1594                 rc = qede_req_msix_irqs(edev);
1595                 if (rc)
1596                         return rc;
1597                 edev->ndev->irq = edev->int_info.msix[0].vector;
1598         } else {
1599                 const struct qed_common_ops *ops;
1600
1601                 /* qed should learn the RSS ids and the callbacks */
1602                 ops = edev->ops->common;
1603                 for (i = 0; i < QEDE_QUEUE_CNT(edev); i++)
1604                         ops->simd_handler_config(edev->cdev,
1605                                                  &edev->fp_array[i], i,
1606                                                  qede_simd_fp_handler);
1607                 edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev);
1608         }
1609         return 0;
1610 }
1611
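/* Wait for a Tx queue to empty (sw producer == sw consumer), polling up to
 * ~1000 iterations of 1-2ms each. If the queue is stuck and draining is
 * allowed, ask the MCP (management firmware) to drain it and retry once.
 */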
1612 static int qede_drain_txq(struct qede_dev *edev,
1613                           struct qede_tx_queue *txq, bool allow_drain)
1614 {
1615         int rc, cnt = 1000;
1616
1617         while (txq->sw_tx_cons != txq->sw_tx_prod) {
1618                 if (!cnt) {
1619                         if (allow_drain) {
1620                                 DP_NOTICE(edev,
1621                                           "Tx queue[%d] is stuck, requesting MCP to drain\n",
1622                                           txq->index);
1623                                 rc = edev->ops->common->drain(edev->cdev);
1624                                 if (rc)
1625                                         return rc;
1626                                 return qede_drain_txq(edev, txq, false);
1627                         }
1628                         DP_NOTICE(edev,
1629                                   "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
1630                                   txq->index, txq->sw_tx_prod,
1631                                   txq->sw_tx_cons);
1632                         return -ENODEV;
1633                 }
1634                 cnt--;
1635                 usleep_range(1000, 2000);
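                /* Compiler barrier - force sw_tx_prod/sw_tx_cons to be
                 * re-read on the next loop iteration.
                 */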
1636                 barrier();
1637         }
1638
1639         /* FW finished processing, wait for HW to transmit all tx packets */
1640         usleep_range(1000, 2000);
1641
1642         return 0;
1643 }
1644
1645 static int qede_stop_txq(struct qede_dev *edev,
1646                          struct qede_tx_queue *txq, int rss_id)
1647 {
1648         return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle);
1649 }
1650
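/* Teardown order: deactivate the vport so no new traffic is scheduled, flush
 * in-flight Tx work, stop all queues in reverse creation order and only then
 * destroy the vport itself.
 */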
1651 static int qede_stop_queues(struct qede_dev *edev)
1652 {
1653         struct qed_update_vport_params *vport_update_params;
1654         struct qed_dev *cdev = edev->cdev;
1655         struct qede_fastpath *fp;
1656         int rc, i;
1657
1658         /* Disable the vport */
1659         vport_update_params = vzalloc(sizeof(*vport_update_params));
1660         if (!vport_update_params)
1661                 return -ENOMEM;
1662
1663         vport_update_params->vport_id = 0;
1664         vport_update_params->update_vport_active_flg = 1;
1665         vport_update_params->vport_active_flg = 0;
1666         vport_update_params->update_rss_flg = 0;
1667
1668         rc = edev->ops->vport_update(cdev, vport_update_params);
1669         vfree(vport_update_params);
1670
1671         if (rc) {
1672                 DP_ERR(edev, "Failed to update vport\n");
1673                 return rc;
1674         }
1675
1676         /* Flush Tx queues. If needed, request drain from MCP */
1677         for_each_queue(i) {
1678                 fp = &edev->fp_array[i];
1679
1680                 if (fp->type & QEDE_FASTPATH_TX) {
1681                         rc = qede_drain_txq(edev, fp->txq, true);
1682                         if (rc)
1683                                 return rc;
1684                 }
1685
1686                 if (fp->type & QEDE_FASTPATH_XDP) {
1687                         rc = qede_drain_txq(edev, fp->xdp_tx, true);
1688                         if (rc)
1689                                 return rc;
1690                 }
1691         }
1692
1693         /* Stop all Queues in reverse order */
1694         for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
1695                 fp = &edev->fp_array[i];
1696
1697                 /* Stop the Tx Queue(s) */
1698                 if (fp->type & QEDE_FASTPATH_TX) {
1699                         rc = qede_stop_txq(edev, fp->txq, i);
1700                         if (rc)
1701                                 return rc;
1702                 }
1703
1704                 /* Stop the Rx Queue */
1705                 if (fp->type & QEDE_FASTPATH_RX) {
1706                         rc = edev->ops->q_rx_stop(cdev, i, fp->rxq->handle);
1707                         if (rc) {
1708                                 DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
1709                                 return rc;
1710                         }
1711                 }
1712
1713                 /* Stop the XDP forwarding queue */
1714                 if (fp->type & QEDE_FASTPATH_XDP) {
1715                         rc = qede_stop_txq(edev, fp->xdp_tx, i);
1716                         if (rc)
1717                                 return rc;
1718
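                        /* Drop the per-queue reference on the XDP program
                         * taken when the queue was started.
                         */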
1719                         bpf_prog_put(fp->rxq->xdp_prog);
1720                 }
1721         }
1722
1723         /* Stop the vport */
1724         rc = edev->ops->vport_stop(cdev, 0);
1725         if (rc)
1726                 DP_ERR(edev, "Failed to stop VPORT\n");
1727
1728         return rc;
1729 }
1730
1731 static int qede_start_txq(struct qede_dev *edev,
1732                           struct qede_fastpath *fp,
1733                           struct qede_tx_queue *txq, u8 rss_id, u16 sb_idx)
1734 {
1735         dma_addr_t phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
1736         u32 page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
1737         struct qed_queue_start_common_params params;
1738         struct qed_txq_start_ret_params ret_params;
1739         int rc;
1740
1741         memset(&params, 0, sizeof(params));
1742         memset(&ret_params, 0, sizeof(ret_params));
1743
1744         /* Let the XDP queue share the queue-zone with one of the regular
1745          * Tx queues; we don't really care about its coalescing settings.
1746          */
1747         if (txq->is_xdp)
1748                 params.queue_id = QEDE_TXQ_XDP_TO_IDX(edev, txq);
1749         else
1750                 params.queue_id = txq->index;
1751
1752         params.p_sb = fp->sb_info;
1753         params.sb_idx = sb_idx;
1754
1755         rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table,
1756                                    page_cnt, &ret_params);
1757         if (rc) {
1758                 DP_ERR(edev, "Start TXQ #%d failed %d\n", txq->index, rc);
1759                 return rc;
1760         }
1761
1762         txq->doorbell_addr = ret_params.p_doorbell;
1763         txq->handle = ret_params.p_handle;
1764
1765         /* Determine the associated FW consumer address */
1766         txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[sb_idx];
1767
1768         /* Prepare the doorbell parameters */
1769         SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST, DB_DEST_XCM);
1770         SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
1771         SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_VAL_SEL,
1772                   DQ_XCM_ETH_TX_BD_PROD_CMD);
1773         txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
1774
1775         return rc;
1776 }
1777
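/* Bring-up mirrors qede_stop_queues: start the vport, then every Rx, XDP and
 * Tx queue, and finally send a vport-update activating it with the RSS (and,
 * where applicable, Tx-switching) configuration.
 */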
1778 static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
1779 {
1780         int vlan_removal_en = 1;
1781         struct qed_dev *cdev = edev->cdev;
1782         struct qed_dev_info *qed_info = &edev->dev_info.common;
1783         struct qed_update_vport_params *vport_update_params;
1784         struct qed_queue_start_common_params q_params;
1785         struct qed_start_vport_params start = {0};
1786         int rc, i;
1787
1788         if (!edev->num_queues) {
1789                 DP_ERR(edev,
1790                        "Cannot set V-PORT active as there are no Rx queues\n");
1791                 return -EINVAL;
1792         }
1793
1794         vport_update_params = vzalloc(sizeof(*vport_update_params));
1795         if (!vport_update_params)
1796                 return -ENOMEM;
1797
1798         start.handle_ptp_pkts = !!(edev->ptp);
1799         start.gro_enable = !edev->gro_disable;
1800         start.mtu = edev->ndev->mtu;
1801         start.vport_id = 0;
1802         start.drop_ttl0 = true;
1803         start.remove_inner_vlan = vlan_removal_en;
1804         start.clear_stats = clear_stats;
1805
1806         rc = edev->ops->vport_start(cdev, &start);
1807
1808         if (rc) {
1809                 DP_ERR(edev, "Start V-PORT failed %d\n", rc);
1810                 goto out;
1811         }
1812
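        /* The +0xe in the printout below adds the 14-byte Ethernet header
         * (ETH_HLEN) to the L3 MTU.
         */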
1813         DP_VERBOSE(edev, NETIF_MSG_IFUP,
1814                    "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
1815                    start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);
1816
1817         for_each_queue(i) {
1818                 struct qede_fastpath *fp = &edev->fp_array[i];
1819                 dma_addr_t p_phys_table;
1820                 u32 page_cnt;
1821
1822                 if (fp->type & QEDE_FASTPATH_RX) {
1823                         struct qed_rxq_start_ret_params ret_params;
1824                         struct qede_rx_queue *rxq = fp->rxq;
1825                         __le16 *val;
1826
1827                         memset(&ret_params, 0, sizeof(ret_params));
1828                         memset(&q_params, 0, sizeof(q_params));
1829                         q_params.queue_id = rxq->rxq_id;
1830                         q_params.vport_id = 0;
1831                         q_params.p_sb = fp->sb_info;
1832                         q_params.sb_idx = RX_PI;
1833
1834                         p_phys_table =
1835                             qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
1836                         page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);
1837
1838                         rc = edev->ops->q_rx_start(cdev, i, &q_params,
1839                                                    rxq->rx_buf_size,
1840                                                    rxq->rx_bd_ring.p_phys_addr,
1841                                                    p_phys_table,
1842                                                    page_cnt, &ret_params);
1843                         if (rc) {
1844                                 DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
1845                                        rc);
1846                                 goto out;
1847                         }
1848
1849                         /* Use the return parameters */
1850                         rxq->hw_rxq_prod_addr = ret_params.p_prod;
1851                         rxq->handle = ret_params.p_handle;
1852
1853                         val = &fp->sb_info->sb_virt->pi_array[RX_PI];
1854                         rxq->hw_cons_ptr = val;
1855
1856                         qede_update_rx_prod(edev, rxq);
1857                 }
1858
1859                 if (fp->type & QEDE_FASTPATH_XDP) {
1860                         rc = qede_start_txq(edev, fp, fp->xdp_tx, i, XDP_PI);
1861                         if (rc)
1862                                 goto out;
1863
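                        /* Each Rx queue takes its own reference on the XDP
                         * program; it is dropped in qede_stop_queues().
                         */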
1864                         fp->rxq->xdp_prog = bpf_prog_add(edev->xdp_prog, 1);
1865                         if (IS_ERR(fp->rxq->xdp_prog)) {
1866                                 rc = PTR_ERR(fp->rxq->xdp_prog);
1867                                 fp->rxq->xdp_prog = NULL;
1868                                 goto out;
1869                         }
1870                 }
1871
1872                 if (fp->type & QEDE_FASTPATH_TX) {
1873                         rc = qede_start_txq(edev, fp, fp->txq, i, TX_PI(0));
1874                         if (rc)
1875                                 goto out;
1876                 }
1877         }
1878
1879         /* Prepare and send the vport enable */
1880         vport_update_params->vport_id = start.vport_id;
1881         vport_update_params->update_vport_active_flg = 1;
1882         vport_update_params->vport_active_flg = 1;
1883
1884         if ((qed_info->b_inter_pf_switch || pci_num_vf(edev->pdev)) &&
1885             qed_info->tx_switching) {
1886                 vport_update_params->update_tx_switching_flg = 1;
1887                 vport_update_params->tx_switching_flg = 1;
1888         }
1889
1890         qede_fill_rss_params(edev, &vport_update_params->rss_params,
1891                              &vport_update_params->update_rss_flg);
1892
1893         rc = edev->ops->vport_update(cdev, vport_update_params);
1894         if (rc)
1895                 DP_ERR(edev, "Update V-PORT failed %d\n", rc);
1896
1897 out:
1898         vfree(vport_update_params);
1899         return rc;
1900 }
1901
1902 enum qede_unload_mode {
1903         QEDE_UNLOAD_NORMAL,
1904 };
1905
1906 static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
1907                         bool is_locked)
1908 {
1909         struct qed_link_params link_params;
1910         int rc;
1911
1912         DP_INFO(edev, "Starting qede unload\n");
1913
1914         if (!is_locked)
1915                 __qede_lock(edev);
1916
1917         edev->state = QEDE_STATE_CLOSED;
1918
1919         qede_rdma_dev_event_close(edev);
1920
1921         /* Close OS Tx */
1922         netif_tx_disable(edev->ndev);
1923         netif_carrier_off(edev->ndev);
1924
1925         /* Reset the link */
1926         memset(&link_params, 0, sizeof(link_params));
1927         link_params.link_up = false;
1928         edev->ops->common->set_link(edev->cdev, &link_params);
1929         rc = qede_stop_queues(edev);
1930         if (rc) {
1931                 qede_sync_free_irqs(edev);
1932                 goto out;
1933         }
1934
1935         DP_INFO(edev, "Stopped Queues\n");
1936
1937         qede_vlan_mark_nonconfigured(edev);
1938         edev->ops->fastpath_stop(edev->cdev);
1939
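        /* aRFS resources exist only for a PF with a single hw-function */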
1940         if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
1941                 qede_poll_for_freeing_arfs_filters(edev);
1942                 qede_free_arfs(edev);
1943         }
1944
1945         /* Release the interrupts */
1946         qede_sync_free_irqs(edev);
1947         edev->ops->common->set_fp_int(edev->cdev, 0);
1948
1949         qede_napi_disable_remove(edev);
1950
1951         qede_free_mem_load(edev);
1952         qede_free_fp_array(edev);
1953
1954 out:
1955         if (!is_locked)
1956                 __qede_unlock(edev);
1957         DP_INFO(edev, "Ending qede unload\n");
1958 }
1959
1960 enum qede_load_mode {
1961         QEDE_LOAD_NORMAL,
1962         QEDE_LOAD_RELOAD,
1963 };
1964
1965 static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
1966                      bool is_locked)
1967 {
1968         struct qed_link_params link_params;
1969         int rc;
1970
1971         DP_INFO(edev, "Starting qede load\n");
1972
1973         if (!is_locked)
1974                 __qede_lock(edev);
1975
1976         rc = qede_set_num_queues(edev);
1977         if (rc)
1978                 goto out;
1979
1980         rc = qede_alloc_fp_array(edev);
1981         if (rc)
1982                 goto out;
1983
1984         qede_init_fp(edev);
1985
1986         rc = qede_alloc_mem_load(edev);
1987         if (rc)
1988                 goto err1;
1989         DP_INFO(edev, "Allocated %d Rx, %d Tx queues\n",
1990                 QEDE_RSS_COUNT(edev), QEDE_TSS_COUNT(edev));
1991
1992         rc = qede_set_real_num_queues(edev);
1993         if (rc)
1994                 goto err2;
1995
1996         if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
1997                 rc = qede_alloc_arfs(edev);
1998                 if (rc)
1999                         DP_NOTICE(edev, "aRFS memory allocation failed\n");
2000         }
2001
2002         qede_napi_add_enable(edev);
2003         DP_INFO(edev, "Napi added and enabled\n");
2004
2005         rc = qede_setup_irqs(edev);
2006         if (rc)
2007                 goto err3;
2008         DP_INFO(edev, "Setup IRQs succeeded\n");
2009
2010         rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
2011         if (rc)
2012                 goto err4;
2013         DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
2014
2015         /* Program un-configured VLANs */
2016         qede_configure_vlan_filters(edev);
2017
2018         /* Ask for link-up using current configuration */
2019         memset(&link_params, 0, sizeof(link_params));
2020         link_params.link_up = true;
2021         edev->ops->common->set_link(edev->cdev, &link_params);
2022
2023         edev->state = QEDE_STATE_OPEN;
2024
2025         DP_INFO(edev, "qede load finished successfully\n");
2026
2027         goto out;
2028 err4:
2029         qede_sync_free_irqs(edev);
2030         edev->int_info.msix_cnt = 0; /* a memset of sizeof(struct qed_int_info) starting at &msix_cnt would overrun int_info */
2031 err3:
2032         qede_napi_disable_remove(edev);
2033 err2:
2034         qede_free_mem_load(edev);
2035 err1:
2036         edev->ops->common->set_fp_int(edev->cdev, 0);
2037         qede_free_fp_array(edev);
2038         edev->num_queues = 0;
2039         edev->fp_num_tx = 0;
2040         edev->fp_num_rx = 0;
2041 out:
2042         if (!is_locked)
2043                 __qede_unlock(edev);
2044
2045         return rc;
2046 }
2047
2048 /* 'func' should be able to run between unload and reload assuming the
2049  * interface is actually running, or afterwards in case it's currently DOWN.
2050  */
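/* A minimal usage sketch (not part of the driver): the callback below is a
 * hypothetical helper, but 'func' and the 'u.mtu' argument mirror how the
 * driver's own MTU-change path drives qede_reload():
 *
 *	static void example_update_mtu(struct qede_dev *edev,
 *				       struct qede_reload_args *args)
 *	{
 *		edev->ndev->mtu = args->u.mtu;
 *	}
 *
 *	struct qede_reload_args args = { .func = example_update_mtu };
 *
 *	args.u.mtu = new_mtu;
 *	qede_reload(edev, &args, false);
 */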
2051 void qede_reload(struct qede_dev *edev,
2052                  struct qede_reload_args *args, bool is_locked)
2053 {
2054         if (!is_locked)
2055                 __qede_lock(edev);
2056
2057         /* Since qede_lock is held, the internal state can't change even
2058          * if the netdev state starts transitioning. Check whether the current
2059          * internal configuration indicates the device is up, then reload.
2060          */
2061         if (edev->state == QEDE_STATE_OPEN) {
2062                 qede_unload(edev, QEDE_UNLOAD_NORMAL, true);
2063                 if (args)
2064                         args->func(edev, args);
2065                 qede_load(edev, QEDE_LOAD_RELOAD, true);
2066
2067                 /* Since no one is going to do it for us, re-configure */
2068                 qede_config_rx_mode(edev->ndev);
2069         } else if (args) {
2070                 args->func(edev, args);
2071         }
2072
2073         if (!is_locked)
2074                 __qede_unlock(edev);
2075 }
2076
2077 /* called with rtnl_lock */
2078 static int qede_open(struct net_device *ndev)
2079 {
2080         struct qede_dev *edev = netdev_priv(ndev);
2081         int rc;
2082
2083         netif_carrier_off(ndev);
2084
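        /* Make sure the device is back in the full-power (D0) state first */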
2085         edev->ops->common->set_power_state(edev->cdev, PCI_D0);
2086
2087         rc = qede_load(edev, QEDE_LOAD_NORMAL, false);
2088         if (rc)
2089                 return rc;
2090
2091         udp_tunnel_get_rx_info(ndev);
2092
2093         edev->ops->common->update_drv_state(edev->cdev, true);
2094
2095         return 0;
2096 }
2097
2098 static int qede_close(struct net_device *ndev)
2099 {
2100         struct qede_dev *edev = netdev_priv(ndev);
2101
2102         qede_unload(edev, QEDE_UNLOAD_NORMAL, false);
2103
2104         edev->ops->common->update_drv_state(edev->cdev, false);
2105
2106         return 0;
2107 }
2108
2109 static void qede_link_update(void *dev, struct qed_link_output *link)
2110 {
2111         struct qede_dev *edev = dev;
2112
2113         if (!netif_running(edev->ndev)) {
2114                 DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not running\n");
2115                 return;
2116         }
2117
2118         if (link->link_up) {
2119                 if (!netif_carrier_ok(edev->ndev)) {
2120                         DP_NOTICE(edev, "Link is up\n");
2121                         netif_tx_start_all_queues(edev->ndev);
2122                         netif_carrier_on(edev->ndev);
2123                         qede_rdma_dev_event_open(edev);
2124                 }
2125         } else {
2126                 if (netif_carrier_ok(edev->ndev)) {
2127                         DP_NOTICE(edev, "Link is down\n");
2128                         netif_tx_disable(edev->ndev);
2129                         netif_carrier_off(edev->ndev);
2130                         qede_rdma_dev_event_close(edev);
2131                 }
2132         }
2133 }