/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __MLX5_EN_H__
#define __MLX5_EN_H__
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/crash_dump.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/transobj.h>
#include <linux/mlx5/fs.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/xdp.h>
#include <linux/net_dim.h>
#include "wq.h"
#include "mlx5_core.h"
#include "en_stats.h"
#include "en/fs.h"
extern const struct net_device_ops mlx5e_netdev_ops;
struct page_pool;

#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
#define MLX5E_METADATA_ETHER_LEN 8

#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)

#define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)

#define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu))
#define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu))
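/* Illustrative example: with params->hard_mtu set to MLX5E_ETH_HARD_MTU
 * (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN = 14 + 4 + 4 = 22 bytes),
 * MLX5E_SW2HW_MTU(params, 1500) yields 1522 and
 * MLX5E_HW2SW_MTU(params, 1522) yields 1500 again.
 */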
#define MLX5E_MAX_PRIORITY 8
#define MLX5E_MAX_DSCP 64
#define MLX5E_MAX_NUM_TC 8

#define MLX5_RX_HEADROOM NET_SKB_PAD
#define MLX5_SKB_FRAG_SZ(len) (SKB_DATA_ALIGN(len) + \
        SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

#define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
        (6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
#define MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, req) \
        max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 6)
#define MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 8)
#define MLX5E_MPWQE_STRIDE_SZ(mdev, cqe_cmprs) \
        (cqe_cmprs ? MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) : \
         MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev))
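/* Illustrative values: without the cache_line_128byte capability the
 * minimum log stride size is 6 (64B strides), so the default variant
 * works out to 64B strides and the CQE-compression variant to 256B
 * strides (log 8); with the capability set the minimum becomes log 7
 * (128B strides).
 */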
#define MLX5_MPWRQ_LOG_WQE_SZ 18
#define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
        MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
#define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
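/* Illustrative sizing: a log WQE size of 18 means a 256KB multi-packet
 * WQE. With 4KB pages (PAGE_SHIFT == 12) that is a page order of 6,
 * i.e. 64 pages per WQE; with 64KB pages it drops to 4 pages per WQE.
 */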
#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
#define MLX5E_REQUIRED_WQE_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8))
#define MLX5E_LOG_ALIGNED_MPWQE_PPW (ilog2(MLX5E_REQUIRED_WQE_MTTS))
#define MLX5E_REQUIRED_MTTS(wqes) (wqes * MLX5E_REQUIRED_WQE_MTTS)
#define MLX5E_MAX_RQ_NUM_MTTS \
        ((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
#define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024))
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW \
        (ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS))
#define MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW \
        (MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW + \
         (MLX5_MPWRQ_LOG_WQE_SZ - MLX5E_ORDER2_MAX_PACKET_MTU))

#define MLX5E_MIN_SKB_FRAG_SZ (MLX5_SKB_FRAG_SZ(MLX5_RX_HEADROOM))
#define MLX5E_LOG_MAX_RX_WQE_BULK \
        (ilog2(PAGE_SIZE / roundup_pow_of_two(MLX5E_MIN_SKB_FRAG_SZ)))
#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6
#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd

#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE (1 + MLX5E_LOG_MAX_RX_WQE_BULK)
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE min_t(u8, 0xd, \
        MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW)

#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2

#define MLX5E_RX_MAX_HEAD (256)

#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
#define MLX5E_DEFAULT_LRO_TIMEOUT 32
#define MLX5E_LRO_TIMEOUT_ARR_SIZE 4

#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE 0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW 0x2

#define MLX5E_LOG_INDIR_RQT_SIZE 0x7
#define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE)
#define MLX5E_MIN_NUM_CHANNELS 0x1
#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1)
#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET 128
#define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */
#define MLX5E_UMR_WQE_INLINE_SZ \
        (sizeof(struct mlx5e_umr_wqe) + \
         ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(struct mlx5_mtt), \
               MLX5_UMR_MTT_ALIGNMENT))
#define MLX5E_UMR_WQEBBS \
        (DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB))
#define MLX5E_ICOSQ_MAX_WQEBBS MLX5E_UMR_WQEBBS

#define MLX5E_NUM_MAIN_GROUPS 9

#define MLX5E_MSG_LEVEL NETIF_MSG_LINK
#define mlx5e_dbg(mlevel, priv, format, ...) \
do { \
        if (NETIF_MSG_##mlevel & (priv)->msglevel) \
                netdev_warn(priv->netdev, format, \
                            ##__VA_ARGS__); \
} while (0)
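/* Illustrative usage: the message is only emitted when the matching
 * NETIF_MSG_* bit is set in priv->msglevel, e.g.
 *
 *      mlx5e_dbg(LINK, priv, "link is %s\n", up ? "up" : "down");
 *
 * (where "up" stands for whatever local state the caller holds).
 */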
static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
{
        switch (wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW,
                             wq_size / 2);
        default:
                return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES,
                             wq_size / 2);
        }
}
static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
{
        return is_kdump_kernel() ?
                MLX5E_MIN_NUM_CHANNELS :
                min_t(int, mdev->priv.eq_table.num_comp_vectors,
                      MLX5E_MAX_NUM_CHANNELS);
}
struct mlx5e_tx_wqe {
        struct mlx5_wqe_ctrl_seg ctrl;
        struct mlx5_wqe_eth_seg eth;
        struct mlx5_wqe_data_seg data[0];
};

struct mlx5e_rx_wqe_ll {
        struct mlx5_wqe_srq_next_seg next;
        struct mlx5_wqe_data_seg data[0];
};

struct mlx5e_rx_wqe_cyc {
        struct mlx5_wqe_data_seg data[0];
};

struct mlx5e_umr_wqe {
        struct mlx5_wqe_ctrl_seg ctrl;
        struct mlx5_wqe_umr_ctrl_seg uctrl;
        struct mlx5_mkey_seg mkc;
        struct mlx5_mtt inline_mtts[0];
};
extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];

enum mlx5e_priv_flag {
        MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0),
        MLX5E_PFLAG_TX_CQE_BASED_MODER = (1 << 1),
        MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 2),
        MLX5E_PFLAG_RX_STRIDING_RQ = (1 << 3),
        MLX5E_PFLAG_RX_NO_CSUM_COMPLETE = (1 << 4),
};
#define MLX5E_SET_PFLAG(params, pflag, enable) \
        do { \
                if (enable) \
                        (params)->pflags |= (pflag); \
                else \
                        (params)->pflags &= ~(pflag); \
        } while (0)

#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (pflag)))
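/* Illustrative usage of the private-flag helpers:
 *
 *      MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, true);
 *      if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS))
 *              ...;
 */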
#ifdef CONFIG_MLX5_CORE_EN_DCB
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
#endif
struct mlx5e_params {
        u8 log_rq_mtu_frames;
        bool rx_cqe_compress_def;
        struct net_dim_cq_moder rx_cq_moderation;
        struct net_dim_cq_moder tx_cq_moderation;
        u8 tx_min_inline_mode;
        u8 toeplitz_hash_key[40];
        u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
        bool vlan_strip_disable;
        struct bpf_prog *xdp_prog;

#ifdef CONFIG_MLX5_CORE_EN_DCB
struct mlx5e_cee_config {
        /* bw pct for priority group */
        u8 pg_bw_pct[CEE_DCBX_MAX_PGS];
        u8 prio_to_pg_map[CEE_DCBX_MAX_PRIO];
        bool pfc_setting[CEE_DCBX_MAX_PRIO];

        MLX5_DCB_CHG_NO_RESET,

        enum mlx5_dcbx_oper_mode mode;
        struct mlx5e_cee_config cee_cfg; /* pending configuration */

        /* The only setting that cannot be read from FW */
        u8 tc_tsa[IEEE_8021QAZ_MAX_TCS];

        /* Buffer configuration */

struct mlx5e_dcbx_dp {
        u8 dscp2prio[MLX5E_MAX_DSCP];

        MLX5E_RQ_STATE_ENABLED,
        MLX5E_RQ_STATE_NO_CSUM_COMPLETE,

        /* data path - accessed per cqe */

        /* data path - accessed per napi poll */
        struct napi_struct *napi;
        struct mlx5_core_cq mcq;
        struct mlx5e_channel *channel;

        /* cqe decompression */
        struct mlx5_cqe64 title;
        struct mlx5_mini_cqe8 mini_arr[MLX5_MINI_CQE_ARRAY_SIZE];
        u16 decmprs_wqe_counter;

        struct mlx5_core_dev *mdev;
        struct mlx5_wq_ctrl wq_ctrl;
} ____cacheline_aligned_in_smp;

struct mlx5e_tx_wqe_info {

enum mlx5e_dma_map_type {
        MLX5E_DMA_MAP_SINGLE,

struct mlx5e_sq_dma {
        enum mlx5e_dma_map_type type;

        MLX5E_SQ_STATE_ENABLED,
        MLX5E_SQ_STATE_RECOVERING,
        MLX5E_SQ_STATE_IPSEC,
        MLX5E_SQ_STATE_REDIRECT,

struct mlx5e_sq_wqe_info {

        /* dirtied @completion */
        struct net_dim dim; /* Adaptive Moderation */

        u16 pc ____cacheline_aligned_in_smp;

        struct mlx5_wq_cyc wq;
        struct mlx5e_sq_stats *stats;
        struct mlx5e_sq_dma *dma_fifo;
        struct mlx5e_tx_wqe_info *wqe_info;
        void __iomem *uar_map;
        struct netdev_queue *txq;
        struct hwtstamp_config *tstamp;
        struct mlx5_clock *clock;
        struct mlx5_wq_ctrl wq_ctrl;
        struct mlx5e_channel *channel;

        struct mlx5e_txqsq_recover {
                struct work_struct recover_work;
} ____cacheline_aligned_in_smp;

struct mlx5e_dma_info {

struct mlx5e_xdp_info {
        struct xdp_frame *xdpf;
        struct mlx5e_dma_info di;

        /* dirtied @completion */
        u16 pc ____cacheline_aligned_in_smp;

        struct mlx5_wq_cyc wq;
        struct mlx5e_xdpsq_stats *stats;
        struct mlx5e_xdp_info *xdpi;
        void __iomem *uar_map;
        struct mlx5_wq_ctrl wq_ctrl;
        struct mlx5e_channel *channel;
} ____cacheline_aligned_in_smp;

        u16 pc ____cacheline_aligned_in_smp;

        /* write@xmit, read@completion */
        struct mlx5e_sq_wqe_info *ico_wqe;

        struct mlx5_wq_cyc wq;
        void __iomem *uar_map;
        struct mlx5_wq_ctrl wq_ctrl;
        struct mlx5e_channel *channel;
} ____cacheline_aligned_in_smp;
static inline bool
mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
{
        return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
}
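/* Note on the check above: cc (consumer counter) and pc (producer
 * counter) are free-running u16 counters, and mlx5_wq_cyc_ctr2ix()
 * masks their difference down to a ring index, so the number of free
 * slots is computed correctly across wrap-around. The masked difference
 * is also zero when the ring is completely empty, which is why the
 * (cc == pc) case is accepted separately.
 */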
struct mlx5e_wqe_frag_info {
        struct mlx5e_dma_info *di;
struct mlx5e_umr_dma_info {
        struct mlx5e_dma_info dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
};

struct mlx5e_mpw_info {
        struct mlx5e_umr_dma_info umr;
        u16 consumed_strides;
        DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
};
#define MLX5E_MAX_RX_FRAGS 4

/* A single cache unit is capable of serving one napi call (for
 * non-striding rq) or a MPWQE (for striding rq).
 */
#define MLX5E_CACHE_UNIT (MLX5_MPWRQ_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
                          MLX5_MPWRQ_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
#define MLX5E_CACHE_SIZE (4 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
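/* Illustrative sizing: with NAPI_POLL_WEIGHT == 64 and 64 pages per
 * MPWQE (4KB pages), MLX5E_CACHE_UNIT is 64 and MLX5E_CACHE_SIZE is
 * 4 * 64 = 256 page-cache entries.
 */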
struct mlx5e_page_cache {
        struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
};

typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
                               u16 cqe_bcnt, u32 head_offset, u32 page_idx);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
                         struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
enum mlx5e_rq_flag {
        MLX5E_RQ_FLAG_XDP_XMIT = BIT(0),
};
struct mlx5e_rq_frag_info {

struct mlx5e_rq_frags_info {
        struct mlx5e_rq_frag_info arr[MLX5E_MAX_RX_FRAGS];

        struct mlx5_wq_cyc wq;
        struct mlx5e_wqe_frag_info *frags;
        struct mlx5e_dma_info *di;
        struct mlx5e_rq_frags_info info;
        mlx5e_fp_skb_from_cqe skb_from_cqe;

        struct mlx5_wq_ll wq;
        struct mlx5e_umr_wqe umr_wqe;
        struct mlx5e_mpw_info *info;
        mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;
        bool umr_in_progress;

        u8 map_dir; /* dma map direction */
        struct mlx5e_channel *channel;
        struct net_device *netdev;
        struct mlx5e_rq_stats *stats;
        struct mlx5e_page_cache page_cache;
        struct hwtstamp_config *tstamp;
        struct mlx5_clock *clock;

        mlx5e_fp_handle_rx_cqe handle_rx_cqe;
        mlx5e_fp_post_rx_wqes post_wqes;
        mlx5e_fp_dealloc_wqe dealloc_wqe;

        struct net_dim dim; /* Dynamic Interrupt Moderation */

        struct bpf_prog *xdp_prog;
        struct mlx5e_xdpsq xdpsq;
        DECLARE_BITMAP(flags, 8);
        struct page_pool *page_pool;

        struct mlx5_wq_ctrl wq_ctrl;

        struct mlx5_core_dev *mdev;
        struct mlx5_core_mkey umr_mkey;

        /* XDP read-mostly */
        struct xdp_rxq_info xdp_rxq;
} ____cacheline_aligned_in_smp;
struct mlx5e_channel {
        struct mlx5e_txqsq sq[MLX5E_MAX_NUM_TC];
        struct mlx5e_icosq icosq; /* internal control operations */

        struct napi_struct napi;

        struct net_device *netdev;

        struct mlx5e_xdpsq xdpsq;

        /* data path - accessed per napi poll */
        struct irq_desc *irq_desc;
        struct mlx5e_ch_stats *stats;

        struct mlx5e_priv *priv;
        struct mlx5_core_dev *mdev;
        struct hwtstamp_config *tstamp;

struct mlx5e_channels {
        struct mlx5e_channel **c;
        struct mlx5e_params params;
};

struct mlx5e_channel_stats {
        struct mlx5e_ch_stats ch;
        struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
        struct mlx5e_rq_stats rq;
        struct mlx5e_xdpsq_stats rq_xdpsq;
        struct mlx5e_xdpsq_stats xdpsq;
} ____cacheline_aligned_in_smp;

        MLX5E_STATE_ASYNC_EVENTS_ENABLED,
        MLX5E_STATE_DESTROYING,

        struct mlx5e_rqt rqt;
        struct list_head list;
struct mlx5e_priv {
        /* priv data path fields - start */
        struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC];
        int channel_tc2txq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
#ifdef CONFIG_MLX5_CORE_EN_DCB
        struct mlx5e_dcbx_dp dcbx_dp;
#endif
        /* priv data path fields - end */
        struct mutex state_lock; /* Protects Interface state */
        struct mlx5e_rq drop_rq;

        struct mlx5e_channels channels;
        u32 tisn[MLX5E_MAX_NUM_TC];
        struct mlx5e_rqt indir_rqt;
        struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
        struct mlx5e_tir inner_indir_tir[MLX5E_NUM_INDIR_TIRS];
        struct mlx5e_tir direct_tir[MLX5E_MAX_NUM_CHANNELS];
        u32 tx_rates[MLX5E_MAX_NUM_SQS];

        struct mlx5e_flow_steering fs;

        struct workqueue_struct *wq;
        struct work_struct update_carrier_work;
        struct work_struct set_rx_mode_work;
        struct work_struct tx_timeout_work;
        struct work_struct update_stats_work;

        struct mlx5_core_dev *mdev;
        struct net_device *netdev;
        struct mlx5e_stats stats;
        struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];

        struct hwtstamp_config tstamp;

        u16 drop_rq_q_counter;
#ifdef CONFIG_MLX5_CORE_EN_DCB
        struct mlx5e_dcbx dcbx;
#endif

        const struct mlx5e_profile *profile;
#ifdef CONFIG_MLX5_EN_IPSEC
        struct mlx5e_ipsec *ipsec;
#endif
#ifdef CONFIG_MLX5_EN_TLS
        struct mlx5e_tls *tls;
#endif
};
struct mlx5e_profile {
        int (*init)(struct mlx5_core_dev *mdev,
                    struct net_device *netdev,
                    const struct mlx5e_profile *profile, void *ppriv);
        void (*cleanup)(struct mlx5e_priv *priv);
        int (*init_rx)(struct mlx5e_priv *priv);
        void (*cleanup_rx)(struct mlx5e_priv *priv);
        int (*init_tx)(struct mlx5e_priv *priv);
        void (*cleanup_tx)(struct mlx5e_priv *priv);
        void (*enable)(struct mlx5e_priv *priv);
        void (*disable)(struct mlx5e_priv *priv);
        void (*update_stats)(struct mlx5e_priv *priv);
        void (*update_carrier)(struct mlx5e_priv *priv);
        int (*max_nch)(struct mlx5_core_dev *mdev);
        struct {
                mlx5e_fp_handle_rx_cqe handle_rx_cqe;
                mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
        } rx_handlers;
        int max_tc;
};
void mlx5e_build_ptys2ethtool_map(void);

u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
                       struct net_device *sb_dev,
                       select_queue_fallback_t fallback);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                          struct mlx5e_tx_wqe *wqe, u16 pi);

void mlx5e_completion_event(struct mlx5_core_cq *mcq);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);

bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
                                struct mlx5e_params *params);

void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info);
void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
                        bool recycle);
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq);
void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
                                u16 cqe_bcnt, u32 head_offset, u32 page_idx);
struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
                                   u16 cqe_bcnt, u32 head_offset, u32 page_idx);
struct sk_buff *
mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
                          struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
struct sk_buff *
mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
                             struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
void mlx5e_update_stats(struct mlx5e_priv *priv);

void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
int mlx5e_self_test_num(struct mlx5e_priv *priv);
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
                     u64 *buf);
void mlx5e_set_rx_mode_work(struct work_struct *work);

int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
                          u16 vid);
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
                           u16 vid);
void mlx5e_timestamp_init(struct mlx5e_priv *priv);

struct mlx5e_redirect_rqt_param {
        u32 rqn; /* Direct RQN (Non-RSS) */
        struct mlx5e_channels *channels;
        } rss; /* RSS data */

int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
                       struct mlx5e_redirect_rqt_param rrp);
void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
                                    enum mlx5e_traffic_types tt,
                                    void *tirc, bool inner);

int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);

int mlx5e_open_channels(struct mlx5e_priv *priv,
                        struct mlx5e_channels *chs);
void mlx5e_close_channels(struct mlx5e_channels *chs);
/* Function pointer to be used to modify HW settings while
 * switching channels
 */
typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv);
void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
                                struct mlx5e_channels *new_chs,
                                mlx5e_fp_hw_modify hw_modify);
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
                                   int num_channels);
void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params,
                                 u8 cq_period_mode);
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
                                 u8 cq_period_mode);
void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
                               struct mlx5e_params *params);
static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
{
        return (MLX5_CAP_ETH(mdev, tunnel_stateless_gre) &&
                MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version));
}
static inline void mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq,
                                      struct mlx5e_tx_wqe **wqe,
                                      u16 *pi)
{
        struct mlx5_wq_cyc *wq = &sq->wq;

        *pi  = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
        *wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
        memset(*wqe, 0, sizeof(**wqe));
}
static inline
struct mlx5e_tx_wqe *mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc)
{
        u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc);
        struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
        struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

        memset(cseg, 0, sizeof(*cseg));

        cseg->opmod_idx_opcode = cpu_to_be32((*pc << 8) | MLX5_OPCODE_NOP);
        cseg->qpn_ds = cpu_to_be32((sqn << 8) | 0x01);

        (*pc)++;

        return wqe;
}
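/* A NOP occupies a single WQEBB; the driver typically posts NOPs to pad
 * the send queue when the next real WQE would not fit contiguously
 * before the end of the ring.
 */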
static inline
void mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc,
                     void __iomem *uar_map,
                     struct mlx5_wqe_ctrl_seg *ctrl)
{
        ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;

        /* ensure wqe is visible to device before updating doorbell record */
        dma_wmb();

        *wq->db = cpu_to_be32(pc);

        /* ensure doorbell record is visible to device before ringing the
         * doorbell
         */
        wmb();

        mlx5_write64((__be32 *)ctrl, uar_map, NULL);
}
static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
{
        struct mlx5_core_cq *mcq;

        mcq = &cq->mcq;
        mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
}
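/* Arming the CQ requests a completion event for the next CQE; in this
 * driver it is typically done at the end of a NAPI poll once the budget
 * was not exhausted.
 */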
extern const struct ethtool_ops mlx5e_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB
extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets);
void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv);
void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv);
void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv);
#endif
int mlx5e_create_tir(struct mlx5_core_dev *mdev,
                     struct mlx5e_tir *tir, u32 *in, int inlen);
void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
                       struct mlx5e_tir *tir);
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb);

/* common netdev helpers */
void mlx5e_create_q_counters(struct mlx5e_priv *priv);
void mlx5e_destroy_q_counters(struct mlx5e_priv *priv);
int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
                       struct mlx5e_rq *drop_rq);
void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);

int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);

int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);
void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);

int mlx5e_create_direct_rqts(struct mlx5e_priv *priv);
void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv);
int mlx5e_create_direct_tirs(struct mlx5e_priv *priv);
void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv);
void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);

int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
                     u32 underlay_qpn, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);

int mlx5e_create_tises(struct mlx5e_priv *priv);
void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);

void mlx5e_queue_update_stats(struct mlx5e_priv *priv);
int mlx5e_bits_invert(unsigned long a, int size);
typedef int (*change_hw_mtu_cb)(struct mlx5e_priv *priv);
int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
                     change_hw_mtu_cb set_mtu_cb);

/* ethtool helpers */
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
                               struct ethtool_drvinfo *drvinfo);
void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv,
                               uint32_t stringset, uint8_t *data);
int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset);
void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
                                     struct ethtool_stats *stats, u64 *data);
void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
                                 struct ethtool_ringparam *param);
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
                                struct ethtool_ringparam *param);
void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
                                struct ethtool_channels *ch);
int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
                               struct ethtool_channels *ch);
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
                               struct ethtool_coalesce *coal);
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
                               struct ethtool_coalesce *coal);
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
                              struct ethtool_ts_info *info);
int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
                               struct ethtool_flash *flash);
/* mlx5e generic netdev management API */
int mlx5e_netdev_init(struct net_device *netdev,
                      struct mlx5e_priv *priv,
                      struct mlx5_core_dev *mdev,
                      const struct mlx5e_profile *profile,
                      void *ppriv);
void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv);
struct net_device *
mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
                    void *ppriv);
int mlx5e_attach_netdev(struct mlx5e_priv *priv);
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
                            struct mlx5e_params *params,
                            u16 max_channels, u16 mtu);
void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
                           struct mlx5e_params *params);
void mlx5e_build_rss_params(struct mlx5e_params *params);
u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev);
void mlx5e_rx_dim_work(struct work_struct *work);
void mlx5e_tx_dim_work(struct work_struct *work);
#endif /* __MLX5_EN_H__ */