net/mlx5e: Introduce net device priv flags infrastructure
[sfrench/cifs-2.6.git] / drivers / net / ethernet / mellanox / mlx5 / core / en.h
index 24344aafbd36c2b4e78fba478d6413074d0009b5..02fa4daef59d403cc2abeaf10090cae56a06da75 100644 (file)
@@ -46,6 +46,9 @@
 #include <linux/rhashtable.h>
 #include "wq.h"
 #include "mlx5_core.h"
+#include "en_stats.h"
+
+#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
 
 #define MLX5E_MAX_NUM_TC       8
 
 #define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE                0xa
 #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE                0xd
 
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW            0x1
+#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW            0x4
+#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW            0x6
+
+#define MLX5_MPWRQ_LOG_STRIDE_SIZE             6  /* >= 6, HW restriction */
+#define MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS        8  /* >= 6, HW restriction */
+#define MLX5_MPWRQ_LOG_WQE_SZ                  17
+#define MLX5_MPWRQ_WQE_PAGE_ORDER  (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
+                                   MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
+#define MLX5_MPWRQ_PAGES_PER_WQE               BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
+#define MLX5_MPWRQ_STRIDES_PER_PAGE            (MLX5_MPWRQ_NUM_STRIDES >> \
+                                                MLX5_MPWRQ_WQE_PAGE_ORDER)
+#define MLX5_CHANNEL_MAX_NUM_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8) * \
+                                  BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW))
+#define MLX5_UMR_ALIGN                         (2048)
+#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD      (128)
+
 #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ                 (64 * 1024)
 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC      0x10
 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS      0x20
 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC      0x10
 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS      0x20
 #define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES                0x80
+#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW            0x2
 
 #define MLX5E_LOG_INDIR_RQT_SIZE       0x7
 #define MLX5E_INDIR_RQT_SIZE           BIT(MLX5E_LOG_INDIR_RQT_SIZE)
 #define MLX5E_MAX_NUM_CHANNELS         (MLX5E_INDIR_RQT_SIZE >> 1)
+#define MLX5E_MAX_NUM_SQS              (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
 #define MLX5E_TX_CQ_POLL_BUDGET        128
 #define MLX5E_UPDATE_STATS_INTERVAL    200 /* msecs */
 #define MLX5E_SQ_BF_BUDGET             16
 
 #define MLX5E_NUM_MAIN_GROUPS 9
 
+/* Per-WQ-type default minimum RX WQE count (see the
+ * MLX5E_PARAMS_DEFAULT_MIN_RX_WQES* defines), capped at wq_size / 2.
+ * Striding (MPW) RQs use a much smaller default since each multi-packet
+ * WQE covers many packets.
+ */
+static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
+{
+       switch (wq_type) {
+       case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+               return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW,
+                            wq_size / 2);
+       default:
+               return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES,
+                            wq_size / 2);
+       }
+}
+
+/* Smallest supported log2 RQ size for the given WQ type. */
+static inline int mlx5_min_log_rq_size(int wq_type)
+{
+       switch (wq_type) {
+       case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+               return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
+       default:
+               return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
+       }
+}
+
+/* Largest supported log2 RQ size for the given WQ type. */
+static inline int mlx5_max_log_rq_size(int wq_type)
+{
+       switch (wq_type) {
+       case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+               return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW;
+       default:
+               return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
+       }
+}
+
+/* TX work queue entry: control segment + Ethernet segment. */
+struct mlx5e_tx_wqe {
+       struct mlx5_wqe_ctrl_seg ctrl;
+       struct mlx5_wqe_eth_seg  eth;
+};
+
+/* RX work queue entry: linked-list next segment + one data segment. */
+struct mlx5e_rx_wqe {
+       struct mlx5_wqe_srq_next_seg  next;
+       struct mlx5_wqe_data_seg      data;
+};
+
+/* UMR work queue entry: ctrl + UMR ctrl + memory key context + data. */
+struct mlx5e_umr_wqe {
+       struct mlx5_wqe_ctrl_seg       ctrl;
+       struct mlx5_wqe_umr_ctrl_seg   uctrl;
+       struct mlx5_mkey_seg           mkc;
+       struct mlx5_wqe_data_seg       data;
+};
+
+/* ethtool private-flag names; kept in sync with enum mlx5e_priv_flag */
+static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = {
+       "nop",
+};
+
+/* bit masks stored in priv->pflags */
+enum mlx5e_priv_flag {
+       MLX5E_PFLAG_NOP = (1 << 0),
+};
+
+/* set or clear the bit mask @pflag in priv->pflags */
+#define MLX5E_SET_PRIV_FLAG(priv, pflag, enable)    \
+       do {                                        \
+               if (enable)                         \
+                       priv->pflags |= pflag;      \
+               else                                \
+                       priv->pflags &= ~pflag;     \
+       } while (0)
+
 #ifdef CONFIG_MLX5_CORE_EN_DCB
 #define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
 #define MLX5E_MIN_BW_ALLOC 1   /* Min percentage of BW allocation */
 #endif
 
-static const char vport_strings[][ETH_GSTRING_LEN] = {
-       /* vport statistics */
-       "rx_packets",
-       "rx_bytes",
-       "tx_packets",
-       "tx_bytes",
-       "rx_error_packets",
-       "rx_error_bytes",
-       "tx_error_packets",
-       "tx_error_bytes",
-       "rx_unicast_packets",
-       "rx_unicast_bytes",
-       "tx_unicast_packets",
-       "tx_unicast_bytes",
-       "rx_multicast_packets",
-       "rx_multicast_bytes",
-       "tx_multicast_packets",
-       "tx_multicast_bytes",
-       "rx_broadcast_packets",
-       "rx_broadcast_bytes",
-       "tx_broadcast_packets",
-       "tx_broadcast_bytes",
-
-       /* SW counters */
-       "tso_packets",
-       "tso_bytes",
-       "tso_inner_packets",
-       "tso_inner_bytes",
-       "lro_packets",
-       "lro_bytes",
-       "rx_csum_good",
-       "rx_csum_none",
-       "rx_csum_sw",
-       "tx_csum_offload",
-       "tx_csum_inner",
-       "tx_queue_stopped",
-       "tx_queue_wake",
-       "tx_queue_dropped",
-       "rx_wqe_err",
-};
-
-struct mlx5e_vport_stats {
-       /* HW counters */
-       u64 rx_packets;
-       u64 rx_bytes;
-       u64 tx_packets;
-       u64 tx_bytes;
-       u64 rx_error_packets;
-       u64 rx_error_bytes;
-       u64 tx_error_packets;
-       u64 tx_error_bytes;
-       u64 rx_unicast_packets;
-       u64 rx_unicast_bytes;
-       u64 tx_unicast_packets;
-       u64 tx_unicast_bytes;
-       u64 rx_multicast_packets;
-       u64 rx_multicast_bytes;
-       u64 tx_multicast_packets;
-       u64 tx_multicast_bytes;
-       u64 rx_broadcast_packets;
-       u64 rx_broadcast_bytes;
-       u64 tx_broadcast_packets;
-       u64 tx_broadcast_bytes;
-
-       /* SW counters */
-       u64 tso_packets;
-       u64 tso_bytes;
-       u64 tso_inner_packets;
-       u64 tso_inner_bytes;
-       u64 lro_packets;
-       u64 lro_bytes;
-       u64 rx_csum_good;
-       u64 rx_csum_none;
-       u64 rx_csum_sw;
-       u64 tx_csum_offload;
-       u64 tx_csum_inner;
-       u64 tx_queue_stopped;
-       u64 tx_queue_wake;
-       u64 tx_queue_dropped;
-       u64 rx_wqe_err;
-
-#define NUM_VPORT_COUNTERS     35
-};
-
-static const char pport_strings[][ETH_GSTRING_LEN] = {
-       /* IEEE802.3 counters */
-       "frames_tx",
-       "frames_rx",
-       "check_seq_err",
-       "alignment_err",
-       "octets_tx",
-       "octets_received",
-       "multicast_xmitted",
-       "broadcast_xmitted",
-       "multicast_rx",
-       "broadcast_rx",
-       "in_range_len_errors",
-       "out_of_range_len",
-       "too_long_errors",
-       "symbol_err",
-       "mac_control_tx",
-       "mac_control_rx",
-       "unsupported_op_rx",
-       "pause_ctrl_rx",
-       "pause_ctrl_tx",
-
-       /* RFC2863 counters */
-       "in_octets",
-       "in_ucast_pkts",
-       "in_discards",
-       "in_errors",
-       "in_unknown_protos",
-       "out_octets",
-       "out_ucast_pkts",
-       "out_discards",
-       "out_errors",
-       "in_multicast_pkts",
-       "in_broadcast_pkts",
-       "out_multicast_pkts",
-       "out_broadcast_pkts",
-
-       /* RFC2819 counters */
-       "drop_events",
-       "octets",
-       "pkts",
-       "broadcast_pkts",
-       "multicast_pkts",
-       "crc_align_errors",
-       "undersize_pkts",
-       "oversize_pkts",
-       "fragments",
-       "jabbers",
-       "collisions",
-       "p64octets",
-       "p65to127octets",
-       "p128to255octets",
-       "p256to511octets",
-       "p512to1023octets",
-       "p1024to1518octets",
-       "p1519to2047octets",
-       "p2048to4095octets",
-       "p4096to8191octets",
-       "p8192to10239octets",
-};
-
-#define NUM_IEEE_802_3_COUNTERS                19
-#define NUM_RFC_2863_COUNTERS          13
-#define NUM_RFC_2819_COUNTERS          21
-#define NUM_PPORT_COUNTERS             (NUM_IEEE_802_3_COUNTERS + \
-                                        NUM_RFC_2863_COUNTERS + \
-                                        NUM_RFC_2819_COUNTERS)
-
-struct mlx5e_pport_stats {
-       __be64 IEEE_802_3_counters[NUM_IEEE_802_3_COUNTERS];
-       __be64 RFC_2863_counters[NUM_RFC_2863_COUNTERS];
-       __be64 RFC_2819_counters[NUM_RFC_2819_COUNTERS];
-};
-
-static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
-       "packets",
-       "bytes",
-       "csum_none",
-       "csum_sw",
-       "lro_packets",
-       "lro_bytes",
-       "wqe_err"
-};
-
-struct mlx5e_rq_stats {
-       u64 packets;
-       u64 bytes;
-       u64 csum_none;
-       u64 csum_sw;
-       u64 lro_packets;
-       u64 lro_bytes;
-       u64 wqe_err;
-#define NUM_RQ_STATS 7
-};
-
-static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
-       "packets",
-       "bytes",
-       "tso_packets",
-       "tso_bytes",
-       "tso_inner_packets",
-       "tso_inner_bytes",
-       "csum_offload_inner",
-       "nop",
-       "csum_offload_none",
-       "stopped",
-       "wake",
-       "dropped",
-};
-
-struct mlx5e_sq_stats {
-       /* commonly accessed in data path */
-       u64 packets;
-       u64 bytes;
-       u64 tso_packets;
-       u64 tso_bytes;
-       u64 tso_inner_packets;
-       u64 tso_inner_bytes;
-       u64 csum_offload_inner;
-       u64 nop;
-       /* less likely accessed in data path */
-       u64 csum_offload_none;
-       u64 stopped;
-       u64 wake;
-       u64 dropped;
-#define NUM_SQ_STATS 12
-};
-
-struct mlx5e_stats {
-       struct mlx5e_vport_stats   vport;
-       struct mlx5e_pport_stats   pport;
-};
-
 struct mlx5e_params {
        u8  log_sq_size;
+       u8  rq_wq_type;
+       u8  mpwqe_log_stride_sz;
+       u8  mpwqe_log_num_strides;
        u8  log_rq_size;
        u16 num_channels;
        u8  num_tc;
+       bool rx_cqe_compress_admin;
+       bool rx_cqe_compress;
        u16 rx_cq_moderation_usec;
        u16 rx_cq_moderation_pkts;
        u16 tx_cq_moderation_usec;
@@ -311,6 +186,7 @@ struct mlx5e_params {
        u8  rss_hfunc;
        u8  toeplitz_hash_key[40];
        u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
+       bool vlan_strip_disable;
 #ifdef CONFIG_MLX5_CORE_EN_DCB
        struct ieee_ets ets;
 #endif
@@ -331,6 +207,7 @@ struct mlx5e_tstamp {
 
 enum {
        MLX5E_RQ_STATE_POST_WQES_ENABLE,
+       MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
 };
 
 struct mlx5e_cq {
@@ -343,32 +220,88 @@ struct mlx5e_cq {
        struct mlx5e_channel      *channel;
        struct mlx5e_priv         *priv;
 
+       /* cqe decompression */
+       struct mlx5_cqe64          title;
+       struct mlx5_mini_cqe8      mini_arr[MLX5_MINI_CQE_ARRAY_SIZE];
+       u8                         mini_arr_idx;
+       u16                        decmprs_left;
+       u16                        decmprs_wqe_counter;
+
        /* control */
        struct mlx5_wq_ctrl        wq_ctrl;
 } ____cacheline_aligned_in_smp;
 
+struct mlx5e_rq;
+/* per-RQ handler invoked for a completed RX CQE */
+typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq *rq,
+                                      struct mlx5_cqe64 *cqe);
+/* fills the RX WQE at index @ix; returns 0 or a -errno — TODO confirm
+ * against the mlx5e_alloc_rx_wqe/mlx5e_alloc_rx_mpwqe implementations
+ */
+typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe,
+                                 u16 ix);
+
+/* a page together with its mapped DMA address */
+struct mlx5e_dma_info {
+       struct page     *page;
+       dma_addr_t      addr;
+};
+
+/* Receive queue; one is embedded in each channel (struct mlx5e_channel) */
 struct mlx5e_rq {
        /* data path */
        struct mlx5_wq_ll      wq;
        u32                    wqe_sz;
        struct sk_buff       **skb;
+       /* striding-RQ (MPW) per-WQE state and memory keys */
+       struct mlx5e_mpw_info *wqe_info;
+       __be32                 mkey_be;
+       __be32                 umr_mkey_be;
 
        struct device         *pdev;
        struct net_device     *netdev;
        struct mlx5e_tstamp   *tstamp;
        struct mlx5e_rq_stats  stats;
        struct mlx5e_cq        cq;
+       /* RX datapath callbacks (see the mlx5e_fp_* typedefs) */
+       mlx5e_fp_handle_rx_cqe handle_rx_cqe;
+       mlx5e_fp_alloc_wqe     alloc_wqe;
 
        unsigned long          state;
        int                    ix;
 
        /* control */
        struct mlx5_wq_ctrl    wq_ctrl;
+       u8                     wq_type;
+       u32                    mpwqe_stride_sz;
+       u32                    mpwqe_num_strides;
        u32                    rqn;
        struct mlx5e_channel  *channel;
        struct mlx5e_priv     *priv;
 } ____cacheline_aligned_in_smp;
 
+/* DMA state for a UMR-mapped multi-packet WQE: the MTT array (@mtt is
+ * presumably the aligned pointer into the @mtt_no_align allocation —
+ * TODO confirm) plus per-page DMA info.
+ */
+struct mlx5e_umr_dma_info {
+       __be64                *mtt;
+       __be64                *mtt_no_align;
+       dma_addr_t             mtt_addr;
+       struct mlx5e_dma_info *dma_info;
+};
+
+/* Per-WQE state for the striding (multi-packet) RQ.  The union holds
+ * either one linear buffer (dma_info) or a UMR-mapped set of pages
+ * (umr); the function pointers implement the layout-specific datapath
+ * operations.
+ */
+struct mlx5e_mpw_info {
+       union {
+               struct mlx5e_dma_info     dma_info;
+               struct mlx5e_umr_dma_info umr;
+       };
+       u16 consumed_strides;
+       u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
+
+       void (*dma_pre_sync)(struct device *pdev,
+                            struct mlx5e_mpw_info *wi,
+                            u32 wqe_offset, u32 len);
+       void (*add_skb_frag)(struct mlx5e_rq *rq,
+                            struct sk_buff *skb,
+                            struct mlx5e_mpw_info *wi,
+                            u32 page_idx, u32 frag_offset, u32 len);
+       void (*copy_skb_header)(struct device *pdev,
+                               struct sk_buff *skb,
+                               struct mlx5e_mpw_info *wi,
+                               u32 page_idx, u32 offset,
+                               u32 headlen);
+       void (*free_wqe)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
+};
+
 struct mlx5e_tx_wqe_info {
        u32 num_bytes;
        u8  num_wqebbs;
@@ -391,6 +324,11 @@ enum {
        MLX5E_SQ_STATE_BF_ENABLE,
 };
 
+/* bookkeeping for a WQE posted on the internal control SQ (icosq):
+ * its opcode and its size in WQE basic blocks
+ */
+struct mlx5e_ico_wqe_info {
+       u8  opcode;
+       u8  num_wqebbs;
+};
+
 struct mlx5e_sq {
        /* data path */
 
@@ -432,6 +370,8 @@ struct mlx5e_sq {
        struct mlx5_uar            uar;
        struct mlx5e_channel      *channel;
        int                        tc;
+       struct mlx5e_ico_wqe_info *ico_wqe_info;
+       u32                        rate_limit;
 } ____cacheline_aligned_in_smp;
 
 static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n)
@@ -448,6 +388,7 @@ struct mlx5e_channel {
        /* data path */
        struct mlx5e_rq            rq;
        struct mlx5e_sq            sq[MLX5E_MAX_NUM_TC];
+       struct mlx5e_sq            icosq;   /* internal control operations */
        struct napi_struct         napi;
        struct device             *pdev;
        struct net_device         *netdev;
@@ -474,42 +415,42 @@ enum mlx5e_traffic_types {
        MLX5E_TT_IPV6,
        MLX5E_TT_ANY,
        MLX5E_NUM_TT,
+       MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY,
 };
 
-#define IS_HASHING_TT(tt) (tt != MLX5E_TT_ANY)
+/* driver state flags — presumably bits in a priv state word; the field
+ * itself is not visible in this hunk, TODO confirm
+ */
+enum {
+       MLX5E_STATE_ASYNC_EVENTS_ENABLE,
+       MLX5E_STATE_OPENED,
+       MLX5E_STATE_DESTROYING,
+};
 
-enum mlx5e_rqt_ix {
-       MLX5E_INDIRECTION_RQT,
-       MLX5E_SINGLE_RQ_RQT,
-       MLX5E_NUM_RQT,
+/* VXLAN database: a radix tree and the lock guarding it */
+struct mlx5e_vxlan_db {
+       spinlock_t                      lock; /* protect vxlan table */
+       struct radix_tree_root          tree;
+};
 
-struct mlx5e_eth_addr_info {
+/* one L2 steering rule; addr holds a MAC (ETH_ALEN + 2 bytes,
+ * presumably padding/alignment — TODO confirm)
+ */
+struct mlx5e_l2_rule {
        u8  addr[ETH_ALEN + 2];
-       u32 tt_vec;
-       struct mlx5_flow_rule *ft_rule[MLX5E_NUM_TT];
+       struct mlx5_flow_rule *rule;
 };
 
-#define MLX5E_ETH_ADDR_HASH_SIZE (1 << BITS_PER_BYTE)
-
-struct mlx5e_eth_addr_db {
-       struct hlist_head          netdev_uc[MLX5E_ETH_ADDR_HASH_SIZE];
-       struct hlist_head          netdev_mc[MLX5E_ETH_ADDR_HASH_SIZE];
-       struct mlx5e_eth_addr_info broadcast;
-       struct mlx5e_eth_addr_info allmulti;
-       struct mlx5e_eth_addr_info promisc;
-       bool                       broadcast_enabled;
-       bool                       allmulti_enabled;
-       bool                       promisc_enabled;
+/* generic flow table wrapper: the FT plus its flow groups */
+struct mlx5e_flow_table {
+       int num_groups;
+       struct mlx5_flow_table *t;
+       struct mlx5_flow_group **g;
+};
 
-enum {
-       MLX5E_STATE_ASYNC_EVENTS_ENABLE,
-       MLX5E_STATE_OPENED,
-       MLX5E_STATE_DESTROYING,
+#define MLX5E_L2_ADDR_HASH_SIZE BIT(BITS_PER_BYTE)
+
+/* TC offload state: the TC flow table and an rhashtable (its keying is
+ * not visible in this header)
+ */
+struct mlx5e_tc_table {
+       struct mlx5_flow_table          *t;
+
+       struct rhashtable_params        ht_params;
+       struct rhashtable               ht;
 };
 
-struct mlx5e_vlan_db {
+struct mlx5e_vlan_table {
+       struct mlx5e_flow_table         ft;
        unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
        struct mlx5_flow_rule   *active_vlans_rule[VLAN_N_VID];
        struct mlx5_flow_rule   *untagged_rule;
@@ -517,29 +458,74 @@ struct mlx5e_vlan_db {
        bool          filter_disabled;
 };
 
-struct mlx5e_vxlan_db {
-       spinlock_t                      lock; /* protect vxlan table */
-       struct radix_tree_root          tree;
+/* L2 (MAC) steering: flow table, UC/MC address hash lists mirrored from
+ * the netdev, and the broadcast/allmulti/promisc rules with their
+ * enabled state
+ */
+struct mlx5e_l2_table {
+       struct mlx5e_flow_table    ft;
+       struct hlist_head          netdev_uc[MLX5E_L2_ADDR_HASH_SIZE];
+       struct hlist_head          netdev_mc[MLX5E_L2_ADDR_HASH_SIZE];
+       struct mlx5e_l2_rule       broadcast;
+       struct mlx5e_l2_rule       allmulti;
+       struct mlx5e_l2_rule       promisc;
+       bool                       broadcast_enabled;
+       bool                       allmulti_enabled;
+       bool                       promisc_enabled;
 };
 
-struct mlx5e_flow_table {
-       int num_groups;
-       struct mlx5_flow_table          *t;
-       struct mlx5_flow_group          **g;
+/* L3/L4 traffic type classifier: one steering rule per traffic type
+ * (enum mlx5e_traffic_types)
+ */
+struct mlx5e_ttc_table {
+       struct mlx5e_flow_table  ft;
+       struct mlx5_flow_rule    *rules[MLX5E_NUM_TT];
 };
 
-struct mlx5e_tc_flow_table {
-       struct mlx5_flow_table          *t;
+#define ARFS_HASH_SHIFT BITS_PER_BYTE
+#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)
+/* one aRFS flow table: the FT, a default (miss) rule and a hash of
+ * per-flow rules
+ */
+struct arfs_table {
+       struct mlx5e_flow_table  ft;
+       struct mlx5_flow_rule    *default_rule;
+       struct hlist_head        rules_hash[ARFS_HASH_SIZE];
+};
 
-       struct rhashtable_params        ht_params;
-       struct rhashtable               ht;
+/* one aRFS table per supported 4-tuple protocol */
+enum  arfs_type {
+       ARFS_IPV4_TCP,
+       ARFS_IPV6_TCP,
+       ARFS_IPV4_UDP,
+       ARFS_IPV6_UDP,
+       ARFS_NUM_TYPES,
+};
+
+/* all aRFS state: one table per arfs_type plus rule-list bookkeeping
+ * and a dedicated workqueue
+ */
+struct mlx5e_arfs_tables {
+       struct arfs_table arfs_tables[ARFS_NUM_TYPES];
+       /* Protect aRFS rules list */
+       spinlock_t                     arfs_lock;
+       struct list_head               rules;
+       int                            last_filter_id;
+       struct workqueue_struct        *wq;
+};
+
+/* NIC prio FTS: flow-table levels, one per steering stage */
+enum {
+       MLX5E_VLAN_FT_LEVEL = 0,
+       MLX5E_L2_FT_LEVEL,
+       MLX5E_TTC_FT_LEVEL,
+       MLX5E_ARFS_FT_LEVEL
+};
+
+/* top-level flow steering state: the namespace plus per-stage tables */
+struct mlx5e_flow_steering {
+       struct mlx5_flow_namespace      *ns;
+       struct mlx5e_tc_table           tc;
+       struct mlx5e_vlan_table         vlan;
+       struct mlx5e_l2_table           l2;
+       struct mlx5e_ttc_table          ttc;
+       struct mlx5e_arfs_tables        arfs;
 };
 
-struct mlx5e_flow_tables {
-       struct mlx5_flow_namespace      *ns;
-       struct mlx5e_tc_flow_table      tc;
-       struct mlx5e_flow_table         vlan;
-       struct mlx5e_flow_table         main;
+/* TIR/RQT pair steering traffic directly to a single RQ; priv keeps one
+ * per channel (direct_tir[MLX5E_MAX_NUM_CHANNELS])
+ */
+struct mlx5e_direct_tir {
+       u32              tirn;
+       u32              rqtn;
+};
+
+/* flow-steering priorities: TC before the NIC tables */
+enum {
+       MLX5E_TC_PRIO = 0,
+       MLX5E_NIC_PRIO
 };
 
 struct mlx5e_priv {
@@ -554,19 +540,18 @@ struct mlx5e_priv {
        u32                        pdn;
        u32                        tdn;
        struct mlx5_core_mkey      mkey;
+       struct mlx5_core_mkey      umr_mkey;
        struct mlx5e_rq            drop_rq;
 
        struct mlx5e_channel     **channel;
        u32                        tisn[MLX5E_MAX_NUM_TC];
-       u32                        rqtn[MLX5E_NUM_RQT];
-       u32                        tirn[MLX5E_NUM_TT];
+       u32                        indir_rqtn;
+       u32                        indir_tirn[MLX5E_NUM_INDIR_TIRS];
+       struct mlx5e_direct_tir    direct_tir[MLX5E_MAX_NUM_CHANNELS];
+       u32                        tx_rates[MLX5E_MAX_NUM_SQS];
 
-       struct mlx5e_flow_tables   fts;
-       struct mlx5e_eth_addr_db   eth_addr;
-       struct mlx5e_vlan_db       vlan;
-#ifdef CONFIG_MLX5_CORE_EN_VXLAN
+       struct mlx5e_flow_steering fs;
        struct mlx5e_vxlan_db      vxlan;
-#endif
 
        struct mlx5e_params        params;
        struct workqueue_struct    *wq;
@@ -574,22 +559,12 @@ struct mlx5e_priv {
        struct work_struct         set_rx_mode_work;
        struct delayed_work        update_stats_work;
 
+       u32                        pflags;
        struct mlx5_core_dev      *mdev;
        struct net_device         *netdev;
        struct mlx5e_stats         stats;
        struct mlx5e_tstamp        tstamp;
-};
-
-#define MLX5E_NET_IP_ALIGN 2
-
-struct mlx5e_tx_wqe {
-       struct mlx5_wqe_ctrl_seg ctrl;
-       struct mlx5_wqe_eth_seg  eth;
-};
-
-struct mlx5e_rx_wqe {
-       struct mlx5_wqe_srq_next_seg  next;
-       struct mlx5_wqe_data_seg      data;
+       u16 q_counter;
 };
 
 enum mlx5e_link_mode {
@@ -634,14 +609,35 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
 int mlx5e_napi_poll(struct napi_struct *napi, int budget);
 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
+
+void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
+void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
+int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
+int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
+void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq);
+void mlx5e_complete_rx_linear_mpwqe(struct mlx5e_rq *rq,
+                                   struct mlx5_cqe64 *cqe,
+                                   u16 byte_cnt,
+                                   struct mlx5e_mpw_info *wi,
+                                   struct sk_buff *skb);
+void mlx5e_complete_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
+                                       struct mlx5_cqe64 *cqe,
+                                       u16 byte_cnt,
+                                       struct mlx5e_mpw_info *wi,
+                                       struct sk_buff *skb);
+void mlx5e_free_rx_linear_mpwqe(struct mlx5e_rq *rq,
+                               struct mlx5e_mpw_info *wi);
+void mlx5e_free_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
+                                   struct mlx5e_mpw_info *wi);
 struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
 
 void mlx5e_update_stats(struct mlx5e_priv *priv);
 
-int mlx5e_create_flow_tables(struct mlx5e_priv *priv);
-void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv);
-void mlx5e_init_eth_addr(struct mlx5e_priv *priv);
+int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
+void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
+void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
+void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
 void mlx5e_set_rx_mode_work(struct work_struct *work);
 
 void mlx5e_fill_hwstamp(struct mlx5e_tstamp *clock, u64 timestamp,
@@ -650,6 +646,7 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv);
 void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv);
 int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr);
 int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr);
+void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val);
 
 int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
                          u16 vid);
@@ -658,16 +655,20 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
 void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
 void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
 
-int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix);
+int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd);
+
+int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix);
 void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv);
 
 int mlx5e_open_locked(struct net_device *netdev);
 int mlx5e_close_locked(struct net_device *netdev);
-void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
+void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
+                                  u32 *indirection_rqt, int len,
                                   int num_channels);
+int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
 
 static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
-                                     struct mlx5e_tx_wqe *wqe, int bf_sz)
+                                     struct mlx5_wqe_ctrl_seg *ctrl, int bf_sz)
 {
        u16 ofst = MLX5_BF_OFFSET + sq->bf_offset;
 
@@ -681,9 +682,9 @@ static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
         */
        wmb();
        if (bf_sz)
-               __iowrite64_copy(sq->uar_map + ofst, &wqe->ctrl, bf_sz);
+               __iowrite64_copy(sq->uar_map + ofst, ctrl, bf_sz);
        else
-               mlx5_write64((__be32 *)&wqe->ctrl, sq->uar_map + ofst, NULL);
+               mlx5_write64((__be32 *)ctrl, sq->uar_map + ofst, NULL);
        /* flush the write-combining mapped buffer */
        wmb();
 
@@ -704,12 +705,43 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
                     MLX5E_MAX_NUM_CHANNELS);
 }
 
+/* MTT count for @npages aligned up to 8, expressed in octwords
+ * (two MTT entries per octword)
+ */
+static inline int mlx5e_get_mtt_octw(int npages)
+{
+       return ALIGN(npages, 8) / 2;
+}
+
 extern const struct ethtool_ops mlx5e_ethtool_ops;
 #ifdef CONFIG_MLX5_CORE_EN_DCB
 extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
 int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets);
 #endif
 
+#ifndef CONFIG_RFS_ACCEL
+/* aRFS compiled out: provide no-op stubs so callers need no #ifdefs */
+static inline int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
+{
+       return 0;
+}
+
+static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}
+
+/* NOTE(review): checkpatch prefers -EOPNOTSUPP; -ENOTSUPP is an
+ * NFS-internal code — consider switching
+ */
+static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv)
+{
+       return -ENOTSUPP;
+}
+
+static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv)
+{
+       return -ENOTSUPP;
+}
+#else
+int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
+void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv);
+int mlx5e_arfs_enable(struct mlx5e_priv *priv);
+int mlx5e_arfs_disable(struct mlx5e_priv *priv);
+int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+                       u16 rxq_index, u32 flow_id);
+#endif
+
 u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);
 
 #endif /* __MLX5_EN_H__ */