drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1 /*
2  * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32
33 #include <net/tc_act/tc_gact.h>
34 #include <net/pkt_cls.h>
35 #include <linux/mlx5/fs.h>
36 #include <net/vxlan.h>
37 #include <linux/bpf.h>
38 #include <net/page_pool.h>
39 #include "eswitch.h"
40 #include "en.h"
41 #include "en_tc.h"
42 #include "en_rep.h"
43 #include "en_accel/ipsec.h"
44 #include "en_accel/ipsec_rxtx.h"
45 #include "en_accel/tls.h"
46 #include "accel/ipsec.h"
47 #include "accel/tls.h"
48 #include "vxlan.h"
49 #include "en/port.h"
50
51 struct mlx5e_rq_param {
52         u32                     rqc[MLX5_ST_SZ_DW(rqc)];
53         struct mlx5_wq_param    wq;
54 };
55
56 struct mlx5e_sq_param {
57         u32                        sqc[MLX5_ST_SZ_DW(sqc)];
58         struct mlx5_wq_param       wq;
59 };
60
61 struct mlx5e_cq_param {
62         u32                        cqc[MLX5_ST_SZ_DW(cqc)];
63         struct mlx5_wq_param       wq;
64         u16                        eq_ix;
65         u8                         cq_period_mode;
66 };
67
68 struct mlx5e_channel_param {
69         struct mlx5e_rq_param      rq;
70         struct mlx5e_sq_param      sq;
71         struct mlx5e_sq_param      xdp_sq;
72         struct mlx5e_sq_param      icosq;
73         struct mlx5e_cq_param      rx_cq;
74         struct mlx5e_cq_param      tx_cq;
75         struct mlx5e_cq_param      icosq_cq;
76 };
77
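/* Striding RQ (MPWQE) requires the striding_rq and umr_ptr_rlky general caps
 * plus a UMR-capable regular SQ (reg_umr_sq), and the inline UMR WQE used to
 * map each MPWQE must fit within the device's maximum SQ WQE size.
 */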
78 bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
79 {
80         bool striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) &&
81                 MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
82                 MLX5_CAP_ETH(mdev, reg_umr_sq);
83         u16 max_wqe_sz_cap = MLX5_CAP_GEN(mdev, max_wqe_sz_sq);
84         bool inline_umr = MLX5E_UMR_WQE_INLINE_SZ <= max_wqe_sz_cap;
85
86         if (!striding_rq_umr)
87                 return false;
88         if (!inline_umr) {
89                 mlx5_core_warn(mdev, "Cannot support Striding RQ: UMR WQE size (%d) exceeds maximum supported (%d).\n",
90                                (int)MLX5E_UMR_WQE_INLINE_SZ, max_wqe_sz_cap);
91                 return false;
92         }
93         return true;
94 }
95
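/* Fragment size backing a single packet in a linear MPWQE stride: headroom
 * plus HW MTU, rounded up so build_skb() can place its skb_shared_info.
 * With an XDP program attached, a full page is dedicated to each packet.
 */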
96 static u32 mlx5e_mpwqe_get_linear_frag_sz(struct mlx5e_params *params)
97 {
98         if (!params->xdp_prog) {
99                 u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
100                 u16 rq_headroom = MLX5_RX_HEADROOM + NET_IP_ALIGN;
101
102                 return MLX5_SKB_FRAG_SZ(rq_headroom + hw_mtu);
103         }
104
105         return PAGE_SIZE;
106 }
107
108 static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params)
109 {
110         u32 linear_frag_sz = mlx5e_mpwqe_get_linear_frag_sz(params);
111
112         return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
113 }
114
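/* A linear SKB can be built directly on an MPWQE stride only when LRO is
 * disabled and the fragment fits in a single page. Unless the device
 * supports the extended stride-number range, the implied number of strides
 * per WQE must also stay within the range the hardware can express.
 */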
115 static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
116                                          struct mlx5e_params *params)
117 {
118         u32 frag_sz = mlx5e_mpwqe_get_linear_frag_sz(params);
119         s8 signed_log_num_strides_param;
120         u8 log_num_strides;
121
122         if (params->lro_en || frag_sz > PAGE_SIZE)
123                 return false;
124
125         if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
126                 return true;
127
128         log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(frag_sz);
129         signed_log_num_strides_param =
130                 (s8)log_num_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE;
131
132         return signed_log_num_strides_param >= 0;
133 }
134
135 static u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params)
136 {
137         if (params->log_rq_mtu_frames <
138             mlx5e_mpwqe_log_pkts_per_wqe(params) + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW)
139                 return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
140
141         return params->log_rq_mtu_frames - mlx5e_mpwqe_log_pkts_per_wqe(params);
142 }
143
144 static u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
145                                           struct mlx5e_params *params)
146 {
147         if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
148                 return order_base_2(mlx5e_mpwqe_get_linear_frag_sz(params));
149
150         return MLX5E_MPWQE_STRIDE_SZ(mdev,
151                 MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
152 }
153
154 static u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
155                                           struct mlx5e_params *params)
156 {
157         return MLX5_MPWRQ_LOG_WQE_SZ -
158                 mlx5e_mpwqe_get_log_stride_size(mdev, params);
159 }
160
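/* Headroom reserved in front of each RX buffer: XDP_PACKET_HEADROOM when an
 * XDP program is attached, MLX5_RX_HEADROOM otherwise, plus NET_IP_ALIGN.
 * Non-linear striding RQ buffers carry no headroom.
 */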
161 static u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
162                                  struct mlx5e_params *params)
163 {
164         u16 linear_rq_headroom = params->xdp_prog ?
165                 XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
166
167         linear_rq_headroom += NET_IP_ALIGN;
168
169         if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST)
170                 return linear_rq_headroom;
171
172         if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
173                 return linear_rq_headroom;
174
175         return 0;
176 }
177
178 void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
179                                struct mlx5e_params *params)
180 {
181         params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
182         params->log_rq_mtu_frames = is_kdump_kernel() ?
183                 MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
184                 MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
185         switch (params->rq_wq_type) {
186         case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
187                 break;
188         default: /* MLX5_WQ_TYPE_LINKED_LIST */
189                 /* Extra room needed for build_skb */
190                 params->lro_wqe_sz -= mlx5e_get_rq_headroom(mdev, params) +
191                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
192         }
193
194         mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
195                        params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
196                        params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
197                        BIT(mlx5e_mpwqe_get_log_rq_size(params)) :
198                        BIT(params->log_rq_mtu_frames),
199                        BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params)),
200                        MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
201 }
202
203 bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
204                                 struct mlx5e_params *params)
205 {
206         return mlx5e_check_fragmented_striding_rq_cap(mdev) &&
207                 !MLX5_IPSEC_DEV(mdev) &&
208                 !(params->xdp_prog && !mlx5e_rx_mpwqe_is_linear_skb(mdev, params));
209 }
210
211 void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
212 {
213         params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) &&
214                 MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
215                 MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
216                 MLX5_WQ_TYPE_LINKED_LIST;
217 }
218
219 static void mlx5e_update_carrier(struct mlx5e_priv *priv)
220 {
221         struct mlx5_core_dev *mdev = priv->mdev;
222         u8 port_state;
223
224         port_state = mlx5_query_vport_state(mdev,
225                                             MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT,
226                                             0);
227
228         if (port_state == VPORT_STATE_UP) {
229                 netdev_info(priv->netdev, "Link up\n");
230                 netif_carrier_on(priv->netdev);
231         } else {
232                 netdev_info(priv->netdev, "Link down\n");
233                 netif_carrier_off(priv->netdev);
234         }
235 }
236
237 static void mlx5e_update_carrier_work(struct work_struct *work)
238 {
239         struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
240                                                update_carrier_work);
241
242         mutex_lock(&priv->state_lock);
243         if (test_bit(MLX5E_STATE_OPENED, &priv->state))
244                 if (priv->profile->update_carrier)
245                         priv->profile->update_carrier(priv);
246         mutex_unlock(&priv->state_lock);
247 }
248
249 void mlx5e_update_stats(struct mlx5e_priv *priv)
250 {
251         int i;
252
253         for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
254                 if (mlx5e_stats_grps[i].update_stats)
255                         mlx5e_stats_grps[i].update_stats(priv);
256 }
257
258 static void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
259 {
260         int i;
261
262         for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
263                 if (mlx5e_stats_grps[i].update_stats_mask &
264                     MLX5E_NDO_UPDATE_STATS)
265                         mlx5e_stats_grps[i].update_stats(priv);
266 }
267
268 void mlx5e_update_stats_work(struct work_struct *work)
269 {
270         struct delayed_work *dwork = to_delayed_work(work);
271         struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
272                                                update_stats_work);
273         mutex_lock(&priv->state_lock);
274         if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
275                 priv->profile->update_stats(priv);
276                 queue_delayed_work(priv->wq, dwork,
277                                    msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
278         }
279         mutex_unlock(&priv->state_lock);
280 }
281
282 static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
283                               enum mlx5_dev_event event, unsigned long param)
284 {
285         struct mlx5e_priv *priv = vpriv;
286
287         if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
288                 return;
289
290         switch (event) {
291         case MLX5_DEV_EVENT_PORT_UP:
292         case MLX5_DEV_EVENT_PORT_DOWN:
293                 queue_work(priv->wq, &priv->update_carrier_work);
294                 break;
295         default:
296                 break;
297         }
298 }
299
300 static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
301 {
302         set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
303 }
304
305 static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
306 {
307         clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
308         synchronize_irq(pci_irq_vector(priv->mdev->pdev, MLX5_EQ_VEC_ASYNC));
309 }
310
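/* Pre-build the UMR WQE that (re)maps the pages of one MPWQE: the ctrl
 * segment references the ICOSQ and the RQ's UMR mkey, and the UMR ctrl
 * segment requests an inline update of one WQE's worth of MTT entries.
 */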
311 static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
312                                        struct mlx5e_icosq *sq,
313                                        struct mlx5e_umr_wqe *wqe)
314 {
315         struct mlx5_wqe_ctrl_seg      *cseg = &wqe->ctrl;
316         struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
317         u8 ds_cnt = DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_DS);
318
319         cseg->qpn_ds    = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
320                                       ds_cnt);
321         cseg->fm_ce_se  = MLX5_WQE_CTRL_CQ_UPDATE;
322         cseg->imm       = rq->mkey_be;
323
324         ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
325         ucseg->xlt_octowords =
326                 cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
327         ucseg->mkey_mask     = cpu_to_be64(MLX5_MKEY_MASK_FREE);
328 }
329
330 static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
331                                      struct mlx5e_channel *c)
332 {
333         int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
334
335         rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
336                                       GFP_KERNEL, cpu_to_node(c->cpu));
337         if (!rq->mpwqe.info)
338                 return -ENOMEM;
339
340         mlx5e_build_umr_wqe(rq, &c->icosq, &rq->mpwqe.umr_wqe);
341
342         return 0;
343 }
344
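/* Create a UMR-enabled MTT memory key spanning 'npages' pages. The mkey is
 * created in the free state; the RQ populates the actual page translations
 * later by posting inline UMR WQEs on the ICOSQ.
 */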
345 static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
346                                  u64 npages, u8 page_shift,
347                                  struct mlx5_core_mkey *umr_mkey)
348 {
349         int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
350         void *mkc;
351         u32 *in;
352         int err;
353
354         in = kvzalloc(inlen, GFP_KERNEL);
355         if (!in)
356                 return -ENOMEM;
357
358         mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
359
360         MLX5_SET(mkc, mkc, free, 1);
361         MLX5_SET(mkc, mkc, umr_en, 1);
362         MLX5_SET(mkc, mkc, lw, 1);
363         MLX5_SET(mkc, mkc, lr, 1);
364         MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
365
366         MLX5_SET(mkc, mkc, qpn, 0xffffff);
367         MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
368         MLX5_SET64(mkc, mkc, len, npages << page_shift);
369         MLX5_SET(mkc, mkc, translations_octword_size,
370                  MLX5_MTT_OCTW(npages));
371         MLX5_SET(mkc, mkc, log_page_size, page_shift);
372
373         err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
374
375         kvfree(in);
376         return err;
377 }
378
379 static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
380 {
381         u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->wq));
382
383         return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
384 }
385
386 static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix)
387 {
388         return (wqe_ix << MLX5E_LOG_ALIGNED_MPWQE_PPW) << PAGE_SHIFT;
389 }
390
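/* Allocate one RQ: create its linked-list work queue, take a reference on
 * the XDP program (if any), register XDP RXQ info, choose RX handlers and
 * per-WQE bookkeeping according to the WQ type, create a page_pool for
 * buffer recycling, and pre-fill the data segment of every WQE.
 */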
391 static int mlx5e_alloc_rq(struct mlx5e_channel *c,
392                           struct mlx5e_params *params,
393                           struct mlx5e_rq_param *rqp,
394                           struct mlx5e_rq *rq)
395 {
396         struct page_pool_params pp_params = { 0 };
397         struct mlx5_core_dev *mdev = c->mdev;
398         void *rqc = rqp->rqc;
399         void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
400         u32 byte_count, pool_size;
401         int npages;
402         int wq_sz;
403         int err;
404         int i;
405
406         rqp->wq.db_numa_node = cpu_to_node(c->cpu);
407
408         err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wq,
409                                 &rq->wq_ctrl);
410         if (err)
411                 return err;
412
413         rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];
414
415         wq_sz = mlx5_wq_ll_get_size(&rq->wq);
416
417         rq->wq_type = params->rq_wq_type;
418         rq->pdev    = c->pdev;
419         rq->netdev  = c->netdev;
420         rq->tstamp  = c->tstamp;
421         rq->clock   = &mdev->clock;
422         rq->channel = c;
423         rq->ix      = c->ix;
424         rq->mdev    = mdev;
425         rq->hw_mtu  = MLX5E_SW2HW_MTU(params, params->sw_mtu);
426
427         rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
428         if (IS_ERR(rq->xdp_prog)) {
429                 err = PTR_ERR(rq->xdp_prog);
430                 rq->xdp_prog = NULL;
431                 goto err_rq_wq_destroy;
432         }
433
434         err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix);
435         if (err < 0)
436                 goto err_rq_wq_destroy;
437
438         rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
439         rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params);
440         pool_size = 1 << params->log_rq_mtu_frames;
441
442         switch (rq->wq_type) {
443         case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
444
445                 pool_size = MLX5_MPWRQ_PAGES_PER_WQE << mlx5e_mpwqe_get_log_rq_size(params);
446                 rq->post_wqes = mlx5e_post_rx_mpwqes;
447                 rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
448
449                 rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe_mpwqe;
450 #ifdef CONFIG_MLX5_EN_IPSEC
451                 if (MLX5_IPSEC_DEV(mdev)) {
452                         err = -EINVAL;
453                         netdev_err(c->netdev, "MPWQE RQ with IPSec offload not supported\n");
454                         goto err_rq_wq_destroy;
455                 }
456 #endif
457                 if (!rq->handle_rx_cqe) {
458                         err = -EINVAL;
459                         netdev_err(c->netdev, "RX handler of MPWQE RQ is not set, err %d\n", err);
460                         goto err_rq_wq_destroy;
461                 }
462
463                 rq->mpwqe.skb_from_cqe_mpwrq =
464                         mlx5e_rx_mpwqe_is_linear_skb(mdev, params) ?
465                         mlx5e_skb_from_cqe_mpwrq_linear :
466                         mlx5e_skb_from_cqe_mpwrq_nonlinear;
467                 rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params);
468                 rq->mpwqe.num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params));
469
470                 byte_count = rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
471
472                 err = mlx5e_create_rq_umr_mkey(mdev, rq);
473                 if (err)
474                         goto err_rq_wq_destroy;
475                 rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);
476
477                 err = mlx5e_rq_alloc_mpwqe_info(rq, c);
478                 if (err)
479                         goto err_destroy_umr_mkey;
480                 break;
481         default: /* MLX5_WQ_TYPE_LINKED_LIST */
482                 rq->wqe.frag_info =
483                         kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info),
484                                      GFP_KERNEL, cpu_to_node(c->cpu));
485                 if (!rq->wqe.frag_info) {
486                         err = -ENOMEM;
487                         goto err_rq_wq_destroy;
488                 }
489                 rq->post_wqes = mlx5e_post_rx_wqes;
490                 rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
491
492 #ifdef CONFIG_MLX5_EN_IPSEC
493                 if (c->priv->ipsec)
494                         rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
495                 else
496 #endif
497                         rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe;
498                 if (!rq->handle_rx_cqe) {
499                         kfree(rq->wqe.frag_info);
500                         err = -EINVAL;
501                         netdev_err(c->netdev, "RX handler of RQ is not set, err %d\n", err);
502                         goto err_rq_wq_destroy;
503                 }
504
505                 byte_count = params->lro_en  ?
506                                 params->lro_wqe_sz :
507                                 MLX5E_SW2HW_MTU(params, params->sw_mtu);
508 #ifdef CONFIG_MLX5_EN_IPSEC
509                 if (MLX5_IPSEC_DEV(mdev))
510                         byte_count += MLX5E_METADATA_ETHER_LEN;
511 #endif
512                 rq->wqe.page_reuse = !params->xdp_prog && !params->lro_en;
513
514                 /* calc the required page order */
515                 rq->wqe.frag_sz = MLX5_SKB_FRAG_SZ(rq->buff.headroom + byte_count);
516                 npages = DIV_ROUND_UP(rq->wqe.frag_sz, PAGE_SIZE);
517                 rq->buff.page_order = order_base_2(npages);
518
519                 byte_count |= MLX5_HW_START_PADDING;
520                 rq->mkey_be = c->mkey_be;
521         }
522
523         /* Create a page_pool and register it with rxq */
524         pp_params.order     = rq->buff.page_order;
525         pp_params.flags     = 0; /* No internal DMA mapping in page_pool */
526         pp_params.pool_size = pool_size;
527         pp_params.nid       = cpu_to_node(c->cpu);
528         pp_params.dev       = c->pdev;
529         pp_params.dma_dir   = rq->buff.map_dir;
530
531         /* page_pool can be used even when there is no rq->xdp_prog:
532          * since page_pool does not handle DMA mapping, there is no
533          * required state to clear. page_pool also gracefully handles an
534          * elevated refcnt.
535          */
536         rq->page_pool = page_pool_create(&pp_params);
537         if (IS_ERR(rq->page_pool)) {
538                 if (rq->wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
539                         kfree(rq->wqe.frag_info);
540                 err = PTR_ERR(rq->page_pool);
541                 rq->page_pool = NULL;
542                 goto err_rq_wq_destroy;
543         }
544         err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
545                                          MEM_TYPE_PAGE_POOL, rq->page_pool);
546         if (err)
547                 goto err_rq_wq_destroy;
548
549         for (i = 0; i < wq_sz; i++) {
550                 struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
551
552                 if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
553                         u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i);
554
555                         wqe->data.addr = cpu_to_be64(dma_offset + rq->buff.headroom);
556                 }
557
558                 wqe->data.byte_count = cpu_to_be32(byte_count);
559                 wqe->data.lkey = rq->mkey_be;
560         }
561
562         INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work);
563
564         switch (params->rx_cq_moderation.cq_period_mode) {
565         case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
566                 rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE;
567                 break;
568         case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
569         default:
570                 rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
571         }
572
573         rq->page_cache.head = 0;
574         rq->page_cache.tail = 0;
575
576         return 0;
577
578 err_destroy_umr_mkey:
579         mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);
580
581 err_rq_wq_destroy:
582         if (rq->xdp_prog)
583                 bpf_prog_put(rq->xdp_prog);
584         xdp_rxq_info_unreg(&rq->xdp_rxq);
585         if (rq->page_pool)
586                 page_pool_destroy(rq->page_pool);
587         mlx5_wq_destroy(&rq->wq_ctrl);
588
589         return err;
590 }
591
592 static void mlx5e_free_rq(struct mlx5e_rq *rq)
593 {
594         int i;
595
596         if (rq->xdp_prog)
597                 bpf_prog_put(rq->xdp_prog);
598
599         xdp_rxq_info_unreg(&rq->xdp_rxq);
600         if (rq->page_pool)
601                 page_pool_destroy(rq->page_pool);
602
603         switch (rq->wq_type) {
604         case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
605                 kfree(rq->mpwqe.info);
606                 mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
607                 break;
608         default: /* MLX5_WQ_TYPE_LINKED_LIST */
609                 kfree(rq->wqe.frag_info);
610         }
611
612         for (i = rq->page_cache.head; i != rq->page_cache.tail;
613              i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
614                 struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];
615
616                 mlx5e_page_release(rq, dma_info, false);
617         }
618         mlx5_wq_destroy(&rq->wq_ctrl);
619 }
620
621 static int mlx5e_create_rq(struct mlx5e_rq *rq,
622                            struct mlx5e_rq_param *param)
623 {
624         struct mlx5_core_dev *mdev = rq->mdev;
625
626         void *in;
627         void *rqc;
628         void *wq;
629         int inlen;
630         int err;
631
632         inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
633                 sizeof(u64) * rq->wq_ctrl.buf.npages;
634         in = kvzalloc(inlen, GFP_KERNEL);
635         if (!in)
636                 return -ENOMEM;
637
638         rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
639         wq  = MLX5_ADDR_OF(rqc, rqc, wq);
640
641         memcpy(rqc, param->rqc, sizeof(param->rqc));
642
643         MLX5_SET(rqc,  rqc, cqn,                rq->cq.mcq.cqn);
644         MLX5_SET(rqc,  rqc, state,              MLX5_RQC_STATE_RST);
645         MLX5_SET(wq,   wq,  log_wq_pg_sz,       rq->wq_ctrl.buf.page_shift -
646                                                 MLX5_ADAPTER_PAGE_SHIFT);
647         MLX5_SET64(wq, wq,  dbr_addr,           rq->wq_ctrl.db.dma);
648
649         mlx5_fill_page_frag_array(&rq->wq_ctrl.buf,
650                                   (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
651
652         err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
653
654         kvfree(in);
655
656         return err;
657 }
658
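/* Move the RQ through a firmware state transition, e.g. RST -> RDY. */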
659 static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
660                                  int next_state)
661 {
662         struct mlx5_core_dev *mdev = rq->mdev;
663
664         void *in;
665         void *rqc;
666         int inlen;
667         int err;
668
669         inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
670         in = kvzalloc(inlen, GFP_KERNEL);
671         if (!in)
672                 return -ENOMEM;
673
674         rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
675
676         MLX5_SET(modify_rq_in, in, rq_state, curr_state);
677         MLX5_SET(rqc, rqc, state, next_state);
678
679         err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
680
681         kvfree(in);
682
683         return err;
684 }
685
686 static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
687 {
688         struct mlx5e_channel *c = rq->channel;
689         struct mlx5e_priv *priv = c->priv;
690         struct mlx5_core_dev *mdev = priv->mdev;
691
692         void *in;
693         void *rqc;
694         int inlen;
695         int err;
696
697         inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
698         in = kvzalloc(inlen, GFP_KERNEL);
699         if (!in)
700                 return -ENOMEM;
701
702         rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
703
704         MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
705         MLX5_SET64(modify_rq_in, in, modify_bitmask,
706                    MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS);
707         MLX5_SET(rqc, rqc, scatter_fcs, enable);
708         MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
709
710         err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
711
712         kvfree(in);
713
714         return err;
715 }
716
717 static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
718 {
719         struct mlx5e_channel *c = rq->channel;
720         struct mlx5_core_dev *mdev = c->mdev;
721         void *in;
722         void *rqc;
723         int inlen;
724         int err;
725
726         inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
727         in = kvzalloc(inlen, GFP_KERNEL);
728         if (!in)
729                 return -ENOMEM;
730
731         rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
732
733         MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
734         MLX5_SET64(modify_rq_in, in, modify_bitmask,
735                    MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
736         MLX5_SET(rqc, rqc, vsd, vsd);
737         MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
738
739         err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
740
741         kvfree(in);
742
743         return err;
744 }
745
746 static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
747 {
748         mlx5_core_destroy_rq(rq->mdev, rq->rqn);
749 }
750
751 static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
752 {
753         unsigned long exp_time = jiffies + msecs_to_jiffies(wait_time);
754         struct mlx5e_channel *c = rq->channel;
755
756         struct mlx5_wq_ll *wq = &rq->wq;
757         u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5_wq_ll_get_size(wq));
758
759         do {
760                 if (wq->cur_sz >= min_wqes)
761                         return 0;
762
763                 msleep(20);
764         } while (time_before(jiffies, exp_time));
765
766         netdev_warn(c->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
767                     c->ix, rq->rqn, wq->cur_sz, min_wqes);
768
769         return -ETIMEDOUT;
770 }
771
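/* Release RX descriptors still posted when the RQ is torn down, including a
 * UMR WQE that may be in progress at wq->head and, for the linked-list RQ
 * with page-reuse enabled, pages still held by already-handled WQEs.
 */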
772 static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
773 {
774         struct mlx5_wq_ll *wq = &rq->wq;
775         struct mlx5e_rx_wqe *wqe;
776         __be16 wqe_ix_be;
777         u16 wqe_ix;
778
779         /* UMR WQE (if in progress) is always at wq->head */
780         if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
781             rq->mpwqe.umr_in_progress)
782                 mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);
783
784         while (!mlx5_wq_ll_is_empty(wq)) {
785                 wqe_ix_be = *wq->tail_next;
786                 wqe_ix    = be16_to_cpu(wqe_ix_be);
787                 wqe       = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
788                 rq->dealloc_wqe(rq, wqe_ix);
789                 mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
790                                &wqe->next.next_wqe_index);
791         }
792
793         if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST && rq->wqe.page_reuse) {
794                 /* Clean outstanding pages on handled WQEs that decided to do
795                  * page-reuse but have not yet been re-posted.
796                  */
797                 int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
798
799                 for (wqe_ix = 0; wqe_ix < wq_sz; wqe_ix++)
800                         rq->dealloc_wqe(rq, wqe_ix);
801         }
802 }
803
804 static int mlx5e_open_rq(struct mlx5e_channel *c,
805                          struct mlx5e_params *params,
806                          struct mlx5e_rq_param *param,
807                          struct mlx5e_rq *rq)
808 {
809         int err;
810
811         err = mlx5e_alloc_rq(c, params, param, rq);
812         if (err)
813                 return err;
814
815         err = mlx5e_create_rq(rq, param);
816         if (err)
817                 goto err_free_rq;
818
819         err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
820         if (err)
821                 goto err_destroy_rq;
822
823         if (params->rx_dim_enabled)
824                 __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
825
826         return 0;
827
828 err_destroy_rq:
829         mlx5e_destroy_rq(rq);
830 err_free_rq:
831         mlx5e_free_rq(rq);
832
833         return err;
834 }
835
836 static void mlx5e_activate_rq(struct mlx5e_rq *rq)
837 {
838         struct mlx5e_icosq *sq = &rq->channel->icosq;
839         struct mlx5_wq_cyc *wq = &sq->wq;
840         struct mlx5e_tx_wqe *nopwqe;
841
842         u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
843
844         set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
845         sq->db.ico_wqe[pi].opcode     = MLX5_OPCODE_NOP;
846         nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
847         mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
848 }
849
850 static void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
851 {
852         clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
853         napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
854 }
855
856 static void mlx5e_close_rq(struct mlx5e_rq *rq)
857 {
858         cancel_work_sync(&rq->dim.work);
859         mlx5e_destroy_rq(rq);
860         mlx5e_free_rx_descs(rq);
861         mlx5e_free_rq(rq);
862 }
863
864 static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
865 {
866         kfree(sq->db.di);
867 }
868
869 static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
870 {
871         int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
872
873         sq->db.di = kzalloc_node(sizeof(*sq->db.di) * wq_sz,
874                                      GFP_KERNEL, numa);
875         if (!sq->db.di) {
876                 mlx5e_free_xdpsq_db(sq);
877                 return -ENOMEM;
878         }
879
880         return 0;
881 }
882
883 static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
884                              struct mlx5e_params *params,
885                              struct mlx5e_sq_param *param,
886                              struct mlx5e_xdpsq *sq)
887 {
888         void *sqc_wq               = MLX5_ADDR_OF(sqc, param->sqc, wq);
889         struct mlx5_core_dev *mdev = c->mdev;
890         struct mlx5_wq_cyc *wq = &sq->wq;
891         int err;
892
893         sq->pdev      = c->pdev;
894         sq->mkey_be   = c->mkey_be;
895         sq->channel   = c;
896         sq->uar_map   = mdev->mlx5e_res.bfreg.map;
897         sq->min_inline_mode = params->tx_min_inline_mode;
898
899         param->wq.db_numa_node = cpu_to_node(c->cpu);
900         err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
901         if (err)
902                 return err;
903         wq->db = &wq->db[MLX5_SND_DBR];
904
905         err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
906         if (err)
907                 goto err_sq_wq_destroy;
908
909         return 0;
910
911 err_sq_wq_destroy:
912         mlx5_wq_destroy(&sq->wq_ctrl);
913
914         return err;
915 }
916
917 static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
918 {
919         mlx5e_free_xdpsq_db(sq);
920         mlx5_wq_destroy(&sq->wq_ctrl);
921 }
922
923 static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
924 {
925         kfree(sq->db.ico_wqe);
926 }
927
928 static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
929 {
930         u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
931
932         sq->db.ico_wqe = kzalloc_node(sizeof(*sq->db.ico_wqe) * wq_sz,
933                                       GFP_KERNEL, numa);
934         if (!sq->db.ico_wqe)
935                 return -ENOMEM;
936
937         return 0;
938 }
939
940 static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
941                              struct mlx5e_sq_param *param,
942                              struct mlx5e_icosq *sq)
943 {
944         void *sqc_wq               = MLX5_ADDR_OF(sqc, param->sqc, wq);
945         struct mlx5_core_dev *mdev = c->mdev;
946         struct mlx5_wq_cyc *wq = &sq->wq;
947         int err;
948
949         sq->channel   = c;
950         sq->uar_map   = mdev->mlx5e_res.bfreg.map;
951
952         param->wq.db_numa_node = cpu_to_node(c->cpu);
953         err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
954         if (err)
955                 return err;
956         wq->db = &wq->db[MLX5_SND_DBR];
957
958         err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
959         if (err)
960                 goto err_sq_wq_destroy;
961
962         return 0;
963
964 err_sq_wq_destroy:
965         mlx5_wq_destroy(&sq->wq_ctrl);
966
967         return err;
968 }
969
970 static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
971 {
972         mlx5e_free_icosq_db(sq);
973         mlx5_wq_destroy(&sq->wq_ctrl);
974 }
975
976 static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
977 {
978         kfree(sq->db.wqe_info);
979         kfree(sq->db.dma_fifo);
980 }
981
982 static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
983 {
984         int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
985         int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
986
987         sq->db.dma_fifo = kzalloc_node(df_sz * sizeof(*sq->db.dma_fifo),
988                                            GFP_KERNEL, numa);
989         sq->db.wqe_info = kzalloc_node(wq_sz * sizeof(*sq->db.wqe_info),
990                                            GFP_KERNEL, numa);
991         if (!sq->db.dma_fifo || !sq->db.wqe_info) {
992                 mlx5e_free_txqsq_db(sq);
993                 return -ENOMEM;
994         }
995
996         sq->dma_fifo_mask = df_sz - 1;
997
998         return 0;
999 }
1000
1001 static void mlx5e_sq_recover(struct work_struct *work);
1002 static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
1003                              int txq_ix,
1004                              struct mlx5e_params *params,
1005                              struct mlx5e_sq_param *param,
1006                              struct mlx5e_txqsq *sq)
1007 {
1008         void *sqc_wq               = MLX5_ADDR_OF(sqc, param->sqc, wq);
1009         struct mlx5_core_dev *mdev = c->mdev;
1010         struct mlx5_wq_cyc *wq = &sq->wq;
1011         int err;
1012
1013         sq->pdev      = c->pdev;
1014         sq->tstamp    = c->tstamp;
1015         sq->clock     = &mdev->clock;
1016         sq->mkey_be   = c->mkey_be;
1017         sq->channel   = c;
1018         sq->txq_ix    = txq_ix;
1019         sq->uar_map   = mdev->mlx5e_res.bfreg.map;
1020         sq->min_inline_mode = params->tx_min_inline_mode;
1021         INIT_WORK(&sq->recover.recover_work, mlx5e_sq_recover);
1022         if (MLX5_IPSEC_DEV(c->priv->mdev))
1023                 set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
1024         if (mlx5_accel_is_tls_device(c->priv->mdev))
1025                 set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
1026
1027         param->wq.db_numa_node = cpu_to_node(c->cpu);
1028         err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
1029         if (err)
1030                 return err;
1031         wq->db    = &wq->db[MLX5_SND_DBR];
1032
1033         err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
1034         if (err)
1035                 goto err_sq_wq_destroy;
1036
1037         INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work);
1038         sq->dim.mode = params->tx_cq_moderation.cq_period_mode;
1039
1040         return 0;
1041
1042 err_sq_wq_destroy:
1043         mlx5_wq_destroy(&sq->wq_ctrl);
1044
1045         return err;
1046 }
1047
1048 static void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
1049 {
1050         mlx5e_free_txqsq_db(sq);
1051         mlx5_wq_destroy(&sq->wq_ctrl);
1052 }
1053
1054 struct mlx5e_create_sq_param {
1055         struct mlx5_wq_ctrl        *wq_ctrl;
1056         u32                         cqn;
1057         u32                         tisn;
1058         u8                          tis_lst_sz;
1059         u8                          min_inline_mode;
1060 };
1061
1062 static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
1063                            struct mlx5e_sq_param *param,
1064                            struct mlx5e_create_sq_param *csp,
1065                            u32 *sqn)
1066 {
1067         void *in;
1068         void *sqc;
1069         void *wq;
1070         int inlen;
1071         int err;
1072
1073         inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
1074                 sizeof(u64) * csp->wq_ctrl->buf.npages;
1075         in = kvzalloc(inlen, GFP_KERNEL);
1076         if (!in)
1077                 return -ENOMEM;
1078
1079         sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
1080         wq = MLX5_ADDR_OF(sqc, sqc, wq);
1081
1082         memcpy(sqc, param->sqc, sizeof(param->sqc));
1083         MLX5_SET(sqc,  sqc, tis_lst_sz, csp->tis_lst_sz);
1084         MLX5_SET(sqc,  sqc, tis_num_0, csp->tisn);
1085         MLX5_SET(sqc,  sqc, cqn, csp->cqn);
1086
1087         if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
1088                 MLX5_SET(sqc,  sqc, min_wqe_inline_mode, csp->min_inline_mode);
1089
1090         MLX5_SET(sqc,  sqc, state, MLX5_SQC_STATE_RST);
1091         MLX5_SET(sqc,  sqc, flush_in_error_en, 1);
1092
1093         MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
1094         MLX5_SET(wq,   wq, uar_page,      mdev->mlx5e_res.bfreg.index);
1095         MLX5_SET(wq,   wq, log_wq_pg_sz,  csp->wq_ctrl->buf.page_shift -
1096                                           MLX5_ADAPTER_PAGE_SHIFT);
1097         MLX5_SET64(wq, wq, dbr_addr,      csp->wq_ctrl->db.dma);
1098
1099         mlx5_fill_page_frag_array(&csp->wq_ctrl->buf,
1100                                   (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
1101
1102         err = mlx5_core_create_sq(mdev, in, inlen, sqn);
1103
1104         kvfree(in);
1105
1106         return err;
1107 }
1108
1109 struct mlx5e_modify_sq_param {
1110         int curr_state;
1111         int next_state;
1112         bool rl_update;
1113         int rl_index;
1114 };
1115
1116 static int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
1117                            struct mlx5e_modify_sq_param *p)
1118 {
1119         void *in;
1120         void *sqc;
1121         int inlen;
1122         int err;
1123
1124         inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
1125         in = kvzalloc(inlen, GFP_KERNEL);
1126         if (!in)
1127                 return -ENOMEM;
1128
1129         sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
1130
1131         MLX5_SET(modify_sq_in, in, sq_state, p->curr_state);
1132         MLX5_SET(sqc, sqc, state, p->next_state);
1133         if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) {
1134                 MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
1135                 MLX5_SET(sqc,  sqc, packet_pacing_rate_limit_index, p->rl_index);
1136         }
1137
1138         err = mlx5_core_modify_sq(mdev, sqn, in, inlen);
1139
1140         kvfree(in);
1141
1142         return err;
1143 }
1144
1145 static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
1146 {
1147         mlx5_core_destroy_sq(mdev, sqn);
1148 }
1149
1150 static int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
1151                                struct mlx5e_sq_param *param,
1152                                struct mlx5e_create_sq_param *csp,
1153                                u32 *sqn)
1154 {
1155         struct mlx5e_modify_sq_param msp = {0};
1156         int err;
1157
1158         err = mlx5e_create_sq(mdev, param, csp, sqn);
1159         if (err)
1160                 return err;
1161
1162         msp.curr_state = MLX5_SQC_STATE_RST;
1163         msp.next_state = MLX5_SQC_STATE_RDY;
1164         err = mlx5e_modify_sq(mdev, *sqn, &msp);
1165         if (err)
1166                 mlx5e_destroy_sq(mdev, *sqn);
1167
1168         return err;
1169 }
1170
1171 static int mlx5e_set_sq_maxrate(struct net_device *dev,
1172                                 struct mlx5e_txqsq *sq, u32 rate);
1173
1174 static int mlx5e_open_txqsq(struct mlx5e_channel *c,
1175                             u32 tisn,
1176                             int txq_ix,
1177                             struct mlx5e_params *params,
1178                             struct mlx5e_sq_param *param,
1179                             struct mlx5e_txqsq *sq)
1180 {
1181         struct mlx5e_create_sq_param csp = {};
1182         u32 tx_rate;
1183         int err;
1184
1185         err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq);
1186         if (err)
1187                 return err;
1188
1189         csp.tisn            = tisn;
1190         csp.tis_lst_sz      = 1;
1191         csp.cqn             = sq->cq.mcq.cqn;
1192         csp.wq_ctrl         = &sq->wq_ctrl;
1193         csp.min_inline_mode = sq->min_inline_mode;
1194         err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
1195         if (err)
1196                 goto err_free_txqsq;
1197
1198         tx_rate = c->priv->tx_rates[sq->txq_ix];
1199         if (tx_rate)
1200                 mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate);
1201
1202         if (params->tx_dim_enabled)
1203                 sq->state |= BIT(MLX5E_SQ_STATE_AM);
1204
1205         return 0;
1206
1207 err_free_txqsq:
1208         clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1209         mlx5e_free_txqsq(sq);
1210
1211         return err;
1212 }
1213
1214 static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq)
1215 {
1216         WARN_ONCE(sq->cc != sq->pc,
1217                   "SQ 0x%x: cc (0x%x) != pc (0x%x)\n",
1218                   sq->sqn, sq->cc, sq->pc);
1219         sq->cc = 0;
1220         sq->dma_fifo_cc = 0;
1221         sq->pc = 0;
1222 }
1223
1224 static void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
1225 {
1226         sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
1227         clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
1228         set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1229         netdev_tx_reset_queue(sq->txq);
1230         netif_tx_start_queue(sq->txq);
1231 }
1232
1233 static inline void netif_tx_disable_queue(struct netdev_queue *txq)
1234 {
1235         __netif_tx_lock_bh(txq);
1236         netif_tx_stop_queue(txq);
1237         __netif_tx_unlock_bh(txq);
1238 }
1239
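/* Quiesce a TXQ SQ: clear the ENABLED bit, wait out the current NAPI poll so
 * the queue cannot be woken again, stop the netdev TX queue, and post one
 * final NOP with a doorbell so previously posted WQEs are handed to HW.
 */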
1240 static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
1241 {
1242         struct mlx5e_channel *c = sq->channel;
1243         struct mlx5_wq_cyc *wq = &sq->wq;
1244
1245         clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1246         /* prevent netif_tx_wake_queue */
1247         napi_synchronize(&c->napi);
1248
1249         netif_tx_disable_queue(sq->txq);
1250
1251         /* last doorbell out, godspeed .. */
1252         if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) {
1253                 u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
1254                 struct mlx5e_tx_wqe *nop;
1255
1256                 sq->db.wqe_info[pi].skb = NULL;
1257                 nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
1258                 mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl);
1259         }
1260 }
1261
1262 static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
1263 {
1264         struct mlx5e_channel *c = sq->channel;
1265         struct mlx5_core_dev *mdev = c->mdev;
1266         struct mlx5_rate_limit rl = {0};
1267
1268         mlx5e_destroy_sq(mdev, sq->sqn);
1269         if (sq->rate_limit) {
1270                 rl.rate = sq->rate_limit;
1271                 mlx5_rl_remove_rate(mdev, &rl);
1272         }
1273         mlx5e_free_txqsq_descs(sq);
1274         mlx5e_free_txqsq(sq);
1275 }
1276
1277 static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
1278 {
1279         unsigned long exp_time = jiffies + msecs_to_jiffies(2000);
1280
1281         while (time_before(jiffies, exp_time)) {
1282                 if (sq->cc == sq->pc)
1283                         return 0;
1284
1285                 msleep(20);
1286         }
1287
1288         netdev_err(sq->channel->netdev,
1289                    "Wait for SQ 0x%x flush timeout (sq cc = 0x%x, sq pc = 0x%x)\n",
1290                    sq->sqn, sq->cc, sq->pc);
1291
1292         return -ETIMEDOUT;
1293 }
1294
1295 static int mlx5e_sq_to_ready(struct mlx5e_txqsq *sq, int curr_state)
1296 {
1297         struct mlx5_core_dev *mdev = sq->channel->mdev;
1298         struct net_device *dev = sq->channel->netdev;
1299         struct mlx5e_modify_sq_param msp = {0};
1300         int err;
1301
1302         msp.curr_state = curr_state;
1303         msp.next_state = MLX5_SQC_STATE_RST;
1304
1305         err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
1306         if (err) {
1307                 netdev_err(dev, "Failed to move sq 0x%x to reset\n", sq->sqn);
1308                 return err;
1309         }
1310
1311         memset(&msp, 0, sizeof(msp));
1312         msp.curr_state = MLX5_SQC_STATE_RST;
1313         msp.next_state = MLX5_SQC_STATE_RDY;
1314
1315         err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
1316         if (err) {
1317                 netdev_err(dev, "Failed to move sq 0x%x to ready\n", sq->sqn);
1318                 return err;
1319         }
1320
1321         return 0;
1322 }
1323
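/* Recovery worker for a TXQ SQ that raised an error CQE: verify the SQ is in
 * the ERROR state, stop the queue and wait for it to drain, rate-limit
 * back-to-back recoveries, then move the SQ back through RST to RDY, reset
 * the driver's cc/pc counters and re-activate the queue.
 */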
1324 static void mlx5e_sq_recover(struct work_struct *work)
1325 {
1326         struct mlx5e_txqsq_recover *recover =
1327                 container_of(work, struct mlx5e_txqsq_recover,
1328                              recover_work);
1329         struct mlx5e_txqsq *sq = container_of(recover, struct mlx5e_txqsq,
1330                                               recover);
1331         struct mlx5_core_dev *mdev = sq->channel->mdev;
1332         struct net_device *dev = sq->channel->netdev;
1333         u8 state;
1334         int err;
1335
1336         err = mlx5_core_query_sq_state(mdev, sq->sqn, &state);
1337         if (err) {
1338                 netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n",
1339                            sq->sqn, err);
1340                 return;
1341         }
1342
1343         if (state != MLX5_RQC_STATE_ERR) {
1344                 netdev_err(dev, "SQ 0x%x not in ERROR state\n", sq->sqn);
1345                 return;
1346         }
1347
1348         netif_tx_disable_queue(sq->txq);
1349
1350         if (mlx5e_wait_for_sq_flush(sq))
1351                 return;
1352
1353         /* If the interval between two consecutive recoveries of the same SQ is
1354          * too short, don't recover, to avoid an infinite ERR_CQE -> recover loop.
1355          * If we reached this state, there is probably a bug that needs to be
1356          * fixed. Keep the queue closed and let the TX timeout handler clean up.
1357          */
1358         if (jiffies_to_msecs(jiffies - recover->last_recover) <
1359             MLX5E_SQ_RECOVER_MIN_INTERVAL) {
1360                 netdev_err(dev, "Recover SQ 0x%x canceled, too many error CQEs\n",
1361                            sq->sqn);
1362                 return;
1363         }
1364
1365         /* At this point, no new packets will arrive from the stack as the TXQ
1366          * is marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI cleared all
1367          * pending WQEs, so it is now safe to reset the SQ.
1368          */
1369         if (mlx5e_sq_to_ready(sq, state))
1370                 return;
1371
1372         mlx5e_reset_txqsq_cc_pc(sq);
1373         sq->stats.recover++;
1374         recover->last_recover = jiffies;
1375         mlx5e_activate_txqsq(sq);
1376 }
1377
1378 static int mlx5e_open_icosq(struct mlx5e_channel *c,
1379                             struct mlx5e_params *params,
1380                             struct mlx5e_sq_param *param,
1381                             struct mlx5e_icosq *sq)
1382 {
1383         struct mlx5e_create_sq_param csp = {};
1384         int err;
1385
1386         err = mlx5e_alloc_icosq(c, param, sq);
1387         if (err)
1388                 return err;
1389
1390         csp.cqn             = sq->cq.mcq.cqn;
1391         csp.wq_ctrl         = &sq->wq_ctrl;
1392         csp.min_inline_mode = params->tx_min_inline_mode;
1393         set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1394         err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
1395         if (err)
1396                 goto err_free_icosq;
1397
1398         return 0;
1399
1400 err_free_icosq:
1401         clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1402         mlx5e_free_icosq(sq);
1403
1404         return err;
1405 }
1406
1407 static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
1408 {
1409         struct mlx5e_channel *c = sq->channel;
1410
1411         clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1412         napi_synchronize(&c->napi);
1413
1414         mlx5e_destroy_sq(c->mdev, sq->sqn);
1415         mlx5e_free_icosq(sq);
1416 }
1417
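/* Open an XDP transmit SQ. XDP_TX WQEs have a fixed layout, so the ctrl, eth
 * and data segments of every WQE in the ring are initialized once here (ds
 * count, inline header size, lkey); the datapath only fills per-packet fields.
 */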
1418 static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
1419                             struct mlx5e_params *params,
1420                             struct mlx5e_sq_param *param,
1421                             struct mlx5e_xdpsq *sq)
1422 {
1423         unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
1424         struct mlx5e_create_sq_param csp = {};
1425         unsigned int inline_hdr_sz = 0;
1426         int err;
1427         int i;
1428
1429         err = mlx5e_alloc_xdpsq(c, params, param, sq);
1430         if (err)
1431                 return err;
1432
1433         csp.tis_lst_sz      = 1;
1434         csp.tisn            = c->priv->tisn[0]; /* tc = 0 */
1435         csp.cqn             = sq->cq.mcq.cqn;
1436         csp.wq_ctrl         = &sq->wq_ctrl;
1437         csp.min_inline_mode = sq->min_inline_mode;
1438         set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1439         err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
1440         if (err)
1441                 goto err_free_xdpsq;
1442
1443         if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
1444                 inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
1445                 ds_cnt++;
1446         }
1447
1448         /* Pre-initialize fixed WQE fields */
1449         for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
1450                 struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(&sq->wq, i);
1451                 struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
1452                 struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
1453                 struct mlx5_wqe_data_seg *dseg;
1454
1455                 cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
1456                 eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
1457
1458                 dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
1459                 dseg->lkey = sq->mkey_be;
1460         }
1461
1462         return 0;
1463
1464 err_free_xdpsq:
1465         clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1466         mlx5e_free_xdpsq(sq);
1467
1468         return err;
1469 }
1470
1471 static void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
1472 {
1473         struct mlx5e_channel *c = sq->channel;
1474
1475         clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1476         napi_synchronize(&c->napi);
1477
1478         mlx5e_destroy_sq(c->mdev, sq->sqn);
1479         mlx5e_free_xdpsq_descs(sq);
1480         mlx5e_free_xdpsq(sq);
1481 }
1482
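/* Common CQ setup: create the CQ work queue, wire up the doorbell records and
 * the completion/error event handlers, and set op_own of every CQE to an
 * invalid value so stale entries are never mistaken for valid completions.
 */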
1483 static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
1484                                  struct mlx5e_cq_param *param,
1485                                  struct mlx5e_cq *cq)
1486 {
1487         struct mlx5_core_cq *mcq = &cq->mcq;
1488         int eqn_not_used;
1489         unsigned int irqn;
1490         int err;
1491         u32 i;
1492
1493         err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
1494                                &cq->wq_ctrl);
1495         if (err)
1496                 return err;
1497
1498         mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
1499
1500         mcq->cqe_sz     = 64;
1501         mcq->set_ci_db  = cq->wq_ctrl.db.db;
1502         mcq->arm_db     = cq->wq_ctrl.db.db + 1;
1503         *mcq->set_ci_db = 0;
1504         *mcq->arm_db    = 0;
1505         mcq->vector     = param->eq_ix;
1506         mcq->comp       = mlx5e_completion_event;
1507         mcq->event      = mlx5e_cq_error_event;
1508         mcq->irqn       = irqn;
1509
1510         for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
1511                 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
1512
1513                 cqe->op_own = 0xf1;
1514         }
1515
1516         cq->mdev = mdev;
1517
1518         return 0;
1519 }
1520
1521 static int mlx5e_alloc_cq(struct mlx5e_channel *c,
1522                           struct mlx5e_cq_param *param,
1523                           struct mlx5e_cq *cq)
1524 {
1525         struct mlx5_core_dev *mdev = c->priv->mdev;
1526         int err;
1527
1528         param->wq.buf_numa_node = cpu_to_node(c->cpu);
1529         param->wq.db_numa_node  = cpu_to_node(c->cpu);
1530         param->eq_ix   = c->ix;
1531
1532         err = mlx5e_alloc_cq_common(mdev, param, cq);
1533
1534         cq->napi    = &c->napi;
1535         cq->channel = c;
1536
1537         return err;
1538 }
1539
1540 static void mlx5e_free_cq(struct mlx5e_cq *cq)
1541 {
1542         mlx5_wq_destroy(&cq->wq_ctrl);
1543 }
1544
1545 static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
1546 {
1547         struct mlx5_core_dev *mdev = cq->mdev;
1548         struct mlx5_core_cq *mcq = &cq->mcq;
1549
1550         void *in;
1551         void *cqc;
1552         int inlen;
1553         unsigned int irqn_not_used;
1554         int eqn;
1555         int err;
1556
1557         inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
1558                 sizeof(u64) * cq->wq_ctrl.buf.npages;
1559         in = kvzalloc(inlen, GFP_KERNEL);
1560         if (!in)
1561                 return -ENOMEM;
1562
1563         cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
1564
1565         memcpy(cqc, param->cqc, sizeof(param->cqc));
1566
1567         mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
1568                                   (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
1569
1570         mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
1571
1572         MLX5_SET(cqc,   cqc, cq_period_mode, param->cq_period_mode);
1573         MLX5_SET(cqc,   cqc, c_eqn,         eqn);
1574         MLX5_SET(cqc,   cqc, uar_page,      mdev->priv.uar->index);
1575         MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
1576                                             MLX5_ADAPTER_PAGE_SHIFT);
1577         MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);
1578
1579         err = mlx5_core_create_cq(mdev, mcq, in, inlen);
1580
1581         kvfree(in);
1582
1583         if (err)
1584                 return err;
1585
1586         mlx5e_cq_arm(cq);
1587
1588         return 0;
1589 }
1590
1591 static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
1592 {
1593         mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
1594 }
1595
1596 static int mlx5e_open_cq(struct mlx5e_channel *c,
1597                          struct net_dim_cq_moder moder,
1598                          struct mlx5e_cq_param *param,
1599                          struct mlx5e_cq *cq)
1600 {
1601         struct mlx5_core_dev *mdev = c->mdev;
1602         int err;
1603
1604         err = mlx5e_alloc_cq(c, param, cq);
1605         if (err)
1606                 return err;
1607
1608         err = mlx5e_create_cq(cq, param);
1609         if (err)
1610                 goto err_free_cq;
1611
1612         if (MLX5_CAP_GEN(mdev, cq_moderation))
1613                 mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts);
1614         return 0;
1615
1616 err_free_cq:
1617         mlx5e_free_cq(cq);
1618
1619         return err;
1620 }
1621
1622 static void mlx5e_close_cq(struct mlx5e_cq *cq)
1623 {
1624         mlx5e_destroy_cq(cq);
1625         mlx5e_free_cq(cq);
1626 }
1627
1628 static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
1629 {
1630         return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
1631 }
1632
1633 static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
1634                              struct mlx5e_params *params,
1635                              struct mlx5e_channel_param *cparam)
1636 {
1637         int err;
1638         int tc;
1639
1640         for (tc = 0; tc < c->num_tc; tc++) {
1641                 err = mlx5e_open_cq(c, params->tx_cq_moderation,
1642                                     &cparam->tx_cq, &c->sq[tc].cq);
1643                 if (err)
1644                         goto err_close_tx_cqs;
1645         }
1646
1647         return 0;
1648
1649 err_close_tx_cqs:
1650         for (tc--; tc >= 0; tc--)
1651                 mlx5e_close_cq(&c->sq[tc].cq);
1652
1653         return err;
1654 }
1655
1656 static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
1657 {
1658         int tc;
1659
1660         for (tc = 0; tc < c->num_tc; tc++)
1661                 mlx5e_close_cq(&c->sq[tc].cq);
1662 }
1663
1664 static int mlx5e_open_sqs(struct mlx5e_channel *c,
1665                           struct mlx5e_params *params,
1666                           struct mlx5e_channel_param *cparam)
1667 {
1668         int err;
1669         int tc;
1670
1671         for (tc = 0; tc < params->num_tc; tc++) {
1672                 int txq_ix = c->ix + tc * params->num_channels;
1673
1674                 err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix,
1675                                        params, &cparam->sq, &c->sq[tc]);
1676                 if (err)
1677                         goto err_close_sqs;
1678         }
1679
1680         return 0;
1681
1682 err_close_sqs:
1683         for (tc--; tc >= 0; tc--)
1684                 mlx5e_close_txqsq(&c->sq[tc]);
1685
1686         return err;
1687 }
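
/* Worked example for the txq_ix computation above (illustrative numbers
 * only): with params->num_channels = 4 and params->num_tc = 2, channel
 * c->ix = 1 gets txq_ix = 1 + 0 * 4 = 1 for TC 0 and txq_ix = 1 + 1 * 4 = 5
 * for TC 1, i.e. TX queues are grouped per TC: all channels of TC 0 first,
 * then all channels of TC 1. mlx5e_build_channels_tx_maps() fills
 * priv->channel_tc2txq[][] with the same formula.
 */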
1688
1689 static void mlx5e_close_sqs(struct mlx5e_channel *c)
1690 {
1691         int tc;
1692
1693         for (tc = 0; tc < c->num_tc; tc++)
1694                 mlx5e_close_txqsq(&c->sq[tc]);
1695 }
1696
1697 static int mlx5e_set_sq_maxrate(struct net_device *dev,
1698                                 struct mlx5e_txqsq *sq, u32 rate)
1699 {
1700         struct mlx5e_priv *priv = netdev_priv(dev);
1701         struct mlx5_core_dev *mdev = priv->mdev;
1702         struct mlx5e_modify_sq_param msp = {0};
1703         struct mlx5_rate_limit rl = {0};
1704         u16 rl_index = 0;
1705         int err;
1706
1707         if (rate == sq->rate_limit)
1708                 /* nothing to do */
1709                 return 0;
1710
1711         if (sq->rate_limit) {
1712                 rl.rate = sq->rate_limit;
1713                 /* remove the current rl index to free space for the next ones */
1714                 mlx5_rl_remove_rate(mdev, &rl);
1715         }
1716
1717         sq->rate_limit = 0;
1718
1719         if (rate) {
1720                 rl.rate = rate;
1721                 err = mlx5_rl_add_rate(mdev, &rl_index, &rl);
1722                 if (err) {
1723                         netdev_err(dev, "Failed configuring rate %u: %d\n",
1724                                    rate, err);
1725                         return err;
1726                 }
1727         }
1728
1729         msp.curr_state = MLX5_SQC_STATE_RDY;
1730         msp.next_state = MLX5_SQC_STATE_RDY;
1731         msp.rl_index   = rl_index;
1732         msp.rl_update  = true;
1733         err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
1734         if (err) {
1735                 netdev_err(dev, "Failed configuring rate %u: %d\n",
1736                            rate, err);
1737                 /* remove the rate from the table */
1738                 if (rate)
1739                         mlx5_rl_remove_rate(mdev, &rl);
1740                 return err;
1741         }
1742
1743         sq->rate_limit = rate;
1744         return 0;
1745 }
1746
1747 static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
1748 {
1749         struct mlx5e_priv *priv = netdev_priv(dev);
1750         struct mlx5_core_dev *mdev = priv->mdev;
1751         struct mlx5e_txqsq *sq = priv->txq2sq[index];
1752         int err = 0;
1753
1754         if (!mlx5_rl_is_supported(mdev)) {
1755                 netdev_err(dev, "Rate limiting is not supported on this device\n");
1756                 return -EINVAL;
1757         }
1758
1759         /* rate is given in Mb/sec, HW config is in Kb/sec */
1760         rate = rate << 10;
1761
1762         /* Check whether the rate is in a valid range; 0 is always valid */
1763         if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
1764                 netdev_err(dev, "TX rate %u is not in range\n", rate);
1765                 return -ERANGE;
1766         }
1767
1768         mutex_lock(&priv->state_lock);
1769         if (test_bit(MLX5E_STATE_OPENED, &priv->state))
1770                 err = mlx5e_set_sq_maxrate(dev, sq, rate);
1771         if (!err)
1772                 priv->tx_rates[index] = rate;
1773         mutex_unlock(&priv->state_lock);
1774
1775         return err;
1776 }
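
/* Worked example for the unit conversion above (illustrative numbers only):
 * a request of 100 Mb/s arrives as rate = 100 and is shifted left by 10,
 * giving 100 << 10 = 102400 Kb/s, which is the value that gets range-checked
 * and installed in the rate-limit table. Note the shift approximates the
 * Mb -> Kb conversion with a factor of 1024 rather than 1000.
 */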
1777
1778 static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
1779                               struct mlx5e_params *params,
1780                               struct mlx5e_channel_param *cparam,
1781                               struct mlx5e_channel **cp)
1782 {
1783         struct net_dim_cq_moder icocq_moder = {0, 0};
1784         struct net_device *netdev = priv->netdev;
1785         int cpu = mlx5e_get_cpu(priv, ix);
1786         struct mlx5e_channel *c;
1787         unsigned int irq;
1788         int err;
1789         int eqn;
1790
1791         c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
1792         if (!c)
1793                 return -ENOMEM;
1794
1795         c->priv     = priv;
1796         c->mdev     = priv->mdev;
1797         c->tstamp   = &priv->tstamp;
1798         c->ix       = ix;
1799         c->cpu      = cpu;
1800         c->pdev     = &priv->mdev->pdev->dev;
1801         c->netdev   = priv->netdev;
1802         c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
1803         c->num_tc   = params->num_tc;
1804         c->xdp      = !!params->xdp_prog;
1805
1806         mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
1807         c->irq_desc = irq_to_desc(irq);
1808
1809         netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
1810
1811         err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq);
1812         if (err)
1813                 goto err_napi_del;
1814
1815         err = mlx5e_open_tx_cqs(c, params, cparam);
1816         if (err)
1817                 goto err_close_icosq_cq;
1818
1819         err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq);
1820         if (err)
1821                 goto err_close_tx_cqs;
1822
1823         /* XDP SQ CQ params are the same as normal TXQ SQ CQ params */
1824         err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation,
1825                                      &cparam->tx_cq, &c->rq.xdpsq.cq) : 0;
1826         if (err)
1827                 goto err_close_rx_cq;
1828
1829         napi_enable(&c->napi);
1830
1831         err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
1832         if (err)
1833                 goto err_disable_napi;
1834
1835         err = mlx5e_open_sqs(c, params, cparam);
1836         if (err)
1837                 goto err_close_icosq;
1838
1839         err = c->xdp ? mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->rq.xdpsq) : 0;
1840         if (err)
1841                 goto err_close_sqs;
1842
1843         err = mlx5e_open_rq(c, params, &cparam->rq, &c->rq);
1844         if (err)
1845                 goto err_close_xdp_sq;
1846
1847         *cp = c;
1848
1849         return 0;
1850 err_close_xdp_sq:
1851         if (c->xdp)
1852                 mlx5e_close_xdpsq(&c->rq.xdpsq);
1853
1854 err_close_sqs:
1855         mlx5e_close_sqs(c);
1856
1857 err_close_icosq:
1858         mlx5e_close_icosq(&c->icosq);
1859
1860 err_disable_napi:
1861         napi_disable(&c->napi);
1862         if (c->xdp)
1863                 mlx5e_close_cq(&c->rq.xdpsq.cq);
1864
1865 err_close_rx_cq:
1866         mlx5e_close_cq(&c->rq.cq);
1867
1868 err_close_tx_cqs:
1869         mlx5e_close_tx_cqs(c);
1870
1871 err_close_icosq_cq:
1872         mlx5e_close_cq(&c->icosq.cq);
1873
1874 err_napi_del:
1875         netif_napi_del(&c->napi);
1876         kfree(c);
1877
1878         return err;
1879 }
1880
1881 static void mlx5e_activate_channel(struct mlx5e_channel *c)
1882 {
1883         int tc;
1884
1885         for (tc = 0; tc < c->num_tc; tc++)
1886                 mlx5e_activate_txqsq(&c->sq[tc]);
1887         mlx5e_activate_rq(&c->rq);
1888         netif_set_xps_queue(c->netdev, get_cpu_mask(c->cpu), c->ix);
1889 }
1890
1891 static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
1892 {
1893         int tc;
1894
1895         mlx5e_deactivate_rq(&c->rq);
1896         for (tc = 0; tc < c->num_tc; tc++)
1897                 mlx5e_deactivate_txqsq(&c->sq[tc]);
1898 }
1899
1900 static void mlx5e_close_channel(struct mlx5e_channel *c)
1901 {
1902         mlx5e_close_rq(&c->rq);
1903         if (c->xdp)
1904                 mlx5e_close_xdpsq(&c->rq.xdpsq);
1905         mlx5e_close_sqs(c);
1906         mlx5e_close_icosq(&c->icosq);
1907         napi_disable(&c->napi);
1908         if (c->xdp)
1909                 mlx5e_close_cq(&c->rq.xdpsq.cq);
1910         mlx5e_close_cq(&c->rq.cq);
1911         mlx5e_close_tx_cqs(c);
1912         mlx5e_close_cq(&c->icosq.cq);
1913         netif_napi_del(&c->napi);
1914
1915         kfree(c);
1916 }
1917
1918 static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
1919                                  struct mlx5e_params *params,
1920                                  struct mlx5e_rq_param *param)
1921 {
1922         struct mlx5_core_dev *mdev = priv->mdev;
1923         void *rqc = param->rqc;
1924         void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
1925
1926         switch (params->rq_wq_type) {
1927         case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
1928                 MLX5_SET(wq, wq, log_wqe_num_of_strides,
1929                          mlx5e_mpwqe_get_log_num_strides(mdev, params) -
1930                          MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
1931                 MLX5_SET(wq, wq, log_wqe_stride_size,
1932                          mlx5e_mpwqe_get_log_stride_size(mdev, params) -
1933                          MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
1934                 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
1935                 MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params));
1936                 break;
1937         default: /* MLX5_WQ_TYPE_LINKED_LIST */
1938                 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
1939                 MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
1940         }
1941
1942         MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
1943         MLX5_SET(wq, wq, log_wq_stride,    ilog2(sizeof(struct mlx5e_rx_wqe)));
1944         MLX5_SET(wq, wq, pd,               mdev->mlx5e_res.pdn);
1945         MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
1946         MLX5_SET(rqc, rqc, vsd,            params->vlan_strip_disable);
1947         MLX5_SET(rqc, rqc, scatter_fcs,    params->scatter_fcs_en);
1948
1949         param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
1950 }
1951
1952 static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
1953                                       struct mlx5e_rq_param *param)
1954 {
1955         struct mlx5_core_dev *mdev = priv->mdev;
1956         void *rqc = param->rqc;
1957         void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
1958
1959         MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
1960         MLX5_SET(wq, wq, log_wq_stride,    ilog2(sizeof(struct mlx5e_rx_wqe)));
1961         MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter);
1962
1963         param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
1964 }
1965
1966 static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
1967                                         struct mlx5e_sq_param *param)
1968 {
1969         void *sqc = param->sqc;
1970         void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
1971
1972         MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
1973         MLX5_SET(wq, wq, pd,            priv->mdev->mlx5e_res.pdn);
1974
1975         param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
1976 }
1977
1978 static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
1979                                  struct mlx5e_params *params,
1980                                  struct mlx5e_sq_param *param)
1981 {
1982         void *sqc = param->sqc;
1983         void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
1984
1985         mlx5e_build_sq_param_common(priv, param);
1986         MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
1987         MLX5_SET(sqc, sqc, allow_swp, !!MLX5_IPSEC_DEV(priv->mdev));
1988 }
1989
1990 static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
1991                                         struct mlx5e_cq_param *param)
1992 {
1993         void *cqc = param->cqc;
1994
1995         MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
1996 }
1997
1998 static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
1999                                     struct mlx5e_params *params,
2000                                     struct mlx5e_cq_param *param)
2001 {
2002         struct mlx5_core_dev *mdev = priv->mdev;
2003         void *cqc = param->cqc;
2004         u8 log_cq_size;
2005
2006         switch (params->rq_wq_type) {
2007         case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
2008                 log_cq_size = mlx5e_mpwqe_get_log_rq_size(params) +
2009                         mlx5e_mpwqe_get_log_num_strides(mdev, params);
2010                 break;
2011         default: /* MLX5_WQ_TYPE_LINKED_LIST */
2012                 log_cq_size = params->log_rq_mtu_frames;
2013         }
2014
2015         MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
2016         if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
2017                 MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
2018                 MLX5_SET(cqc, cqc, cqe_comp_en, 1);
2019         }
2020
2021         mlx5e_build_common_cq_param(priv, param);
2022         param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
2023 }
2024
2025 static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
2026                                     struct mlx5e_params *params,
2027                                     struct mlx5e_cq_param *param)
2028 {
2029         void *cqc = param->cqc;
2030
2031         MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);
2032
2033         mlx5e_build_common_cq_param(priv, param);
2034         param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
2035 }
2036
2037 static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
2038                                      u8 log_wq_size,
2039                                      struct mlx5e_cq_param *param)
2040 {
2041         void *cqc = param->cqc;
2042
2043         MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);
2044
2045         mlx5e_build_common_cq_param(priv, param);
2046
2047         param->cq_period_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
2048 }
2049
2050 static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
2051                                     u8 log_wq_size,
2052                                     struct mlx5e_sq_param *param)
2053 {
2054         void *sqc = param->sqc;
2055         void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
2056
2057         mlx5e_build_sq_param_common(priv, param);
2058
2059         MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
2060         MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
2061 }
2062
2063 static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
2064                                     struct mlx5e_params *params,
2065                                     struct mlx5e_sq_param *param)
2066 {
2067         void *sqc = param->sqc;
2068         void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
2069
2070         mlx5e_build_sq_param_common(priv, param);
2071         MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
2072 }
2073
2074 static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
2075                                       struct mlx5e_params *params,
2076                                       struct mlx5e_channel_param *cparam)
2077 {
2078         u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
2079
2080         mlx5e_build_rq_param(priv, params, &cparam->rq);
2081         mlx5e_build_sq_param(priv, params, &cparam->sq);
2082         mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
2083         mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq);
2084         mlx5e_build_rx_cq_param(priv, params, &cparam->rx_cq);
2085         mlx5e_build_tx_cq_param(priv, params, &cparam->tx_cq);
2086         mlx5e_build_ico_cq_param(priv, icosq_log_wq_sz, &cparam->icosq_cq);
2087 }
2088
2089 int mlx5e_open_channels(struct mlx5e_priv *priv,
2090                         struct mlx5e_channels *chs)
2091 {
2092         struct mlx5e_channel_param *cparam;
2093         int err = -ENOMEM;
2094         int i;
2095
2096         chs->num = chs->params.num_channels;
2097
2098         chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
2099         cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
2100         if (!chs->c || !cparam)
2101                 goto err_free;
2102
2103         mlx5e_build_channel_param(priv, &chs->params, cparam);
2104         for (i = 0; i < chs->num; i++) {
2105                 err = mlx5e_open_channel(priv, i, &chs->params, cparam, &chs->c[i]);
2106                 if (err)
2107                         goto err_close_channels;
2108         }
2109
2110         kfree(cparam);
2111         return 0;
2112
2113 err_close_channels:
2114         for (i--; i >= 0; i--)
2115                 mlx5e_close_channel(chs->c[i]);
2116
2117 err_free:
2118         kfree(chs->c);
2119         kfree(cparam);
2120         chs->num = 0;
2121         return err;
2122 }
2123
2124 static void mlx5e_activate_channels(struct mlx5e_channels *chs)
2125 {
2126         int i;
2127
2128         for (i = 0; i < chs->num; i++)
2129                 mlx5e_activate_channel(chs->c[i]);
2130 }
2131
2132 static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
2133 {
2134         int err = 0;
2135         int i;
2136
2137         for (i = 0; i < chs->num; i++)
2138                 err |= mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq,
2139                                                   err ? 0 : 20000);
2140
2141         return err ? -ETIMEDOUT : 0;
2142 }
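
/* Note on the loop above: the wait time is 20000 only while err is still
 * zero. Once one RQ fails to reach its minimum of RX WQEs, every remaining RQ
 * is checked with a zero wait, so the total time spent here stays bounded by
 * roughly one timeout instead of one timeout per channel.
 */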
2143
2144 static void mlx5e_deactivate_channels(struct mlx5e_channels *chs)
2145 {
2146         int i;
2147
2148         for (i = 0; i < chs->num; i++)
2149                 mlx5e_deactivate_channel(chs->c[i]);
2150 }
2151
2152 void mlx5e_close_channels(struct mlx5e_channels *chs)
2153 {
2154         int i;
2155
2156         for (i = 0; i < chs->num; i++)
2157                 mlx5e_close_channel(chs->c[i]);
2158
2159         kfree(chs->c);
2160         chs->num = 0;
2161 }
2162
2163 static int
2164 mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, struct mlx5e_rqt *rqt)
2165 {
2166         struct mlx5_core_dev *mdev = priv->mdev;
2167         void *rqtc;
2168         int inlen;
2169         int err;
2170         u32 *in;
2171         int i;
2172
2173         inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
2174         in = kvzalloc(inlen, GFP_KERNEL);
2175         if (!in)
2176                 return -ENOMEM;
2177
2178         rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
2179
2180         MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
2181         MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
2182
2183         for (i = 0; i < sz; i++)
2184                 MLX5_SET(rqtc, rqtc, rq_num[i], priv->drop_rq.rqn);
2185
2186         err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
2187         if (!err)
2188                 rqt->enabled = true;
2189
2190         kvfree(in);
2191         return err;
2192 }
2193
2194 void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt)
2195 {
2196         rqt->enabled = false;
2197         mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn);
2198 }
2199
2200 int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv)
2201 {
2202         struct mlx5e_rqt *rqt = &priv->indir_rqt;
2203         int err;
2204
2205         err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, rqt);
2206         if (err)
2207                 mlx5_core_warn(priv->mdev, "create indirect rqts failed, %d\n", err);
2208         return err;
2209 }
2210
2211 int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
2212 {
2213         struct mlx5e_rqt *rqt;
2214         int err;
2215         int ix;
2216
2217         for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
2218                 rqt = &priv->direct_tir[ix].rqt;
2219                 err = mlx5e_create_rqt(priv, 1 /* size */, rqt);
2220                 if (err)
2221                         goto err_destroy_rqts;
2222         }
2223
2224         return 0;
2225
2226 err_destroy_rqts:
2227         mlx5_core_warn(priv->mdev, "create direct rqts failed, %d\n", err);
2228         for (ix--; ix >= 0; ix--)
2229                 mlx5e_destroy_rqt(priv, &priv->direct_tir[ix].rqt);
2230
2231         return err;
2232 }
2233
2234 void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv)
2235 {
2236         int i;
2237
2238         for (i = 0; i < priv->profile->max_nch(priv->mdev); i++)
2239                 mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
2240 }
2241
2242 static int mlx5e_rx_hash_fn(int hfunc)
2243 {
2244         return (hfunc == ETH_RSS_HASH_TOP) ?
2245                MLX5_RX_HASH_FN_TOEPLITZ :
2246                MLX5_RX_HASH_FN_INVERTED_XOR8;
2247 }
2248
2249 int mlx5e_bits_invert(unsigned long a, int size)
2250 {
2251         int inv = 0;
2252         int i;
2253
2254         for (i = 0; i < size; i++)
2255                 inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
2256
2257         return inv;
2258 }
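
/* Worked example (illustrative numbers only): mlx5e_bits_invert() reverses
 * the low 'size' bits of 'a'. For size = 3, a = 0b001 returns 0b100 and
 * a = 0b110 returns 0b011. mlx5e_fill_rqt_rqns() below applies it with
 * size = ilog2(sz) to bit-reverse indirection table indices when the XOR
 * RSS hash function is selected.
 */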
2259
2260 static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, int sz,
2261                                 struct mlx5e_redirect_rqt_param rrp, void *rqtc)
2262 {
2263         int i;
2264
2265         for (i = 0; i < sz; i++) {
2266                 u32 rqn;
2267
2268                 if (rrp.is_rss) {
2269                         int ix = i;
2270
2271                         if (rrp.rss.hfunc == ETH_RSS_HASH_XOR)
2272                                 ix = mlx5e_bits_invert(i, ilog2(sz));
2273
2274                         ix = priv->channels.params.indirection_rqt[ix];
2275                         rqn = rrp.rss.channels->c[ix]->rq.rqn;
2276                 } else {
2277                         rqn = rrp.rqn;
2278                 }
2279                 MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
2280         }
2281 }
2282
2283 int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
2284                        struct mlx5e_redirect_rqt_param rrp)
2285 {
2286         struct mlx5_core_dev *mdev = priv->mdev;
2287         void *rqtc;
2288         int inlen;
2289         u32 *in;
2290         int err;
2291
2292         inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
2293         in = kvzalloc(inlen, GFP_KERNEL);
2294         if (!in)
2295                 return -ENOMEM;
2296
2297         rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
2298
2299         MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
2300         MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
2301         mlx5e_fill_rqt_rqns(priv, sz, rrp, rqtc);
2302         err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);
2303
2304         kvfree(in);
2305         return err;
2306 }
2307
2308 static u32 mlx5e_get_direct_rqn(struct mlx5e_priv *priv, int ix,
2309                                 struct mlx5e_redirect_rqt_param rrp)
2310 {
2311         if (!rrp.is_rss)
2312                 return rrp.rqn;
2313
2314         if (ix >= rrp.rss.channels->num)
2315                 return priv->drop_rq.rqn;
2316
2317         return rrp.rss.channels->c[ix]->rq.rqn;
2318 }
2319
2320 static void mlx5e_redirect_rqts(struct mlx5e_priv *priv,
2321                                 struct mlx5e_redirect_rqt_param rrp)
2322 {
2323         u32 rqtn;
2324         int ix;
2325
2326         if (priv->indir_rqt.enabled) {
2327                 /* RSS RQ table */
2328                 rqtn = priv->indir_rqt.rqtn;
2329                 mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
2330         }
2331
2332         for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
2333                 struct mlx5e_redirect_rqt_param direct_rrp = {
2334                         .is_rss = false,
2335                         {
2336                                 .rqn    = mlx5e_get_direct_rqn(priv, ix, rrp)
2337                         },
2338                 };
2339
2340                 /* Direct RQ Tables */
2341                 if (!priv->direct_tir[ix].rqt.enabled)
2342                         continue;
2343
2344                 rqtn = priv->direct_tir[ix].rqt.rqtn;
2345                 mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
2346         }
2347 }
2348
2349 static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv,
2350                                             struct mlx5e_channels *chs)
2351 {
2352         struct mlx5e_redirect_rqt_param rrp = {
2353                 .is_rss        = true,
2354                 {
2355                         .rss = {
2356                                 .channels  = chs,
2357                                 .hfunc     = chs->params.rss_hfunc,
2358                         }
2359                 },
2360         };
2361
2362         mlx5e_redirect_rqts(priv, rrp);
2363 }
2364
2365 static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv)
2366 {
2367         struct mlx5e_redirect_rqt_param drop_rrp = {
2368                 .is_rss = false,
2369                 {
2370                         .rqn = priv->drop_rq.rqn,
2371                 },
2372         };
2373
2374         mlx5e_redirect_rqts(priv, drop_rrp);
2375 }
2376
2377 static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc)
2378 {
2379         if (!params->lro_en)
2380                 return;
2381
2382 #define ROUGH_MAX_L2_L3_HDR_SZ 256
2383
2384         MLX5_SET(tirc, tirc, lro_enable_mask,
2385                  MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
2386                  MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
2387         MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
2388                  (params->lro_wqe_sz - ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
2389         MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout);
2390 }
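
/* Worked example for lro_max_ip_payload_size above (illustrative numbers
 * only): the field is programmed as (lro_wqe_sz - ROUGH_MAX_L2_L3_HDR_SZ)
 * >> 8, i.e. in 256-byte units after reserving a rough 256 bytes for L2/L3
 * headers. For lro_wqe_sz = 64 KB that is (65536 - 256) >> 8 = 255.
 */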
2391
2392 void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
2393                                     enum mlx5e_traffic_types tt,
2394                                     void *tirc, bool inner)
2395 {
2396         void *hfso = inner ? MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner) :
2397                              MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
2398
2399 #define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
2400                                  MLX5_HASH_FIELD_SEL_DST_IP)
2401
2402 #define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
2403                                  MLX5_HASH_FIELD_SEL_DST_IP   |\
2404                                  MLX5_HASH_FIELD_SEL_L4_SPORT |\
2405                                  MLX5_HASH_FIELD_SEL_L4_DPORT)
2406
2407 #define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
2408                                  MLX5_HASH_FIELD_SEL_DST_IP   |\
2409                                  MLX5_HASH_FIELD_SEL_IPSEC_SPI)
2410
2411         MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(params->rss_hfunc));
2412         if (params->rss_hfunc == ETH_RSS_HASH_TOP) {
2413                 void *rss_key = MLX5_ADDR_OF(tirc, tirc,
2414                                              rx_hash_toeplitz_key);
2415                 size_t len = MLX5_FLD_SZ_BYTES(tirc,
2416                                                rx_hash_toeplitz_key);
2417
2418                 MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
2419                 memcpy(rss_key, params->toeplitz_hash_key, len);
2420         }
2421
2422         switch (tt) {
2423         case MLX5E_TT_IPV4_TCP:
2424                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2425                          MLX5_L3_PROT_TYPE_IPV4);
2426                 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2427                          MLX5_L4_PROT_TYPE_TCP);
2428                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2429                          MLX5_HASH_IP_L4PORTS);
2430                 break;
2431
2432         case MLX5E_TT_IPV6_TCP:
2433                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2434                          MLX5_L3_PROT_TYPE_IPV6);
2435                 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2436                          MLX5_L4_PROT_TYPE_TCP);
2437                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2438                          MLX5_HASH_IP_L4PORTS);
2439                 break;
2440
2441         case MLX5E_TT_IPV4_UDP:
2442                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2443                          MLX5_L3_PROT_TYPE_IPV4);
2444                 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2445                          MLX5_L4_PROT_TYPE_UDP);
2446                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2447                          MLX5_HASH_IP_L4PORTS);
2448                 break;
2449
2450         case MLX5E_TT_IPV6_UDP:
2451                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2452                          MLX5_L3_PROT_TYPE_IPV6);
2453                 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2454                          MLX5_L4_PROT_TYPE_UDP);
2455                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2456                          MLX5_HASH_IP_L4PORTS);
2457                 break;
2458
2459         case MLX5E_TT_IPV4_IPSEC_AH:
2460                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2461                          MLX5_L3_PROT_TYPE_IPV4);
2462                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2463                          MLX5_HASH_IP_IPSEC_SPI);
2464                 break;
2465
2466         case MLX5E_TT_IPV6_IPSEC_AH:
2467                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2468                          MLX5_L3_PROT_TYPE_IPV6);
2469                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2470                          MLX5_HASH_IP_IPSEC_SPI);
2471                 break;
2472
2473         case MLX5E_TT_IPV4_IPSEC_ESP:
2474                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2475                          MLX5_L3_PROT_TYPE_IPV4);
2476                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2477                          MLX5_HASH_IP_IPSEC_SPI);
2478                 break;
2479
2480         case MLX5E_TT_IPV6_IPSEC_ESP:
2481                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2482                          MLX5_L3_PROT_TYPE_IPV6);
2483                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2484                          MLX5_HASH_IP_IPSEC_SPI);
2485                 break;
2486
2487         case MLX5E_TT_IPV4:
2488                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2489                          MLX5_L3_PROT_TYPE_IPV4);
2490                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2491                          MLX5_HASH_IP);
2492                 break;
2493
2494         case MLX5E_TT_IPV6:
2495                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2496                          MLX5_L3_PROT_TYPE_IPV6);
2497                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2498                          MLX5_HASH_IP);
2499                 break;
2500         default:
2501                 WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
2502         }
2503 }
2504
2505 static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
2506 {
2507         struct mlx5_core_dev *mdev = priv->mdev;
2508
2509         void *in;
2510         void *tirc;
2511         int inlen;
2512         int err;
2513         int tt;
2514         int ix;
2515
2516         inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
2517         in = kvzalloc(inlen, GFP_KERNEL);
2518         if (!in)
2519                 return -ENOMEM;
2520
2521         MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
2522         tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
2523
2524         mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
2525
2526         for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
2527                 err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
2528                                            inlen);
2529                 if (err)
2530                         goto free_in;
2531         }
2532
2533         for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
2534                 err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
2535                                            in, inlen);
2536                 if (err)
2537                         goto free_in;
2538         }
2539
2540 free_in:
2541         kvfree(in);
2542
2543         return err;
2544 }
2545
2546 static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv,
2547                                             enum mlx5e_traffic_types tt,
2548                                             u32 *tirc)
2549 {
2550         MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
2551
2552         mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
2553
2554         MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
2555         MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
2556         MLX5_SET(tirc, tirc, tunneled_offload_en, 0x1);
2557
2558         mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, true);
2559 }
2560
2561 static int mlx5e_set_mtu(struct mlx5_core_dev *mdev,
2562                          struct mlx5e_params *params, u16 mtu)
2563 {
2564         u16 hw_mtu = MLX5E_SW2HW_MTU(params, mtu);
2565         int err;
2566
2567         err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
2568         if (err)
2569                 return err;
2570
2571         /* Update vport context MTU */
2572         mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
2573         return 0;
2574 }
2575
2576 static void mlx5e_query_mtu(struct mlx5_core_dev *mdev,
2577                             struct mlx5e_params *params, u16 *mtu)
2578 {
2579         u16 hw_mtu = 0;
2580         int err;
2581
2582         err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
2583         if (err || !hw_mtu) /* fall back to port oper MTU */
2584                 mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
2585
2586         *mtu = MLX5E_HW2SW_MTU(params, hw_mtu);
2587 }
2588
2589 static int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
2590 {
2591         struct mlx5e_params *params = &priv->channels.params;
2592         struct net_device *netdev = priv->netdev;
2593         struct mlx5_core_dev *mdev = priv->mdev;
2594         u16 mtu;
2595         int err;
2596
2597         err = mlx5e_set_mtu(mdev, params, params->sw_mtu);
2598         if (err)
2599                 return err;
2600
2601         mlx5e_query_mtu(mdev, params, &mtu);
2602         if (mtu != params->sw_mtu)
2603                 netdev_warn(netdev, "%s: VPort MTU %d is different from netdev MTU %d\n",
2604                             __func__, mtu, params->sw_mtu);
2605
2606         params->sw_mtu = mtu;
2607         return 0;
2608 }
2609
2610 static void mlx5e_netdev_set_tcs(struct net_device *netdev)
2611 {
2612         struct mlx5e_priv *priv = netdev_priv(netdev);
2613         int nch = priv->channels.params.num_channels;
2614         int ntc = priv->channels.params.num_tc;
2615         int tc;
2616
2617         netdev_reset_tc(netdev);
2618
2619         if (ntc == 1)
2620                 return;
2621
2622         netdev_set_num_tc(netdev, ntc);
2623
2624         /* Map all netdev TCs to TXQ offset 0;
2625          * we use our own UP-to-TXQ mapping for QoS.
2626          */
2627         for (tc = 0; tc < ntc; tc++)
2628                 netdev_set_tc_queue(netdev, tc, nch, 0);
2629 }
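
/* Example of the resulting mapping (illustrative numbers only): with nch = 4
 * and ntc = 2, netdev_set_tc_queue(netdev, tc, 4, 0) is called for each TC,
 * so as far as the stack is concerned every TC spans TXQs 0..3; the actual
 * per-TC queue selection relies on the driver's own channel_tc2txq mapping
 * instead.
 */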
2630
2631 static void mlx5e_build_channels_tx_maps(struct mlx5e_priv *priv)
2632 {
2633         struct mlx5e_channel *c;
2634         struct mlx5e_txqsq *sq;
2635         int i, tc;
2636
2637         for (i = 0; i < priv->channels.num; i++)
2638                 for (tc = 0; tc < priv->profile->max_tc; tc++)
2639                         priv->channel_tc2txq[i][tc] = i + tc * priv->channels.num;
2640
2641         for (i = 0; i < priv->channels.num; i++) {
2642                 c = priv->channels.c[i];
2643                 for (tc = 0; tc < c->num_tc; tc++) {
2644                         sq = &c->sq[tc];
2645                         priv->txq2sq[sq->txq_ix] = sq;
2646                 }
2647         }
2648 }
2649
2650 void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
2651 {
2652         int num_txqs = priv->channels.num * priv->channels.params.num_tc;
2653         struct net_device *netdev = priv->netdev;
2654
2655         mlx5e_netdev_set_tcs(netdev);
2656         netif_set_real_num_tx_queues(netdev, num_txqs);
2657         netif_set_real_num_rx_queues(netdev, priv->channels.num);
2658
2659         mlx5e_build_channels_tx_maps(priv);
2660         mlx5e_activate_channels(&priv->channels);
2661         netif_tx_start_all_queues(priv->netdev);
2662
2663         if (MLX5_VPORT_MANAGER(priv->mdev))
2664                 mlx5e_add_sqs_fwd_rules(priv);
2665
2666         mlx5e_wait_channels_min_rx_wqes(&priv->channels);
2667         mlx5e_redirect_rqts_to_channels(priv, &priv->channels);
2668 }
2669
2670 void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
2671 {
2672         mlx5e_redirect_rqts_to_drop(priv);
2673
2674         if (MLX5_VPORT_MANAGER(priv->mdev))
2675                 mlx5e_remove_sqs_fwd_rules(priv);
2676
2677         /* FIXME: This is a workaround (W/A) only for a TX timeout watchdog
2678          * false alarm when polling for inactive TX queues.
2679          */
2680         netif_tx_stop_all_queues(priv->netdev);
2681         netif_tx_disable(priv->netdev);
2682         mlx5e_deactivate_channels(&priv->channels);
2683 }
2684
2685 void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
2686                                 struct mlx5e_channels *new_chs,
2687                                 mlx5e_fp_hw_modify hw_modify)
2688 {
2689         struct net_device *netdev = priv->netdev;
2690         int new_num_txqs;
2691         int carrier_ok;
2692
2693         new_num_txqs = new_chs->num * new_chs->params.num_tc;
2694         carrier_ok = netif_carrier_ok(netdev);
2695         netif_carrier_off(netdev);
2696
2697         if (new_num_txqs < netdev->real_num_tx_queues)
2698                 netif_set_real_num_tx_queues(netdev, new_num_txqs);
2699
2700         mlx5e_deactivate_priv_channels(priv);
2701         mlx5e_close_channels(&priv->channels);
2702
2703         priv->channels = *new_chs;
2704
2705         /* New channels are ready to roll, modify HW settings if needed */
2706         if (hw_modify)
2707                 hw_modify(priv);
2708
2709         mlx5e_refresh_tirs(priv, false);
2710         mlx5e_activate_priv_channels(priv);
2711
2712         /* return carrier back if needed */
2713         if (carrier_ok)
2714                 netif_carrier_on(netdev);
2715 }
2716
2717 void mlx5e_timestamp_init(struct mlx5e_priv *priv)
2718 {
2719         priv->tstamp.tx_type   = HWTSTAMP_TX_OFF;
2720         priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
2721 }
2722
2723 int mlx5e_open_locked(struct net_device *netdev)
2724 {
2725         struct mlx5e_priv *priv = netdev_priv(netdev);
2726         int err;
2727
2728         set_bit(MLX5E_STATE_OPENED, &priv->state);
2729
2730         err = mlx5e_open_channels(priv, &priv->channels);
2731         if (err)
2732                 goto err_clear_state_opened_flag;
2733
2734         mlx5e_refresh_tirs(priv, false);
2735         mlx5e_activate_priv_channels(priv);
2736         if (priv->profile->update_carrier)
2737                 priv->profile->update_carrier(priv);
2738
2739         if (priv->profile->update_stats)
2740                 queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
2741
2742         return 0;
2743
2744 err_clear_state_opened_flag:
2745         clear_bit(MLX5E_STATE_OPENED, &priv->state);
2746         return err;
2747 }
2748
2749 int mlx5e_open(struct net_device *netdev)
2750 {
2751         struct mlx5e_priv *priv = netdev_priv(netdev);
2752         int err;
2753
2754         mutex_lock(&priv->state_lock);
2755         err = mlx5e_open_locked(netdev);
2756         if (!err)
2757                 mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
2758         mutex_unlock(&priv->state_lock);
2759
2760         if (mlx5e_vxlan_allowed(priv->mdev))
2761                 udp_tunnel_get_rx_info(netdev);
2762
2763         return err;
2764 }
2765
2766 int mlx5e_close_locked(struct net_device *netdev)
2767 {
2768         struct mlx5e_priv *priv = netdev_priv(netdev);
2769
2770         /* May already be CLOSED if a previous configuration operation
2771          * (e.g. an RX/TX queue size change) that involves close & open failed.
2772          */
2773         if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
2774                 return 0;
2775
2776         clear_bit(MLX5E_STATE_OPENED, &priv->state);
2777
2778         netif_carrier_off(priv->netdev);
2779         mlx5e_deactivate_priv_channels(priv);
2780         mlx5e_close_channels(&priv->channels);
2781
2782         return 0;
2783 }
2784
2785 int mlx5e_close(struct net_device *netdev)
2786 {
2787         struct mlx5e_priv *priv = netdev_priv(netdev);
2788         int err;
2789
2790         if (!netif_device_present(netdev))
2791                 return -ENODEV;
2792
2793         mutex_lock(&priv->state_lock);
2794         mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN);
2795         err = mlx5e_close_locked(netdev);
2796         mutex_unlock(&priv->state_lock);
2797
2798         return err;
2799 }
2800
2801 static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
2802                                struct mlx5e_rq *rq,
2803                                struct mlx5e_rq_param *param)
2804 {
2805         void *rqc = param->rqc;
2806         void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
2807         int err;
2808
2809         param->wq.db_numa_node = param->wq.buf_numa_node;
2810
2811         err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
2812                                 &rq->wq_ctrl);
2813         if (err)
2814                 return err;
2815
2816         /* Mark as unused given "Drop-RQ" packets never reach XDP */
2817         xdp_rxq_info_unused(&rq->xdp_rxq);
2818
2819         rq->mdev = mdev;
2820
2821         return 0;
2822 }
2823
2824 static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
2825                                struct mlx5e_cq *cq,
2826                                struct mlx5e_cq_param *param)
2827 {
2828         param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
2829         param->wq.db_numa_node  = dev_to_node(&mdev->pdev->dev);
2830
2831         return mlx5e_alloc_cq_common(mdev, param, cq);
2832 }
2833
2834 static int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
2835                               struct mlx5e_rq *drop_rq)
2836 {
2837         struct mlx5_core_dev *mdev = priv->mdev;
2838         struct mlx5e_cq_param cq_param = {};
2839         struct mlx5e_rq_param rq_param = {};
2840         struct mlx5e_cq *cq = &drop_rq->cq;
2841         int err;
2842
2843         mlx5e_build_drop_rq_param(priv, &rq_param);
2844
2845         err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
2846         if (err)
2847                 return err;
2848
2849         err = mlx5e_create_cq(cq, &cq_param);
2850         if (err)
2851                 goto err_free_cq;
2852
2853         err = mlx5e_alloc_drop_rq(mdev, drop_rq, &rq_param);
2854         if (err)
2855                 goto err_destroy_cq;
2856
2857         err = mlx5e_create_rq(drop_rq, &rq_param);
2858         if (err)
2859                 goto err_free_rq;
2860
2861         err = mlx5e_modify_rq_state(drop_rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
2862         if (err)
2863                 mlx5_core_warn(priv->mdev, "modify_rq_state failed (err %d), rx_if_down_packets won't be counted\n", err);
2864
2865         return 0;
2866
2867 err_free_rq:
2868         mlx5e_free_rq(drop_rq);
2869
2870 err_destroy_cq:
2871         mlx5e_destroy_cq(cq);
2872
2873 err_free_cq:
2874         mlx5e_free_cq(cq);
2875
2876         return err;
2877 }
2878
2879 static void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
2880 {
2881         mlx5e_destroy_rq(drop_rq);
2882         mlx5e_free_rq(drop_rq);
2883         mlx5e_destroy_cq(&drop_rq->cq);
2884         mlx5e_free_cq(&drop_rq->cq);
2885 }
2886
2887 int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
2888                      u32 underlay_qpn, u32 *tisn)
2889 {
2890         u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
2891         void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
2892
2893         MLX5_SET(tisc, tisc, prio, tc << 1);
2894         MLX5_SET(tisc, tisc, underlay_qpn, underlay_qpn);
2895         MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);
2896
2897         if (mlx5_lag_is_lacp_owner(mdev))
2898                 MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
2899
2900         return mlx5_core_create_tis(mdev, in, sizeof(in), tisn);
2901 }
2902
2903 void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
2904 {
2905         mlx5_core_destroy_tis(mdev, tisn);
2906 }
2907
2908 int mlx5e_create_tises(struct mlx5e_priv *priv)
2909 {
2910         int err;
2911         int tc;
2912
2913         for (tc = 0; tc < priv->profile->max_tc; tc++) {
2914                 err = mlx5e_create_tis(priv->mdev, tc, 0, &priv->tisn[tc]);
2915                 if (err)
2916                         goto err_close_tises;
2917         }
2918
2919         return 0;
2920
2921 err_close_tises:
2922         for (tc--; tc >= 0; tc--)
2923                 mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
2924
2925         return err;
2926 }
2927
2928 void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
2929 {
2930         int tc;
2931
2932         for (tc = 0; tc < priv->profile->max_tc; tc++)
2933                 mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
2934 }
2935
2936 static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv,
2937                                       enum mlx5e_traffic_types tt,
2938                                       u32 *tirc)
2939 {
2940         MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
2941
2942         mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
2943
2944         MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
2945         MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
2946         mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false);
2947 }
2948
2949 static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc)
2950 {
2951         MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
2952
2953         mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
2954
2955         MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
2956         MLX5_SET(tirc, tirc, indirect_table, rqtn);
2957         MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
2958 }
2959
2960 int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
2961 {
2962         struct mlx5e_tir *tir;
2963         void *tirc;
2964         int inlen;
2965         int i = 0;
2966         int err;
2967         u32 *in;
2968         int tt;
2969
2970         inlen = MLX5_ST_SZ_BYTES(create_tir_in);
2971         in = kvzalloc(inlen, GFP_KERNEL);
2972         if (!in)
2973                 return -ENOMEM;
2974
2975         for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
2976                 memset(in, 0, inlen);
2977                 tir = &priv->indir_tir[tt];
2978                 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
2979                 mlx5e_build_indir_tir_ctx(priv, tt, tirc);
2980                 err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
2981                 if (err) {
2982                         mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err);
2983                         goto err_destroy_inner_tirs;
2984                 }
2985         }
2986
2987         if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
2988                 goto out;
2989
2990         for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) {
2991                 memset(in, 0, inlen);
2992                 tir = &priv->inner_indir_tir[i];
2993                 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
2994                 mlx5e_build_inner_indir_tir_ctx(priv, i, tirc);
2995                 err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
2996                 if (err) {
2997                         mlx5_core_warn(priv->mdev, "create inner indirect tirs failed, %d\n", err);
2998                         goto err_destroy_inner_tirs;
2999                 }
3000         }
3001
3002 out:
3003         kvfree(in);
3004
3005         return 0;
3006
3007 err_destroy_inner_tirs:
3008         for (i--; i >= 0; i--)
3009                 mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
3010
3011         for (tt--; tt >= 0; tt--)
3012                 mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);
3013
3014         kvfree(in);
3015
3016         return err;
3017 }
3018
3019 int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
3020 {
3021         int nch = priv->profile->max_nch(priv->mdev);
3022         struct mlx5e_tir *tir;
3023         void *tirc;
3024         int inlen;
3025         int err;
3026         u32 *in;
3027         int ix;
3028
3029         inlen = MLX5_ST_SZ_BYTES(create_tir_in);
3030         in = kvzalloc(inlen, GFP_KERNEL);
3031         if (!in)
3032                 return -ENOMEM;
3033
3034         for (ix = 0; ix < nch; ix++) {
3035                 memset(in, 0, inlen);
3036                 tir = &priv->direct_tir[ix];
3037                 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
3038                 mlx5e_build_direct_tir_ctx(priv, priv->direct_tir[ix].rqt.rqtn, tirc);
3039                 err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
3040                 if (err)
3041                         goto err_destroy_ch_tirs;
3042         }
3043
3044         kvfree(in);
3045
3046         return 0;
3047
3048 err_destroy_ch_tirs:
3049         mlx5_core_warn(priv->mdev, "create direct tirs failed, %d\n", err);
3050         for (ix--; ix >= 0; ix--)
3051                 mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]);
3052
3053         kvfree(in);
3054
3055         return err;
3056 }
3057
3058 void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
3059 {
3060         int i;
3061
3062         for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
3063                 mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
3064
3065         if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
3066                 return;
3067
3068         for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
3069                 mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
3070 }
3071
3072 void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
3073 {
3074         int nch = priv->profile->max_nch(priv->mdev);
3075         int i;
3076
3077         for (i = 0; i < nch; i++)
3078                 mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]);
3079 }
3080
3081 static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable)
3082 {
3083         int err = 0;
3084         int i;
3085
3086         for (i = 0; i < chs->num; i++) {
3087                 err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable);
3088                 if (err)
3089                         return err;
3090         }
3091
3092         return 0;
3093 }
3094
3095 static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
3096 {
3097         int err = 0;
3098         int i;
3099
3100         for (i = 0; i < chs->num; i++) {
3101                 err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd);
3102                 if (err)
3103                         return err;
3104         }
3105
3106         return 0;
3107 }
3108
3109 static int mlx5e_setup_tc_mqprio(struct net_device *netdev,
3110                                  struct tc_mqprio_qopt *mqprio)
3111 {
3112         struct mlx5e_priv *priv = netdev_priv(netdev);
3113         struct mlx5e_channels new_channels = {};
3114         u8 tc = mqprio->num_tc;
3115         int err = 0;
3116
3117         mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
3118
3119         if (tc && tc != MLX5E_MAX_NUM_TC)
3120                 return -EINVAL;
3121
3122         mutex_lock(&priv->state_lock);
3123
3124         new_channels.params = priv->channels.params;
3125         new_channels.params.num_tc = tc ? tc : 1;
3126
3127         if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
3128                 priv->channels.params = new_channels.params;
3129                 goto out;
3130         }
3131
3132         err = mlx5e_open_channels(priv, &new_channels);
3133         if (err)
3134                 goto out;
3135
3136         mlx5e_switch_priv_channels(priv, &new_channels, NULL);
3137 out:
3138         mutex_unlock(&priv->state_lock);
3139         return err;
3140 }
3141
3142 #ifdef CONFIG_MLX5_ESWITCH
3143 static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
3144                                      struct tc_cls_flower_offload *cls_flower,
3145                                      int flags)
3146 {
3147         switch (cls_flower->command) {
3148         case TC_CLSFLOWER_REPLACE:
3149                 return mlx5e_configure_flower(priv, cls_flower, flags);
3150         case TC_CLSFLOWER_DESTROY:
3151                 return mlx5e_delete_flower(priv, cls_flower, flags);
3152         case TC_CLSFLOWER_STATS:
3153                 return mlx5e_stats_flower(priv, cls_flower, flags);
3154         default:
3155                 return -EOPNOTSUPP;
3156         }
3157 }
3158
3159 static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3160                                    void *cb_priv)
3161 {
3162         struct mlx5e_priv *priv = cb_priv;
3163
3164         if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
3165                 return -EOPNOTSUPP;
3166
3167         switch (type) {
3168         case TC_SETUP_CLSFLOWER:
3169                 return mlx5e_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS);
3170         default:
3171                 return -EOPNOTSUPP;
3172         }
3173 }
3174
3175 static int mlx5e_setup_tc_block(struct net_device *dev,
3176                                 struct tc_block_offload *f)
3177 {
3178         struct mlx5e_priv *priv = netdev_priv(dev);
3179
3180         if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
3181                 return -EOPNOTSUPP;
3182
3183         switch (f->command) {
3184         case TC_BLOCK_BIND:
3185                 return tcf_block_cb_register(f->block, mlx5e_setup_tc_block_cb,
3186                                              priv, priv);
3187         case TC_BLOCK_UNBIND:
3188                 tcf_block_cb_unregister(f->block, mlx5e_setup_tc_block_cb,
3189                                         priv);
3190                 return 0;
3191         default:
3192                 return -EOPNOTSUPP;
3193         }
3194 }
3195 #endif
3196
3197 static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
3198                           void *type_data)
3199 {
3200         switch (type) {
3201 #ifdef CONFIG_MLX5_ESWITCH
3202         case TC_SETUP_BLOCK:
3203                 return mlx5e_setup_tc_block(dev, type_data);
3204 #endif
3205         case TC_SETUP_QDISC_MQPRIO:
3206                 return mlx5e_setup_tc_mqprio(dev, type_data);
3207         default:
3208                 return -EOPNOTSUPP;
3209         }
3210 }
3211
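/* ndo_get_stats64: uplink representors report IEEE 802.3 port counters, other
 * netdevs report software counters; the error counters are filled from the
 * 802.3, RFC 2863 and vport counter groups.
 */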
3212 static void
3213 mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
3214 {
3215         struct mlx5e_priv *priv = netdev_priv(dev);
3216         struct mlx5e_sw_stats *sstats = &priv->stats.sw;
3217         struct mlx5e_vport_stats *vstats = &priv->stats.vport;
3218         struct mlx5e_pport_stats *pstats = &priv->stats.pport;
3219
3220         if (mlx5e_is_uplink_rep(priv)) {
3221                 stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
3222                 stats->rx_bytes   = PPORT_802_3_GET(pstats, a_octets_received_ok);
3223                 stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
3224                 stats->tx_bytes   = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
3225         } else {
3226                 stats->rx_packets = sstats->rx_packets;
3227                 stats->rx_bytes   = sstats->rx_bytes;
3228                 stats->tx_packets = sstats->tx_packets;
3229                 stats->tx_bytes   = sstats->tx_bytes;
3230                 stats->tx_dropped = sstats->tx_queue_dropped;
3231         }
3232
3233         stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
3234
3235         stats->rx_length_errors =
3236                 PPORT_802_3_GET(pstats, a_in_range_length_errors) +
3237                 PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
3238                 PPORT_802_3_GET(pstats, a_frame_too_long_errors);
3239         stats->rx_crc_errors =
3240                 PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
3241         stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
3242         stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
3243         stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
3244                            stats->rx_frame_errors;
3245         stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;
3246
3247         /* vport multicast also counts packets that are dropped due to steering
3248          * or rx out of buffer
3249          */
3250         stats->multicast =
3251                 VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
3252 }
3253
3254 static void mlx5e_set_rx_mode(struct net_device *dev)
3255 {
3256         struct mlx5e_priv *priv = netdev_priv(dev);
3257
3258         queue_work(priv->wq, &priv->set_rx_mode_work);
3259 }
3260
3261 static int mlx5e_set_mac(struct net_device *netdev, void *addr)
3262 {
3263         struct mlx5e_priv *priv = netdev_priv(netdev);
3264         struct sockaddr *saddr = addr;
3265
3266         if (!is_valid_ether_addr(saddr->sa_data))
3267                 return -EADDRNOTAVAIL;
3268
3269         netif_addr_lock_bh(netdev);
3270         ether_addr_copy(netdev->dev_addr, saddr->sa_data);
3271         netif_addr_unlock_bh(netdev);
3272
3273         queue_work(priv->wq, &priv->set_rx_mode_work);
3274
3275         return 0;
3276 }
3277
3278 #define MLX5E_SET_FEATURE(features, feature, enable)    \
3279         do {                                            \
3280                 if (enable)                             \
3281                         *features |= feature;           \
3282                 else                                    \
3283                         *features &= ~feature;          \
3284         } while (0)
3285
3286 typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
3287
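/* LRO toggle. Reopening the channels is skipped when the netdev is down or
 * when, for striding RQ, the linear-SKB decision is unaffected; in that case
 * only the stored params and the TIR LRO state are updated in place.
 */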
3288 static int set_feature_lro(struct net_device *netdev, bool enable)
3289 {
3290         struct mlx5e_priv *priv = netdev_priv(netdev);
3291         struct mlx5_core_dev *mdev = priv->mdev;
3292         struct mlx5e_channels new_channels = {};
3293         struct mlx5e_params *old_params;
3294         int err = 0;
3295         bool reset;
3296
3297         mutex_lock(&priv->state_lock);
3298
3299         old_params = &priv->channels.params;
3300         reset = test_bit(MLX5E_STATE_OPENED, &priv->state);
3301
3302         new_channels.params = *old_params;
3303         new_channels.params.lro_en = enable;
3304
3305         if (old_params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST) {
3306                 if (mlx5e_rx_mpwqe_is_linear_skb(mdev, old_params) ==
3307                     mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params))
3308                         reset = false;
3309         }
3310
3311         if (!reset) {
3312                 *old_params = new_channels.params;
3313                 err = mlx5e_modify_tirs_lro(priv);
3314                 goto out;
3315         }
3316
3317         err = mlx5e_open_channels(priv, &new_channels);
3318         if (err)
3319                 goto out;
3320
3321         mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_modify_tirs_lro);
3322 out:
3323         mutex_unlock(&priv->state_lock);
3324         return err;
3325 }
3326
3327 static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
3328 {
3329         struct mlx5e_priv *priv = netdev_priv(netdev);
3330
3331         if (enable)
3332                 mlx5e_enable_cvlan_filter(priv);
3333         else
3334                 mlx5e_disable_cvlan_filter(priv);
3335
3336         return 0;
3337 }
3338
3339 static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
3340 {
3341         struct mlx5e_priv *priv = netdev_priv(netdev);
3342
3343         if (!enable && mlx5e_tc_num_filters(priv)) {
3344                 netdev_err(netdev,
3345                            "Active offloaded tc filters, can't turn hw_tc_offload off\n");
3346                 return -EINVAL;
3347         }
3348
3349         return 0;
3350 }
3351
3352 static int set_feature_rx_all(struct net_device *netdev, bool enable)
3353 {
3354         struct mlx5e_priv *priv = netdev_priv(netdev);
3355         struct mlx5_core_dev *mdev = priv->mdev;
3356
3357         return mlx5_set_port_fcs(mdev, !enable);
3358 }
3359
3360 static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
3361 {
3362         struct mlx5e_priv *priv = netdev_priv(netdev);
3363         int err;
3364
3365         mutex_lock(&priv->state_lock);
3366
3367         priv->channels.params.scatter_fcs_en = enable;
3368         err = mlx5e_modify_channels_scatter_fcs(&priv->channels, enable);
3369         if (err)
3370                 priv->channels.params.scatter_fcs_en = !enable;
3371
3372         mutex_unlock(&priv->state_lock);
3373
3374         return err;
3375 }
3376
3377 static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
3378 {
3379         struct mlx5e_priv *priv = netdev_priv(netdev);
3380         int err = 0;
3381
3382         mutex_lock(&priv->state_lock);
3383
3384         priv->channels.params.vlan_strip_disable = !enable;
3385         if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
3386                 goto unlock;
3387
3388         err = mlx5e_modify_channels_vsd(&priv->channels, !enable);
3389         if (err)
3390                 priv->channels.params.vlan_strip_disable = enable;
3391
3392 unlock:
3393         mutex_unlock(&priv->state_lock);
3394
3395         return err;
3396 }
3397
3398 #ifdef CONFIG_RFS_ACCEL
3399 static int set_feature_arfs(struct net_device *netdev, bool enable)
3400 {
3401         struct mlx5e_priv *priv = netdev_priv(netdev);
3402         int err;
3403
3404         if (enable)
3405                 err = mlx5e_arfs_enable(priv);
3406         else
3407                 err = mlx5e_arfs_disable(priv);
3408
3409         return err;
3410 }
3411 #endif
3412
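/* Run the handler for a single feature bit only if its requested state
 * differs from the current one, and mirror the outcome into the working
 * feature set used by mlx5e_set_features().
 */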
3413 static int mlx5e_handle_feature(struct net_device *netdev,
3414                                 netdev_features_t *features,
3415                                 netdev_features_t wanted_features,
3416                                 netdev_features_t feature,
3417                                 mlx5e_feature_handler feature_handler)
3418 {
3419         netdev_features_t changes = wanted_features ^ netdev->features;
3420         bool enable = !!(wanted_features & feature);
3421         int err;
3422
3423         if (!(changes & feature))
3424                 return 0;
3425
3426         err = feature_handler(netdev, enable);
3427         if (err) {
3428                 netdev_err(netdev, "%s feature %pNF failed, err %d\n",
3429                            enable ? "Enable" : "Disable", &feature, err);
3430                 return err;
3431         }
3432
3433         MLX5E_SET_FEATURE(features, feature, enable);
3434         return 0;
3435 }
3436
3437 static int mlx5e_set_features(struct net_device *netdev,
3438                               netdev_features_t features)
3439 {
3440         netdev_features_t oper_features = netdev->features;
3441         int err = 0;
3442
3443 #define MLX5E_HANDLE_FEATURE(feature, handler) \
3444         mlx5e_handle_feature(netdev, &oper_features, features, feature, handler)
3445
3446         err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
3447         err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
3448                                     set_feature_cvlan_filter);
3449         err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters);
3450         err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
3451         err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
3452         err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
3453 #ifdef CONFIG_RFS_ACCEL
3454         err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs);
3455 #endif
3456
3457         if (err) {
3458                 netdev->features = oper_features;
3459                 return -EINVAL;
3460         }
3461
3462         return 0;
3463 }
3464
3465 static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
3466                                             netdev_features_t features)
3467 {
3468         struct mlx5e_priv *priv = netdev_priv(netdev);
3469
3470         mutex_lock(&priv->state_lock);
3471         if (!bitmap_empty(priv->fs.vlan.active_svlans, VLAN_N_VID)) {
3472                 /* HW strips the outer C-tag header; this is a problem
3473                  * for S-tag traffic.
3474                  */
3475                 features &= ~NETIF_F_HW_VLAN_CTAG_RX;
3476                 if (!priv->channels.params.vlan_strip_disable)
3477                         netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n");
3478         }
3479         mutex_unlock(&priv->state_lock);
3480
3481         return features;
3482 }
3483
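/* MTU change. The channels are reopened only when the netdev is up, LRO is
 * disabled and, for striding RQ, the packets-per-WQE value changes; otherwise
 * the new MTU is applied to the existing configuration in place.
 */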
3484 static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
3485 {
3486         struct mlx5e_priv *priv = netdev_priv(netdev);
3487         struct mlx5e_channels new_channels = {};
3488         struct mlx5e_params *params;
3489         int err = 0;
3490         bool reset;
3491
3492         mutex_lock(&priv->state_lock);
3493
3494         params = &priv->channels.params;
3495
3496         reset = !params->lro_en;
3497         reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);
3498
3499         new_channels.params = *params;
3500         new_channels.params.sw_mtu = new_mtu;
3501
3502         if (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST) {
3503                 u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params);
3504                 u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params);
3505
3506                 reset = reset && (ppw_old != ppw_new);
3507         }
3508
3509         if (!reset) {
3510                 params->sw_mtu = new_mtu;
3511                 mlx5e_set_dev_port_mtu(priv);
3512                 netdev->mtu = params->sw_mtu;
3513                 goto out;
3514         }
3515
3516         err = mlx5e_open_channels(priv, &new_channels);
3517         if (err)
3518                 goto out;
3519
3520         mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_set_dev_port_mtu);
3521         netdev->mtu = new_channels.params.sw_mtu;
3522
3523 out:
3524         mutex_unlock(&priv->state_lock);
3525         return err;
3526 }
3527
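/* SIOCSHWTSTAMP handler. Enabling an RX timestamp filter forces CQE
 * compression off (with a warning); HWTSTAMP_FILTER_NONE restores the admin
 * default compression setting.
 */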
3528 int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
3529 {
3530         struct hwtstamp_config config;
3531         int err;
3532
3533         if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
3534                 return -EOPNOTSUPP;
3535
3536         if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
3537                 return -EFAULT;
3538
3539         /* TX HW timestamp */
3540         switch (config.tx_type) {
3541         case HWTSTAMP_TX_OFF:
3542         case HWTSTAMP_TX_ON:
3543                 break;
3544         default:
3545                 return -ERANGE;
3546         }
3547
3548         mutex_lock(&priv->state_lock);
3549         /* RX HW timestamp */
3550         switch (config.rx_filter) {
3551         case HWTSTAMP_FILTER_NONE:
3552                 /* Reset CQE compression to Admin default */
3553                 mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def);
3554                 break;
3555         case HWTSTAMP_FILTER_ALL:
3556         case HWTSTAMP_FILTER_SOME:
3557         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
3558         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
3559         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
3560         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
3561         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
3562         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
3563         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
3564         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
3565         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
3566         case HWTSTAMP_FILTER_PTP_V2_EVENT:
3567         case HWTSTAMP_FILTER_PTP_V2_SYNC:
3568         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
3569         case HWTSTAMP_FILTER_NTP_ALL:
3570                 /* Disable CQE compression */
3571                 netdev_warn(priv->netdev, "Disabling cqe compression\n");
3572                 err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
3573                 if (err) {
3574                         netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
3575                         mutex_unlock(&priv->state_lock);
3576                         return err;
3577                 }
3578                 config.rx_filter = HWTSTAMP_FILTER_ALL;
3579                 break;
3580         default:
3581                 mutex_unlock(&priv->state_lock);
3582                 return -ERANGE;
3583         }
3584
3585         memcpy(&priv->tstamp, &config, sizeof(config));
3586         mutex_unlock(&priv->state_lock);
3587
3588         return copy_to_user(ifr->ifr_data, &config,
3589                             sizeof(config)) ? -EFAULT : 0;
3590 }
3591
3592 int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
3593 {
3594         struct hwtstamp_config *cfg = &priv->tstamp;
3595
3596         if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
3597                 return -EOPNOTSUPP;
3598
3599         return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? -EFAULT : 0;
3600 }
3601
3602 static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3603 {
3604         struct mlx5e_priv *priv = netdev_priv(dev);
3605
3606         switch (cmd) {
3607         case SIOCSHWTSTAMP:
3608                 return mlx5e_hwstamp_set(priv, ifr);
3609         case SIOCGHWTSTAMP:
3610                 return mlx5e_hwstamp_get(priv, ifr);
3611         default:
3612                 return -EOPNOTSUPP;
3613         }
3614 }
3615
3616 #ifdef CONFIG_MLX5_ESWITCH
3617 static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
3618 {
3619         struct mlx5e_priv *priv = netdev_priv(dev);
3620         struct mlx5_core_dev *mdev = priv->mdev;
3621
3622         return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
3623 }
3624
3625 static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
3626                              __be16 vlan_proto)
3627 {
3628         struct mlx5e_priv *priv = netdev_priv(dev);
3629         struct mlx5_core_dev *mdev = priv->mdev;
3630
3631         if (vlan_proto != htons(ETH_P_8021Q))
3632                 return -EPROTONOSUPPORT;
3633
3634         return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
3635                                            vlan, qos);
3636 }
3637
3638 static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
3639 {
3640         struct mlx5e_priv *priv = netdev_priv(dev);
3641         struct mlx5_core_dev *mdev = priv->mdev;
3642
3643         return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
3644 }
3645
3646 static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
3647 {
3648         struct mlx5e_priv *priv = netdev_priv(dev);
3649         struct mlx5_core_dev *mdev = priv->mdev;
3650
3651         return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
3652 }
3653
3654 static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
3655                              int max_tx_rate)
3656 {
3657         struct mlx5e_priv *priv = netdev_priv(dev);
3658         struct mlx5_core_dev *mdev = priv->mdev;
3659
3660         return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
3661                                            max_tx_rate, min_tx_rate);
3662 }
3663
3664 static int mlx5_vport_link2ifla(u8 esw_link)
3665 {
3666         switch (esw_link) {
3667         case MLX5_ESW_VPORT_ADMIN_STATE_DOWN:
3668                 return IFLA_VF_LINK_STATE_DISABLE;
3669         case MLX5_ESW_VPORT_ADMIN_STATE_UP:
3670                 return IFLA_VF_LINK_STATE_ENABLE;
3671         }
3672         return IFLA_VF_LINK_STATE_AUTO;
3673 }
3674
3675 static int mlx5_ifla_link2vport(u8 ifla_link)
3676 {
3677         switch (ifla_link) {
3678         case IFLA_VF_LINK_STATE_DISABLE:
3679                 return MLX5_ESW_VPORT_ADMIN_STATE_DOWN;
3680         case IFLA_VF_LINK_STATE_ENABLE:
3681                 return MLX5_ESW_VPORT_ADMIN_STATE_UP;
3682         }
3683         return MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
3684 }
3685
3686 static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
3687                                    int link_state)
3688 {
3689         struct mlx5e_priv *priv = netdev_priv(dev);
3690         struct mlx5_core_dev *mdev = priv->mdev;
3691
3692         return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
3693                                             mlx5_ifla_link2vport(link_state));
3694 }
3695
3696 static int mlx5e_get_vf_config(struct net_device *dev,
3697                                int vf, struct ifla_vf_info *ivi)
3698 {
3699         struct mlx5e_priv *priv = netdev_priv(dev);
3700         struct mlx5_core_dev *mdev = priv->mdev;
3701         int err;
3702
3703         err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
3704         if (err)
3705                 return err;
3706         ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
3707         return 0;
3708 }
3709
3710 static int mlx5e_get_vf_stats(struct net_device *dev,
3711                               int vf, struct ifla_vf_stats *vf_stats)
3712 {
3713         struct mlx5e_priv *priv = netdev_priv(dev);
3714         struct mlx5_core_dev *mdev = priv->mdev;
3715
3716         return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
3717                                             vf_stats);
3718 }
3719 #endif
3720
3721 static void mlx5e_add_vxlan_port(struct net_device *netdev,
3722                                  struct udp_tunnel_info *ti)
3723 {
3724         struct mlx5e_priv *priv = netdev_priv(netdev);
3725
3726         if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3727                 return;
3728
3729         if (!mlx5e_vxlan_allowed(priv->mdev))
3730                 return;
3731
3732         mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
3733 }
3734
3735 static void mlx5e_del_vxlan_port(struct net_device *netdev,
3736                                  struct udp_tunnel_info *ti)
3737 {
3738         struct mlx5e_priv *priv = netdev_priv(netdev);
3739
3740         if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3741                 return;
3742
3743         if (!mlx5e_vxlan_allowed(priv->mdev))
3744                 return;
3745
3746         mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0);
3747 }
3748
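/* For encapsulated TX packets, keep checksum/GSO offloads only for tunnel
 * types the HW can handle: GRE, and UDP tunnels whose destination port is
 * known to the device (e.g. VXLAN). Otherwise strip the offload bits so the
 * stack falls back to software.
 */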
3749 static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
3750                                                      struct sk_buff *skb,
3751                                                      netdev_features_t features)
3752 {
3753         unsigned int offset = 0;
3754         struct udphdr *udph;
3755         u8 proto;
3756         u16 port;
3757
3758         switch (vlan_get_protocol(skb)) {
3759         case htons(ETH_P_IP):
3760                 proto = ip_hdr(skb)->protocol;
3761                 break;
3762         case htons(ETH_P_IPV6):
3763                 proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
3764                 break;
3765         default:
3766                 goto out;
3767         }
3768
3769         switch (proto) {
3770         case IPPROTO_GRE:
3771                 return features;
3772         case IPPROTO_UDP:
3773                 udph = udp_hdr(skb);
3774                 port = be16_to_cpu(udph->dest);
3775
3776                 /* Verify if UDP port is being offloaded by HW */
3777                 if (mlx5e_vxlan_lookup_port(priv, port))
3778                         return features;
3779         }
3780
3781 out:
3782         /* Disable CSUM and GSO if the udp dport is not offloaded by HW */
3783         return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3784 }
3785
3786 static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
3787                                               struct net_device *netdev,
3788                                               netdev_features_t features)
3789 {
3790         struct mlx5e_priv *priv = netdev_priv(netdev);
3791
3792         features = vlan_features_check(skb, features);
3793         features = vxlan_features_check(skb, features);
3794
3795 #ifdef CONFIG_MLX5_EN_IPSEC
3796         if (mlx5e_ipsec_feature_check(skb, netdev, features))
3797                 return features;
3798 #endif
3799
3800         /* Check whether the tunneled packet is being offloaded by HW */
3801         if (skb->encapsulation &&
3802             (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
3803                 return mlx5e_tunnel_features_check(priv, skb, features);
3804
3805         return features;
3806 }
3807
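/* TX timeout recovery helper: poll the SQ's EQ with the IRQ disabled. Any
 * recovered EQEs indicate a lost interrupt; the channel's eq_rearm counter is
 * incremented and the caller can skip reopening the channels.
 */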
3808 static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev,
3809                                         struct mlx5e_txqsq *sq)
3810 {
3811         struct mlx5_eq *eq = sq->cq.mcq.eq;
3812         u32 eqe_count;
3813
3814         netdev_err(dev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n",
3815                    eq->eqn, eq->cons_index, eq->irqn);
3816
3817         eqe_count = mlx5_eq_poll_irq_disabled(eq);
3818         if (!eqe_count)
3819                 return false;
3820
3821         netdev_err(dev, "Recover %d eqes on EQ 0x%x\n", eqe_count, eq->eqn);
3822         sq->channel->stats.eq_rearm++;
3823         return true;
3824 }
3825
3826 static void mlx5e_tx_timeout_work(struct work_struct *work)
3827 {
3828         struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
3829                                                tx_timeout_work);
3830         struct net_device *dev = priv->netdev;
3831         bool reopen_channels = false;
3832         int i, err;
3833
3834         rtnl_lock();
3835         mutex_lock(&priv->state_lock);
3836
3837         if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
3838                 goto unlock;
3839
3840         for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) {
3841                 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, i);
3842                 struct mlx5e_txqsq *sq = priv->txq2sq[i];
3843
3844                 if (!netif_xmit_stopped(dev_queue))
3845                         continue;
3846
3847                 netdev_err(dev,
3848                            "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n",
3849                            i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
3850                            jiffies_to_usecs(jiffies - dev_queue->trans_start));
3851
3852                 /* If we recovered a lost interrupt, the TX timeout will most
3853                  * likely be resolved; skip reopening the channels.
3854                  */
3855                 if (!mlx5e_tx_timeout_eq_recover(dev, sq)) {
3856                         clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
3857                         reopen_channels = true;
3858                 }
3859         }
3860
3861         if (!reopen_channels)
3862                 goto unlock;
3863
3864         mlx5e_close_locked(dev);
3865         err = mlx5e_open_locked(dev);
3866         if (err)
3867                 netdev_err(priv->netdev,
3868                            "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
3869                            err);
3870
3871 unlock:
3872         mutex_unlock(&priv->state_lock);
3873         rtnl_unlock();
3874 }
3875
3876 static void mlx5e_tx_timeout(struct net_device *dev)
3877 {
3878         struct mlx5e_priv *priv = netdev_priv(dev);
3879
3880         netdev_err(dev, "TX timeout detected\n");
3881         queue_work(priv->wq, &priv->tx_timeout_work);
3882 }
3883
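/* Install or remove an XDP program. Attaching or detaching requires closing
 * and reopening the channels since the RQ type changes; replacing one program
 * with another is done live by swapping the prog pointer on each channel's RQ.
 */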
3884 static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
3885 {
3886         struct mlx5e_priv *priv = netdev_priv(netdev);
3887         struct bpf_prog *old_prog;
3888         int err = 0;
3889         bool reset, was_opened;
3890         int i;
3891
3892         mutex_lock(&priv->state_lock);
3893
3894         if ((netdev->features & NETIF_F_LRO) && prog) {
3895                 netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
3896                 err = -EINVAL;
3897                 goto unlock;
3898         }
3899
3900         if ((netdev->features & NETIF_F_HW_ESP) && prog) {
3901                 netdev_warn(netdev, "can't set XDP with IPSec offload\n");
3902                 err = -EINVAL;
3903                 goto unlock;
3904         }
3905
3906         was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
3907         /* no need for full reset when exchanging programs */
3908         reset = (!priv->channels.params.xdp_prog || !prog);
3909
3910         if (was_opened && reset)
3911                 mlx5e_close_locked(netdev);
3912         if (was_opened && !reset) {
3913                 /* num_channels is invariant here, so we can take the
3914                  * batched reference right upfront.
3915                  */
3916                 prog = bpf_prog_add(prog, priv->channels.num);
3917                 if (IS_ERR(prog)) {
3918                         err = PTR_ERR(prog);
3919                         goto unlock;
3920                 }
3921         }
3922
3923         /* Exchange programs; keep the extra prog reference we got from the
3924          * caller as long as we don't fail from this point onwards.
3925          */
3926         old_prog = xchg(&priv->channels.params.xdp_prog, prog);
3927         if (old_prog)
3928                 bpf_prog_put(old_prog);
3929
3930         if (reset) /* change RQ type according to the new xdp_prog */
3931                 mlx5e_set_rq_type(priv->mdev, &priv->channels.params);
3932
3933         if (was_opened && reset)
3934                 mlx5e_open_locked(netdev);
3935
3936         if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
3937                 goto unlock;
3938
3939         /* Exchanging programs without a reset; update the ref counts on
3940          * behalf of the channels' RQs here.
3941          */
3942         for (i = 0; i < priv->channels.num; i++) {
3943                 struct mlx5e_channel *c = priv->channels.c[i];
3944
3945                 clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
3946                 napi_synchronize(&c->napi);
3947                 /* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */
3948
3949                 old_prog = xchg(&c->rq.xdp_prog, prog);
3950
3951                 set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
3952                 /* napi_schedule in case we have missed anything */
3953                 napi_schedule(&c->napi);
3954
3955                 if (old_prog)
3956                         bpf_prog_put(old_prog);
3957         }
3958
3959 unlock:
3960         mutex_unlock(&priv->state_lock);
3961         return err;
3962 }
3963
3964 static u32 mlx5e_xdp_query(struct net_device *dev)
3965 {
3966         struct mlx5e_priv *priv = netdev_priv(dev);
3967         const struct bpf_prog *xdp_prog;
3968         u32 prog_id = 0;
3969
3970         mutex_lock(&priv->state_lock);
3971         xdp_prog = priv->channels.params.xdp_prog;
3972         if (xdp_prog)
3973                 prog_id = xdp_prog->aux->id;
3974         mutex_unlock(&priv->state_lock);
3975
3976         return prog_id;
3977 }
3978
3979 static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
3980 {
3981         switch (xdp->command) {
3982         case XDP_SETUP_PROG:
3983                 return mlx5e_xdp_set(dev, xdp->prog);
3984         case XDP_QUERY_PROG:
3985                 xdp->prog_id = mlx5e_xdp_query(dev);
3986                 xdp->prog_attached = !!xdp->prog_id;
3987                 return 0;
3988         default:
3989                 return -EINVAL;
3990         }
3991 }
3992
3993 #ifdef CONFIG_NET_POLL_CONTROLLER
3994 /* Fake "interrupt" called by netpoll (e.g. netconsole) to send skbs without
3995  * re-enabling interrupts.
3996  */
3997 static void mlx5e_netpoll(struct net_device *dev)
3998 {
3999         struct mlx5e_priv *priv = netdev_priv(dev);
4000         struct mlx5e_channels *chs = &priv->channels;
4001
4002         int i;
4003
4004         for (i = 0; i < chs->num; i++)
4005                 napi_schedule(&chs->c[i]->napi);
4006 }
4007 #endif
4008
4009 static const struct net_device_ops mlx5e_netdev_ops = {
4010         .ndo_open                = mlx5e_open,
4011         .ndo_stop                = mlx5e_close,
4012         .ndo_start_xmit          = mlx5e_xmit,
4013         .ndo_setup_tc            = mlx5e_setup_tc,
4014         .ndo_select_queue        = mlx5e_select_queue,
4015         .ndo_get_stats64         = mlx5e_get_stats,
4016         .ndo_set_rx_mode         = mlx5e_set_rx_mode,
4017         .ndo_set_mac_address     = mlx5e_set_mac,
4018         .ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
4019         .ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
4020         .ndo_set_features        = mlx5e_set_features,
4021         .ndo_fix_features        = mlx5e_fix_features,
4022         .ndo_change_mtu          = mlx5e_change_mtu,
4023         .ndo_do_ioctl            = mlx5e_ioctl,
4024         .ndo_set_tx_maxrate      = mlx5e_set_tx_maxrate,
4025         .ndo_udp_tunnel_add      = mlx5e_add_vxlan_port,
4026         .ndo_udp_tunnel_del      = mlx5e_del_vxlan_port,
4027         .ndo_features_check      = mlx5e_features_check,
4028 #ifdef CONFIG_RFS_ACCEL
4029         .ndo_rx_flow_steer       = mlx5e_rx_flow_steer,
4030 #endif
4031         .ndo_tx_timeout          = mlx5e_tx_timeout,
4032         .ndo_bpf                 = mlx5e_xdp,
4033 #ifdef CONFIG_NET_POLL_CONTROLLER
4034         .ndo_poll_controller     = mlx5e_netpoll,
4035 #endif
4036 #ifdef CONFIG_MLX5_ESWITCH
4037         /* SRIOV E-Switch NDOs */
4038         .ndo_set_vf_mac          = mlx5e_set_vf_mac,
4039         .ndo_set_vf_vlan         = mlx5e_set_vf_vlan,
4040         .ndo_set_vf_spoofchk     = mlx5e_set_vf_spoofchk,
4041         .ndo_set_vf_trust        = mlx5e_set_vf_trust,
4042         .ndo_set_vf_rate         = mlx5e_set_vf_rate,
4043         .ndo_get_vf_config       = mlx5e_get_vf_config,
4044         .ndo_set_vf_link_state   = mlx5e_set_vf_link_state,
4045         .ndo_get_vf_stats        = mlx5e_get_vf_stats,
4046         .ndo_has_offload_stats   = mlx5e_has_offload_stats,
4047         .ndo_get_offload_stats   = mlx5e_get_offload_stats,
4048 #endif
4049 };
4050
4051 static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
4052 {
4053         if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
4054                 return -EOPNOTSUPP;
4055         if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
4056             !MLX5_CAP_GEN(mdev, nic_flow_table) ||
4057             !MLX5_CAP_ETH(mdev, csum_cap) ||
4058             !MLX5_CAP_ETH(mdev, max_lso_cap) ||
4059             !MLX5_CAP_ETH(mdev, vlan_cap) ||
4060             !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
4061             MLX5_CAP_FLOWTABLE(mdev,
4062                                flow_table_properties_nic_receive.max_ft_level)
4063                                < 3) {
4064                 mlx5_core_warn(mdev,
4065                                "Not creating net device, some required device capabilities are missing\n");
4066                 return -EOPNOTSUPP;
4067         }
4068         if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
4069                 mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
4070         if (!MLX5_CAP_GEN(mdev, cq_moderation))
4071                 mlx5_core_warn(mdev, "CQ moderation is not supported\n");
4072
4073         return 0;
4074 }
4075
4076 void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
4077                                    int num_channels)
4078 {
4079         int i;
4080
4081         for (i = 0; i < len; i++)
4082                 indirection_rqt[i] = i % num_channels;
4083 }
4084
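/* Heuristic: treat the PCI link as a bottleneck when the port's maximum link
 * speed exceeds the available PCI bandwidth by more than MLX5E_SLOW_PCI_RATIO.
 * Used below to choose defaults such as CQE compression and LRO.
 */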
4085 static bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
4086 {
4087         u32 link_speed = 0;
4088         u32 pci_bw = 0;
4089
4090         mlx5e_port_max_linkspeed(mdev, &link_speed);
4091         pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL);
4092         mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n",
4093                            link_speed, pci_bw);
4094
4095 #define MLX5E_SLOW_PCI_RATIO (2)
4096
4097         return link_speed && pci_bw &&
4098                 link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
4099 }
4100
4101 static struct net_dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
4102 {
4103         struct net_dim_cq_moder moder;
4104
4105         moder.cq_period_mode = cq_period_mode;
4106         moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
4107         moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
4108         if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
4109                 moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;
4110
4111         return moder;
4112 }
4113
4114 static struct net_dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
4115 {
4116         struct net_dim_cq_moder moder;
4117
4118         moder.cq_period_mode = cq_period_mode;
4119         moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
4120         moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
4121         if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
4122                 moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
4123
4124         return moder;
4125 }
4126
4127 static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
4128 {
4129         return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ?
4130                 NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE :
4131                 NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4132 }
4133
4134 void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
4135 {
4136         if (params->tx_dim_enabled) {
4137                 u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
4138
4139                 params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode);
4140         } else {
4141                 params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
4142         }
4143
4144         MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
4145                         params->tx_cq_moderation.cq_period_mode ==
4146                                 MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
4147 }
4148
4149 void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
4150 {
4151         if (params->rx_dim_enabled) {
4152                 u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
4153
4154                 params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode);
4155         } else {
4156                 params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
4157         }
4158
4159         MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
4160                         params->rx_cq_moderation.cq_period_mode ==
4161                                 MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
4162 }
4163
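/* Return the smallest supported LRO timer period that is not below the wanted
 * timeout, falling back to the largest supported period.
 */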
4164 static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
4165 {
4166         int i;
4167
4168         /* The supported periods are organized in ascending order */
4169         for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
4170                 if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
4171                         break;
4172
4173         return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
4174 }
4175
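/* Build the default channel parameters (queue sizes, RQ type, CQE compression,
 * LRO, CQ moderation, RSS) from device capabilities and the PCI bandwidth
 * heuristic above.
 */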
4176 void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
4177                             struct mlx5e_params *params,
4178                             u16 max_channels, u16 mtu)
4179 {
4180         u8 rx_cq_period_mode;
4181
4182         params->sw_mtu = mtu;
4183         params->hard_mtu = MLX5E_ETH_HARD_MTU;
4184         params->num_channels = max_channels;
4185         params->num_tc       = 1;
4186
4187         /* SQ */
4188         params->log_sq_size = is_kdump_kernel() ?
4189                 MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
4190                 MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
4191
4192         /* set CQE compression */
4193         params->rx_cqe_compress_def = false;
4194         if (MLX5_CAP_GEN(mdev, cqe_compression) &&
4195             MLX5_CAP_GEN(mdev, vport_group_manager))
4196                 params->rx_cqe_compress_def = slow_pci_heuristic(mdev);
4197
4198         MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
4199
4200         /* RQ */
4201         if (mlx5e_striding_rq_possible(mdev, params))
4202                 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ,
4203                                 !slow_pci_heuristic(mdev));
4204         mlx5e_set_rq_type(mdev, params);
4205         mlx5e_init_rq_type_params(mdev, params);
4206
4207         /* HW LRO */
4208
4209         /* TODO: && MLX5_CAP_ETH(mdev, lro_cap) */
4210         if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
4211                 if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
4212                         params->lro_en = !slow_pci_heuristic(mdev);
4213         params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
4214
4215         /* CQ moderation params */
4216         rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
4217                         MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
4218                         MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
4219         params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
4220         params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
4221         mlx5e_set_rx_cq_mode_params(params, rx_cq_period_mode);
4222         mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
4223
4224         /* TX inline */
4225         params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev);
4226
4227         /* RSS */
4228         params->rss_hfunc = ETH_RSS_HASH_XOR;
4229         netdev_rss_key_fill(params->toeplitz_hash_key, sizeof(params->toeplitz_hash_key));
4230         mlx5e_build_default_indir_rqt(params->indirection_rqt,
4231                                       MLX5E_INDIR_RQT_SIZE, max_channels);
4232 }
4233
4234 static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
4235                                         struct net_device *netdev,
4236                                         const struct mlx5e_profile *profile,
4237                                         void *ppriv)
4238 {
4239         struct mlx5e_priv *priv = netdev_priv(netdev);
4240
4241         priv->mdev        = mdev;
4242         priv->netdev      = netdev;
4243         priv->profile     = profile;
4244         priv->ppriv       = ppriv;
4245         priv->msglevel    = MLX5E_MSG_LEVEL;
4246
4247         mlx5e_build_nic_params(mdev, &priv->channels.params,
4248                                profile->max_nch(mdev), netdev->mtu);
4249
4250         mutex_init(&priv->state_lock);
4251
4252         INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
4253         INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
4254         INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
4255         INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
4256
4257         mlx5e_timestamp_init(priv);
4258 }
4259
4260 static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
4261 {
4262         struct mlx5e_priv *priv = netdev_priv(netdev);
4263
4264         mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
4265         if (is_zero_ether_addr(netdev->dev_addr) &&
4266             !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
4267                 eth_hw_addr_random(netdev);
4268                 mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
4269         }
4270 }
4271
4272 #if IS_ENABLED(CONFIG_MLX5_ESWITCH)
4273 static const struct switchdev_ops mlx5e_switchdev_ops = {
4274         .switchdev_port_attr_get        = mlx5e_attr_get,
4275 };
4276 #endif
4277
4278 static void mlx5e_build_nic_netdev(struct net_device *netdev)
4279 {
4280         struct mlx5e_priv *priv = netdev_priv(netdev);
4281         struct mlx5_core_dev *mdev = priv->mdev;
4282         bool fcs_supported;
4283         bool fcs_enabled;
4284
4285         SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
4286
4287         netdev->netdev_ops = &mlx5e_netdev_ops;
4288
4289 #ifdef CONFIG_MLX5_CORE_EN_DCB
4290         if (MLX5_CAP_GEN(mdev, vport_group_manager) && MLX5_CAP_GEN(mdev, qos))
4291                 netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
4292 #endif
4293
4294         netdev->watchdog_timeo    = 15 * HZ;
4295
4296         netdev->ethtool_ops       = &mlx5e_ethtool_ops;
4297
4298         netdev->vlan_features    |= NETIF_F_SG;
4299         netdev->vlan_features    |= NETIF_F_IP_CSUM;
4300         netdev->vlan_features    |= NETIF_F_IPV6_CSUM;
4301         netdev->vlan_features    |= NETIF_F_GRO;
4302         netdev->vlan_features    |= NETIF_F_TSO;
4303         netdev->vlan_features    |= NETIF_F_TSO6;
4304         netdev->vlan_features    |= NETIF_F_RXCSUM;
4305         netdev->vlan_features    |= NETIF_F_RXHASH;
4306
4307         netdev->hw_enc_features  |= NETIF_F_HW_VLAN_CTAG_TX;
4308         netdev->hw_enc_features  |= NETIF_F_HW_VLAN_CTAG_RX;
4309
4310         if (!!MLX5_CAP_ETH(mdev, lro_cap))
4311                 netdev->vlan_features    |= NETIF_F_LRO;
4312
4313         netdev->hw_features       = netdev->vlan_features;
4314         netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_TX;
4315         netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_RX;
4316         netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_FILTER;
4317         netdev->hw_features      |= NETIF_F_HW_VLAN_STAG_TX;
4318
4319         if (mlx5e_vxlan_allowed(mdev) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
4320                 netdev->hw_features     |= NETIF_F_GSO_PARTIAL;
4321                 netdev->hw_enc_features |= NETIF_F_IP_CSUM;
4322                 netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
4323                 netdev->hw_enc_features |= NETIF_F_TSO;
4324                 netdev->hw_enc_features |= NETIF_F_TSO6;
4325                 netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL;
4326         }
4327
4328         if (mlx5e_vxlan_allowed(mdev)) {
4329                 netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL |
4330                                            NETIF_F_GSO_UDP_TUNNEL_CSUM;
4331                 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
4332                                            NETIF_F_GSO_UDP_TUNNEL_CSUM;
4333                 netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
4334         }
4335
4336         if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
4337                 netdev->hw_features     |= NETIF_F_GSO_GRE |
4338                                            NETIF_F_GSO_GRE_CSUM;
4339                 netdev->hw_enc_features |= NETIF_F_GSO_GRE |
4340                                            NETIF_F_GSO_GRE_CSUM;
4341                 netdev->gso_partial_features |= NETIF_F_GSO_GRE |
4342                                                 NETIF_F_GSO_GRE_CSUM;
4343         }
4344
4345         mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
4346
4347         if (fcs_supported)
4348                 netdev->hw_features |= NETIF_F_RXALL;
4349
4350         if (MLX5_CAP_ETH(mdev, scatter_fcs))
4351                 netdev->hw_features |= NETIF_F_RXFCS;
4352
4353         netdev->features          = netdev->hw_features;
4354         if (!priv->channels.params.lro_en)
4355                 netdev->features  &= ~NETIF_F_LRO;
4356
4357         if (fcs_enabled)
4358                 netdev->features  &= ~NETIF_F_RXALL;
4359
4360         if (!priv->channels.params.scatter_fcs_en)
4361                 netdev->features  &= ~NETIF_F_RXFCS;
4362
4363 #define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
4364         if (FT_CAP(flow_modify_en) &&
4365             FT_CAP(modify_root) &&
4366             FT_CAP(identified_miss_table_mode) &&
4367             FT_CAP(flow_table_modify)) {
4368                 netdev->hw_features      |= NETIF_F_HW_TC;
4369 #ifdef CONFIG_RFS_ACCEL
4370                 netdev->hw_features      |= NETIF_F_NTUPLE;
4371 #endif
4372         }
4373
4374         netdev->features         |= NETIF_F_HIGHDMA;
4375         netdev->features         |= NETIF_F_HW_VLAN_STAG_FILTER;
4376
4377         netdev->priv_flags       |= IFF_UNICAST_FLT;
4378
4379         mlx5e_set_netdev_dev_addr(netdev);
4380
4381 #if IS_ENABLED(CONFIG_MLX5_ESWITCH)
4382         if (MLX5_VPORT_MANAGER(mdev))
4383                 netdev->switchdev_ops = &mlx5e_switchdev_ops;
4384 #endif
4385
4386         mlx5e_ipsec_build_netdev(priv);
4387         mlx5e_tls_build_netdev(priv);
4388 }
4389
4390 static void mlx5e_create_q_counters(struct mlx5e_priv *priv)
4391 {
4392         struct mlx5_core_dev *mdev = priv->mdev;
4393         int err;
4394
4395         err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
4396         if (err) {
4397                 mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
4398                 priv->q_counter = 0;
4399         }
4400
4401         err = mlx5_core_alloc_q_counter(mdev, &priv->drop_rq_q_counter);
4402         if (err) {
4403                 mlx5_core_warn(mdev, "alloc drop RQ counter failed, %d\n", err);
4404                 priv->drop_rq_q_counter = 0;
4405         }
4406 }
4407
4408 static void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
4409 {
4410         if (priv->q_counter)
4411                 mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
4412
4413         if (priv->drop_rq_q_counter)
4414                 mlx5_core_dealloc_q_counter(priv->mdev, priv->drop_rq_q_counter);
4415 }
4416
4417 static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
4418                            struct net_device *netdev,
4419                            const struct mlx5e_profile *profile,
4420                            void *ppriv)
4421 {
4422         struct mlx5e_priv *priv = netdev_priv(netdev);
4423         int err;
4424
4425         mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv);
4426         err = mlx5e_ipsec_init(priv);
4427         if (err)
4428                 mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
4429         err = mlx5e_tls_init(priv);
4430         if (err)
4431                 mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
4432         mlx5e_build_nic_netdev(netdev);
4433         mlx5e_vxlan_init(priv);
4434 }
4435
4436 static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
4437 {
4438         mlx5e_tls_cleanup(priv);
4439         mlx5e_ipsec_cleanup(priv);
4440         mlx5e_vxlan_cleanup(priv);
4441 }
4442
4443 static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
4444 {
4445         struct mlx5_core_dev *mdev = priv->mdev;
4446         int err;
4447
4448         err = mlx5e_create_indirect_rqt(priv);
4449         if (err)
4450                 return err;
4451
4452         err = mlx5e_create_direct_rqts(priv);
4453         if (err)
4454                 goto err_destroy_indirect_rqts;
4455
4456         err = mlx5e_create_indirect_tirs(priv);
4457         if (err)
4458                 goto err_destroy_direct_rqts;
4459
4460         err = mlx5e_create_direct_tirs(priv);
4461         if (err)
4462                 goto err_destroy_indirect_tirs;
4463
4464         err = mlx5e_create_flow_steering(priv);
4465         if (err) {
4466                 mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
4467                 goto err_destroy_direct_tirs;
4468         }
4469
4470         err = mlx5e_tc_nic_init(priv);
4471         if (err)
4472                 goto err_destroy_flow_steering;
4473
4474         return 0;
4475
4476 err_destroy_flow_steering:
4477         mlx5e_destroy_flow_steering(priv);
4478 err_destroy_direct_tirs:
4479         mlx5e_destroy_direct_tirs(priv);
4480 err_destroy_indirect_tirs:
4481         mlx5e_destroy_indirect_tirs(priv);
4482 err_destroy_direct_rqts:
4483         mlx5e_destroy_direct_rqts(priv);
4484 err_destroy_indirect_rqts:
4485         mlx5e_destroy_rqt(priv, &priv->indir_rqt);
4486         return err;
4487 }
4488
4489 static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
4490 {
4491         mlx5e_tc_nic_cleanup(priv);
4492         mlx5e_destroy_flow_steering(priv);
4493         mlx5e_destroy_direct_tirs(priv);
4494         mlx5e_destroy_indirect_tirs(priv);
4495         mlx5e_destroy_direct_rqts(priv);
4496         mlx5e_destroy_rqt(priv, &priv->indir_rqt);
4497 }
4498
4499 static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
4500 {
4501         int err;
4502
4503         err = mlx5e_create_tises(priv);
4504         if (err) {
4505                 mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
4506                 return err;
4507         }
4508
4509 #ifdef CONFIG_MLX5_CORE_EN_DCB
4510         mlx5e_dcbnl_initialize(priv);
4511 #endif
4512         return 0;
4513 }
4514
4515 static void mlx5e_nic_enable(struct mlx5e_priv *priv)
4516 {
4517         struct net_device *netdev = priv->netdev;
4518         struct mlx5_core_dev *mdev = priv->mdev;
4519         u16 max_mtu;
4520
4521         mlx5e_init_l2_addr(priv);
4522
4523         /* Mark the link as currently not needed by the driver */
4524         if (!netif_running(netdev))
4525                 mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
4526
4527         /* MTU range: 68 - hw-specific max */
4528         netdev->min_mtu = ETH_MIN_MTU;
4529         mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
4530         netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
4531         mlx5e_set_dev_port_mtu(priv);
4532
4533         mlx5_lag_add(mdev, netdev);
4534
4535         mlx5e_enable_async_events(priv);
4536
4537         if (MLX5_VPORT_MANAGER(priv->mdev))
4538                 mlx5e_register_vport_reps(priv);
4539
4540         if (netdev->reg_state != NETREG_REGISTERED)
4541                 return;
4542 #ifdef CONFIG_MLX5_CORE_EN_DCB
4543         mlx5e_dcbnl_init_app(priv);
4544 #endif
4545
4546         queue_work(priv->wq, &priv->set_rx_mode_work);
4547
4548         rtnl_lock();
4549         if (netif_running(netdev))
4550                 mlx5e_open(netdev);
4551         netif_device_attach(netdev);
4552         rtnl_unlock();
4553 }
4554
4555 static void mlx5e_nic_disable(struct mlx5e_priv *priv)
4556 {
4557         struct mlx5_core_dev *mdev = priv->mdev;
4558
4559 #ifdef CONFIG_MLX5_CORE_EN_DCB
4560         if (priv->netdev->reg_state == NETREG_REGISTERED)
4561                 mlx5e_dcbnl_delete_app(priv);
4562 #endif
4563
4564         rtnl_lock();
4565         if (netif_running(priv->netdev))
4566                 mlx5e_close(priv->netdev);
4567         netif_device_detach(priv->netdev);
4568         rtnl_unlock();
4569
4570         queue_work(priv->wq, &priv->set_rx_mode_work);
4571
4572         if (MLX5_VPORT_MANAGER(priv->mdev))
4573                 mlx5e_unregister_vport_reps(priv);
4574
4575         mlx5e_disable_async_events(priv);
4576         mlx5_lag_remove(mdev);
4577 }
4578
4579 static const struct mlx5e_profile mlx5e_nic_profile = {
4580         .init              = mlx5e_nic_init,
4581         .cleanup           = mlx5e_nic_cleanup,
4582         .init_rx           = mlx5e_init_nic_rx,
4583         .cleanup_rx        = mlx5e_cleanup_nic_rx,
4584         .init_tx           = mlx5e_init_nic_tx,
4585         .cleanup_tx        = mlx5e_cleanup_nic_tx,
4586         .enable            = mlx5e_nic_enable,
4587         .disable           = mlx5e_nic_disable,
4588         .update_stats      = mlx5e_update_ndo_stats,
4589         .max_nch           = mlx5e_get_max_num_channels,
4590         .update_carrier    = mlx5e_update_carrier,
4591         .rx_handlers.handle_rx_cqe       = mlx5e_handle_rx_cqe,
4592         .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
4593         .max_tc            = MLX5E_MAX_NUM_TC,
4594 };
4595
4596 /* mlx5e generic netdev management API (move to en_common.c) */
4597
4598 struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
4599                                        const struct mlx5e_profile *profile,
4600                                        void *ppriv)
4601 {
4602         int nch = profile->max_nch(mdev);
4603         struct net_device *netdev;
4604         struct mlx5e_priv *priv;
4605
4606         netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
4607                                     nch * profile->max_tc,
4608                                     nch);
4609         if (!netdev) {
4610                 mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
4611                 return NULL;
4612         }
4613
4614 #ifdef CONFIG_RFS_ACCEL
4615         netdev->rx_cpu_rmap = mdev->rmap;
4616 #endif
4617
4618         profile->init(mdev, netdev, profile, ppriv);
4619
4620         netif_carrier_off(netdev);
4621
4622         priv = netdev_priv(netdev);
4623
4624         priv->wq = create_singlethread_workqueue("mlx5e");
4625         if (!priv->wq)
4626                 goto err_cleanup_nic;
4627
4628         return netdev;
4629
4630 err_cleanup_nic:
4631         if (profile->cleanup)
4632                 profile->cleanup(priv);
4633         free_netdev(netdev);
4634
4635         return NULL;
4636 }
4637
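/*
 * Instantiate the hardware objects behind an existing netdev: the TX side
 * via the profile's init_tx() (TISes for the NIC profile), the Q counters,
 * the drop RQ used while no channels are open, the RX side via init_rx(),
 * and finally the profile's enable() hook.  Unwinds in reverse on error.
 */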
4638 int mlx5e_attach_netdev(struct mlx5e_priv *priv)
4639 {
4640         struct mlx5_core_dev *mdev = priv->mdev;
4641         const struct mlx5e_profile *profile;
4642         int err;
4643
4644         profile = priv->profile;
4645         clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
4646
4647         err = profile->init_tx(priv);
4648         if (err)
4649                 goto out;
4650
4651         mlx5e_create_q_counters(priv);
4652
4653         err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
4654         if (err) {
4655                 mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
4656                 goto err_destroy_q_counters;
4657         }
4658
4659         err = profile->init_rx(priv);
4660         if (err)
4661                 goto err_close_drop_rq;
4662
4663         if (profile->enable)
4664                 profile->enable(priv);
4665
4666         return 0;
4667
4668 err_close_drop_rq:
4669         mlx5e_close_drop_rq(&priv->drop_rq);
4670
4671 err_destroy_q_counters:
4672         mlx5e_destroy_q_counters(priv);
4673         profile->cleanup_tx(priv);
4674
4675 out:
4676         return err;
4677 }
4678
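/*
 * Reverse of mlx5e_attach_netdev(): mark the priv as being destroyed, run
 * the profile's disable() hook, flush pending work, then release the RX
 * objects, the drop RQ, the Q counters and the TX objects.
 */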
4679 void mlx5e_detach_netdev(struct mlx5e_priv *priv)
4680 {
4681         const struct mlx5e_profile *profile = priv->profile;
4682
4683         set_bit(MLX5E_STATE_DESTROYING, &priv->state);
4684
4685         if (profile->disable)
4686                 profile->disable(priv);
4687         flush_workqueue(priv->wq);
4688
4689         profile->cleanup_rx(priv);
4690         mlx5e_close_drop_rq(&priv->drop_rq);
4691         mlx5e_destroy_q_counters(priv);
4692         profile->cleanup_tx(priv);
4693         cancel_delayed_work_sync(&priv->update_stats_work);
4694 }
4695
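/* Final teardown: destroy the workqueue, run the profile's cleanup() and
 * free the netdev itself.
 */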
4696 void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
4697 {
4698         const struct mlx5e_profile *profile = priv->profile;
4699         struct net_device *netdev = priv->netdev;
4700
4701         destroy_workqueue(priv->wq);
4702         if (profile->cleanup)
4703                 profile->cleanup(priv);
4704         free_netdev(netdev);
4705 }
4706
4707 /* mlx5e_attach and mlx5e_detach should only create/destroy the hardware
4708  * contexts and connect them to the current netdev.
4709  */
4710 static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
4711 {
4712         struct mlx5e_priv *priv = vpriv;
4713         struct net_device *netdev = priv->netdev;
4714         int err;
4715
4716         if (netif_device_present(netdev))
4717                 return 0;
4718
4719         err = mlx5e_create_mdev_resources(mdev);
4720         if (err)
4721                 return err;
4722
4723         err = mlx5e_attach_netdev(priv);
4724         if (err) {
4725                 mlx5e_destroy_mdev_resources(mdev);
4726                 return err;
4727         }
4728
4729         return 0;
4730 }
4731
4732 static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
4733 {
4734         struct mlx5e_priv *priv = vpriv;
4735         struct net_device *netdev = priv->netdev;
4736
4737         if (!netif_device_present(netdev))
4738                 return;
4739
4740         mlx5e_detach_netdev(priv);
4741         mlx5e_destroy_mdev_resources(mdev);
4742 }
4743
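/*
 * .add() callback of the mlx5_interface below: verify the HCA
 * capabilities, allocate the representor private data when the device
 * manages vports, create and attach the netdev, and register it with the
 * networking core.  The returned mlx5e_priv pointer is the cookie passed
 * back to the other interface callbacks.
 */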
4744 static void *mlx5e_add(struct mlx5_core_dev *mdev)
4745 {
4746         struct net_device *netdev;
4747         void *rpriv = NULL;
4748         void *priv;
4749         int err;
4750
4751         err = mlx5e_check_required_hca_cap(mdev);
4752         if (err)
4753                 return NULL;
4754
4755 #ifdef CONFIG_MLX5_ESWITCH
4756         if (MLX5_VPORT_MANAGER(mdev)) {
4757                 rpriv = mlx5e_alloc_nic_rep_priv(mdev);
4758                 if (!rpriv) {
4759                         mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n");
4760                         return NULL;
4761                 }
4762         }
4763 #endif
4764
4765         netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, rpriv);
4766         if (!netdev) {
4767                 mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
4768                 goto err_free_rpriv;
4769         }
4770
4771         priv = netdev_priv(netdev);
4772
4773         err = mlx5e_attach(mdev, priv);
4774         if (err) {
4775                 mlx5_core_err(mdev, "mlx5e_attach failed, %d\n", err);
4776                 goto err_destroy_netdev;
4777         }
4778
4779         err = register_netdev(netdev);
4780         if (err) {
4781                 mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
4782                 goto err_detach;
4783         }
4784
4785 #ifdef CONFIG_MLX5_CORE_EN_DCB
4786         mlx5e_dcbnl_init_app(priv);
4787 #endif
4788         return priv;
4789
4790 err_detach:
4791         mlx5e_detach(mdev, priv);
4792 err_destroy_netdev:
4793         mlx5e_destroy_netdev(priv);
4794 err_free_rpriv:
4795         kfree(rpriv);
4796         return NULL;
4797 }
4798
4799 static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
4800 {
4801         struct mlx5e_priv *priv = vpriv;
4802         void *ppriv = priv->ppriv;
4803
4804 #ifdef CONFIG_MLX5_CORE_EN_DCB
4805         mlx5e_dcbnl_delete_app(priv);
4806 #endif
4807         unregister_netdev(priv->netdev);
4808         mlx5e_detach(mdev, vpriv);
4809         mlx5e_destroy_netdev(priv);
4810         kfree(ppriv);
4811 }
4812
4813 static void *mlx5e_get_netdev(void *vpriv)
4814 {
4815         struct mlx5e_priv *priv = vpriv;
4816
4817         return priv->netdev;
4818 }
4819
4820 static struct mlx5_interface mlx5e_interface = {
4821         .add       = mlx5e_add,
4822         .remove    = mlx5e_remove,
4823         .attach    = mlx5e_attach,
4824         .detach    = mlx5e_detach,
4825         .event     = mlx5e_async_event,
4826         .protocol  = MLX5_INTERFACE_PROTOCOL_ETH,
4827         .get_dev   = mlx5e_get_netdev,
4828 };
4829
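/*
 * Module-level entry points.  mlx5e_init() registers mlx5e_interface with
 * the mlx5 core; roughly, the core then invokes .add()/.remove() for each
 * ETH-capable function it probes and .attach()/.detach() when a device is
 * temporarily unloaded, e.g. across PCI error recovery.
 */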
4830 void mlx5e_init(void)
4831 {
4832         mlx5e_ipsec_build_inverse_table();
4833         mlx5e_build_ptys2ethtool_map();
4834         mlx5_register_interface(&mlx5e_interface);
4835 }
4836
4837 void mlx5e_cleanup(void)
4838 {
4839         mlx5_unregister_interface(&mlx5e_interface);
4840 }