net/mlx5e: Introducing new statistics rwlock
[sfrench/cifs-2.6.git] drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1 /*
2  * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32
33 #include <net/tc_act/tc_gact.h>
34 #include <net/pkt_cls.h>
35 #include <linux/mlx5/fs.h>
36 #include <net/vxlan.h>
37 #include <linux/bpf.h>
38 #include <net/page_pool.h>
39 #include "eswitch.h"
40 #include "en.h"
41 #include "en_tc.h"
42 #include "en_rep.h"
43 #include "en_accel/ipsec.h"
44 #include "en_accel/ipsec_rxtx.h"
45 #include "en_accel/tls.h"
46 #include "accel/ipsec.h"
47 #include "accel/tls.h"
48 #include "vxlan.h"
49 #include "en/port.h"
50
51 struct mlx5e_rq_param {
52         u32                     rqc[MLX5_ST_SZ_DW(rqc)];
53         struct mlx5_wq_param    wq;
54 };
55
56 struct mlx5e_sq_param {
57         u32                        sqc[MLX5_ST_SZ_DW(sqc)];
58         struct mlx5_wq_param       wq;
59 };
60
61 struct mlx5e_cq_param {
62         u32                        cqc[MLX5_ST_SZ_DW(cqc)];
63         struct mlx5_wq_param       wq;
64         u16                        eq_ix;
65         u8                         cq_period_mode;
66 };
67
68 struct mlx5e_channel_param {
69         struct mlx5e_rq_param      rq;
70         struct mlx5e_sq_param      sq;
71         struct mlx5e_sq_param      xdp_sq;
72         struct mlx5e_sq_param      icosq;
73         struct mlx5e_cq_param      rx_cq;
74         struct mlx5e_cq_param      tx_cq;
75         struct mlx5e_cq_param      icosq_cq;
76 };
77
78 bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
79 {
80         bool striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) &&
81                 MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
82                 MLX5_CAP_ETH(mdev, reg_umr_sq);
83         u16 max_wqe_sz_cap = MLX5_CAP_GEN(mdev, max_wqe_sz_sq);
84         bool inline_umr = MLX5E_UMR_WQE_INLINE_SZ <= max_wqe_sz_cap;
85
86         if (!striding_rq_umr)
87                 return false;
88         if (!inline_umr) {
89                 mlx5_core_warn(mdev, "Cannot support Striding RQ: UMR WQE size (%d) exceeds maximum supported (%d).\n",
90                                (int)MLX5E_UMR_WQE_INLINE_SZ, max_wqe_sz_cap);
91                 return false;
92         }
93         return true;
94 }
95
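/* Size of the RX fragment used when a linear SKB is built directly over an
 * MPWQE stride: headroom plus the HW MTU, rounded up by MLX5_SKB_FRAG_SZ()
 * so build_skb() has room for the skb_shared_info. When an XDP program is
 * attached, the whole page is handed to the program, so PAGE_SIZE is used.
 */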
96 static u32 mlx5e_mpwqe_get_linear_frag_sz(struct mlx5e_params *params)
97 {
98         if (!params->xdp_prog) {
99                 u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
100                 u16 rq_headroom = MLX5_RX_HEADROOM + NET_IP_ALIGN;
101
102                 return MLX5_SKB_FRAG_SZ(rq_headroom + hw_mtu);
103         }
104
105         return PAGE_SIZE;
106 }
107
108 static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params)
109 {
110         u32 linear_frag_sz = mlx5e_mpwqe_get_linear_frag_sz(params);
111
112         return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
113 }
114
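/* A linear (build_skb) RX path over Striding RQ is possible only when LRO is
 * off and a full packet (headroom + MTU + shared_info) fits in a single page.
 * Devices without the ext_stride_num_range capability additionally require
 * that the resulting log_num_strides does not drop below
 * MLX5_MPWQE_LOG_NUM_STRIDES_BASE.
 */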
115 static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
116                                          struct mlx5e_params *params)
117 {
118         u32 frag_sz = mlx5e_mpwqe_get_linear_frag_sz(params);
119         s8 signed_log_num_strides_param;
120         u8 log_num_strides;
121
122         if (params->lro_en || frag_sz > PAGE_SIZE)
123                 return false;
124
125         if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
126                 return true;
127
128         log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(frag_sz);
129         signed_log_num_strides_param =
130                 (s8)log_num_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE;
131
132         return signed_log_num_strides_param >= 0;
133 }
134
135 static u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params)
136 {
137         if (params->log_rq_mtu_frames <
138             mlx5e_mpwqe_log_pkts_per_wqe(params) + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW)
139                 return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
140
141         return params->log_rq_mtu_frames - mlx5e_mpwqe_log_pkts_per_wqe(params);
142 }
143
144 static u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
145                                           struct mlx5e_params *params)
146 {
147         if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
148                 return order_base_2(mlx5e_mpwqe_get_linear_frag_sz(params));
149
150         return MLX5E_MPWQE_STRIDE_SZ(mdev,
151                 MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
152 }
153
154 static u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
155                                           struct mlx5e_params *params)
156 {
157         return MLX5_MPWRQ_LOG_WQE_SZ -
158                 mlx5e_mpwqe_get_log_stride_size(mdev, params);
159 }
160
161 static u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
162                                  struct mlx5e_params *params)
163 {
164         u16 linear_rq_headroom = params->xdp_prog ?
165                 XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
166
167         linear_rq_headroom += NET_IP_ALIGN;
168
169         if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST)
170                 return linear_rq_headroom;
171
172         if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
173                 return linear_rq_headroom;
174
175         return 0;
176 }
177
178 void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
179                                struct mlx5e_params *params)
180 {
181         params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
182         params->log_rq_mtu_frames = is_kdump_kernel() ?
183                 MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
184                 MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
185         switch (params->rq_wq_type) {
186         case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
187                 break;
188         default: /* MLX5_WQ_TYPE_LINKED_LIST */
189                 /* Extra room needed for build_skb */
190                 params->lro_wqe_sz -= mlx5e_get_rq_headroom(mdev, params) +
191                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
192         }
193
194         mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
195                        params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
196                        params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
197                        BIT(mlx5e_mpwqe_get_log_rq_size(params)) :
198                        BIT(params->log_rq_mtu_frames),
199                        BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params)),
200                        MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
201 }
202
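/* Striding RQ can be used only when the device supports fragmented UMR-based
 * Striding RQ (mlx5e_check_fragmented_striding_rq_cap()), IPsec offload is not
 * active (the MPWQE RX path rejects it in mlx5e_alloc_rq()), and, if XDP is
 * loaded, the linear MPWQE SKB layout is usable.
 */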
203 bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
204                                 struct mlx5e_params *params)
205 {
206         return mlx5e_check_fragmented_striding_rq_cap(mdev) &&
207                 !MLX5_IPSEC_DEV(mdev) &&
208                 !(params->xdp_prog && !mlx5e_rx_mpwqe_is_linear_skb(mdev, params));
209 }
210
211 void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
212 {
213         params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) &&
214                 MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
215                 MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
216                 MLX5_WQ_TYPE_LINKED_LIST;
217 }
218
219 static void mlx5e_update_carrier(struct mlx5e_priv *priv)
220 {
221         struct mlx5_core_dev *mdev = priv->mdev;
222         u8 port_state;
223
224         port_state = mlx5_query_vport_state(mdev,
225                                             MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT,
226                                             0);
227
228         if (port_state == VPORT_STATE_UP) {
229                 netdev_info(priv->netdev, "Link up\n");
230                 netif_carrier_on(priv->netdev);
231         } else {
232                 netdev_info(priv->netdev, "Link down\n");
233                 netif_carrier_off(priv->netdev);
234         }
235 }
236
237 static void mlx5e_update_carrier_work(struct work_struct *work)
238 {
239         struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
240                                                update_carrier_work);
241
242         mutex_lock(&priv->state_lock);
243         if (test_bit(MLX5E_STATE_OPENED, &priv->state))
244                 if (priv->profile->update_carrier)
245                         priv->profile->update_carrier(priv);
246         mutex_unlock(&priv->state_lock);
247 }
248
249 void mlx5e_update_stats(struct mlx5e_priv *priv)
250 {
251         int i;
252
253         for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
254                 if (mlx5e_stats_grps[i].update_stats)
255                         mlx5e_stats_grps[i].update_stats(priv);
256 }
257
258 static void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
259 {
260         int i;
261
262         for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
263                 if (mlx5e_stats_grps[i].update_stats_mask &
264                     MLX5E_NDO_UPDATE_STATS)
265                         mlx5e_stats_grps[i].update_stats(priv);
266 }
267
268 void mlx5e_update_stats_work(struct work_struct *work)
269 {
270         struct delayed_work *dwork = to_delayed_work(work);
271         struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
272                                                update_stats_work);
273         mutex_lock(&priv->state_lock);
274         if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
275                 priv->profile->update_stats(priv);
276                 queue_delayed_work(priv->wq, dwork,
277                                    msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
278         }
279         mutex_unlock(&priv->state_lock);
280 }
281
282 static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
283                               enum mlx5_dev_event event, unsigned long param)
284 {
285         struct mlx5e_priv *priv = vpriv;
286
287         if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
288                 return;
289
290         switch (event) {
291         case MLX5_DEV_EVENT_PORT_UP:
292         case MLX5_DEV_EVENT_PORT_DOWN:
293                 queue_work(priv->wq, &priv->update_carrier_work);
294                 break;
295         default:
296                 break;
297         }
298 }
299
300 static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
301 {
302         set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
303 }
304
305 static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
306 {
307         clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
308         synchronize_irq(pci_irq_vector(priv->mdev->pdev, MLX5_EQ_VEC_ASYNC));
309 }
310
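/* Build the template UMR WQE that is posted on the channel's ICOSQ for every
 * MPWQE. The WQE updates the RQ's UMR mkey (rq->mkey_be) with the MTT
 * translations for MLX5_MPWRQ_PAGES_PER_WQE pages, carried inline
 * (MLX5_UMR_INLINE) in the WQE itself.
 */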
311 static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
312                                        struct mlx5e_icosq *sq,
313                                        struct mlx5e_umr_wqe *wqe)
314 {
315         struct mlx5_wqe_ctrl_seg      *cseg = &wqe->ctrl;
316         struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
317         u8 ds_cnt = DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_DS);
318
319         cseg->qpn_ds    = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
320                                       ds_cnt);
321         cseg->fm_ce_se  = MLX5_WQE_CTRL_CQ_UPDATE;
322         cseg->imm       = rq->mkey_be;
323
324         ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
325         ucseg->xlt_octowords =
326                 cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
327         ucseg->mkey_mask     = cpu_to_be64(MLX5_MKEY_MASK_FREE);
328 }
329
330 static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
331                                      struct mlx5e_channel *c)
332 {
333         int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
334
335         rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
336                                       GFP_KERNEL, cpu_to_node(c->cpu));
337         if (!rq->mpwqe.info)
338                 return -ENOMEM;
339
340         mlx5e_build_umr_wqe(rq, &c->icosq, &rq->mpwqe.umr_wqe);
341
342         return 0;
343 }
344
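/* Create the UMR mkey backing the Striding RQ buffers: an MTT-based mkey
 * sized for 'npages' pages, created in the free state with umr_en set so that
 * UMR WQEs (built above) can repopulate its translation table at runtime.
 */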
345 static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
346                                  u64 npages, u8 page_shift,
347                                  struct mlx5_core_mkey *umr_mkey)
348 {
349         int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
350         void *mkc;
351         u32 *in;
352         int err;
353
354         in = kvzalloc(inlen, GFP_KERNEL);
355         if (!in)
356                 return -ENOMEM;
357
358         mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
359
360         MLX5_SET(mkc, mkc, free, 1);
361         MLX5_SET(mkc, mkc, umr_en, 1);
362         MLX5_SET(mkc, mkc, lw, 1);
363         MLX5_SET(mkc, mkc, lr, 1);
364         MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
365
366         MLX5_SET(mkc, mkc, qpn, 0xffffff);
367         MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
368         MLX5_SET64(mkc, mkc, len, npages << page_shift);
369         MLX5_SET(mkc, mkc, translations_octword_size,
370                  MLX5_MTT_OCTW(npages));
371         MLX5_SET(mkc, mkc, log_page_size, page_shift);
372
373         err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
374
375         kvfree(in);
376         return err;
377 }
378
379 static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
380 {
381         u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->wq));
382
383         return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
384 }
385
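/* DMA offset of a given MPWQE within the UMR mkey address space: each WQE
 * owns an aligned run of 2^MLX5E_LOG_ALIGNED_MPWQE_PPW pages, so the offset
 * is the WQE index scaled by that page count and by PAGE_SIZE.
 */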
386 static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix)
387 {
388         return (wqe_ix << MLX5E_LOG_ALIGNED_MPWQE_PPW) << PAGE_SHIFT;
389 }
390
391 static int mlx5e_alloc_rq(struct mlx5e_channel *c,
392                           struct mlx5e_params *params,
393                           struct mlx5e_rq_param *rqp,
394                           struct mlx5e_rq *rq)
395 {
396         struct page_pool_params pp_params = { 0 };
397         struct mlx5_core_dev *mdev = c->mdev;
398         void *rqc = rqp->rqc;
399         void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
400         u32 byte_count, pool_size;
401         int npages;
402         int wq_sz;
403         int err;
404         int i;
405
406         rqp->wq.db_numa_node = cpu_to_node(c->cpu);
407
408         err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wq,
409                                 &rq->wq_ctrl);
410         if (err)
411                 return err;
412
413         rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];
414
415         wq_sz = mlx5_wq_ll_get_size(&rq->wq);
416
417         rq->wq_type = params->rq_wq_type;
418         rq->pdev    = c->pdev;
419         rq->netdev  = c->netdev;
420         rq->tstamp  = c->tstamp;
421         rq->clock   = &mdev->clock;
422         rq->channel = c;
423         rq->ix      = c->ix;
424         rq->mdev    = mdev;
425         rq->hw_mtu  = MLX5E_SW2HW_MTU(params, params->sw_mtu);
426
427         rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
428         if (IS_ERR(rq->xdp_prog)) {
429                 err = PTR_ERR(rq->xdp_prog);
430                 rq->xdp_prog = NULL;
431                 goto err_rq_wq_destroy;
432         }
433
434         err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix);
435         if (err < 0)
436                 goto err_rq_wq_destroy;
437
438         rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
439         rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params);
440         pool_size = 1 << params->log_rq_mtu_frames;
441
442         switch (rq->wq_type) {
443         case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
444
445                 pool_size = MLX5_MPWRQ_PAGES_PER_WQE << mlx5e_mpwqe_get_log_rq_size(params);
446                 rq->post_wqes = mlx5e_post_rx_mpwqes;
447                 rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
448
449                 rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe_mpwqe;
450 #ifdef CONFIG_MLX5_EN_IPSEC
451                 if (MLX5_IPSEC_DEV(mdev)) {
452                         err = -EINVAL;
453                         netdev_err(c->netdev, "MPWQE RQ with IPSec offload not supported\n");
454                         goto err_rq_wq_destroy;
455                 }
456 #endif
457                 if (!rq->handle_rx_cqe) {
458                         err = -EINVAL;
459                         netdev_err(c->netdev, "RX handler of MPWQE RQ is not set, err %d\n", err);
460                         goto err_rq_wq_destroy;
461                 }
462
463                 rq->mpwqe.skb_from_cqe_mpwrq =
464                         mlx5e_rx_mpwqe_is_linear_skb(mdev, params) ?
465                         mlx5e_skb_from_cqe_mpwrq_linear :
466                         mlx5e_skb_from_cqe_mpwrq_nonlinear;
467                 rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params);
468                 rq->mpwqe.num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params));
469
470                 byte_count = rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
471
472                 err = mlx5e_create_rq_umr_mkey(mdev, rq);
473                 if (err)
474                         goto err_rq_wq_destroy;
475                 rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);
476
477                 err = mlx5e_rq_alloc_mpwqe_info(rq, c);
478                 if (err)
479                         goto err_destroy_umr_mkey;
480                 break;
481         default: /* MLX5_WQ_TYPE_LINKED_LIST */
482                 rq->wqe.frag_info =
483                         kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info),
484                                      GFP_KERNEL, cpu_to_node(c->cpu));
485                 if (!rq->wqe.frag_info) {
486                         err = -ENOMEM;
487                         goto err_rq_wq_destroy;
488                 }
489                 rq->post_wqes = mlx5e_post_rx_wqes;
490                 rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
491
492 #ifdef CONFIG_MLX5_EN_IPSEC
493                 if (c->priv->ipsec)
494                         rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
495                 else
496 #endif
497                         rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe;
498                 if (!rq->handle_rx_cqe) {
499                         kfree(rq->wqe.frag_info);
500                         err = -EINVAL;
501                         netdev_err(c->netdev, "RX handler of RQ is not set, err %d\n", err);
502                         goto err_rq_wq_destroy;
503                 }
504
505                 byte_count = params->lro_en  ?
506                                 params->lro_wqe_sz :
507                                 MLX5E_SW2HW_MTU(params, params->sw_mtu);
508 #ifdef CONFIG_MLX5_EN_IPSEC
509                 if (MLX5_IPSEC_DEV(mdev))
510                         byte_count += MLX5E_METADATA_ETHER_LEN;
511 #endif
512                 rq->wqe.page_reuse = !params->xdp_prog && !params->lro_en;
513
514                 /* calc the required page order */
515                 rq->wqe.frag_sz = MLX5_SKB_FRAG_SZ(rq->buff.headroom + byte_count);
516                 npages = DIV_ROUND_UP(rq->wqe.frag_sz, PAGE_SIZE);
517                 rq->buff.page_order = order_base_2(npages);
518
519                 byte_count |= MLX5_HW_START_PADDING;
520                 rq->mkey_be = c->mkey_be;
521         }
522
523         /* Create a page_pool and register it with rxq */
524         pp_params.order     = rq->buff.page_order;
525         pp_params.flags     = 0; /* no internal DMA mapping in page_pool */
526         pp_params.pool_size = pool_size;
527         pp_params.nid       = cpu_to_node(c->cpu);
528         pp_params.dev       = c->pdev;
529         pp_params.dma_dir   = rq->buff.map_dir;
530
531         /* The page_pool can be used even when there is no rq->xdp_prog:
532          * since the page_pool does not handle the DMA mapping here, there
533          * is no required state to clear, and the page_pool gracefully
534          * handles elevated page refcounts.
535          */
536         rq->page_pool = page_pool_create(&pp_params);
537         if (IS_ERR(rq->page_pool)) {
538                 if (rq->wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
539                         kfree(rq->wqe.frag_info);
540                 err = PTR_ERR(rq->page_pool);
541                 rq->page_pool = NULL;
542                 goto err_rq_wq_destroy;
543         }
544         err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
545                                          MEM_TYPE_PAGE_POOL, rq->page_pool);
546         if (err)
547                 goto err_rq_wq_destroy;
548
549         for (i = 0; i < wq_sz; i++) {
550                 struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
551
552                 if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
553                         u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i);
554
555                         wqe->data.addr = cpu_to_be64(dma_offset + rq->buff.headroom);
556                 }
557
558                 wqe->data.byte_count = cpu_to_be32(byte_count);
559                 wqe->data.lkey = rq->mkey_be;
560         }
561
562         INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work);
563
564         switch (params->rx_cq_moderation.cq_period_mode) {
565         case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
566                 rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE;
567                 break;
568         case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
569         default:
570                 rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
571         }
572
573         rq->page_cache.head = 0;
574         rq->page_cache.tail = 0;
575
576         return 0;
577
578 err_destroy_umr_mkey:
579         mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);
580
581 err_rq_wq_destroy:
582         if (rq->xdp_prog)
583                 bpf_prog_put(rq->xdp_prog);
584         xdp_rxq_info_unreg(&rq->xdp_rxq);
585         if (rq->page_pool)
586                 page_pool_destroy(rq->page_pool);
587         mlx5_wq_destroy(&rq->wq_ctrl);
588
589         return err;
590 }
591
592 static void mlx5e_free_rq(struct mlx5e_rq *rq)
593 {
594         int i;
595
596         if (rq->xdp_prog)
597                 bpf_prog_put(rq->xdp_prog);
598
599         xdp_rxq_info_unreg(&rq->xdp_rxq);
600         if (rq->page_pool)
601                 page_pool_destroy(rq->page_pool);
602
603         switch (rq->wq_type) {
604         case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
605                 kfree(rq->mpwqe.info);
606                 mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
607                 break;
608         default: /* MLX5_WQ_TYPE_LINKED_LIST */
609                 kfree(rq->wqe.frag_info);
610         }
611
612         for (i = rq->page_cache.head; i != rq->page_cache.tail;
613              i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
614                 struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];
615
616                 mlx5e_page_release(rq, dma_info, false);
617         }
618         mlx5_wq_destroy(&rq->wq_ctrl);
619 }
620
621 static int mlx5e_create_rq(struct mlx5e_rq *rq,
622                            struct mlx5e_rq_param *param)
623 {
624         struct mlx5_core_dev *mdev = rq->mdev;
625
626         void *in;
627         void *rqc;
628         void *wq;
629         int inlen;
630         int err;
631
632         inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
633                 sizeof(u64) * rq->wq_ctrl.buf.npages;
634         in = kvzalloc(inlen, GFP_KERNEL);
635         if (!in)
636                 return -ENOMEM;
637
638         rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
639         wq  = MLX5_ADDR_OF(rqc, rqc, wq);
640
641         memcpy(rqc, param->rqc, sizeof(param->rqc));
642
643         MLX5_SET(rqc,  rqc, cqn,                rq->cq.mcq.cqn);
644         MLX5_SET(rqc,  rqc, state,              MLX5_RQC_STATE_RST);
645         MLX5_SET(wq,   wq,  log_wq_pg_sz,       rq->wq_ctrl.buf.page_shift -
646                                                 MLX5_ADAPTER_PAGE_SHIFT);
647         MLX5_SET64(wq, wq,  dbr_addr,           rq->wq_ctrl.db.dma);
648
649         mlx5_fill_page_frag_array(&rq->wq_ctrl.buf,
650                                   (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
651
652         err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
653
654         kvfree(in);
655
656         return err;
657 }
658
659 static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
660                                  int next_state)
661 {
662         struct mlx5_core_dev *mdev = rq->mdev;
663
664         void *in;
665         void *rqc;
666         int inlen;
667         int err;
668
669         inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
670         in = kvzalloc(inlen, GFP_KERNEL);
671         if (!in)
672                 return -ENOMEM;
673
674         rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
675
676         MLX5_SET(modify_rq_in, in, rq_state, curr_state);
677         MLX5_SET(rqc, rqc, state, next_state);
678
679         err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
680
681         kvfree(in);
682
683         return err;
684 }
685
686 static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
687 {
688         struct mlx5e_channel *c = rq->channel;
689         struct mlx5e_priv *priv = c->priv;
690         struct mlx5_core_dev *mdev = priv->mdev;
691
692         void *in;
693         void *rqc;
694         int inlen;
695         int err;
696
697         inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
698         in = kvzalloc(inlen, GFP_KERNEL);
699         if (!in)
700                 return -ENOMEM;
701
702         rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
703
704         MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
705         MLX5_SET64(modify_rq_in, in, modify_bitmask,
706                    MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS);
707         MLX5_SET(rqc, rqc, scatter_fcs, enable);
708         MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
709
710         err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
711
712         kvfree(in);
713
714         return err;
715 }
716
717 static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
718 {
719         struct mlx5e_channel *c = rq->channel;
720         struct mlx5_core_dev *mdev = c->mdev;
721         void *in;
722         void *rqc;
723         int inlen;
724         int err;
725
726         inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
727         in = kvzalloc(inlen, GFP_KERNEL);
728         if (!in)
729                 return -ENOMEM;
730
731         rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
732
733         MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
734         MLX5_SET64(modify_rq_in, in, modify_bitmask,
735                    MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
736         MLX5_SET(rqc, rqc, vsd, vsd);
737         MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
738
739         err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
740
741         kvfree(in);
742
743         return err;
744 }
745
746 static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
747 {
748         mlx5_core_destroy_rq(rq->mdev, rq->rqn);
749 }
750
751 static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
752 {
753         unsigned long exp_time = jiffies + msecs_to_jiffies(wait_time);
754         struct mlx5e_channel *c = rq->channel;
755
756         struct mlx5_wq_ll *wq = &rq->wq;
757         u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5_wq_ll_get_size(wq));
758
759         do {
760                 if (wq->cur_sz >= min_wqes)
761                         return 0;
762
763                 msleep(20);
764         } while (time_before(jiffies, exp_time));
765
766         netdev_warn(c->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
767                     c->ix, rq->rqn, wq->cur_sz, min_wqes);
768
769         return -ETIMEDOUT;
770 }
771
772 static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
773 {
774         struct mlx5_wq_ll *wq = &rq->wq;
775         struct mlx5e_rx_wqe *wqe;
776         __be16 wqe_ix_be;
777         u16 wqe_ix;
778
779         /* UMR WQE (if in progress) is always at wq->head */
780         if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
781             rq->mpwqe.umr_in_progress)
782                 mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);
783
784         while (!mlx5_wq_ll_is_empty(wq)) {
785                 wqe_ix_be = *wq->tail_next;
786                 wqe_ix    = be16_to_cpu(wqe_ix_be);
787                 wqe       = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
788                 rq->dealloc_wqe(rq, wqe_ix);
789                 mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
790                                &wqe->next.next_wqe_index);
791         }
792
793         if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST && rq->wqe.page_reuse) {
794                 /* Clean up pages still held by handled WQEs that opted for
795                  * page-reuse but have not yet been re-posted.
796                  */
797                 int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
798
799                 for (wqe_ix = 0; wqe_ix < wq_sz; wqe_ix++)
800                         rq->dealloc_wqe(rq, wqe_ix);
801         }
802 }
803
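/* Opening an RQ is a three-step process: allocate the SW/WQ resources
 * (mlx5e_alloc_rq), create the HW RQ object (mlx5e_create_rq), then move it
 * from RST to RDY. Adaptive RX moderation (DIM) is armed here if enabled.
 */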
804 static int mlx5e_open_rq(struct mlx5e_channel *c,
805                          struct mlx5e_params *params,
806                          struct mlx5e_rq_param *param,
807                          struct mlx5e_rq *rq)
808 {
809         int err;
810
811         err = mlx5e_alloc_rq(c, params, param, rq);
812         if (err)
813                 return err;
814
815         err = mlx5e_create_rq(rq, param);
816         if (err)
817                 goto err_free_rq;
818
819         err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
820         if (err)
821                 goto err_destroy_rq;
822
823         if (params->rx_dim_enabled)
824                 __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
825
826         return 0;
827
828 err_destroy_rq:
829         mlx5e_destroy_rq(rq);
830 err_free_rq:
831         mlx5e_free_rq(rq);
832
833         return err;
834 }
835
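/* Mark the RQ enabled and post a NOP on the channel's ICOSQ. The NOP's
 * completion on the ICOSQ CQ schedules NAPI, which then starts posting RX
 * WQEs (and UMR WQEs for Striding RQ) to actually fill the ring.
 */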
836 static void mlx5e_activate_rq(struct mlx5e_rq *rq)
837 {
838         struct mlx5e_icosq *sq = &rq->channel->icosq;
839         struct mlx5_wq_cyc *wq = &sq->wq;
840         struct mlx5e_tx_wqe *nopwqe;
841
842         u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
843
844         set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
845         sq->db.ico_wqe[pi].opcode     = MLX5_OPCODE_NOP;
846         nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
847         mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
848 }
849
850 static void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
851 {
852         clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
853         napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
854 }
855
856 static void mlx5e_close_rq(struct mlx5e_rq *rq)
857 {
858         cancel_work_sync(&rq->dim.work);
859         mlx5e_destroy_rq(rq);
860         mlx5e_free_rx_descs(rq);
861         mlx5e_free_rq(rq);
862 }
863
864 static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
865 {
866         kfree(sq->db.di);
867 }
868
869 static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
870 {
871         int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
872
873         sq->db.di = kzalloc_node(sizeof(*sq->db.di) * wq_sz,
874                                      GFP_KERNEL, numa);
875         if (!sq->db.di) {
876                 mlx5e_free_xdpsq_db(sq);
877                 return -ENOMEM;
878         }
879
880         return 0;
881 }
882
883 static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
884                              struct mlx5e_params *params,
885                              struct mlx5e_sq_param *param,
886                              struct mlx5e_xdpsq *sq)
887 {
888         void *sqc_wq               = MLX5_ADDR_OF(sqc, param->sqc, wq);
889         struct mlx5_core_dev *mdev = c->mdev;
890         struct mlx5_wq_cyc *wq = &sq->wq;
891         int err;
892
893         sq->pdev      = c->pdev;
894         sq->mkey_be   = c->mkey_be;
895         sq->channel   = c;
896         sq->uar_map   = mdev->mlx5e_res.bfreg.map;
897         sq->min_inline_mode = params->tx_min_inline_mode;
898
899         param->wq.db_numa_node = cpu_to_node(c->cpu);
900         err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
901         if (err)
902                 return err;
903         wq->db = &wq->db[MLX5_SND_DBR];
904
905         err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
906         if (err)
907                 goto err_sq_wq_destroy;
908
909         return 0;
910
911 err_sq_wq_destroy:
912         mlx5_wq_destroy(&sq->wq_ctrl);
913
914         return err;
915 }
916
917 static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
918 {
919         mlx5e_free_xdpsq_db(sq);
920         mlx5_wq_destroy(&sq->wq_ctrl);
921 }
922
923 static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
924 {
925         kfree(sq->db.ico_wqe);
926 }
927
928 static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
929 {
930         u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
931
932         sq->db.ico_wqe = kzalloc_node(sizeof(*sq->db.ico_wqe) * wq_sz,
933                                       GFP_KERNEL, numa);
934         if (!sq->db.ico_wqe)
935                 return -ENOMEM;
936
937         return 0;
938 }
939
940 static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
941                              struct mlx5e_sq_param *param,
942                              struct mlx5e_icosq *sq)
943 {
944         void *sqc_wq               = MLX5_ADDR_OF(sqc, param->sqc, wq);
945         struct mlx5_core_dev *mdev = c->mdev;
946         struct mlx5_wq_cyc *wq = &sq->wq;
947         int err;
948
949         sq->channel   = c;
950         sq->uar_map   = mdev->mlx5e_res.bfreg.map;
951
952         param->wq.db_numa_node = cpu_to_node(c->cpu);
953         err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
954         if (err)
955                 return err;
956         wq->db = &wq->db[MLX5_SND_DBR];
957
958         err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
959         if (err)
960                 goto err_sq_wq_destroy;
961
962         return 0;
963
964 err_sq_wq_destroy:
965         mlx5_wq_destroy(&sq->wq_ctrl);
966
967         return err;
968 }
969
970 static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
971 {
972         mlx5e_free_icosq_db(sq);
973         mlx5_wq_destroy(&sq->wq_ctrl);
974 }
975
976 static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
977 {
978         kfree(sq->db.wqe_info);
979         kfree(sq->db.dma_fifo);
980 }
981
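/* Per-SQ software databases: one wqe_info entry per WQEBB, plus a DMA-unmap
 * fifo with MLX5_SEND_WQEBB_NUM_DS slots per WQEBB (one per possible data
 * segment). Both sizes are powers of two, so dma_fifo_mask gives cheap
 * wraparound of the fifo index.
 */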
982 static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
983 {
984         int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
985         int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
986
987         sq->db.dma_fifo = kzalloc_node(df_sz * sizeof(*sq->db.dma_fifo),
988                                            GFP_KERNEL, numa);
989         sq->db.wqe_info = kzalloc_node(wq_sz * sizeof(*sq->db.wqe_info),
990                                            GFP_KERNEL, numa);
991         if (!sq->db.dma_fifo || !sq->db.wqe_info) {
992                 mlx5e_free_txqsq_db(sq);
993                 return -ENOMEM;
994         }
995
996         sq->dma_fifo_mask = df_sz - 1;
997
998         return 0;
999 }
1000
1001 static void mlx5e_sq_recover(struct work_struct *work);
1002 static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
1003                              int txq_ix,
1004                              struct mlx5e_params *params,
1005                              struct mlx5e_sq_param *param,
1006                              struct mlx5e_txqsq *sq)
1007 {
1008         void *sqc_wq               = MLX5_ADDR_OF(sqc, param->sqc, wq);
1009         struct mlx5_core_dev *mdev = c->mdev;
1010         struct mlx5_wq_cyc *wq = &sq->wq;
1011         int err;
1012
1013         sq->pdev      = c->pdev;
1014         sq->tstamp    = c->tstamp;
1015         sq->clock     = &mdev->clock;
1016         sq->mkey_be   = c->mkey_be;
1017         sq->channel   = c;
1018         sq->txq_ix    = txq_ix;
1019         sq->uar_map   = mdev->mlx5e_res.bfreg.map;
1020         sq->min_inline_mode = params->tx_min_inline_mode;
1021         INIT_WORK(&sq->recover.recover_work, mlx5e_sq_recover);
1022         if (MLX5_IPSEC_DEV(c->priv->mdev))
1023                 set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
1024         if (mlx5_accel_is_tls_device(c->priv->mdev))
1025                 set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
1026
1027         param->wq.db_numa_node = cpu_to_node(c->cpu);
1028         err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
1029         if (err)
1030                 return err;
1031         wq->db    = &wq->db[MLX5_SND_DBR];
1032
1033         err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
1034         if (err)
1035                 goto err_sq_wq_destroy;
1036
1037         INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work);
1038         sq->dim.mode = params->tx_cq_moderation.cq_period_mode;
1039
1040         return 0;
1041
1042 err_sq_wq_destroy:
1043         mlx5_wq_destroy(&sq->wq_ctrl);
1044
1045         return err;
1046 }
1047
1048 static void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
1049 {
1050         mlx5e_free_txqsq_db(sq);
1051         mlx5_wq_destroy(&sq->wq_ctrl);
1052 }
1053
1054 struct mlx5e_create_sq_param {
1055         struct mlx5_wq_ctrl        *wq_ctrl;
1056         u32                         cqn;
1057         u32                         tisn;
1058         u8                          tis_lst_sz;
1059         u8                          min_inline_mode;
1060 };
1061
1062 static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
1063                            struct mlx5e_sq_param *param,
1064                            struct mlx5e_create_sq_param *csp,
1065                            u32 *sqn)
1066 {
1067         void *in;
1068         void *sqc;
1069         void *wq;
1070         int inlen;
1071         int err;
1072
1073         inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
1074                 sizeof(u64) * csp->wq_ctrl->buf.npages;
1075         in = kvzalloc(inlen, GFP_KERNEL);
1076         if (!in)
1077                 return -ENOMEM;
1078
1079         sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
1080         wq = MLX5_ADDR_OF(sqc, sqc, wq);
1081
1082         memcpy(sqc, param->sqc, sizeof(param->sqc));
1083         MLX5_SET(sqc,  sqc, tis_lst_sz, csp->tis_lst_sz);
1084         MLX5_SET(sqc,  sqc, tis_num_0, csp->tisn);
1085         MLX5_SET(sqc,  sqc, cqn, csp->cqn);
1086
1087         if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
1088                 MLX5_SET(sqc,  sqc, min_wqe_inline_mode, csp->min_inline_mode);
1089
1090         MLX5_SET(sqc,  sqc, state, MLX5_SQC_STATE_RST);
1091         MLX5_SET(sqc,  sqc, flush_in_error_en, 1);
1092
1093         MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
1094         MLX5_SET(wq,   wq, uar_page,      mdev->mlx5e_res.bfreg.index);
1095         MLX5_SET(wq,   wq, log_wq_pg_sz,  csp->wq_ctrl->buf.page_shift -
1096                                           MLX5_ADAPTER_PAGE_SHIFT);
1097         MLX5_SET64(wq, wq, dbr_addr,      csp->wq_ctrl->db.dma);
1098
1099         mlx5_fill_page_frag_array(&csp->wq_ctrl->buf,
1100                                   (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
1101
1102         err = mlx5_core_create_sq(mdev, in, inlen, sqn);
1103
1104         kvfree(in);
1105
1106         return err;
1107 }
1108
1109 struct mlx5e_modify_sq_param {
1110         int curr_state;
1111         int next_state;
1112         bool rl_update;
1113         int rl_index;
1114 };
1115
1116 static int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
1117                            struct mlx5e_modify_sq_param *p)
1118 {
1119         void *in;
1120         void *sqc;
1121         int inlen;
1122         int err;
1123
1124         inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
1125         in = kvzalloc(inlen, GFP_KERNEL);
1126         if (!in)
1127                 return -ENOMEM;
1128
1129         sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
1130
1131         MLX5_SET(modify_sq_in, in, sq_state, p->curr_state);
1132         MLX5_SET(sqc, sqc, state, p->next_state);
1133         if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) {
1134                 MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
1135                 MLX5_SET(sqc,  sqc, packet_pacing_rate_limit_index, p->rl_index);
1136         }
1137
1138         err = mlx5_core_modify_sq(mdev, sqn, in, inlen);
1139
1140         kvfree(in);
1141
1142         return err;
1143 }
1144
1145 static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
1146 {
1147         mlx5_core_destroy_sq(mdev, sqn);
1148 }
1149
1150 static int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
1151                                struct mlx5e_sq_param *param,
1152                                struct mlx5e_create_sq_param *csp,
1153                                u32 *sqn)
1154 {
1155         struct mlx5e_modify_sq_param msp = {0};
1156         int err;
1157
1158         err = mlx5e_create_sq(mdev, param, csp, sqn);
1159         if (err)
1160                 return err;
1161
1162         msp.curr_state = MLX5_SQC_STATE_RST;
1163         msp.next_state = MLX5_SQC_STATE_RDY;
1164         err = mlx5e_modify_sq(mdev, *sqn, &msp);
1165         if (err)
1166                 mlx5e_destroy_sq(mdev, *sqn);
1167
1168         return err;
1169 }
1170
1171 static int mlx5e_set_sq_maxrate(struct net_device *dev,
1172                                 struct mlx5e_txqsq *sq, u32 rate);
1173
1174 static int mlx5e_open_txqsq(struct mlx5e_channel *c,
1175                             u32 tisn,
1176                             int txq_ix,
1177                             struct mlx5e_params *params,
1178                             struct mlx5e_sq_param *param,
1179                             struct mlx5e_txqsq *sq)
1180 {
1181         struct mlx5e_create_sq_param csp = {};
1182         u32 tx_rate;
1183         int err;
1184
1185         err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq);
1186         if (err)
1187                 return err;
1188
1189         csp.tisn            = tisn;
1190         csp.tis_lst_sz      = 1;
1191         csp.cqn             = sq->cq.mcq.cqn;
1192         csp.wq_ctrl         = &sq->wq_ctrl;
1193         csp.min_inline_mode = sq->min_inline_mode;
1194         err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
1195         if (err)
1196                 goto err_free_txqsq;
1197
1198         tx_rate = c->priv->tx_rates[sq->txq_ix];
1199         if (tx_rate)
1200                 mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate);
1201
1202         if (params->tx_dim_enabled)
1203                 sq->state |= BIT(MLX5E_SQ_STATE_AM);
1204
1205         return 0;
1206
1207 err_free_txqsq:
1208         clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1209         mlx5e_free_txqsq(sq);
1210
1211         return err;
1212 }
1213
1214 static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq)
1215 {
1216         WARN_ONCE(sq->cc != sq->pc,
1217                   "SQ 0x%x: cc (0x%x) != pc (0x%x)\n",
1218                   sq->sqn, sq->cc, sq->pc);
1219         sq->cc = 0;
1220         sq->dma_fifo_cc = 0;
1221         sq->pc = 0;
1222 }
1223
1224 static void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
1225 {
1226         sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
1227         clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
1228         set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1229         netdev_tx_reset_queue(sq->txq);
1230         netif_tx_start_queue(sq->txq);
1231 }
1232
1233 static inline void netif_tx_disable_queue(struct netdev_queue *txq)
1234 {
1235         __netif_tx_lock_bh(txq);
1236         netif_tx_stop_queue(txq);
1237         __netif_tx_unlock_bh(txq);
1238 }
1239
1240 static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
1241 {
1242         struct mlx5e_channel *c = sq->channel;
1243         struct mlx5_wq_cyc *wq = &sq->wq;
1244
1245         clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1246         /* prevent netif_tx_wake_queue */
1247         napi_synchronize(&c->napi);
1248
1249         netif_tx_disable_queue(sq->txq);
1250
1251         /* last doorbell out, godspeed .. */
1252         if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) {
1253                 u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
1254                 struct mlx5e_tx_wqe *nop;
1255
1256                 sq->db.wqe_info[pi].skb = NULL;
1257                 nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
1258                 mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl);
1259         }
1260 }
1261
1262 static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
1263 {
1264         struct mlx5e_channel *c = sq->channel;
1265         struct mlx5_core_dev *mdev = c->mdev;
1266         struct mlx5_rate_limit rl = {0};
1267
1268         mlx5e_destroy_sq(mdev, sq->sqn);
1269         if (sq->rate_limit) {
1270                 rl.rate = sq->rate_limit;
1271                 mlx5_rl_remove_rate(mdev, &rl);
1272         }
1273         mlx5e_free_txqsq_descs(sq);
1274         mlx5e_free_txqsq(sq);
1275 }
1276
1277 static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
1278 {
1279         unsigned long exp_time = jiffies + msecs_to_jiffies(2000);
1280
1281         while (time_before(jiffies, exp_time)) {
1282                 if (sq->cc == sq->pc)
1283                         return 0;
1284
1285                 msleep(20);
1286         }
1287
1288         netdev_err(sq->channel->netdev,
1289                    "Wait for SQ 0x%x flush timeout (sq cc = 0x%x, sq pc = 0x%x)\n",
1290                    sq->sqn, sq->cc, sq->pc);
1291
1292         return -ETIMEDOUT;
1293 }
1294
1295 static int mlx5e_sq_to_ready(struct mlx5e_txqsq *sq, int curr_state)
1296 {
1297         struct mlx5_core_dev *mdev = sq->channel->mdev;
1298         struct net_device *dev = sq->channel->netdev;
1299         struct mlx5e_modify_sq_param msp = {0};
1300         int err;
1301
1302         msp.curr_state = curr_state;
1303         msp.next_state = MLX5_SQC_STATE_RST;
1304
1305         err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
1306         if (err) {
1307                 netdev_err(dev, "Failed to move sq 0x%x to reset\n", sq->sqn);
1308                 return err;
1309         }
1310
1311         memset(&msp, 0, sizeof(msp));
1312         msp.curr_state = MLX5_SQC_STATE_RST;
1313         msp.next_state = MLX5_SQC_STATE_RDY;
1314
1315         err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
1316         if (err) {
1317                 netdev_err(dev, "Failed to move sq 0x%x to ready\n", sq->sqn);
1318                 return err;
1319         }
1320
1321         return 0;
1322 }
1323
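/* SQ error recovery worker: verify the SQ is really in the ERROR state, stop
 * the TX queue, wait for in-flight WQEs to drain, then (rate-limited by
 * MLX5E_SQ_RECOVER_MIN_INTERVAL) move the SQ through RST back to RDY, reset
 * the SW producer/consumer counters and re-activate the queue.
 */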
1324 static void mlx5e_sq_recover(struct work_struct *work)
1325 {
1326         struct mlx5e_txqsq_recover *recover =
1327                 container_of(work, struct mlx5e_txqsq_recover,
1328                              recover_work);
1329         struct mlx5e_txqsq *sq = container_of(recover, struct mlx5e_txqsq,
1330                                               recover);
1331         struct mlx5_core_dev *mdev = sq->channel->mdev;
1332         struct net_device *dev = sq->channel->netdev;
1333         u8 state;
1334         int err;
1335
1336         err = mlx5_core_query_sq_state(mdev, sq->sqn, &state);
1337         if (err) {
1338                 netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n",
1339                            sq->sqn, err);
1340                 return;
1341         }
1342
1343         if (state != MLX5_SQC_STATE_ERR) {
1344                 netdev_err(dev, "SQ 0x%x not in ERROR state\n", sq->sqn);
1345                 return;
1346         }
1347
1348         netif_tx_disable_queue(sq->txq);
1349
1350         if (mlx5e_wait_for_sq_flush(sq))
1351                 return;
1352
1353         /* If the interval between two consecutive recovers per SQ is too
1354          * short, don't recover to avoid infinite loop of ERR_CQE -> recover.
1355          * If we reached this state, there is probably a bug that needs to be
1356          * fixed. Let's keep the queue closed and let the TX timeout handler clean up.
1357          */
1358         if (jiffies_to_msecs(jiffies - recover->last_recover) <
1359             MLX5E_SQ_RECOVER_MIN_INTERVAL) {
1360                 netdev_err(dev, "Recover SQ 0x%x canceled, too many error CQEs\n",
1361                            sq->sqn);
1362                 return;
1363         }
1364
1365         /* At this point, no new packets will arrive from the stack as TXQ is
1366          * marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI cleared all
1367          * pending WQEs, so it is now safe to reset the SQ.
1368          */
1369         if (mlx5e_sq_to_ready(sq, state))
1370                 return;
1371
1372         mlx5e_reset_txqsq_cc_pc(sq);
1373         sq->stats.recover++;
1374         recover->last_recover = jiffies;
1375         mlx5e_activate_txqsq(sq);
1376 }
1377
1378 static int mlx5e_open_icosq(struct mlx5e_channel *c,
1379                             struct mlx5e_params *params,
1380                             struct mlx5e_sq_param *param,
1381                             struct mlx5e_icosq *sq)
1382 {
1383         struct mlx5e_create_sq_param csp = {};
1384         int err;
1385
1386         err = mlx5e_alloc_icosq(c, param, sq);
1387         if (err)
1388                 return err;
1389
1390         csp.cqn             = sq->cq.mcq.cqn;
1391         csp.wq_ctrl         = &sq->wq_ctrl;
1392         csp.min_inline_mode = params->tx_min_inline_mode;
1393         set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1394         err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
1395         if (err)
1396                 goto err_free_icosq;
1397
1398         return 0;
1399
1400 err_free_icosq:
1401         clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1402         mlx5e_free_icosq(sq);
1403
1404         return err;
1405 }
1406
1407 static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
1408 {
1409         struct mlx5e_channel *c = sq->channel;
1410
1411         clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1412         napi_synchronize(&c->napi);
1413
1414         mlx5e_destroy_sq(c->mdev, sq->sqn);
1415         mlx5e_free_icosq(sq);
1416 }
1417
1418 static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
1419                             struct mlx5e_params *params,
1420                             struct mlx5e_sq_param *param,
1421                             struct mlx5e_xdpsq *sq)
1422 {
1423         unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
1424         struct mlx5e_create_sq_param csp = {};
1425         unsigned int inline_hdr_sz = 0;
1426         int err;
1427         int i;
1428
1429         err = mlx5e_alloc_xdpsq(c, params, param, sq);
1430         if (err)
1431                 return err;
1432
1433         csp.tis_lst_sz      = 1;
1434         csp.tisn            = c->priv->tisn[0]; /* tc = 0 */
1435         csp.cqn             = sq->cq.mcq.cqn;
1436         csp.wq_ctrl         = &sq->wq_ctrl;
1437         csp.min_inline_mode = sq->min_inline_mode;
1438         set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1439         err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
1440         if (err)
1441                 goto err_free_xdpsq;
1442
1443         if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
1444                 inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
1445                 ds_cnt++;
1446         }
1447
1448         /* Pre-initialize the fixed WQE fields */
1449         for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
1450                 struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(&sq->wq, i);
1451                 struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
1452                 struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
1453                 struct mlx5_wqe_data_seg *dseg;
1454
1455                 cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
1456                 eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
1457
1458                 dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
1459                 dseg->lkey = sq->mkey_be;
1460         }
1461
1462         return 0;
1463
1464 err_free_xdpsq:
1465         clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1466         mlx5e_free_xdpsq(sq);
1467
1468         return err;
1469 }
1470
1471 static void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
1472 {
1473         struct mlx5e_channel *c = sq->channel;
1474
1475         clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1476         napi_synchronize(&c->napi);
1477
1478         mlx5e_destroy_sq(c->mdev, sq->sqn);
1479         mlx5e_free_xdpsq_descs(sq);
1480         mlx5e_free_xdpsq(sq);
1481 }
1482
1483 static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
1484                                  struct mlx5e_cq_param *param,
1485                                  struct mlx5e_cq *cq)
1486 {
1487         struct mlx5_core_cq *mcq = &cq->mcq;
1488         int eqn_not_used;
1489         unsigned int irqn;
1490         int err;
1491         u32 i;
1492
1493         err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
1494                                &cq->wq_ctrl);
1495         if (err)
1496                 return err;
1497
1498         mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
1499
1500         mcq->cqe_sz     = 64;
1501         mcq->set_ci_db  = cq->wq_ctrl.db.db;
1502         mcq->arm_db     = cq->wq_ctrl.db.db + 1;
1503         *mcq->set_ci_db = 0;
1504         *mcq->arm_db    = 0;
1505         mcq->vector     = param->eq_ix;
1506         mcq->comp       = mlx5e_completion_event;
1507         mcq->event      = mlx5e_cq_error_event;
1508         mcq->irqn       = irqn;
1509
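        /* Initialize op_own of every CQE to 0xf1: an invalid opcode in the
         * high nibble with the ownership bit set, so the driver will not
         * mistake fresh, not-yet-written entries for valid completions.
         */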
1510         for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
1511                 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
1512
1513                 cqe->op_own = 0xf1;
1514         }
1515
1516         cq->mdev = mdev;
1517
1518         return 0;
1519 }
1520
1521 static int mlx5e_alloc_cq(struct mlx5e_channel *c,
1522                           struct mlx5e_cq_param *param,
1523                           struct mlx5e_cq *cq)
1524 {
1525         struct mlx5_core_dev *mdev = c->priv->mdev;
1526         int err;
1527
1528         param->wq.buf_numa_node = cpu_to_node(c->cpu);
1529         param->wq.db_numa_node  = cpu_to_node(c->cpu);
1530         param->eq_ix   = c->ix;
1531
1532         err = mlx5e_alloc_cq_common(mdev, param, cq);
1533
1534         cq->napi    = &c->napi;
1535         cq->channel = c;
1536
1537         return err;
1538 }
1539
1540 static void mlx5e_free_cq(struct mlx5e_cq *cq)
1541 {
1542         mlx5_wq_destroy(&cq->wq_ctrl);
1543 }
1544
1545 static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
1546 {
1547         struct mlx5_core_dev *mdev = cq->mdev;
1548         struct mlx5_core_cq *mcq = &cq->mcq;
1549
1550         void *in;
1551         void *cqc;
1552         int inlen;
1553         unsigned int irqn_not_used;
1554         int eqn;
1555         int err;
1556
1557         inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
1558                 sizeof(u64) * cq->wq_ctrl.buf.npages;
1559         in = kvzalloc(inlen, GFP_KERNEL);
1560         if (!in)
1561                 return -ENOMEM;
1562
1563         cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
1564
1565         memcpy(cqc, param->cqc, sizeof(param->cqc));
1566
1567         mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
1568                                   (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
1569
1570         mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
1571
1572         MLX5_SET(cqc,   cqc, cq_period_mode, param->cq_period_mode);
1573         MLX5_SET(cqc,   cqc, c_eqn,         eqn);
1574         MLX5_SET(cqc,   cqc, uar_page,      mdev->priv.uar->index);
1575         MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
1576                                             MLX5_ADAPTER_PAGE_SHIFT);
1577         MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);
1578
1579         err = mlx5_core_create_cq(mdev, mcq, in, inlen);
1580
1581         kvfree(in);
1582
1583         if (err)
1584                 return err;
1585
1586         mlx5e_cq_arm(cq);
1587
1588         return 0;
1589 }
1590
1591 static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
1592 {
1593         mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
1594 }
1595
1596 static int mlx5e_open_cq(struct mlx5e_channel *c,
1597                          struct net_dim_cq_moder moder,
1598                          struct mlx5e_cq_param *param,
1599                          struct mlx5e_cq *cq)
1600 {
1601         struct mlx5_core_dev *mdev = c->mdev;
1602         int err;
1603
1604         err = mlx5e_alloc_cq(c, param, cq);
1605         if (err)
1606                 return err;
1607
1608         err = mlx5e_create_cq(cq, param);
1609         if (err)
1610                 goto err_free_cq;
1611
1612         if (MLX5_CAP_GEN(mdev, cq_moderation))
1613                 mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts);
1614         return 0;
1615
1616 err_free_cq:
1617         mlx5e_free_cq(cq);
1618
1619         return err;
1620 }
1621
1622 static void mlx5e_close_cq(struct mlx5e_cq *cq)
1623 {
1624         mlx5e_destroy_cq(cq);
1625         mlx5e_free_cq(cq);
1626 }
1627
1628 static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
1629 {
1630         return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
1631 }
1632
1633 static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
1634                              struct mlx5e_params *params,
1635                              struct mlx5e_channel_param *cparam)
1636 {
1637         int err;
1638         int tc;
1639
1640         for (tc = 0; tc < c->num_tc; tc++) {
1641                 err = mlx5e_open_cq(c, params->tx_cq_moderation,
1642                                     &cparam->tx_cq, &c->sq[tc].cq);
1643                 if (err)
1644                         goto err_close_tx_cqs;
1645         }
1646
1647         return 0;
1648
1649 err_close_tx_cqs:
1650         for (tc--; tc >= 0; tc--)
1651                 mlx5e_close_cq(&c->sq[tc].cq);
1652
1653         return err;
1654 }
1655
1656 static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
1657 {
1658         int tc;
1659
1660         for (tc = 0; tc < c->num_tc; tc++)
1661                 mlx5e_close_cq(&c->sq[tc].cq);
1662 }
1663
1664 static int mlx5e_open_sqs(struct mlx5e_channel *c,
1665                           struct mlx5e_params *params,
1666                           struct mlx5e_channel_param *cparam)
1667 {
1668         int err;
1669         int tc;
1670
1671         for (tc = 0; tc < params->num_tc; tc++) {
1672                 int txq_ix = c->ix + tc * params->num_channels;
1673
1674                 err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix,
1675                                        params, &cparam->sq, &c->sq[tc]);
1676                 if (err)
1677                         goto err_close_sqs;
1678         }
1679
1680         return 0;
1681
1682 err_close_sqs:
1683         for (tc--; tc >= 0; tc--)
1684                 mlx5e_close_txqsq(&c->sq[tc]);
1685
1686         return err;
1687 }
1688
1689 static void mlx5e_close_sqs(struct mlx5e_channel *c)
1690 {
1691         int tc;
1692
1693         for (tc = 0; tc < c->num_tc; tc++)
1694                 mlx5e_close_txqsq(&c->sq[tc]);
1695 }
1696
1697 static int mlx5e_set_sq_maxrate(struct net_device *dev,
1698                                 struct mlx5e_txqsq *sq, u32 rate)
1699 {
1700         struct mlx5e_priv *priv = netdev_priv(dev);
1701         struct mlx5_core_dev *mdev = priv->mdev;
1702         struct mlx5e_modify_sq_param msp = {0};
1703         struct mlx5_rate_limit rl = {0};
1704         u16 rl_index = 0;
1705         int err;
1706
1707         if (rate == sq->rate_limit)
1708                 /* nothing to do */
1709                 return 0;
1710
1711         if (sq->rate_limit) {
1712                 rl.rate = sq->rate_limit;
1713                 /* remove the current rl index to free space for the next ones */
1714                 mlx5_rl_remove_rate(mdev, &rl);
1715         }
1716
1717         sq->rate_limit = 0;
1718
1719         if (rate) {
1720                 rl.rate = rate;
1721                 err = mlx5_rl_add_rate(mdev, &rl_index, &rl);
1722                 if (err) {
1723                         netdev_err(dev, "Failed configuring rate %u: %d\n",
1724                                    rate, err);
1725                         return err;
1726                 }
1727         }
1728
1729         msp.curr_state = MLX5_SQC_STATE_RDY;
1730         msp.next_state = MLX5_SQC_STATE_RDY;
1731         msp.rl_index   = rl_index;
1732         msp.rl_update  = true;
1733         err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
1734         if (err) {
1735                 netdev_err(dev, "Failed configuring rate %u: %d\n",
1736                            rate, err);
1737                 /* remove the rate from the table */
1738                 if (rate)
1739                         mlx5_rl_remove_rate(mdev, &rl);
1740                 return err;
1741         }
1742
1743         sq->rate_limit = rate;
1744         return 0;
1745 }
1746
1747 static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
1748 {
1749         struct mlx5e_priv *priv = netdev_priv(dev);
1750         struct mlx5_core_dev *mdev = priv->mdev;
1751         struct mlx5e_txqsq *sq = priv->txq2sq[index];
1752         int err = 0;
1753
1754         if (!mlx5_rl_is_supported(mdev)) {
1755                 netdev_err(dev, "Rate limiting is not supported on this device\n");
1756                 return -EINVAL;
1757         }
1758
1759         /* rate is given in Mb/sec, HW config is in Kb/sec */
1760         rate = rate << 10;
1761
1762         /* Check whether the rate is in a valid range; 0 is always valid */
1763         if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
1764                 netdev_err(dev, "TX rate %u is not in range\n", rate);
1765                 return -ERANGE;
1766         }
1767
1768         mutex_lock(&priv->state_lock);
1769         if (test_bit(MLX5E_STATE_OPENED, &priv->state))
1770                 err = mlx5e_set_sq_maxrate(dev, sq, rate);
1771         if (!err)
1772                 priv->tx_rates[index] = rate;
1773         mutex_unlock(&priv->state_lock);
1774
1775         return err;
1776 }
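A quick worked example of the Mb/s-to-Kb/s conversion performed above: the shift by 10 multiplies by 1024 rather than 1000, so the value handed to the rate-limit table slightly exceeds a strict decimal conversion. Stand-alone sketch for illustration:

#include <stdio.h>

int main(void)
{
	unsigned int rate_mbps = 100;
	unsigned int rate_kbps = rate_mbps << 10;	/* same conversion as mlx5e_set_tx_maxrate() */

	/* Prints "100 Mb/s -> 102400 Kb/s" (vs. 100000 for a strict x1000 conversion) */
	printf("%u Mb/s -> %u Kb/s\n", rate_mbps, rate_kbps);
	return 0;
}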
1777
1778 static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
1779                               struct mlx5e_params *params,
1780                               struct mlx5e_channel_param *cparam,
1781                               struct mlx5e_channel **cp)
1782 {
1783         struct net_dim_cq_moder icocq_moder = {0, 0};
1784         struct net_device *netdev = priv->netdev;
1785         int cpu = mlx5e_get_cpu(priv, ix);
1786         struct mlx5e_channel *c;
1787         unsigned int irq;
1788         int err;
1789         int eqn;
1790
1791         c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
1792         if (!c)
1793                 return -ENOMEM;
1794
1795         c->priv     = priv;
1796         c->mdev     = priv->mdev;
1797         c->tstamp   = &priv->tstamp;
1798         c->ix       = ix;
1799         c->cpu      = cpu;
1800         c->pdev     = &priv->mdev->pdev->dev;
1801         c->netdev   = priv->netdev;
1802         c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
1803         c->num_tc   = params->num_tc;
1804         c->xdp      = !!params->xdp_prog;
1805
1806         mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
1807         c->irq_desc = irq_to_desc(irq);
1808
1809         netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
1810
1811         err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq);
1812         if (err)
1813                 goto err_napi_del;
1814
1815         err = mlx5e_open_tx_cqs(c, params, cparam);
1816         if (err)
1817                 goto err_close_icosq_cq;
1818
1819         err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq);
1820         if (err)
1821                 goto err_close_tx_cqs;
1822
1823         /* XDP SQ CQ params are the same as normal TXQ SQ CQ params */
1824         err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation,
1825                                      &cparam->tx_cq, &c->rq.xdpsq.cq) : 0;
1826         if (err)
1827                 goto err_close_rx_cq;
1828
1829         napi_enable(&c->napi);
1830
1831         err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
1832         if (err)
1833                 goto err_disable_napi;
1834
1835         err = mlx5e_open_sqs(c, params, cparam);
1836         if (err)
1837                 goto err_close_icosq;
1838
1839         err = c->xdp ? mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->rq.xdpsq) : 0;
1840         if (err)
1841                 goto err_close_sqs;
1842
1843         err = mlx5e_open_rq(c, params, &cparam->rq, &c->rq);
1844         if (err)
1845                 goto err_close_xdp_sq;
1846
1847         *cp = c;
1848
1849         return 0;
1850 err_close_xdp_sq:
1851         if (c->xdp)
1852                 mlx5e_close_xdpsq(&c->rq.xdpsq);
1853
1854 err_close_sqs:
1855         mlx5e_close_sqs(c);
1856
1857 err_close_icosq:
1858         mlx5e_close_icosq(&c->icosq);
1859
1860 err_disable_napi:
1861         napi_disable(&c->napi);
1862         if (c->xdp)
1863                 mlx5e_close_cq(&c->rq.xdpsq.cq);
1864
1865 err_close_rx_cq:
1866         mlx5e_close_cq(&c->rq.cq);
1867
1868 err_close_tx_cqs:
1869         mlx5e_close_tx_cqs(c);
1870
1871 err_close_icosq_cq:
1872         mlx5e_close_cq(&c->icosq.cq);
1873
1874 err_napi_del:
1875         netif_napi_del(&c->napi);
1876         kfree(c);
1877
1878         return err;
1879 }
1880
1881 static void mlx5e_activate_channel(struct mlx5e_channel *c)
1882 {
1883         int tc;
1884
1885         for (tc = 0; tc < c->num_tc; tc++)
1886                 mlx5e_activate_txqsq(&c->sq[tc]);
1887         mlx5e_activate_rq(&c->rq);
1888         netif_set_xps_queue(c->netdev, get_cpu_mask(c->cpu), c->ix);
1889 }
1890
1891 static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
1892 {
1893         int tc;
1894
1895         mlx5e_deactivate_rq(&c->rq);
1896         for (tc = 0; tc < c->num_tc; tc++)
1897                 mlx5e_deactivate_txqsq(&c->sq[tc]);
1898 }
1899
1900 static void mlx5e_close_channel(struct mlx5e_channel *c)
1901 {
1902         mlx5e_close_rq(&c->rq);
1903         if (c->xdp)
1904                 mlx5e_close_xdpsq(&c->rq.xdpsq);
1905         mlx5e_close_sqs(c);
1906         mlx5e_close_icosq(&c->icosq);
1907         napi_disable(&c->napi);
1908         if (c->xdp)
1909                 mlx5e_close_cq(&c->rq.xdpsq.cq);
1910         mlx5e_close_cq(&c->rq.cq);
1911         mlx5e_close_tx_cqs(c);
1912         mlx5e_close_cq(&c->icosq.cq);
1913         netif_napi_del(&c->napi);
1914
1915         kfree(c);
1916 }
1917
1918 static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
1919                                  struct mlx5e_params *params,
1920                                  struct mlx5e_rq_param *param)
1921 {
1922         struct mlx5_core_dev *mdev = priv->mdev;
1923         void *rqc = param->rqc;
1924         void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
1925
1926         switch (params->rq_wq_type) {
1927         case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
1928                 MLX5_SET(wq, wq, log_wqe_num_of_strides,
1929                          mlx5e_mpwqe_get_log_num_strides(mdev, params) -
1930                          MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
1931                 MLX5_SET(wq, wq, log_wqe_stride_size,
1932                          mlx5e_mpwqe_get_log_stride_size(mdev, params) -
1933                          MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
1934                 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
1935                 MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params));
1936                 break;
1937         default: /* MLX5_WQ_TYPE_LINKED_LIST */
1938                 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
1939                 MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
1940         }
1941
1942         MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
1943         MLX5_SET(wq, wq, log_wq_stride,    ilog2(sizeof(struct mlx5e_rx_wqe)));
1944         MLX5_SET(wq, wq, pd,               mdev->mlx5e_res.pdn);
1945         MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
1946         MLX5_SET(rqc, rqc, vsd,            params->vlan_strip_disable);
1947         MLX5_SET(rqc, rqc, scatter_fcs,    params->scatter_fcs_en);
1948
1949         param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
1950 }
1951
1952 static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
1953                                       struct mlx5e_rq_param *param)
1954 {
1955         struct mlx5_core_dev *mdev = priv->mdev;
1956         void *rqc = param->rqc;
1957         void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
1958
1959         MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
1960         MLX5_SET(wq, wq, log_wq_stride,    ilog2(sizeof(struct mlx5e_rx_wqe)));
1961         MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter);
1962
1963         param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
1964 }
1965
1966 static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
1967                                         struct mlx5e_sq_param *param)
1968 {
1969         void *sqc = param->sqc;
1970         void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
1971
1972         MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
1973         MLX5_SET(wq, wq, pd,            priv->mdev->mlx5e_res.pdn);
1974
1975         param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
1976 }
1977
1978 static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
1979                                  struct mlx5e_params *params,
1980                                  struct mlx5e_sq_param *param)
1981 {
1982         void *sqc = param->sqc;
1983         void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
1984
1985         mlx5e_build_sq_param_common(priv, param);
1986         MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
1987         MLX5_SET(sqc, sqc, allow_swp, !!MLX5_IPSEC_DEV(priv->mdev));
1988 }
1989
1990 static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
1991                                         struct mlx5e_cq_param *param)
1992 {
1993         void *cqc = param->cqc;
1994
1995         MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
1996 }
1997
1998 static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
1999                                     struct mlx5e_params *params,
2000                                     struct mlx5e_cq_param *param)
2001 {
2002         struct mlx5_core_dev *mdev = priv->mdev;
2003         void *cqc = param->cqc;
2004         u8 log_cq_size;
2005
2006         switch (params->rq_wq_type) {
2007         case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
2008                 log_cq_size = mlx5e_mpwqe_get_log_rq_size(params) +
2009                         mlx5e_mpwqe_get_log_num_strides(mdev, params);
2010                 break;
2011         default: /* MLX5_WQ_TYPE_LINKED_LIST */
2012                 log_cq_size = params->log_rq_mtu_frames;
2013         }
2014
2015         MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
2016         if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
2017                 MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
2018                 MLX5_SET(cqc, cqc, cqe_comp_en, 1);
2019         }
2020
2021         mlx5e_build_common_cq_param(priv, param);
2022         param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
2023 }
2024
2025 static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
2026                                     struct mlx5e_params *params,
2027                                     struct mlx5e_cq_param *param)
2028 {
2029         void *cqc = param->cqc;
2030
2031         MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);
2032
2033         mlx5e_build_common_cq_param(priv, param);
2034         param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
2035 }
2036
2037 static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
2038                                      u8 log_wq_size,
2039                                      struct mlx5e_cq_param *param)
2040 {
2041         void *cqc = param->cqc;
2042
2043         MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);
2044
2045         mlx5e_build_common_cq_param(priv, param);
2046
2047         param->cq_period_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
2048 }
2049
2050 static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
2051                                     u8 log_wq_size,
2052                                     struct mlx5e_sq_param *param)
2053 {
2054         void *sqc = param->sqc;
2055         void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
2056
2057         mlx5e_build_sq_param_common(priv, param);
2058
2059         MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
2060         MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
2061 }
2062
2063 static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
2064                                     struct mlx5e_params *params,
2065                                     struct mlx5e_sq_param *param)
2066 {
2067         void *sqc = param->sqc;
2068         void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
2069
2070         mlx5e_build_sq_param_common(priv, param);
2071         MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
2072 }
2073
2074 static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
2075                                       struct mlx5e_params *params,
2076                                       struct mlx5e_channel_param *cparam)
2077 {
2078         u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
2079
2080         mlx5e_build_rq_param(priv, params, &cparam->rq);
2081         mlx5e_build_sq_param(priv, params, &cparam->sq);
2082         mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
2083         mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq);
2084         mlx5e_build_rx_cq_param(priv, params, &cparam->rx_cq);
2085         mlx5e_build_tx_cq_param(priv, params, &cparam->tx_cq);
2086         mlx5e_build_ico_cq_param(priv, icosq_log_wq_sz, &cparam->icosq_cq);
2087 }
2088
2089 int mlx5e_open_channels(struct mlx5e_priv *priv,
2090                         struct mlx5e_channels *chs)
2091 {
2092         struct mlx5e_channel_param *cparam;
2093         int err = -ENOMEM;
2094         int i;
2095
2096         chs->num = chs->params.num_channels;
2097
2098         chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
2099         cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
2100         if (!chs->c || !cparam)
2101                 goto err_free;
2102
2103         mlx5e_build_channel_param(priv, &chs->params, cparam);
2104         for (i = 0; i < chs->num; i++) {
2105                 err = mlx5e_open_channel(priv, i, &chs->params, cparam, &chs->c[i]);
2106                 if (err)
2107                         goto err_close_channels;
2108         }
2109
2110         kfree(cparam);
2111         return 0;
2112
2113 err_close_channels:
2114         for (i--; i >= 0; i--)
2115                 mlx5e_close_channel(chs->c[i]);
2116
2117 err_free:
2118         kfree(chs->c);
2119         kfree(cparam);
2120         chs->num = 0;
2121         return err;
2122 }
2123
2124 static void mlx5e_activate_channels(struct mlx5e_channels *chs)
2125 {
2126         int i;
2127
2128         for (i = 0; i < chs->num; i++)
2129                 mlx5e_activate_channel(chs->c[i]);
2130 }
2131
2132 static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
2133 {
2134         int err = 0;
2135         int i;
2136
2137         for (i = 0; i < chs->num; i++)
2138                 err |= mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq,
2139                                                   err ? 0 : 20000);
2140
2141         return err ? -ETIMEDOUT : 0;
2142 }
2143
2144 static void mlx5e_deactivate_channels(struct mlx5e_channels *chs)
2145 {
2146         int i;
2147
2148         for (i = 0; i < chs->num; i++)
2149                 mlx5e_deactivate_channel(chs->c[i]);
2150 }
2151
2152 void mlx5e_close_channels(struct mlx5e_channels *chs)
2153 {
2154         int i;
2155
2156         for (i = 0; i < chs->num; i++)
2157                 mlx5e_close_channel(chs->c[i]);
2158
2159         kfree(chs->c);
2160         chs->num = 0;
2161 }
2162
2163 static int
2164 mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, struct mlx5e_rqt *rqt)
2165 {
2166         struct mlx5_core_dev *mdev = priv->mdev;
2167         void *rqtc;
2168         int inlen;
2169         int err;
2170         u32 *in;
2171         int i;
2172
2173         inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
2174         in = kvzalloc(inlen, GFP_KERNEL);
2175         if (!in)
2176                 return -ENOMEM;
2177
2178         rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
2179
2180         MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
2181         MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
2182
2183         for (i = 0; i < sz; i++)
2184                 MLX5_SET(rqtc, rqtc, rq_num[i], priv->drop_rq.rqn);
2185
2186         err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
2187         if (!err)
2188                 rqt->enabled = true;
2189
2190         kvfree(in);
2191         return err;
2192 }
2193
2194 void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt)
2195 {
2196         rqt->enabled = false;
2197         mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn);
2198 }
2199
2200 int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv)
2201 {
2202         struct mlx5e_rqt *rqt = &priv->indir_rqt;
2203         int err;
2204
2205         err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, rqt);
2206         if (err)
2207                 mlx5_core_warn(priv->mdev, "create indirect rqts failed, %d\n", err);
2208         return err;
2209 }
2210
2211 int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
2212 {
2213         struct mlx5e_rqt *rqt;
2214         int err;
2215         int ix;
2216
2217         for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
2218                 rqt = &priv->direct_tir[ix].rqt;
2219                 err = mlx5e_create_rqt(priv, 1 /* size */, rqt);
2220                 if (err)
2221                         goto err_destroy_rqts;
2222         }
2223
2224         return 0;
2225
2226 err_destroy_rqts:
2227         mlx5_core_warn(priv->mdev, "create direct rqts failed, %d\n", err);
2228         for (ix--; ix >= 0; ix--)
2229                 mlx5e_destroy_rqt(priv, &priv->direct_tir[ix].rqt);
2230
2231         return err;
2232 }
2233
2234 void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv)
2235 {
2236         int i;
2237
2238         for (i = 0; i < priv->profile->max_nch(priv->mdev); i++)
2239                 mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
2240 }
2241
2242 static int mlx5e_rx_hash_fn(int hfunc)
2243 {
2244         return (hfunc == ETH_RSS_HASH_TOP) ?
2245                MLX5_RX_HASH_FN_TOEPLITZ :
2246                MLX5_RX_HASH_FN_INVERTED_XOR8;
2247 }
2248
2249 int mlx5e_bits_invert(unsigned long a, int size)
2250 {
2251         int inv = 0;
2252         int i;
2253
2254         for (i = 0; i < size; i++)
2255                 inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
2256
2257         return inv;
2258 }
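mlx5e_bits_invert() reverses the low 'size' bits of 'a'; mlx5e_fill_rqt_rqns() below uses it to walk the indirection table in bit-reversed order when the XOR8 hash function is selected. A self-contained illustration of the same reversal, reimplemented with a plain shift instead of test_bit() so it builds in user space:

#include <stdio.h>

static int bits_invert(unsigned long a, int size)
{
	int inv = 0;
	int i;

	for (i = 0; i < size; i++)
		inv |= ((a >> (size - i - 1)) & 1) << i;
	return inv;
}

int main(void)
{
	int i;

	/* For a 3-bit index (table of 8): 0..7 map to 0, 4, 2, 6, 1, 5, 3, 7 */
	for (i = 0; i < 8; i++)
		printf("%d -> %d\n", i, bits_invert(i, 3));
	return 0;
}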
2259
2260 static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, int sz,
2261                                 struct mlx5e_redirect_rqt_param rrp, void *rqtc)
2262 {
2263         int i;
2264
2265         for (i = 0; i < sz; i++) {
2266                 u32 rqn;
2267
2268                 if (rrp.is_rss) {
2269                         int ix = i;
2270
2271                         if (rrp.rss.hfunc == ETH_RSS_HASH_XOR)
2272                                 ix = mlx5e_bits_invert(i, ilog2(sz));
2273
2274                         ix = priv->channels.params.indirection_rqt[ix];
2275                         rqn = rrp.rss.channels->c[ix]->rq.rqn;
2276                 } else {
2277                         rqn = rrp.rqn;
2278                 }
2279                 MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
2280         }
2281 }
2282
2283 int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
2284                        struct mlx5e_redirect_rqt_param rrp)
2285 {
2286         struct mlx5_core_dev *mdev = priv->mdev;
2287         void *rqtc;
2288         int inlen;
2289         u32 *in;
2290         int err;
2291
2292         inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
2293         in = kvzalloc(inlen, GFP_KERNEL);
2294         if (!in)
2295                 return -ENOMEM;
2296
2297         rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
2298
2299         MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
2300         MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
2301         mlx5e_fill_rqt_rqns(priv, sz, rrp, rqtc);
2302         err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);
2303
2304         kvfree(in);
2305         return err;
2306 }
2307
2308 static u32 mlx5e_get_direct_rqn(struct mlx5e_priv *priv, int ix,
2309                                 struct mlx5e_redirect_rqt_param rrp)
2310 {
2311         if (!rrp.is_rss)
2312                 return rrp.rqn;
2313
2314         if (ix >= rrp.rss.channels->num)
2315                 return priv->drop_rq.rqn;
2316
2317         return rrp.rss.channels->c[ix]->rq.rqn;
2318 }
2319
2320 static void mlx5e_redirect_rqts(struct mlx5e_priv *priv,
2321                                 struct mlx5e_redirect_rqt_param rrp)
2322 {
2323         u32 rqtn;
2324         int ix;
2325
2326         if (priv->indir_rqt.enabled) {
2327                 /* RSS RQ table */
2328                 rqtn = priv->indir_rqt.rqtn;
2329                 mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
2330         }
2331
2332         for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
2333                 struct mlx5e_redirect_rqt_param direct_rrp = {
2334                         .is_rss = false,
2335                         {
2336                                 .rqn    = mlx5e_get_direct_rqn(priv, ix, rrp)
2337                         },
2338                 };
2339
2340                 /* Direct RQ Tables */
2341                 if (!priv->direct_tir[ix].rqt.enabled)
2342                         continue;
2343
2344                 rqtn = priv->direct_tir[ix].rqt.rqtn;
2345                 mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
2346         }
2347 }
2348
2349 static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv,
2350                                             struct mlx5e_channels *chs)
2351 {
2352         struct mlx5e_redirect_rqt_param rrp = {
2353                 .is_rss        = true,
2354                 {
2355                         .rss = {
2356                                 .channels  = chs,
2357                                 .hfunc     = chs->params.rss_hfunc,
2358                         }
2359                 },
2360         };
2361
2362         mlx5e_redirect_rqts(priv, rrp);
2363 }
2364
2365 static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv)
2366 {
2367         struct mlx5e_redirect_rqt_param drop_rrp = {
2368                 .is_rss = false,
2369                 {
2370                         .rqn = priv->drop_rq.rqn,
2371                 },
2372         };
2373
2374         mlx5e_redirect_rqts(priv, drop_rrp);
2375 }
2376
2377 static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc)
2378 {
2379         if (!params->lro_en)
2380                 return;
2381
2382 #define ROUGH_MAX_L2_L3_HDR_SZ 256
2383
2384         MLX5_SET(tirc, tirc, lro_enable_mask,
2385                  MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
2386                  MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
2387         MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
2388                  (params->lro_wqe_sz - ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
2389         MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout);
2390 }
2391
2392 void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
2393                                     enum mlx5e_traffic_types tt,
2394                                     void *tirc, bool inner)
2395 {
2396         void *hfso = inner ? MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner) :
2397                              MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
2398
2399 #define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
2400                                  MLX5_HASH_FIELD_SEL_DST_IP)
2401
2402 #define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
2403                                  MLX5_HASH_FIELD_SEL_DST_IP   |\
2404                                  MLX5_HASH_FIELD_SEL_L4_SPORT |\
2405                                  MLX5_HASH_FIELD_SEL_L4_DPORT)
2406
2407 #define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
2408                                  MLX5_HASH_FIELD_SEL_DST_IP   |\
2409                                  MLX5_HASH_FIELD_SEL_IPSEC_SPI)
2410
2411         MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(params->rss_hfunc));
2412         if (params->rss_hfunc == ETH_RSS_HASH_TOP) {
2413                 void *rss_key = MLX5_ADDR_OF(tirc, tirc,
2414                                              rx_hash_toeplitz_key);
2415                 size_t len = MLX5_FLD_SZ_BYTES(tirc,
2416                                                rx_hash_toeplitz_key);
2417
2418                 MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
2419                 memcpy(rss_key, params->toeplitz_hash_key, len);
2420         }
2421
2422         switch (tt) {
2423         case MLX5E_TT_IPV4_TCP:
2424                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2425                          MLX5_L3_PROT_TYPE_IPV4);
2426                 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2427                          MLX5_L4_PROT_TYPE_TCP);
2428                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2429                          MLX5_HASH_IP_L4PORTS);
2430                 break;
2431
2432         case MLX5E_TT_IPV6_TCP:
2433                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2434                          MLX5_L3_PROT_TYPE_IPV6);
2435                 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2436                          MLX5_L4_PROT_TYPE_TCP);
2437                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2438                          MLX5_HASH_IP_L4PORTS);
2439                 break;
2440
2441         case MLX5E_TT_IPV4_UDP:
2442                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2443                          MLX5_L3_PROT_TYPE_IPV4);
2444                 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2445                          MLX5_L4_PROT_TYPE_UDP);
2446                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2447                          MLX5_HASH_IP_L4PORTS);
2448                 break;
2449
2450         case MLX5E_TT_IPV6_UDP:
2451                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2452                          MLX5_L3_PROT_TYPE_IPV6);
2453                 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2454                          MLX5_L4_PROT_TYPE_UDP);
2455                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2456                          MLX5_HASH_IP_L4PORTS);
2457                 break;
2458
2459         case MLX5E_TT_IPV4_IPSEC_AH:
2460                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2461                          MLX5_L3_PROT_TYPE_IPV4);
2462                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2463                          MLX5_HASH_IP_IPSEC_SPI);
2464                 break;
2465
2466         case MLX5E_TT_IPV6_IPSEC_AH:
2467                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2468                          MLX5_L3_PROT_TYPE_IPV6);
2469                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2470                          MLX5_HASH_IP_IPSEC_SPI);
2471                 break;
2472
2473         case MLX5E_TT_IPV4_IPSEC_ESP:
2474                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2475                          MLX5_L3_PROT_TYPE_IPV4);
2476                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2477                          MLX5_HASH_IP_IPSEC_SPI);
2478                 break;
2479
2480         case MLX5E_TT_IPV6_IPSEC_ESP:
2481                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2482                          MLX5_L3_PROT_TYPE_IPV6);
2483                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2484                          MLX5_HASH_IP_IPSEC_SPI);
2485                 break;
2486
2487         case MLX5E_TT_IPV4:
2488                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2489                          MLX5_L3_PROT_TYPE_IPV4);
2490                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2491                          MLX5_HASH_IP);
2492                 break;
2493
2494         case MLX5E_TT_IPV6:
2495                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2496                          MLX5_L3_PROT_TYPE_IPV6);
2497                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2498                          MLX5_HASH_IP);
2499                 break;
2500         default:
2501                 WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
2502         }
2503 }
2504
2505 static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
2506 {
2507         struct mlx5_core_dev *mdev = priv->mdev;
2508
2509         void *in;
2510         void *tirc;
2511         int inlen;
2512         int err;
2513         int tt;
2514         int ix;
2515
2516         inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
2517         in = kvzalloc(inlen, GFP_KERNEL);
2518         if (!in)
2519                 return -ENOMEM;
2520
2521         MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
2522         tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
2523
2524         mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
2525
2526         for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
2527                 err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
2528                                            inlen);
2529                 if (err)
2530                         goto free_in;
2531         }
2532
2533         for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
2534                 err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
2535                                            in, inlen);
2536                 if (err)
2537                         goto free_in;
2538         }
2539
2540 free_in:
2541         kvfree(in);
2542
2543         return err;
2544 }
2545
2546 static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv,
2547                                             enum mlx5e_traffic_types tt,
2548                                             u32 *tirc)
2549 {
2550         MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
2551
2552         mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
2553
2554         MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
2555         MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
2556         MLX5_SET(tirc, tirc, tunneled_offload_en, 0x1);
2557
2558         mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, true);
2559 }
2560
2561 static int mlx5e_set_mtu(struct mlx5_core_dev *mdev,
2562                          struct mlx5e_params *params, u16 mtu)
2563 {
2564         u16 hw_mtu = MLX5E_SW2HW_MTU(params, mtu);
2565         int err;
2566
2567         err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
2568         if (err)
2569                 return err;
2570
2571         /* Update vport context MTU */
2572         mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
2573         return 0;
2574 }
2575
2576 static void mlx5e_query_mtu(struct mlx5_core_dev *mdev,
2577                             struct mlx5e_params *params, u16 *mtu)
2578 {
2579         u16 hw_mtu = 0;
2580         int err;
2581
2582         err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
2583         if (err || !hw_mtu) /* fall back to port oper MTU */
2584                 mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
2585
2586         *mtu = MLX5E_HW2SW_MTU(params, hw_mtu);
2587 }
2588
2589 static int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
2590 {
2591         struct mlx5e_params *params = &priv->channels.params;
2592         struct net_device *netdev = priv->netdev;
2593         struct mlx5_core_dev *mdev = priv->mdev;
2594         u16 mtu;
2595         int err;
2596
2597         err = mlx5e_set_mtu(mdev, params, params->sw_mtu);
2598         if (err)
2599                 return err;
2600
2601         mlx5e_query_mtu(mdev, params, &mtu);
2602         if (mtu != params->sw_mtu)
2603                 netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
2604                             __func__, mtu, params->sw_mtu);
2605
2606         params->sw_mtu = mtu;
2607         return 0;
2608 }
2609
2610 static void mlx5e_netdev_set_tcs(struct net_device *netdev)
2611 {
2612         struct mlx5e_priv *priv = netdev_priv(netdev);
2613         int nch = priv->channels.params.num_channels;
2614         int ntc = priv->channels.params.num_tc;
2615         int tc;
2616
2617         netdev_reset_tc(netdev);
2618
2619         if (ntc == 1)
2620                 return;
2621
2622         netdev_set_num_tc(netdev, ntc);
2623
2624         /* Map all netdev TCs to queue offset 0.
2625          * We keep our own UP -> TXQ mapping for QoS.
2626          */
2627         for (tc = 0; tc < ntc; tc++)
2628                 netdev_set_tc_queue(netdev, tc, nch, 0);
2629 }
2630
2631 static void mlx5e_build_channels_tx_maps(struct mlx5e_priv *priv)
2632 {
2633         struct mlx5e_channel *c;
2634         struct mlx5e_txqsq *sq;
2635         int i, tc;
2636
2637         for (i = 0; i < priv->channels.num; i++)
2638                 for (tc = 0; tc < priv->profile->max_tc; tc++)
2639                         priv->channel_tc2txq[i][tc] = i + tc * priv->channels.num;
2640
2641         for (i = 0; i < priv->channels.num; i++) {
2642                 c = priv->channels.c[i];
2643                 for (tc = 0; tc < c->num_tc; tc++) {
2644                         sq = &c->sq[tc];
2645                         priv->txq2sq[sq->txq_ix] = sq;
2646                 }
2647         }
2648 }
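The mapping built above groups TX queues by traffic class: txq index = channel + tc * num_channels, matching the txq_ix computed in mlx5e_open_sqs(). A tiny stand-alone illustration of the resulting layout, assuming 4 channels and 2 TCs:

#include <stdio.h>

int main(void)
{
	int num_channels = 4, num_tc = 2;
	int ch, tc;

	/* txqs 0..3 carry tc 0, txqs 4..7 carry tc 1 */
	for (tc = 0; tc < num_tc; tc++)
		for (ch = 0; ch < num_channels; ch++)
			printf("txq %d -> channel %d, tc %d\n",
			       ch + tc * num_channels, ch, tc);
	return 0;
}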
2649
2650 void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
2651 {
2652         int num_txqs = priv->channels.num * priv->channels.params.num_tc;
2653         struct net_device *netdev = priv->netdev;
2654
2655         mlx5e_netdev_set_tcs(netdev);
2656         netif_set_real_num_tx_queues(netdev, num_txqs);
2657         netif_set_real_num_rx_queues(netdev, priv->channels.num);
2658
2659         mlx5e_build_channels_tx_maps(priv);
2660         mlx5e_activate_channels(&priv->channels);
2661         write_lock(&priv->stats_lock);
2662         priv->channels_active = true;
2663         write_unlock(&priv->stats_lock);
2664         netif_tx_start_all_queues(priv->netdev);
2665
2666         if (MLX5_VPORT_MANAGER(priv->mdev))
2667                 mlx5e_add_sqs_fwd_rules(priv);
2668
2669         mlx5e_wait_channels_min_rx_wqes(&priv->channels);
2670         mlx5e_redirect_rqts_to_channels(priv, &priv->channels);
2671 }
2672
2673 void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
2674 {
2675         mlx5e_redirect_rqts_to_drop(priv);
2676
2677         if (MLX5_VPORT_MANAGER(priv->mdev))
2678                 mlx5e_remove_sqs_fwd_rules(priv);
2679
2680         /* FIXME: This is a W/A only for tx timeout watchdog false alarms when
2681          * polling for inactive tx queues.
2682          */
2683         netif_tx_stop_all_queues(priv->netdev);
2684         netif_tx_disable(priv->netdev);
2685         write_lock(&priv->stats_lock);
2686         priv->channels_active = false;
2687         write_unlock(&priv->stats_lock);
2688         mlx5e_deactivate_channels(&priv->channels);
2689 }
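The write_lock()/write_unlock() pairs around channels_active in the two functions above form the writer side of the statistics rwlock this patch introduces: stats readers are expected to take the lock for reading and skip per-channel counters once channels_active is false, since the channel structures may be freed afterwards. A minimal reader-side sketch under that assumption; the helper name and the exact counter field are illustrative only (the real readers live in the stats code, not in this file):

/* Hypothetical reader-side sketch, not part of this patch */
static u64 mlx5e_read_rx_packets_sketch(struct mlx5e_priv *priv)
{
	u64 packets = 0;
	int i;

	read_lock(&priv->stats_lock);
	if (!priv->channels_active)
		goto out;	/* channels may be torn down once this is false */

	for (i = 0; i < priv->channels.num; i++)
		packets += priv->channels.c[i]->rq.stats.packets;	/* field shown schematically */
out:
	read_unlock(&priv->stats_lock);
	return packets;
}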
2690
2691 void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
2692                                 struct mlx5e_channels *new_chs,
2693                                 mlx5e_fp_hw_modify hw_modify)
2694 {
2695         struct net_device *netdev = priv->netdev;
2696         int new_num_txqs;
2697         int carrier_ok;
2698         new_num_txqs = new_chs->num * new_chs->params.num_tc;
2699
2700         carrier_ok = netif_carrier_ok(netdev);
2701         netif_carrier_off(netdev);
2702
2703         if (new_num_txqs < netdev->real_num_tx_queues)
2704                 netif_set_real_num_tx_queues(netdev, new_num_txqs);
2705
2706         mlx5e_deactivate_priv_channels(priv);
2707         mlx5e_close_channels(&priv->channels);
2708
2709         priv->channels = *new_chs;
2710
2711         /* New channels are ready to roll, modify HW settings if needed */
2712         if (hw_modify)
2713                 hw_modify(priv);
2714
2715         mlx5e_refresh_tirs(priv, false);
2716         mlx5e_activate_priv_channels(priv);
2717
2718         /* restore carrier state if needed */
2719         if (carrier_ok)
2720                 netif_carrier_on(netdev);
2721 }
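mlx5e_switch_priv_channels() assumes the caller, holding priv->state_lock, has already opened the replacement channel set; on success the old set is torn down and the new one activated, while on an open failure the old channels simply keep running. A rough sketch of that caller flow under those assumptions (the local helper and its parameter names are illustrative):

/* Illustrative caller flow only */
static int mlx5e_switch_channels_sketch(struct mlx5e_priv *priv,
					struct mlx5e_params *new_params)
{
	struct mlx5e_channels new_chs = {};
	int err;

	new_chs.params = *new_params;
	err = mlx5e_open_channels(priv, &new_chs);
	if (err)
		return err;	/* existing channels stay active on failure */

	/* Tears down the old channel set and activates the new one */
	mlx5e_switch_priv_channels(priv, &new_chs, NULL);
	return 0;
}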
2722
2723 void mlx5e_timestamp_init(struct mlx5e_priv *priv)
2724 {
2725         priv->tstamp.tx_type   = HWTSTAMP_TX_OFF;
2726         priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
2727 }
2728
2729 int mlx5e_open_locked(struct net_device *netdev)
2730 {
2731         struct mlx5e_priv *priv = netdev_priv(netdev);
2732         int err;
2733
2734         set_bit(MLX5E_STATE_OPENED, &priv->state);
2735
2736         err = mlx5e_open_channels(priv, &priv->channels);
2737         if (err)
2738                 goto err_clear_state_opened_flag;
2739
2740         mlx5e_refresh_tirs(priv, false);
2741         mlx5e_activate_priv_channels(priv);
2742         if (priv->profile->update_carrier)
2743                 priv->profile->update_carrier(priv);
2744
2745         if (priv->profile->update_stats)
2746                 queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
2747
2748         return 0;
2749
2750 err_clear_state_opened_flag:
2751         clear_bit(MLX5E_STATE_OPENED, &priv->state);
2752         return err;
2753 }
2754
2755 int mlx5e_open(struct net_device *netdev)
2756 {
2757         struct mlx5e_priv *priv = netdev_priv(netdev);
2758         int err;
2759
2760         mutex_lock(&priv->state_lock);
2761         err = mlx5e_open_locked(netdev);
2762         if (!err)
2763                 mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
2764         mutex_unlock(&priv->state_lock);
2765
2766         if (mlx5e_vxlan_allowed(priv->mdev))
2767                 udp_tunnel_get_rx_info(netdev);
2768
2769         return err;
2770 }
2771
2772 int mlx5e_close_locked(struct net_device *netdev)
2773 {
2774         struct mlx5e_priv *priv = netdev_priv(netdev);
2775
2776         /* May already be CLOSED if a previous configuration operation
2777          * (e.g. RX/TX queue size change) that involves close & open failed.
2778          */
2779         if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
2780                 return 0;
2781
2782         clear_bit(MLX5E_STATE_OPENED, &priv->state);
2783
2784         netif_carrier_off(priv->netdev);
2785         mlx5e_deactivate_priv_channels(priv);
2786         mlx5e_close_channels(&priv->channels);
2787
2788         return 0;
2789 }
2790
2791 int mlx5e_close(struct net_device *netdev)
2792 {
2793         struct mlx5e_priv *priv = netdev_priv(netdev);
2794         int err;
2795
2796         if (!netif_device_present(netdev))
2797                 return -ENODEV;
2798
2799         mutex_lock(&priv->state_lock);
2800         mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN);
2801         err = mlx5e_close_locked(netdev);
2802         mutex_unlock(&priv->state_lock);
2803
2804         return err;
2805 }
2806
2807 static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
2808                                struct mlx5e_rq *rq,
2809                                struct mlx5e_rq_param *param)
2810 {
2811         void *rqc = param->rqc;
2812         void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
2813         int err;
2814
2815         param->wq.db_numa_node = param->wq.buf_numa_node;
2816
2817         err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
2818                                 &rq->wq_ctrl);
2819         if (err)
2820                 return err;
2821
2822         /* Mark as unused given "Drop-RQ" packets never reach XDP */
2823         xdp_rxq_info_unused(&rq->xdp_rxq);
2824
2825         rq->mdev = mdev;
2826
2827         return 0;
2828 }
2829
2830 static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
2831                                struct mlx5e_cq *cq,
2832                                struct mlx5e_cq_param *param)
2833 {
2834         param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
2835         param->wq.db_numa_node  = dev_to_node(&mdev->pdev->dev);
2836
2837         return mlx5e_alloc_cq_common(mdev, param, cq);
2838 }
2839
2840 static int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
2841                               struct mlx5e_rq *drop_rq)
2842 {
2843         struct mlx5_core_dev *mdev = priv->mdev;
2844         struct mlx5e_cq_param cq_param = {};
2845         struct mlx5e_rq_param rq_param = {};
2846         struct mlx5e_cq *cq = &drop_rq->cq;
2847         int err;
2848
2849         mlx5e_build_drop_rq_param(priv, &rq_param);
2850
2851         err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
2852         if (err)
2853                 return err;
2854
2855         err = mlx5e_create_cq(cq, &cq_param);
2856         if (err)
2857                 goto err_free_cq;
2858
2859         err = mlx5e_alloc_drop_rq(mdev, drop_rq, &rq_param);
2860         if (err)
2861                 goto err_destroy_cq;
2862
2863         err = mlx5e_create_rq(drop_rq, &rq_param);
2864         if (err)
2865                 goto err_free_rq;
2866
2867         err = mlx5e_modify_rq_state(drop_rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
2868         if (err)
2869                 mlx5_core_warn(priv->mdev, "modify_rq_state failed, rx_if_down_packets won't be counted %d\n", err);
2870
2871         return 0;
2872
2873 err_free_rq:
2874         mlx5e_free_rq(drop_rq);
2875
2876 err_destroy_cq:
2877         mlx5e_destroy_cq(cq);
2878
2879 err_free_cq:
2880         mlx5e_free_cq(cq);
2881
2882         return err;
2883 }
2884
2885 static void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
2886 {
2887         mlx5e_destroy_rq(drop_rq);
2888         mlx5e_free_rq(drop_rq);
2889         mlx5e_destroy_cq(&drop_rq->cq);
2890         mlx5e_free_cq(&drop_rq->cq);
2891 }
2892
2893 int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
2894                      u32 underlay_qpn, u32 *tisn)
2895 {
2896         u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
2897         void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
2898
2899         MLX5_SET(tisc, tisc, prio, tc << 1);
2900         MLX5_SET(tisc, tisc, underlay_qpn, underlay_qpn);
2901         MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);
2902
2903         if (mlx5_lag_is_lacp_owner(mdev))
2904                 MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
2905
2906         return mlx5_core_create_tis(mdev, in, sizeof(in), tisn);
2907 }
2908
2909 void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
2910 {
2911         mlx5_core_destroy_tis(mdev, tisn);
2912 }
2913
2914 int mlx5e_create_tises(struct mlx5e_priv *priv)
2915 {
2916         int err;
2917         int tc;
2918
2919         for (tc = 0; tc < priv->profile->max_tc; tc++) {
2920                 err = mlx5e_create_tis(priv->mdev, tc, 0, &priv->tisn[tc]);
2921                 if (err)
2922                         goto err_close_tises;
2923         }
2924
2925         return 0;
2926
2927 err_close_tises:
2928         for (tc--; tc >= 0; tc--)
2929                 mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
2930
2931         return err;
2932 }
2933
2934 void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
2935 {
2936         int tc;
2937
2938         for (tc = 0; tc < priv->profile->max_tc; tc++)
2939                 mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
2940 }
2941
2942 static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv,
2943                                       enum mlx5e_traffic_types tt,
2944                                       u32 *tirc)
2945 {
2946         MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
2947
2948         mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
2949
2950         MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
2951         MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
2952         mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false);
2953 }
2954
2955 static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc)
2956 {
2957         MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
2958
2959         mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
2960
2961         MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
2962         MLX5_SET(tirc, tirc, indirect_table, rqtn);
2963         MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
2964 }
2965
2966 int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
2967 {
2968         struct mlx5e_tir *tir;
2969         void *tirc;
2970         int inlen;
2971         int i = 0;
2972         int err;
2973         u32 *in;
2974         int tt;
2975
2976         inlen = MLX5_ST_SZ_BYTES(create_tir_in);
2977         in = kvzalloc(inlen, GFP_KERNEL);
2978         if (!in)
2979                 return -ENOMEM;
2980
2981         for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
2982                 memset(in, 0, inlen);
2983                 tir = &priv->indir_tir[tt];
2984                 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
2985                 mlx5e_build_indir_tir_ctx(priv, tt, tirc);
2986                 err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
2987                 if (err) {
2988                         mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err);
2989                         goto err_destroy_inner_tirs;
2990                 }
2991         }
2992
2993         if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
2994                 goto out;
2995
2996         for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) {
2997                 memset(in, 0, inlen);
2998                 tir = &priv->inner_indir_tir[i];
2999                 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
3000                 mlx5e_build_inner_indir_tir_ctx(priv, i, tirc);
3001                 err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
3002                 if (err) {
3003                         mlx5_core_warn(priv->mdev, "create inner indirect tirs failed, %d\n", err);
3004                         goto err_destroy_inner_tirs;
3005                 }
3006         }
3007
3008 out:
3009         kvfree(in);
3010
3011         return 0;
3012
3013 err_destroy_inner_tirs:
3014         for (i--; i >= 0; i--)
3015                 mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
3016
3017         for (tt--; tt >= 0; tt--)
3018                 mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);
3019
3020         kvfree(in);
3021
3022         return err;
3023 }
3024
3025 int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
3026 {
3027         int nch = priv->profile->max_nch(priv->mdev);
3028         struct mlx5e_tir *tir;
3029         void *tirc;
3030         int inlen;
3031         int err;
3032         u32 *in;
3033         int ix;
3034
3035         inlen = MLX5_ST_SZ_BYTES(create_tir_in);
3036         in = kvzalloc(inlen, GFP_KERNEL);
3037         if (!in)
3038                 return -ENOMEM;
3039
3040         for (ix = 0; ix < nch; ix++) {
3041                 memset(in, 0, inlen);
3042                 tir = &priv->direct_tir[ix];
3043                 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
3044                 mlx5e_build_direct_tir_ctx(priv, priv->direct_tir[ix].rqt.rqtn, tirc);
3045                 err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
3046                 if (err)
3047                         goto err_destroy_ch_tirs;
3048         }
3049
3050         kvfree(in);
3051
3052         return 0;
3053
3054 err_destroy_ch_tirs:
3055         mlx5_core_warn(priv->mdev, "create direct tirs failed, %d\n", err);
3056         for (ix--; ix >= 0; ix--)
3057                 mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]);
3058
3059         kvfree(in);
3060
3061         return err;
3062 }
3063
3064 void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
3065 {
3066         int i;
3067
3068         for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
3069                 mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
3070
3071         if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
3072                 return;
3073
3074         for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
3075                 mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
3076 }
3077
3078 void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
3079 {
3080         int nch = priv->profile->max_nch(priv->mdev);
3081         int i;
3082
3083         for (i = 0; i < nch; i++)
3084                 mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]);
3085 }
3086
3087 static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable)
3088 {
3089         int err = 0;
3090         int i;
3091
3092         for (i = 0; i < chs->num; i++) {
3093                 err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable);
3094                 if (err)
3095                         return err;
3096         }
3097
3098         return 0;
3099 }