drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1 /*
2  * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32
33 #include <net/tc_act/tc_gact.h>
34 #include <net/pkt_cls.h>
35 #include <linux/mlx5/fs.h>
36 #include <net/vxlan.h>
37 #include <linux/bpf.h>
38 #include <net/page_pool.h>
39 #include "eswitch.h"
40 #include "en.h"
41 #include "en_tc.h"
42 #include "en_rep.h"
43 #include "en_accel/ipsec.h"
44 #include "en_accel/ipsec_rxtx.h"
45 #include "en_accel/tls.h"
46 #include "accel/ipsec.h"
47 #include "accel/tls.h"
48 #include "vxlan.h"
49 #include "en/port.h"
50
51 struct mlx5e_rq_param {
52         u32                     rqc[MLX5_ST_SZ_DW(rqc)];
53         struct mlx5_wq_param    wq;
54         struct mlx5e_rq_frags_info frags_info;
55 };
56
57 struct mlx5e_sq_param {
58         u32                        sqc[MLX5_ST_SZ_DW(sqc)];
59         struct mlx5_wq_param       wq;
60 };
61
62 struct mlx5e_cq_param {
63         u32                        cqc[MLX5_ST_SZ_DW(cqc)];
64         struct mlx5_wq_param       wq;
65         u16                        eq_ix;
66         u8                         cq_period_mode;
67 };
68
69 struct mlx5e_channel_param {
70         struct mlx5e_rq_param      rq;
71         struct mlx5e_sq_param      sq;
72         struct mlx5e_sq_param      xdp_sq;
73         struct mlx5e_sq_param      icosq;
74         struct mlx5e_cq_param      rx_cq;
75         struct mlx5e_cq_param      tx_cq;
76         struct mlx5e_cq_param      icosq_cq;
77 };
78
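/* Striding RQ requires both the striding_rq/UMR device capabilities and a UMR
 * WQE that fits inline in a single send WQE; warn and bail out if the inline
 * UMR WQE would exceed the device's max_wqe_sz_sq.
 */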
79 bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
80 {
81         bool striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) &&
82                 MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
83                 MLX5_CAP_ETH(mdev, reg_umr_sq);
84         u16 max_wqe_sz_cap = MLX5_CAP_GEN(mdev, max_wqe_sz_sq);
85         bool inline_umr = MLX5E_UMR_WQE_INLINE_SZ <= max_wqe_sz_cap;
86
87         if (!striding_rq_umr)
88                 return false;
89         if (!inline_umr) {
90                 mlx5_core_warn(mdev, "Cannot support Striding RQ: UMR WQE size (%d) exceeds maximum supported (%d).\n",
91                                (int)MLX5E_UMR_WQE_INLINE_SZ, max_wqe_sz_cap);
92                 return false;
93         }
94         return true;
95 }
96
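/* Fragment size needed to build a linear SKB: headroom plus HW MTU rounded up
 * to an SKB fragment, or a full page when an XDP program is attached.
 */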
97 static u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params)
98 {
99         if (!params->xdp_prog) {
100                 u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
101                 u16 rq_headroom = MLX5_RX_HEADROOM + NET_IP_ALIGN;
102
103                 return MLX5_SKB_FRAG_SZ(rq_headroom + hw_mtu);
104         }
105
106         return PAGE_SIZE;
107 }
108
109 static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params)
110 {
111         u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params);
112
113         return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
114 }
115
116 static bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
117                                    struct mlx5e_params *params)
118 {
119         u32 frag_sz = mlx5e_rx_get_linear_frag_sz(params);
120
121         return !params->lro_en && frag_sz <= PAGE_SIZE;
122 }
123
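/* A linear SKB can be built from an MPWQE stride only if the linear fragment
 * fits in a page and the resulting number of strides per WQE is within the
 * range supported by the device.
 */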
124 static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
125                                          struct mlx5e_params *params)
126 {
127         u32 frag_sz = mlx5e_rx_get_linear_frag_sz(params);
128         s8 signed_log_num_strides_param;
129         u8 log_num_strides;
130
131         if (!mlx5e_rx_is_linear_skb(mdev, params))
132                 return false;
133
134         if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
135                 return true;
136
137         log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(frag_sz);
138         signed_log_num_strides_param =
139                 (s8)log_num_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE;
140
141         return signed_log_num_strides_param >= 0;
142 }
143
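/* Log number of MPW WQEs in the RQ, derived from the requested log number of
 * MTU-sized frames and clamped to the minimum MPW RQ size.
 */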
144 static u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params)
145 {
146         if (params->log_rq_mtu_frames <
147             mlx5e_mpwqe_log_pkts_per_wqe(params) + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW)
148                 return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
149
150         return params->log_rq_mtu_frames - mlx5e_mpwqe_log_pkts_per_wqe(params);
151 }
152
153 static u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
154                                           struct mlx5e_params *params)
155 {
156         if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
157                 return order_base_2(mlx5e_rx_get_linear_frag_sz(params));
158
159         return MLX5E_MPWQE_STRIDE_SZ(mdev,
160                 MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
161 }
162
163 static u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
164                                           struct mlx5e_params *params)
165 {
166         return MLX5_MPWRQ_LOG_WQE_SZ -
167                 mlx5e_mpwqe_get_log_stride_size(mdev, params);
168 }
169
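/* RX buffer headroom: XDP_PACKET_HEADROOM when an XDP program is attached,
 * otherwise MLX5_RX_HEADROOM, plus NET_IP_ALIGN. Headroom is used only for
 * linear SKB layouts; non-linear RQs get none.
 */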
170 static u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
171                                  struct mlx5e_params *params)
172 {
173         u16 linear_rq_headroom = params->xdp_prog ?
174                 XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
175         bool is_linear_skb;
176
177         linear_rq_headroom += NET_IP_ALIGN;
178
179         is_linear_skb = (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC) ?
180                 mlx5e_rx_is_linear_skb(mdev, params) :
181                 mlx5e_rx_mpwqe_is_linear_skb(mdev, params);
182
183         return is_linear_skb ? linear_rq_headroom : 0;
184 }
185
186 void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
187                                struct mlx5e_params *params)
188 {
189         params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
190         params->log_rq_mtu_frames = is_kdump_kernel() ?
191                 MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
192                 MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
193
194         mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
195                        params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
196                        params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
197                        BIT(mlx5e_mpwqe_get_log_rq_size(params)) :
198                        BIT(params->log_rq_mtu_frames),
199                        BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params)),
200                        MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
201 }
202
203 bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
204                                 struct mlx5e_params *params)
205 {
206         return mlx5e_check_fragmented_striding_rq_cap(mdev) &&
207                 !MLX5_IPSEC_DEV(mdev) &&
208                 !(params->xdp_prog && !mlx5e_rx_mpwqe_is_linear_skb(mdev, params));
209 }
210
211 void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
212 {
213         params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) &&
214                 MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
215                 MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
216                 MLX5_WQ_TYPE_CYCLIC;
217 }
218
219 static void mlx5e_update_carrier(struct mlx5e_priv *priv)
220 {
221         struct mlx5_core_dev *mdev = priv->mdev;
222         u8 port_state;
223
224         port_state = mlx5_query_vport_state(mdev,
225                                             MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT,
226                                             0);
227
228         if (port_state == VPORT_STATE_UP) {
229                 netdev_info(priv->netdev, "Link up\n");
230                 netif_carrier_on(priv->netdev);
231         } else {
232                 netdev_info(priv->netdev, "Link down\n");
233                 netif_carrier_off(priv->netdev);
234         }
235 }
236
237 static void mlx5e_update_carrier_work(struct work_struct *work)
238 {
239         struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
240                                                update_carrier_work);
241
242         mutex_lock(&priv->state_lock);
243         if (test_bit(MLX5E_STATE_OPENED, &priv->state))
244                 if (priv->profile->update_carrier)
245                         priv->profile->update_carrier(priv);
246         mutex_unlock(&priv->state_lock);
247 }
248
249 void mlx5e_update_stats(struct mlx5e_priv *priv)
250 {
251         int i;
252
253         for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
254                 if (mlx5e_stats_grps[i].update_stats)
255                         mlx5e_stats_grps[i].update_stats(priv);
256 }
257
258 static void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
259 {
260         int i;
261
262         for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
263                 if (mlx5e_stats_grps[i].update_stats_mask &
264                     MLX5E_NDO_UPDATE_STATS)
265                         mlx5e_stats_grps[i].update_stats(priv);
266 }
267
268 void mlx5e_update_stats_work(struct work_struct *work)
269 {
270         struct delayed_work *dwork = to_delayed_work(work);
271         struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
272                                                update_stats_work);
273         mutex_lock(&priv->state_lock);
274         if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
275                 priv->profile->update_stats(priv);
276                 queue_delayed_work(priv->wq, dwork,
277                                    msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
278         }
279         mutex_unlock(&priv->state_lock);
280 }
281
282 static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
283                               enum mlx5_dev_event event, unsigned long param)
284 {
285         struct mlx5e_priv *priv = vpriv;
286
287         if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
288                 return;
289
290         switch (event) {
291         case MLX5_DEV_EVENT_PORT_UP:
292         case MLX5_DEV_EVENT_PORT_DOWN:
293                 queue_work(priv->wq, &priv->update_carrier_work);
294                 break;
295         default:
296                 break;
297         }
298 }
299
300 static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
301 {
302         set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
303 }
304
305 static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
306 {
307         clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
308         synchronize_irq(pci_irq_vector(priv->mdev->pdev, MLX5_EQ_VEC_ASYNC));
309 }
310
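/* Pre-build the constant parts of the UMR WQE (control and UMR control
 * segments) that the ICO SQ posts to map the pages of an MPW RQ WQE.
 */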
311 static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
312                                        struct mlx5e_icosq *sq,
313                                        struct mlx5e_umr_wqe *wqe)
314 {
315         struct mlx5_wqe_ctrl_seg      *cseg = &wqe->ctrl;
316         struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
317         u8 ds_cnt = DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_DS);
318
319         cseg->qpn_ds    = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
320                                       ds_cnt);
321         cseg->fm_ce_se  = MLX5_WQE_CTRL_CQ_UPDATE;
322         cseg->imm       = rq->mkey_be;
323
324         ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
325         ucseg->xlt_octowords =
326                 cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
327         ucseg->mkey_mask     = cpu_to_be64(MLX5_MKEY_MASK_FREE);
328 }
329
330 static u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq)
331 {
332         switch (rq->wq_type) {
333         case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
334                 return mlx5_wq_ll_get_size(&rq->mpwqe.wq);
335         default:
336                 return mlx5_wq_cyc_get_size(&rq->wqe.wq);
337         }
338 }
339
340 static u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq)
341 {
342         switch (rq->wq_type) {
343         case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
344                 return rq->mpwqe.wq.cur_sz;
345         default:
346                 return rq->wqe.wq.cur_sz;
347         }
348 }
349
350 static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
351                                      struct mlx5e_channel *c)
352 {
353         int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
354
355         rq->mpwqe.info = kcalloc_node(wq_sz, sizeof(*rq->mpwqe.info),
356                                       GFP_KERNEL, cpu_to_node(c->cpu));
357         if (!rq->mpwqe.info)
358                 return -ENOMEM;
359
360         mlx5e_build_umr_wqe(rq, &c->icosq, &rq->mpwqe.umr_wqe);
361
362         return 0;
363 }
364
365 static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
366                                  u64 npages, u8 page_shift,
367                                  struct mlx5_core_mkey *umr_mkey)
368 {
369         int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
370         void *mkc;
371         u32 *in;
372         int err;
373
374         in = kvzalloc(inlen, GFP_KERNEL);
375         if (!in)
376                 return -ENOMEM;
377
378         mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
379
380         MLX5_SET(mkc, mkc, free, 1);
381         MLX5_SET(mkc, mkc, umr_en, 1);
382         MLX5_SET(mkc, mkc, lw, 1);
383         MLX5_SET(mkc, mkc, lr, 1);
384         MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
385
386         MLX5_SET(mkc, mkc, qpn, 0xffffff);
387         MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
388         MLX5_SET64(mkc, mkc, len, npages << page_shift);
389         MLX5_SET(mkc, mkc, translations_octword_size,
390                  MLX5_MTT_OCTW(npages));
391         MLX5_SET(mkc, mkc, log_page_size, page_shift);
392
393         err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
394
395         kvfree(in);
396         return err;
397 }
398
399 static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
400 {
401         u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->mpwqe.wq));
402
403         return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
404 }
405
406 static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix)
407 {
408         return (wqe_ix << MLX5E_LOG_ALIGNED_MPWQE_PPW) << PAGE_SHIFT;
409 }
410
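/* Lay out the per-WQE fragments over the DMA info (page) array: fragments are
 * packed into a page until the next stride would overflow it, at which point
 * we advance to the next page and mark the previous fragment as last_in_page.
 */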
411 static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
412 {
413         struct mlx5e_wqe_frag_info next_frag, *prev;
414         int i;
415
416         next_frag.di = &rq->wqe.di[0];
417         next_frag.offset = 0;
418         prev = NULL;
419
420         for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
421                 struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
422                 struct mlx5e_wqe_frag_info *frag =
423                         &rq->wqe.frags[i << rq->wqe.info.log_num_frags];
424                 int f;
425
426                 for (f = 0; f < rq->wqe.info.num_frags; f++, frag++) {
427                         if (next_frag.offset + frag_info[f].frag_stride > PAGE_SIZE) {
428                                 next_frag.di++;
429                                 next_frag.offset = 0;
430                                 if (prev)
431                                         prev->last_in_page = true;
432                         }
433                         *frag = next_frag;
434
435                         /* prepare next */
436                         next_frag.offset += frag_info[f].frag_stride;
437                         prev = frag;
438                 }
439         }
440
441         if (prev)
442                 prev->last_in_page = true;
443 }
444
445 static int mlx5e_init_di_list(struct mlx5e_rq *rq,
446                               struct mlx5e_params *params,
447                               int wq_sz, int cpu)
448 {
449         int len = wq_sz << rq->wqe.info.log_num_frags;
450
451         rq->wqe.di = kvzalloc_node(array_size(len, sizeof(*rq->wqe.di)),
452                                    GFP_KERNEL, cpu_to_node(cpu));
453         if (!rq->wqe.di)
454                 return -ENOMEM;
455
456         mlx5e_init_frags_partition(rq);
457
458         return 0;
459 }
460
461 static void mlx5e_free_di_list(struct mlx5e_rq *rq)
462 {
463         kvfree(rq->wqe.di);
464 }
465
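/* Software-side RQ setup: copy channel/params state, register the XDP RX queue
 * info, create the HW work queue (striding or cyclic), allocate per-WQE
 * bookkeeping, create the page_pool and pre-initialize the WQE data segments.
 */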
466 static int mlx5e_alloc_rq(struct mlx5e_channel *c,
467                           struct mlx5e_params *params,
468                           struct mlx5e_rq_param *rqp,
469                           struct mlx5e_rq *rq)
470 {
471         struct page_pool_params pp_params = { 0 };
472         struct mlx5_core_dev *mdev = c->mdev;
473         void *rqc = rqp->rqc;
474         void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
475         u32 pool_size;
476         int wq_sz;
477         int err;
478         int i;
479
480         rqp->wq.db_numa_node = cpu_to_node(c->cpu);
481
482         rq->wq_type = params->rq_wq_type;
483         rq->pdev    = c->pdev;
484         rq->netdev  = c->netdev;
485         rq->tstamp  = c->tstamp;
486         rq->clock   = &mdev->clock;
487         rq->channel = c;
488         rq->ix      = c->ix;
489         rq->mdev    = mdev;
490         rq->hw_mtu  = MLX5E_SW2HW_MTU(params, params->sw_mtu);
491         rq->stats   = &c->priv->channel_stats[c->ix].rq;
492
493         rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
494         if (IS_ERR(rq->xdp_prog)) {
495                 err = PTR_ERR(rq->xdp_prog);
496                 rq->xdp_prog = NULL;
497                 goto err_rq_wq_destroy;
498         }
499
500         err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix);
501         if (err < 0)
502                 goto err_rq_wq_destroy;
503
504         rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
505         rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params);
506         pool_size = 1 << params->log_rq_mtu_frames;
507
508         switch (rq->wq_type) {
509         case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
510                 err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
511                                         &rq->wq_ctrl);
512                 if (err)
513                         return err;
514
515                 rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR];
516
517                 wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
518
519                 pool_size = MLX5_MPWRQ_PAGES_PER_WQE << mlx5e_mpwqe_get_log_rq_size(params);
520
521                 rq->post_wqes = mlx5e_post_rx_mpwqes;
522                 rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
523
524                 rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe_mpwqe;
525 #ifdef CONFIG_MLX5_EN_IPSEC
526                 if (MLX5_IPSEC_DEV(mdev)) {
527                         err = -EINVAL;
528                         netdev_err(c->netdev, "MPWQE RQ with IPSec offload not supported\n");
529                         goto err_rq_wq_destroy;
530                 }
531 #endif
532                 if (!rq->handle_rx_cqe) {
533                         err = -EINVAL;
534                         netdev_err(c->netdev, "RX handler of MPWQE RQ is not set, err %d\n", err);
535                         goto err_rq_wq_destroy;
536                 }
537
538                 rq->mpwqe.skb_from_cqe_mpwrq =
539                         mlx5e_rx_mpwqe_is_linear_skb(mdev, params) ?
540                         mlx5e_skb_from_cqe_mpwrq_linear :
541                         mlx5e_skb_from_cqe_mpwrq_nonlinear;
542                 rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params);
543                 rq->mpwqe.num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params));
544
545                 err = mlx5e_create_rq_umr_mkey(mdev, rq);
546                 if (err)
547                         goto err_rq_wq_destroy;
548                 rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);
549
550                 err = mlx5e_rq_alloc_mpwqe_info(rq, c);
551                 if (err)
552                         goto err_free;
553                 break;
554         default: /* MLX5_WQ_TYPE_CYCLIC */
555                 err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
556                                          &rq->wq_ctrl);
557                 if (err)
558                         return err;
559
560                 rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
561
562                 wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);
563
564                 rq->wqe.info = rqp->frags_info;
565                 rq->wqe.frags =
566                         kvzalloc_node(array_size(sizeof(*rq->wqe.frags),
567                                         (wq_sz << rq->wqe.info.log_num_frags)),
568                                       GFP_KERNEL, cpu_to_node(c->cpu));
569                 if (!rq->wqe.frags) {
570                         err = -ENOMEM;
571                         goto err_free;
572                 }
573
574                 err = mlx5e_init_di_list(rq, params, wq_sz, c->cpu);
575                 if (err)
576                         goto err_free;
577                 rq->post_wqes = mlx5e_post_rx_wqes;
578                 rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
579
580 #ifdef CONFIG_MLX5_EN_IPSEC
581                 if (c->priv->ipsec)
582                         rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
583                 else
584 #endif
585                         rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe;
586                 if (!rq->handle_rx_cqe) {
587                         err = -EINVAL;
588                         netdev_err(c->netdev, "RX handler of RQ is not set, err %d\n", err);
589                         goto err_free;
590                 }
591
592                 rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(mdev, params) ?
593                         mlx5e_skb_from_cqe_linear :
594                         mlx5e_skb_from_cqe_nonlinear;
595                 rq->mkey_be = c->mkey_be;
596         }
597
598         /* Create a page_pool and register it with rxq */
599         pp_params.order     = 0;
600         pp_params.flags     = 0; /* No internal DMA mapping in page_pool */
601         pp_params.pool_size = pool_size;
602         pp_params.nid       = cpu_to_node(c->cpu);
603         pp_params.dev       = c->pdev;
604         pp_params.dma_dir   = rq->buff.map_dir;
605
606         /* page_pool can be used even when there is no rq->xdp_prog;
607          * since page_pool does not handle DMA mapping, there is no
608          * required state to clear. And page_pool gracefully handles
609          * elevated refcnt.
610          */
611         rq->page_pool = page_pool_create(&pp_params);
612         if (IS_ERR(rq->page_pool)) {
613                 err = PTR_ERR(rq->page_pool);
614                 rq->page_pool = NULL;
615                 goto err_free;
616         }
617         err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
618                                          MEM_TYPE_PAGE_POOL, rq->page_pool);
619         if (err)
620                 goto err_free;
621
622         for (i = 0; i < wq_sz; i++) {
623                 if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
624                         struct mlx5e_rx_wqe_ll *wqe =
625                                 mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i);
626                         u32 byte_count =
627                                 rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
628                         u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i);
629
630                         wqe->data[0].addr = cpu_to_be64(dma_offset + rq->buff.headroom);
631                         wqe->data[0].byte_count = cpu_to_be32(byte_count);
632                         wqe->data[0].lkey = rq->mkey_be;
633                 } else {
634                         struct mlx5e_rx_wqe_cyc *wqe =
635                                 mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i);
636                         int f;
637
638                         for (f = 0; f < rq->wqe.info.num_frags; f++) {
639                                 u32 frag_size = rq->wqe.info.arr[f].frag_size |
640                                         MLX5_HW_START_PADDING;
641
642                                 wqe->data[f].byte_count = cpu_to_be32(frag_size);
643                                 wqe->data[f].lkey = rq->mkey_be;
644                         }
645                         /* check if num_frags is not a power of two */
646                         if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) {
647                                 wqe->data[f].byte_count = 0;
648                                 wqe->data[f].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
649                                 wqe->data[f].addr = 0;
650                         }
651                 }
652         }
653
654         INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work);
655
656         switch (params->rx_cq_moderation.cq_period_mode) {
657         case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
658                 rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE;
659                 break;
660         case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
661         default:
662                 rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
663         }
664
665         rq->page_cache.head = 0;
666         rq->page_cache.tail = 0;
667
668         return 0;
669
670 err_free:
671         switch (rq->wq_type) {
672         case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
673                 kfree(rq->mpwqe.info);
674                 mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);
675                 break;
676         default: /* MLX5_WQ_TYPE_CYCLIC */
677                 kvfree(rq->wqe.frags);
678                 mlx5e_free_di_list(rq);
679         }
680
681 err_rq_wq_destroy:
682         if (rq->xdp_prog)
683                 bpf_prog_put(rq->xdp_prog);
684         xdp_rxq_info_unreg(&rq->xdp_rxq);
685         if (rq->page_pool)
686                 page_pool_destroy(rq->page_pool);
687         mlx5_wq_destroy(&rq->wq_ctrl);
688
689         return err;
690 }
691
692 static void mlx5e_free_rq(struct mlx5e_rq *rq)
693 {
694         int i;
695
696         if (rq->xdp_prog)
697                 bpf_prog_put(rq->xdp_prog);
698
699         xdp_rxq_info_unreg(&rq->xdp_rxq);
700         if (rq->page_pool)
701                 page_pool_destroy(rq->page_pool);
702
703         switch (rq->wq_type) {
704         case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
705                 kfree(rq->mpwqe.info);
706                 mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
707                 break;
708         default: /* MLX5_WQ_TYPE_CYCLIC */
709                 kvfree(rq->wqe.frags);
710                 mlx5e_free_di_list(rq);
711         }
712
713         for (i = rq->page_cache.head; i != rq->page_cache.tail;
714              i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
715                 struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];
716
717                 mlx5e_page_release(rq, dma_info, false);
718         }
719         mlx5_wq_destroy(&rq->wq_ctrl);
720 }
721
722 static int mlx5e_create_rq(struct mlx5e_rq *rq,
723                            struct mlx5e_rq_param *param)
724 {
725         struct mlx5_core_dev *mdev = rq->mdev;
726
727         void *in;
728         void *rqc;
729         void *wq;
730         int inlen;
731         int err;
732
733         inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
734                 sizeof(u64) * rq->wq_ctrl.buf.npages;
735         in = kvzalloc(inlen, GFP_KERNEL);
736         if (!in)
737                 return -ENOMEM;
738
739         rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
740         wq  = MLX5_ADDR_OF(rqc, rqc, wq);
741
742         memcpy(rqc, param->rqc, sizeof(param->rqc));
743
744         MLX5_SET(rqc,  rqc, cqn,                rq->cq.mcq.cqn);
745         MLX5_SET(rqc,  rqc, state,              MLX5_RQC_STATE_RST);
746         MLX5_SET(wq,   wq,  log_wq_pg_sz,       rq->wq_ctrl.buf.page_shift -
747                                                 MLX5_ADAPTER_PAGE_SHIFT);
748         MLX5_SET64(wq, wq,  dbr_addr,           rq->wq_ctrl.db.dma);
749
750         mlx5_fill_page_frag_array(&rq->wq_ctrl.buf,
751                                   (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
752
753         err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
754
755         kvfree(in);
756
757         return err;
758 }
759
760 static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
761                                  int next_state)
762 {
763         struct mlx5_core_dev *mdev = rq->mdev;
764
765         void *in;
766         void *rqc;
767         int inlen;
768         int err;
769
770         inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
771         in = kvzalloc(inlen, GFP_KERNEL);
772         if (!in)
773                 return -ENOMEM;
774
775         rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
776
777         MLX5_SET(modify_rq_in, in, rq_state, curr_state);
778         MLX5_SET(rqc, rqc, state, next_state);
779
780         err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
781
782         kvfree(in);
783
784         return err;
785 }
786
787 static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
788 {
789         struct mlx5e_channel *c = rq->channel;
790         struct mlx5e_priv *priv = c->priv;
791         struct mlx5_core_dev *mdev = priv->mdev;
792
793         void *in;
794         void *rqc;
795         int inlen;
796         int err;
797
798         inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
799         in = kvzalloc(inlen, GFP_KERNEL);
800         if (!in)
801                 return -ENOMEM;
802
803         rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
804
805         MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
806         MLX5_SET64(modify_rq_in, in, modify_bitmask,
807                    MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS);
808         MLX5_SET(rqc, rqc, scatter_fcs, enable);
809         MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
810
811         err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
812
813         kvfree(in);
814
815         return err;
816 }
817
818 static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
819 {
820         struct mlx5e_channel *c = rq->channel;
821         struct mlx5_core_dev *mdev = c->mdev;
822         void *in;
823         void *rqc;
824         int inlen;
825         int err;
826
827         inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
828         in = kvzalloc(inlen, GFP_KERNEL);
829         if (!in)
830                 return -ENOMEM;
831
832         rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
833
834         MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
835         MLX5_SET64(modify_rq_in, in, modify_bitmask,
836                    MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
837         MLX5_SET(rqc, rqc, vsd, vsd);
838         MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
839
840         err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
841
842         kvfree(in);
843
844         return err;
845 }
846
847 static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
848 {
849         mlx5_core_destroy_rq(rq->mdev, rq->rqn);
850 }
851
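/* Poll (for up to wait_time msecs) until the RQ has been filled with at least
 * the minimum number of RX WQEs, or time out with a warning.
 */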
852 static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
853 {
854         unsigned long exp_time = jiffies + msecs_to_jiffies(wait_time);
855         struct mlx5e_channel *c = rq->channel;
856
857         u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5e_rqwq_get_size(rq));
858
859         do {
860                 if (mlx5e_rqwq_get_cur_sz(rq) >= min_wqes)
861                         return 0;
862
863                 msleep(20);
864         } while (time_before(jiffies, exp_time));
865
866         netdev_warn(c->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
867                     c->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes);
868
869         return -ETIMEDOUT;
870 }
871
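/* Drain all outstanding RX WQEs (and any in-progress UMR) for both the
 * striding (linked-list) and cyclic WQ types.
 */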
872 static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
873 {
874         __be16 wqe_ix_be;
875         u16 wqe_ix;
876
877         if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
878                 struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
879
880                 /* UMR WQE (if in progress) is always at wq->head */
881                 if (rq->mpwqe.umr_in_progress)
882                         mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);
883
884                 while (!mlx5_wq_ll_is_empty(wq)) {
885                         struct mlx5e_rx_wqe_ll *wqe;
886
887                         wqe_ix_be = *wq->tail_next;
888                         wqe_ix    = be16_to_cpu(wqe_ix_be);
889                         wqe       = mlx5_wq_ll_get_wqe(wq, wqe_ix);
890                         rq->dealloc_wqe(rq, wqe_ix);
891                         mlx5_wq_ll_pop(wq, wqe_ix_be,
892                                        &wqe->next.next_wqe_index);
893                 }
894         } else {
895                 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
896
897                 while (!mlx5_wq_cyc_is_empty(wq)) {
898                         wqe_ix = mlx5_wq_cyc_get_tail(wq);
899                         rq->dealloc_wqe(rq, wqe_ix);
900                         mlx5_wq_cyc_pop(wq);
901                 }
902         }
903
904 }
905
906 static int mlx5e_open_rq(struct mlx5e_channel *c,
907                          struct mlx5e_params *params,
908                          struct mlx5e_rq_param *param,
909                          struct mlx5e_rq *rq)
910 {
911         int err;
912
913         err = mlx5e_alloc_rq(c, params, param, rq);
914         if (err)
915                 return err;
916
917         err = mlx5e_create_rq(rq, param);
918         if (err)
919                 goto err_free_rq;
920
921         err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
922         if (err)
923                 goto err_destroy_rq;
924
925         if (params->rx_dim_enabled)
926                 __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
927
928         return 0;
929
930 err_destroy_rq:
931         mlx5e_destroy_rq(rq);
932 err_free_rq:
933         mlx5e_free_rq(rq);
934
935         return err;
936 }
937
938 static void mlx5e_activate_rq(struct mlx5e_rq *rq)
939 {
940         struct mlx5e_icosq *sq = &rq->channel->icosq;
941         struct mlx5_wq_cyc *wq = &sq->wq;
942         struct mlx5e_tx_wqe *nopwqe;
943
944         u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
945
946         set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
947         sq->db.ico_wqe[pi].opcode     = MLX5_OPCODE_NOP;
948         nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
949         mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
950 }
951
952 static void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
953 {
954         clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
955         napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
956 }
957
958 static void mlx5e_close_rq(struct mlx5e_rq *rq)
959 {
960         cancel_work_sync(&rq->dim.work);
961         mlx5e_destroy_rq(rq);
962         mlx5e_free_rx_descs(rq);
963         mlx5e_free_rq(rq);
964 }
965
966 static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
967 {
968         kfree(sq->db.di);
969 }
970
971 static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
972 {
973         int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
974
975         sq->db.di = kcalloc_node(wq_sz, sizeof(*sq->db.di),
976                                      GFP_KERNEL, numa);
977         if (!sq->db.di) {
978                 mlx5e_free_xdpsq_db(sq);
979                 return -ENOMEM;
980         }
981
982         return 0;
983 }
984
985 static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
986                              struct mlx5e_params *params,
987                              struct mlx5e_sq_param *param,
988                              struct mlx5e_xdpsq *sq)
989 {
990         void *sqc_wq               = MLX5_ADDR_OF(sqc, param->sqc, wq);
991         struct mlx5_core_dev *mdev = c->mdev;
992         struct mlx5_wq_cyc *wq = &sq->wq;
993         int err;
994
995         sq->pdev      = c->pdev;
996         sq->mkey_be   = c->mkey_be;
997         sq->channel   = c;
998         sq->uar_map   = mdev->mlx5e_res.bfreg.map;
999         sq->min_inline_mode = params->tx_min_inline_mode;
1000
1001         param->wq.db_numa_node = cpu_to_node(c->cpu);
1002         err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
1003         if (err)
1004                 return err;
1005         wq->db = &wq->db[MLX5_SND_DBR];
1006
1007         err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
1008         if (err)
1009                 goto err_sq_wq_destroy;
1010
1011         return 0;
1012
1013 err_sq_wq_destroy:
1014         mlx5_wq_destroy(&sq->wq_ctrl);
1015
1016         return err;
1017 }
1018
1019 static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
1020 {
1021         mlx5e_free_xdpsq_db(sq);
1022         mlx5_wq_destroy(&sq->wq_ctrl);
1023 }
1024
1025 static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
1026 {
1027         kfree(sq->db.ico_wqe);
1028 }
1029
1030 static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
1031 {
1032         u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
1033
1034         sq->db.ico_wqe = kcalloc_node(wq_sz, sizeof(*sq->db.ico_wqe),
1035                                       GFP_KERNEL, numa);
1036         if (!sq->db.ico_wqe)
1037                 return -ENOMEM;
1038
1039         return 0;
1040 }
1041
1042 static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
1043                              struct mlx5e_sq_param *param,
1044                              struct mlx5e_icosq *sq)
1045 {
1046         void *sqc_wq               = MLX5_ADDR_OF(sqc, param->sqc, wq);
1047         struct mlx5_core_dev *mdev = c->mdev;
1048         struct mlx5_wq_cyc *wq = &sq->wq;
1049         int err;
1050
1051         sq->channel   = c;
1052         sq->uar_map   = mdev->mlx5e_res.bfreg.map;
1053
1054         param->wq.db_numa_node = cpu_to_node(c->cpu);
1055         err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
1056         if (err)
1057                 return err;
1058         wq->db = &wq->db[MLX5_SND_DBR];
1059
1060         err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
1061         if (err)
1062                 goto err_sq_wq_destroy;
1063
1064         return 0;
1065
1066 err_sq_wq_destroy:
1067         mlx5_wq_destroy(&sq->wq_ctrl);
1068
1069         return err;
1070 }
1071
1072 static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
1073 {
1074         mlx5e_free_icosq_db(sq);
1075         mlx5_wq_destroy(&sq->wq_ctrl);
1076 }
1077
1078 static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
1079 {
1080         kfree(sq->db.wqe_info);
1081         kfree(sq->db.dma_fifo);
1082 }
1083
1084 static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
1085 {
1086         int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
1087         int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
1088
1089         sq->db.dma_fifo = kcalloc_node(df_sz, sizeof(*sq->db.dma_fifo),
1090                                            GFP_KERNEL, numa);
1091         sq->db.wqe_info = kcalloc_node(wq_sz, sizeof(*sq->db.wqe_info),
1092                                            GFP_KERNEL, numa);
1093         if (!sq->db.dma_fifo || !sq->db.wqe_info) {
1094                 mlx5e_free_txqsq_db(sq);
1095                 return -ENOMEM;
1096         }
1097
1098         sq->dma_fifo_mask = df_sz - 1;
1099
1100         return 0;
1101 }
1102
1103 static void mlx5e_sq_recover(struct work_struct *work);
1104 static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
1105                              int txq_ix,
1106                              struct mlx5e_params *params,
1107                              struct mlx5e_sq_param *param,
1108                              struct mlx5e_txqsq *sq,
1109                              int tc)
1110 {
1111         void *sqc_wq               = MLX5_ADDR_OF(sqc, param->sqc, wq);
1112         struct mlx5_core_dev *mdev = c->mdev;
1113         struct mlx5_wq_cyc *wq = &sq->wq;
1114         int err;
1115
1116         sq->pdev      = c->pdev;
1117         sq->tstamp    = c->tstamp;
1118         sq->clock     = &mdev->clock;
1119         sq->mkey_be   = c->mkey_be;
1120         sq->channel   = c;
1121         sq->txq_ix    = txq_ix;
1122         sq->uar_map   = mdev->mlx5e_res.bfreg.map;
1123         sq->min_inline_mode = params->tx_min_inline_mode;
1124         sq->stats     = &c->priv->channel_stats[c->ix].sq[tc];
1125         INIT_WORK(&sq->recover.recover_work, mlx5e_sq_recover);
1126         if (MLX5_IPSEC_DEV(c->priv->mdev))
1127                 set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
1128         if (mlx5_accel_is_tls_device(c->priv->mdev))
1129                 set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
1130
1131         param->wq.db_numa_node = cpu_to_node(c->cpu);
1132         err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
1133         if (err)
1134                 return err;
1135         wq->db    = &wq->db[MLX5_SND_DBR];
1136
1137         err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
1138         if (err)
1139                 goto err_sq_wq_destroy;
1140
1141         INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work);
1142         sq->dim.mode = params->tx_cq_moderation.cq_period_mode;
1143
1144         return 0;
1145
1146 err_sq_wq_destroy:
1147         mlx5_wq_destroy(&sq->wq_ctrl);
1148
1149         return err;
1150 }
1151
1152 static void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
1153 {
1154         mlx5e_free_txqsq_db(sq);
1155         mlx5_wq_destroy(&sq->wq_ctrl);
1156 }
1157
1158 struct mlx5e_create_sq_param {
1159         struct mlx5_wq_ctrl        *wq_ctrl;
1160         u32                         cqn;
1161         u32                         tisn;
1162         u8                          tis_lst_sz;
1163         u8                          min_inline_mode;
1164 };
1165
1166 static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
1167                            struct mlx5e_sq_param *param,
1168                            struct mlx5e_create_sq_param *csp,
1169                            u32 *sqn)
1170 {
1171         void *in;
1172         void *sqc;
1173         void *wq;
1174         int inlen;
1175         int err;
1176
1177         inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
1178                 sizeof(u64) * csp->wq_ctrl->buf.npages;
1179         in = kvzalloc(inlen, GFP_KERNEL);
1180         if (!in)
1181                 return -ENOMEM;
1182
1183         sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
1184         wq = MLX5_ADDR_OF(sqc, sqc, wq);
1185
1186         memcpy(sqc, param->sqc, sizeof(param->sqc));
1187         MLX5_SET(sqc,  sqc, tis_lst_sz, csp->tis_lst_sz);
1188         MLX5_SET(sqc,  sqc, tis_num_0, csp->tisn);
1189         MLX5_SET(sqc,  sqc, cqn, csp->cqn);
1190
1191         if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
1192                 MLX5_SET(sqc,  sqc, min_wqe_inline_mode, csp->min_inline_mode);
1193
1194         MLX5_SET(sqc,  sqc, state, MLX5_SQC_STATE_RST);
1195         MLX5_SET(sqc,  sqc, flush_in_error_en, 1);
1196
1197         MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
1198         MLX5_SET(wq,   wq, uar_page,      mdev->mlx5e_res.bfreg.index);
1199         MLX5_SET(wq,   wq, log_wq_pg_sz,  csp->wq_ctrl->buf.page_shift -
1200                                           MLX5_ADAPTER_PAGE_SHIFT);
1201         MLX5_SET64(wq, wq, dbr_addr,      csp->wq_ctrl->db.dma);
1202
1203         mlx5_fill_page_frag_array(&csp->wq_ctrl->buf,
1204                                   (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
1205
1206         err = mlx5_core_create_sq(mdev, in, inlen, sqn);
1207
1208         kvfree(in);
1209
1210         return err;
1211 }
1212
1213 struct mlx5e_modify_sq_param {
1214         int curr_state;
1215         int next_state;
1216         bool rl_update;
1217         int rl_index;
1218 };
1219
1220 static int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
1221                            struct mlx5e_modify_sq_param *p)
1222 {
1223         void *in;
1224         void *sqc;
1225         int inlen;
1226         int err;
1227
1228         inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
1229         in = kvzalloc(inlen, GFP_KERNEL);
1230         if (!in)
1231                 return -ENOMEM;
1232
1233         sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
1234
1235         MLX5_SET(modify_sq_in, in, sq_state, p->curr_state);
1236         MLX5_SET(sqc, sqc, state, p->next_state);
1237         if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) {
1238                 MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
1239                 MLX5_SET(sqc,  sqc, packet_pacing_rate_limit_index, p->rl_index);
1240         }
1241
1242         err = mlx5_core_modify_sq(mdev, sqn, in, inlen);
1243
1244         kvfree(in);
1245
1246         return err;
1247 }
1248
1249 static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
1250 {
1251         mlx5_core_destroy_sq(mdev, sqn);
1252 }
1253
1254 static int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
1255                                struct mlx5e_sq_param *param,
1256                                struct mlx5e_create_sq_param *csp,
1257                                u32 *sqn)
1258 {
1259         struct mlx5e_modify_sq_param msp = {0};
1260         int err;
1261
1262         err = mlx5e_create_sq(mdev, param, csp, sqn);
1263         if (err)
1264                 return err;
1265
1266         msp.curr_state = MLX5_SQC_STATE_RST;
1267         msp.next_state = MLX5_SQC_STATE_RDY;
1268         err = mlx5e_modify_sq(mdev, *sqn, &msp);
1269         if (err)
1270                 mlx5e_destroy_sq(mdev, *sqn);
1271
1272         return err;
1273 }
1274
1275 static int mlx5e_set_sq_maxrate(struct net_device *dev,
1276                                 struct mlx5e_txqsq *sq, u32 rate);
1277
1278 static int mlx5e_open_txqsq(struct mlx5e_channel *c,
1279                             u32 tisn,
1280                             int txq_ix,
1281                             struct mlx5e_params *params,
1282                             struct mlx5e_sq_param *param,
1283                             struct mlx5e_txqsq *sq,
1284                             int tc)
1285 {
1286         struct mlx5e_create_sq_param csp = {};
1287         u32 tx_rate;
1288         int err;
1289
1290         err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq, tc);
1291         if (err)
1292                 return err;
1293
1294         csp.tisn            = tisn;
1295         csp.tis_lst_sz      = 1;
1296         csp.cqn             = sq->cq.mcq.cqn;
1297         csp.wq_ctrl         = &sq->wq_ctrl;
1298         csp.min_inline_mode = sq->min_inline_mode;
1299         err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
1300         if (err)
1301                 goto err_free_txqsq;
1302
1303         tx_rate = c->priv->tx_rates[sq->txq_ix];
1304         if (tx_rate)
1305                 mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate);
1306
1307         if (params->tx_dim_enabled)
1308                 sq->state |= BIT(MLX5E_SQ_STATE_AM);
1309
1310         return 0;
1311
1312 err_free_txqsq:
1313         clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1314         mlx5e_free_txqsq(sq);
1315
1316         return err;
1317 }
1318
1319 static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq)
1320 {
1321         WARN_ONCE(sq->cc != sq->pc,
1322                   "SQ 0x%x: cc (0x%x) != pc (0x%x)\n",
1323                   sq->sqn, sq->cc, sq->pc);
1324         sq->cc = 0;
1325         sq->dma_fifo_cc = 0;
1326         sq->pc = 0;
1327 }
1328
1329 static void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
1330 {
1331         sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
1332         clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
1333         set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1334         netdev_tx_reset_queue(sq->txq);
1335         netif_tx_start_queue(sq->txq);
1336 }
1337
1338 static inline void netif_tx_disable_queue(struct netdev_queue *txq)
1339 {
1340         __netif_tx_lock_bh(txq);
1341         netif_tx_stop_queue(txq);
1342         __netif_tx_unlock_bh(txq);
1343 }
1344
1345 static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
1346 {
1347         struct mlx5e_channel *c = sq->channel;
1348         struct mlx5_wq_cyc *wq = &sq->wq;
1349
1350         clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1351         /* prevent netif_tx_wake_queue */
1352         napi_synchronize(&c->napi);
1353
1354         netif_tx_disable_queue(sq->txq);
1355
1356         /* last doorbell out, godspeed .. */
1357         if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) {
1358                 u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
1359                 struct mlx5e_tx_wqe *nop;
1360
1361                 sq->db.wqe_info[pi].skb = NULL;
1362                 nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
1363                 mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl);
1364         }
1365 }
1366
1367 static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
1368 {
1369         struct mlx5e_channel *c = sq->channel;
1370         struct mlx5_core_dev *mdev = c->mdev;
1371         struct mlx5_rate_limit rl = {0};
1372
1373         mlx5e_destroy_sq(mdev, sq->sqn);
1374         if (sq->rate_limit) {
1375                 rl.rate = sq->rate_limit;
1376                 mlx5_rl_remove_rate(mdev, &rl);
1377         }
1378         mlx5e_free_txqsq_descs(sq);
1379         mlx5e_free_txqsq(sq);
1380 }
1381
1382 static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
1383 {
1384         unsigned long exp_time = jiffies + msecs_to_jiffies(2000);
1385
1386         while (time_before(jiffies, exp_time)) {
1387                 if (sq->cc == sq->pc)
1388                         return 0;
1389
1390                 msleep(20);
1391         }
1392
1393         netdev_err(sq->channel->netdev,
1394                    "Wait for SQ 0x%x flush timeout (sq cc = 0x%x, sq pc = 0x%x)\n",
1395                    sq->sqn, sq->cc, sq->pc);
1396
1397         return -ETIMEDOUT;
1398 }
1399
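/* Recovery helper: move the SQ from its current (error) state to RST and then
 * back to RDY so transmission can resume.
 */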
1400 static int mlx5e_sq_to_ready(struct mlx5e_txqsq *sq, int curr_state)
1401 {
1402         struct mlx5_core_dev *mdev = sq->channel->mdev;
1403         struct net_device *dev = sq->channel->netdev;
1404         struct mlx5e_modify_sq_param msp = {0};
1405         int err;
1406
1407         msp.curr_state = curr_state;
1408         msp.next_state = MLX5_SQC_STATE_RST;
1409
1410         err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
1411         if (err) {
1412                 netdev_err(dev, "Failed to move sq 0x%x to reset\n", sq->sqn);
1413                 return err;
1414         }
1415
1416         memset(&msp, 0, sizeof(msp));
1417         msp.curr_state = MLX5_SQC_STATE_RST;
1418         msp.next_state = MLX5_SQC_STATE_RDY;
1419
1420         err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
1421         if (err) {
1422                 netdev_err(dev, "Failed to move sq 0x%x to ready\n", sq->sqn);
1423                 return err;
1424         }
1425
1426         return 0;
1427 }
1428
1429 static void mlx5e_sq_recover(struct work_struct *work)
1430 {
1431         struct mlx5e_txqsq_recover *recover =
1432                 container_of(work, struct mlx5e_txqsq_recover,
1433                              recover_work);
1434         struct mlx5e_txqsq *sq = container_of(recover, struct mlx5e_txqsq,
1435                                               recover);
1436         struct mlx5_core_dev *mdev = sq->channel->mdev;
1437         struct net_device *dev = sq->channel->netdev;
1438         u8 state;
1439         int err;
1440
1441         err = mlx5_core_query_sq_state(mdev, sq->sqn, &state);
1442         if (err) {
1443                 netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n",
1444                            sq->sqn, err);
1445                 return;
1446         }
1447
1448         if (state != MLX5_RQC_STATE_ERR) {
1449                 netdev_err(dev, "SQ 0x%x not in ERROR state\n", sq->sqn);
1450                 return;
1451         }
1452
1453         netif_tx_disable_queue(sq->txq);
1454
1455         if (mlx5e_wait_for_sq_flush(sq))
1456                 return;
1457
1458         /* If the interval between two consecutive recovers per SQ is too
1459          * short, don't recover, to avoid an infinite loop of ERR_CQE -> recover.
1460          * If we reached this state, there is probably a bug that needs to be
1461          * fixed. Let's keep the queue closed and let the tx timeout handler clean up.
1462          */
1463         if (jiffies_to_msecs(jiffies - recover->last_recover) <
1464             MLX5E_SQ_RECOVER_MIN_INTERVAL) {
1465                 netdev_err(dev, "Recover SQ 0x%x canceled, too many error CQEs\n",
1466                            sq->sqn);
1467                 return;
1468         }
1469
1470         /* At this point, no new packets will arrive from the stack, as the TXQ
1471          * is marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI has cleared all
1472          * pending WQEs. It is therefore safe to reset the SQ.
1473          */
1474         if (mlx5e_sq_to_ready(sq, state))
1475                 return;
1476
1477         mlx5e_reset_txqsq_cc_pc(sq);
1478         sq->stats->recover++;
1479         recover->last_recover = jiffies;
1480         mlx5e_activate_txqsq(sq);
1481 }
1482
1483 static int mlx5e_open_icosq(struct mlx5e_channel *c,
1484                             struct mlx5e_params *params,
1485                             struct mlx5e_sq_param *param,
1486                             struct mlx5e_icosq *sq)
1487 {
1488         struct mlx5e_create_sq_param csp = {};
1489         int err;
1490
1491         err = mlx5e_alloc_icosq(c, param, sq);
1492         if (err)
1493                 return err;
1494
1495         csp.cqn             = sq->cq.mcq.cqn;
1496         csp.wq_ctrl         = &sq->wq_ctrl;
1497         csp.min_inline_mode = params->tx_min_inline_mode;
1498         set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1499         err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
1500         if (err)
1501                 goto err_free_icosq;
1502
1503         return 0;
1504
1505 err_free_icosq:
1506         clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1507         mlx5e_free_icosq(sq);
1508
1509         return err;
1510 }
1511
1512 static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
1513 {
1514         struct mlx5e_channel *c = sq->channel;
1515
1516         clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1517         napi_synchronize(&c->napi);
1518
1519         mlx5e_destroy_sq(c->mdev, sq->sqn);
1520         mlx5e_free_icosq(sq);
1521 }
1522
1523 static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
1524                             struct mlx5e_params *params,
1525                             struct mlx5e_sq_param *param,
1526                             struct mlx5e_xdpsq *sq)
1527 {
1528         unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
1529         struct mlx5e_create_sq_param csp = {};
1530         unsigned int inline_hdr_sz = 0;
1531         int err;
1532         int i;
1533
1534         err = mlx5e_alloc_xdpsq(c, params, param, sq);
1535         if (err)
1536                 return err;
1537
1538         csp.tis_lst_sz      = 1;
1539         csp.tisn            = c->priv->tisn[0]; /* tc = 0 */
1540         csp.cqn             = sq->cq.mcq.cqn;
1541         csp.wq_ctrl         = &sq->wq_ctrl;
1542         csp.min_inline_mode = sq->min_inline_mode;
1543         set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1544         err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
1545         if (err)
1546                 goto err_free_xdpsq;
1547
1548         if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
1549                 inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
1550                 ds_cnt++;
1551         }
1552
1553         /* Pre-initialize the fixed WQE fields */
1554         for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
1555                 struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(&sq->wq, i);
1556                 struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
1557                 struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
1558                 struct mlx5_wqe_data_seg *dseg;
1559
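                     /* qpn_ds packs the SQ number in the upper bits and the
                      * data segment (DS) count in the lowest byte.
                      */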
1560                 cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
1561                 eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
1562
1563                 dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
1564                 dseg->lkey = sq->mkey_be;
1565         }
1566
1567         return 0;
1568
1569 err_free_xdpsq:
1570         clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1571         mlx5e_free_xdpsq(sq);
1572
1573         return err;
1574 }
1575
1576 static void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
1577 {
1578         struct mlx5e_channel *c = sq->channel;
1579
1580         clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1581         napi_synchronize(&c->napi);
1582
1583         mlx5e_destroy_sq(c->mdev, sq->sqn);
1584         mlx5e_free_xdpsq_descs(sq);
1585         mlx5e_free_xdpsq(sq);
1586 }
1587
1588 static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
1589                                  struct mlx5e_cq_param *param,
1590                                  struct mlx5e_cq *cq)
1591 {
1592         struct mlx5_core_cq *mcq = &cq->mcq;
1593         int eqn_not_used;
1594         unsigned int irqn;
1595         int err;
1596         u32 i;
1597
1598         err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
1599                                &cq->wq_ctrl);
1600         if (err)
1601                 return err;
1602
1603         mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
1604
1605         mcq->cqe_sz     = 64;
1606         mcq->set_ci_db  = cq->wq_ctrl.db.db;
1607         mcq->arm_db     = cq->wq_ctrl.db.db + 1;
1608         *mcq->set_ci_db = 0;
1609         *mcq->arm_db    = 0;
1610         mcq->vector     = param->eq_ix;
1611         mcq->comp       = mlx5e_completion_event;
1612         mcq->event      = mlx5e_cq_error_event;
1613         mcq->irqn       = irqn;
1614
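             /* Initialize all CQEs with an invalid opcode and HW ownership so
              * a stale entry is never processed as a valid completion.
              */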
1615         for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
1616                 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
1617
1618                 cqe->op_own = 0xf1;
1619         }
1620
1621         cq->mdev = mdev;
1622
1623         return 0;
1624 }
1625
1626 static int mlx5e_alloc_cq(struct mlx5e_channel *c,
1627                           struct mlx5e_cq_param *param,
1628                           struct mlx5e_cq *cq)
1629 {
1630         struct mlx5_core_dev *mdev = c->priv->mdev;
1631         int err;
1632
1633         param->wq.buf_numa_node = cpu_to_node(c->cpu);
1634         param->wq.db_numa_node  = cpu_to_node(c->cpu);
1635         param->eq_ix   = c->ix;
1636
1637         err = mlx5e_alloc_cq_common(mdev, param, cq);
1638
1639         cq->napi    = &c->napi;
1640         cq->channel = c;
1641
1642         return err;
1643 }
1644
1645 static void mlx5e_free_cq(struct mlx5e_cq *cq)
1646 {
1647         mlx5_wq_destroy(&cq->wq_ctrl);
1648 }
1649
1650 static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
1651 {
1652         struct mlx5_core_dev *mdev = cq->mdev;
1653         struct mlx5_core_cq *mcq = &cq->mcq;
1654
1655         void *in;
1656         void *cqc;
1657         int inlen;
1658         unsigned int irqn_not_used;
1659         int eqn;
1660         int err;
1661
1662         inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
1663                 sizeof(u64) * cq->wq_ctrl.buf.npages;
1664         in = kvzalloc(inlen, GFP_KERNEL);
1665         if (!in)
1666                 return -ENOMEM;
1667
1668         cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
1669
1670         memcpy(cqc, param->cqc, sizeof(param->cqc));
1671
1672         mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
1673                                   (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
1674
1675         mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
1676
1677         MLX5_SET(cqc,   cqc, cq_period_mode, param->cq_period_mode);
1678         MLX5_SET(cqc,   cqc, c_eqn,         eqn);
1679         MLX5_SET(cqc,   cqc, uar_page,      mdev->priv.uar->index);
1680         MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
1681                                             MLX5_ADAPTER_PAGE_SHIFT);
1682         MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);
1683
1684         err = mlx5_core_create_cq(mdev, mcq, in, inlen);
1685
1686         kvfree(in);
1687
1688         if (err)
1689                 return err;
1690
1691         mlx5e_cq_arm(cq);
1692
1693         return 0;
1694 }
1695
1696 static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
1697 {
1698         mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
1699 }
1700
1701 static int mlx5e_open_cq(struct mlx5e_channel *c,
1702                          struct net_dim_cq_moder moder,
1703                          struct mlx5e_cq_param *param,
1704                          struct mlx5e_cq *cq)
1705 {
1706         struct mlx5_core_dev *mdev = c->mdev;
1707         int err;
1708
1709         err = mlx5e_alloc_cq(c, param, cq);
1710         if (err)
1711                 return err;
1712
1713         err = mlx5e_create_cq(cq, param);
1714         if (err)
1715                 goto err_free_cq;
1716
1717         if (MLX5_CAP_GEN(mdev, cq_moderation))
1718                 mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts);
1719         return 0;
1720
1721 err_free_cq:
1722         mlx5e_free_cq(cq);
1723
1724         return err;
1725 }
1726
1727 static void mlx5e_close_cq(struct mlx5e_cq *cq)
1728 {
1729         mlx5e_destroy_cq(cq);
1730         mlx5e_free_cq(cq);
1731 }
1732
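     /* Return the first CPU in the affinity mask of the channel's completion
      * IRQ vector; used for NUMA-local allocations and the XPS queue mapping.
      */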
1733 static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
1734 {
1735         return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
1736 }
1737
1738 static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
1739                              struct mlx5e_params *params,
1740                              struct mlx5e_channel_param *cparam)
1741 {
1742         int err;
1743         int tc;
1744
1745         for (tc = 0; tc < c->num_tc; tc++) {
1746                 err = mlx5e_open_cq(c, params->tx_cq_moderation,
1747                                     &cparam->tx_cq, &c->sq[tc].cq);
1748                 if (err)
1749                         goto err_close_tx_cqs;
1750         }
1751
1752         return 0;
1753
1754 err_close_tx_cqs:
1755         for (tc--; tc >= 0; tc--)
1756                 mlx5e_close_cq(&c->sq[tc].cq);
1757
1758         return err;
1759 }
1760
1761 static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
1762 {
1763         int tc;
1764
1765         for (tc = 0; tc < c->num_tc; tc++)
1766                 mlx5e_close_cq(&c->sq[tc].cq);
1767 }
1768
1769 static int mlx5e_open_sqs(struct mlx5e_channel *c,
1770                           struct mlx5e_params *params,
1771                           struct mlx5e_channel_param *cparam)
1772 {
1773         struct mlx5e_priv *priv = c->priv;
1774         int err, tc, max_nch = priv->profile->max_nch(priv->mdev);
1775
1776         for (tc = 0; tc < params->num_tc; tc++) {
1777                 int txq_ix = c->ix + tc * max_nch;
1778
1779                 err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix,
1780                                        params, &cparam->sq, &c->sq[tc], tc);
1781                 if (err)
1782                         goto err_close_sqs;
1783         }
1784
1785         return 0;
1786
1787 err_close_sqs:
1788         for (tc--; tc >= 0; tc--)
1789                 mlx5e_close_txqsq(&c->sq[tc]);
1790
1791         return err;
1792 }
1793
1794 static void mlx5e_close_sqs(struct mlx5e_channel *c)
1795 {
1796         int tc;
1797
1798         for (tc = 0; tc < c->num_tc; tc++)
1799                 mlx5e_close_txqsq(&c->sq[tc]);
1800 }
1801
1802 static int mlx5e_set_sq_maxrate(struct net_device *dev,
1803                                 struct mlx5e_txqsq *sq, u32 rate)
1804 {
1805         struct mlx5e_priv *priv = netdev_priv(dev);
1806         struct mlx5_core_dev *mdev = priv->mdev;
1807         struct mlx5e_modify_sq_param msp = {0};
1808         struct mlx5_rate_limit rl = {0};
1809         u16 rl_index = 0;
1810         int err;
1811
1812         if (rate == sq->rate_limit)
1813                 /* nothing to do */
1814                 return 0;
1815
1816         if (sq->rate_limit) {
1817                 rl.rate = sq->rate_limit;
1818                 /* remove the current rl index to free space for the next one */
1819                 mlx5_rl_remove_rate(mdev, &rl);
1820         }
1821
1822         sq->rate_limit = 0;
1823
1824         if (rate) {
1825                 rl.rate = rate;
1826                 err = mlx5_rl_add_rate(mdev, &rl_index, &rl);
1827                 if (err) {
1828                         netdev_err(dev, "Failed configuring rate %u: %d\n",
1829                                    rate, err);
1830                         return err;
1831                 }
1832         }
1833
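             /* Modify the SQ in place (RDY -> RDY) just to attach the new
              * rate limit index.
              */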
1834         msp.curr_state = MLX5_SQC_STATE_RDY;
1835         msp.next_state = MLX5_SQC_STATE_RDY;
1836         msp.rl_index   = rl_index;
1837         msp.rl_update  = true;
1838         err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
1839         if (err) {
1840                 netdev_err(dev, "Failed configuring rate %u: %d\n",
1841                            rate, err);
1842                 /* remove the rate from the table */
1843                 if (rate)
1844                         mlx5_rl_remove_rate(mdev, &rl);
1845                 return err;
1846         }
1847
1848         sq->rate_limit = rate;
1849         return 0;
1850 }
1851
1852 static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
1853 {
1854         struct mlx5e_priv *priv = netdev_priv(dev);
1855         struct mlx5_core_dev *mdev = priv->mdev;
1856         struct mlx5e_txqsq *sq = priv->txq2sq[index];
1857         int err = 0;
1858
1859         if (!mlx5_rl_is_supported(mdev)) {
1860                 netdev_err(dev, "Rate limiting is not supported on this device\n");
1861                 return -EINVAL;
1862         }
1863
1864         /* rate is given in Mb/sec, HW config is in Kb/sec */
1865         rate = rate << 10;
1866
1867         /* Check whether the rate is in a valid range; 0 is always valid */
1868         if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
1869                 netdev_err(dev, "TX rate %u is not in range\n", rate);
1870                 return -ERANGE;
1871         }
1872
1873         mutex_lock(&priv->state_lock);
1874         if (test_bit(MLX5E_STATE_OPENED, &priv->state))
1875                 err = mlx5e_set_sq_maxrate(dev, sq, rate);
1876         if (!err)
1877                 priv->tx_rates[index] = rate;
1878         mutex_unlock(&priv->state_lock);
1879
1880         return err;
1881 }
1882
1883 static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
1884                               struct mlx5e_params *params,
1885                               struct mlx5e_channel_param *cparam,
1886                               struct mlx5e_channel **cp)
1887 {
1888         struct net_dim_cq_moder icocq_moder = {0, 0};
1889         struct net_device *netdev = priv->netdev;
1890         int cpu = mlx5e_get_cpu(priv, ix);
1891         struct mlx5e_channel *c;
1892         unsigned int irq;
1893         int err;
1894         int eqn;
1895
1896         c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
1897         if (!c)
1898                 return -ENOMEM;
1899
1900         c->priv     = priv;
1901         c->mdev     = priv->mdev;
1902         c->tstamp   = &priv->tstamp;
1903         c->ix       = ix;
1904         c->cpu      = cpu;
1905         c->pdev     = &priv->mdev->pdev->dev;
1906         c->netdev   = priv->netdev;
1907         c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
1908         c->num_tc   = params->num_tc;
1909         c->xdp      = !!params->xdp_prog;
1910         c->stats    = &priv->channel_stats[ix].ch;
1911
1912         mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
1913         c->irq_desc = irq_to_desc(irq);
1914
1915         netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
1916
1917         err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq);
1918         if (err)
1919                 goto err_napi_del;
1920
1921         err = mlx5e_open_tx_cqs(c, params, cparam);
1922         if (err)
1923                 goto err_close_icosq_cq;
1924
1925         err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq);
1926         if (err)
1927                 goto err_close_tx_cqs;
1928
1929         /* XDP SQ CQ params are the same as normal TXQ SQ CQ params */
1930         err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation,
1931                                      &cparam->tx_cq, &c->rq.xdpsq.cq) : 0;
1932         if (err)
1933                 goto err_close_rx_cq;
1934
1935         napi_enable(&c->napi);
1936
1937         err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
1938         if (err)
1939                 goto err_disable_napi;
1940
1941         err = mlx5e_open_sqs(c, params, cparam);
1942         if (err)
1943                 goto err_close_icosq;
1944
1945         err = c->xdp ? mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->rq.xdpsq) : 0;
1946         if (err)
1947                 goto err_close_sqs;
1948
1949         err = mlx5e_open_rq(c, params, &cparam->rq, &c->rq);
1950         if (err)
1951                 goto err_close_xdp_sq;
1952
1953         *cp = c;
1954
1955         return 0;
1956 err_close_xdp_sq:
1957         if (c->xdp)
1958                 mlx5e_close_xdpsq(&c->rq.xdpsq);
1959
1960 err_close_sqs:
1961         mlx5e_close_sqs(c);
1962
1963 err_close_icosq:
1964         mlx5e_close_icosq(&c->icosq);
1965
1966 err_disable_napi:
1967         napi_disable(&c->napi);
1968         if (c->xdp)
1969                 mlx5e_close_cq(&c->rq.xdpsq.cq);
1970
1971 err_close_rx_cq:
1972         mlx5e_close_cq(&c->rq.cq);
1973
1974 err_close_tx_cqs:
1975         mlx5e_close_tx_cqs(c);
1976
1977 err_close_icosq_cq:
1978         mlx5e_close_cq(&c->icosq.cq);
1979
1980 err_napi_del:
1981         netif_napi_del(&c->napi);
1982         kfree(c);
1983
1984         return err;
1985 }
1986
1987 static void mlx5e_activate_channel(struct mlx5e_channel *c)
1988 {
1989         int tc;
1990
1991         for (tc = 0; tc < c->num_tc; tc++)
1992                 mlx5e_activate_txqsq(&c->sq[tc]);
1993         mlx5e_activate_rq(&c->rq);
1994         netif_set_xps_queue(c->netdev, get_cpu_mask(c->cpu), c->ix);
1995 }
1996
1997 static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
1998 {
1999         int tc;
2000
2001         mlx5e_deactivate_rq(&c->rq);
2002         for (tc = 0; tc < c->num_tc; tc++)
2003                 mlx5e_deactivate_txqsq(&c->sq[tc]);
2004 }
2005
2006 static void mlx5e_close_channel(struct mlx5e_channel *c)
2007 {
2008         mlx5e_close_rq(&c->rq);
2009         if (c->xdp)
2010                 mlx5e_close_xdpsq(&c->rq.xdpsq);
2011         mlx5e_close_sqs(c);
2012         mlx5e_close_icosq(&c->icosq);
2013         napi_disable(&c->napi);
2014         if (c->xdp)
2015                 mlx5e_close_cq(&c->rq.xdpsq.cq);
2016         mlx5e_close_cq(&c->rq.cq);
2017         mlx5e_close_tx_cqs(c);
2018         mlx5e_close_cq(&c->icosq.cq);
2019         netif_napi_del(&c->napi);
2020
2021         kfree(c);
2022 }
2023
2024 #define DEFAULT_FRAG_SIZE (2048)
2025
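     /* Build the per-WQE fragment layout for the legacy (cyclic) RQ: a single
      * fragment when the whole packet fits linearly, otherwise the MTU-derived
      * byte count is split over up to MLX5E_MAX_RX_FRAGS fragments, each
      * capped at frag_size_max bytes.
      */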
2026 static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
2027                                       struct mlx5e_params *params,
2028                                       struct mlx5e_rq_frags_info *info)
2029 {
2030         u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu);
2031         int frag_size_max = DEFAULT_FRAG_SIZE;
2032         u32 buf_size = 0;
2033         int i;
2034
2035 #ifdef CONFIG_MLX5_EN_IPSEC
2036         if (MLX5_IPSEC_DEV(mdev))
2037                 byte_count += MLX5E_METADATA_ETHER_LEN;
2038 #endif
2039
2040         if (mlx5e_rx_is_linear_skb(mdev, params)) {
2041                 int frag_stride;
2042
2043                 frag_stride = mlx5e_rx_get_linear_frag_sz(params);
2044                 frag_stride = roundup_pow_of_two(frag_stride);
2045
2046                 info->arr[0].frag_size = byte_count;
2047                 info->arr[0].frag_stride = frag_stride;
2048                 info->num_frags = 1;
2049                 info->wqe_bulk = PAGE_SIZE / frag_stride;
2050                 goto out;
2051         }
2052
2053         if (byte_count > PAGE_SIZE +
2054             (MLX5E_MAX_RX_FRAGS - 1) * frag_size_max)
2055                 frag_size_max = PAGE_SIZE;
2056
2057         i = 0;
2058         while (buf_size < byte_count) {
2059                 int frag_size = byte_count - buf_size;
2060
2061                 if (i < MLX5E_MAX_RX_FRAGS - 1)
2062                         frag_size = min(frag_size, frag_size_max);
2063
2064                 info->arr[i].frag_size = frag_size;
2065                 info->arr[i].frag_stride = roundup_pow_of_two(frag_size);
2066
2067                 buf_size += frag_size;
2068                 i++;
2069         }
2070         info->num_frags = i;
2071         /* number of different wqes sharing a page */
2072         info->wqe_bulk = 1 + (info->num_frags % 2);
2073
2074 out:
2075         info->wqe_bulk = max_t(u8, info->wqe_bulk, 8);
2076         info->log_num_frags = order_base_2(info->num_frags);
2077 }
2078
2079 static inline u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs)
2080 {
2081         int sz = sizeof(struct mlx5_wqe_data_seg) * ndsegs;
2082
2083         switch (wq_type) {
2084         case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
2085                 sz += sizeof(struct mlx5e_rx_wqe_ll);
2086                 break;
2087         default: /* MLX5_WQ_TYPE_CYCLIC */
2088                 sz += sizeof(struct mlx5e_rx_wqe_cyc);
2089         }
2090
2091         return order_base_2(sz);
2092 }
2093
2094 static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
2095                                  struct mlx5e_params *params,
2096                                  struct mlx5e_rq_param *param)
2097 {
2098         struct mlx5_core_dev *mdev = priv->mdev;
2099         void *rqc = param->rqc;
2100         void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
2101         int ndsegs = 1;
2102
2103         switch (params->rq_wq_type) {
2104         case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
2105                 MLX5_SET(wq, wq, log_wqe_num_of_strides,
2106                          mlx5e_mpwqe_get_log_num_strides(mdev, params) -
2107                          MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
2108                 MLX5_SET(wq, wq, log_wqe_stride_size,
2109                          mlx5e_mpwqe_get_log_stride_size(mdev, params) -
2110                          MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
2111                 MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params));
2112                 break;
2113         default: /* MLX5_WQ_TYPE_CYCLIC */
2114                 MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
2115                 mlx5e_build_rq_frags_info(mdev, params, &param->frags_info);
2116                 ndsegs = param->frags_info.num_frags;
2117         }
2118
2119         MLX5_SET(wq, wq, wq_type,          params->rq_wq_type);
2120         MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
2121         MLX5_SET(wq, wq, log_wq_stride,
2122                  mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
2123         MLX5_SET(wq, wq, pd,               mdev->mlx5e_res.pdn);
2124         MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
2125         MLX5_SET(rqc, rqc, vsd,            params->vlan_strip_disable);
2126         MLX5_SET(rqc, rqc, scatter_fcs,    params->scatter_fcs_en);
2127
2128         param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
2129 }
2130
2131 static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
2132                                       struct mlx5e_rq_param *param)
2133 {
2134         struct mlx5_core_dev *mdev = priv->mdev;
2135         void *rqc = param->rqc;
2136         void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
2137
2138         MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
2139         MLX5_SET(wq, wq, log_wq_stride,
2140                  mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
2141         MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter);
2142
2143         param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
2144 }
2145
2146 static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
2147                                         struct mlx5e_sq_param *param)
2148 {
2149         void *sqc = param->sqc;
2150         void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
2151
2152         MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
2153         MLX5_SET(wq, wq, pd,            priv->mdev->mlx5e_res.pdn);
2154
2155         param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
2156 }
2157
2158 static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
2159                                  struct mlx5e_params *params,
2160                                  struct mlx5e_sq_param *param)
2161 {
2162         void *sqc = param->sqc;
2163         void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
2164
2165         mlx5e_build_sq_param_common(priv, param);
2166         MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
2167         MLX5_SET(sqc, sqc, allow_swp, !!MLX5_IPSEC_DEV(priv->mdev));
2168 }
2169
2170 static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
2171                                         struct mlx5e_cq_param *param)
2172 {
2173         void *cqc = param->cqc;
2174
2175         MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
2176 }
2177
2178 static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
2179                                     struct mlx5e_params *params,
2180                                     struct mlx5e_cq_param *param)
2181 {
2182         struct mlx5_core_dev *mdev = priv->mdev;
2183         void *cqc = param->cqc;
2184         u8 log_cq_size;
2185
2186         switch (params->rq_wq_type) {
2187         case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
2188                 log_cq_size = mlx5e_mpwqe_get_log_rq_size(params) +
2189                         mlx5e_mpwqe_get_log_num_strides(mdev, params);
2190                 break;
2191         default: /* MLX5_WQ_TYPE_CYCLIC */
2192                 log_cq_size = params->log_rq_mtu_frames;
2193         }
2194
2195         MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
2196         if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
2197                 MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
2198                 MLX5_SET(cqc, cqc, cqe_comp_en, 1);
2199         }
2200
2201         mlx5e_build_common_cq_param(priv, param);
2202         param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
2203 }
2204
2205 static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
2206                                     struct mlx5e_params *params,
2207                                     struct mlx5e_cq_param *param)
2208 {
2209         void *cqc = param->cqc;
2210
2211         MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);
2212
2213         mlx5e_build_common_cq_param(priv, param);
2214         param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
2215 }
2216
2217 static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
2218                                      u8 log_wq_size,
2219                                      struct mlx5e_cq_param *param)
2220 {
2221         void *cqc = param->cqc;
2222
2223         MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);
2224
2225         mlx5e_build_common_cq_param(priv, param);
2226
2227         param->cq_period_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
2228 }
2229
2230 static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
2231                                     u8 log_wq_size,
2232                                     struct mlx5e_sq_param *param)
2233 {
2234         void *sqc = param->sqc;
2235         void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
2236
2237         mlx5e_build_sq_param_common(priv, param);
2238
2239         MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
2240         MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
2241 }
2242
2243 static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
2244                                     struct mlx5e_params *params,
2245                                     struct mlx5e_sq_param *param)
2246 {
2247         void *sqc = param->sqc;
2248         void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
2249
2250         mlx5e_build_sq_param_common(priv, param);
2251         MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
2252 }
2253
2254 static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
2255                                       struct mlx5e_params *params,
2256                                       struct mlx5e_channel_param *cparam)
2257 {
2258         u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
2259
2260         mlx5e_build_rq_param(priv, params, &cparam->rq);
2261         mlx5e_build_sq_param(priv, params, &cparam->sq);
2262         mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
2263         mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq);
2264         mlx5e_build_rx_cq_param(priv, params, &cparam->rx_cq);
2265         mlx5e_build_tx_cq_param(priv, params, &cparam->tx_cq);
2266         mlx5e_build_ico_cq_param(priv, icosq_log_wq_sz, &cparam->icosq_cq);
2267 }
2268
2269 int mlx5e_open_channels(struct mlx5e_priv *priv,
2270                         struct mlx5e_channels *chs)
2271 {
2272         struct mlx5e_channel_param *cparam;
2273         int err = -ENOMEM;
2274         int i;
2275
2276         chs->num = chs->params.num_channels;
2277
2278         chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
2279         cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
2280         if (!chs->c || !cparam)
2281                 goto err_free;
2282
2283         mlx5e_build_channel_param(priv, &chs->params, cparam);
2284         for (i = 0; i < chs->num; i++) {
2285                 err = mlx5e_open_channel(priv, i, &chs->params, cparam, &chs->c[i]);
2286                 if (err)
2287                         goto err_close_channels;
2288         }
2289
2290         kfree(cparam);
2291         return 0;
2292
2293 err_close_channels:
2294         for (i--; i >= 0; i--)
2295                 mlx5e_close_channel(chs->c[i]);
2296
2297 err_free:
2298         kfree(chs->c);
2299         kfree(cparam);
2300         chs->num = 0;
2301         return err;
2302 }
2303
2304 static void mlx5e_activate_channels(struct mlx5e_channels *chs)
2305 {
2306         int i;
2307
2308         for (i = 0; i < chs->num; i++)
2309                 mlx5e_activate_channel(chs->c[i]);
2310 }
2311
2312 static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
2313 {
2314         int err = 0;
2315         int i;
2316
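             /* After the first RQ times out, poll the remaining ones without
              * waiting (zero timeout).
              */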
2317         for (i = 0; i < chs->num; i++)
2318                 err |= mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq,
2319                                                   err ? 0 : 20000);
2320
2321         return err ? -ETIMEDOUT : 0;
2322 }
2323
2324 static void mlx5e_deactivate_channels(struct mlx5e_channels *chs)
2325 {
2326         int i;
2327
2328         for (i = 0; i < chs->num; i++)
2329                 mlx5e_deactivate_channel(chs->c[i]);
2330 }
2331
2332 void mlx5e_close_channels(struct mlx5e_channels *chs)
2333 {
2334         int i;
2335
2336         for (i = 0; i < chs->num; i++)
2337                 mlx5e_close_channel(chs->c[i]);
2338
2339         kfree(chs->c);
2340         chs->num = 0;
2341 }
2342
2343 static int
2344 mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, struct mlx5e_rqt *rqt)
2345 {
2346         struct mlx5_core_dev *mdev = priv->mdev;
2347         void *rqtc;
2348         int inlen;
2349         int err;
2350         u32 *in;
2351         int i;
2352
2353         inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
2354         in = kvzalloc(inlen, GFP_KERNEL);
2355         if (!in)
2356                 return -ENOMEM;
2357
2358         rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
2359
2360         MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
2361         MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
2362
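             /* Point all entries at the drop RQ for now; they are redirected
              * to real channel RQs later via mlx5e_redirect_rqt().
              */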
2363         for (i = 0; i < sz; i++)
2364                 MLX5_SET(rqtc, rqtc, rq_num[i], priv->drop_rq.rqn);
2365
2366         err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
2367         if (!err)
2368                 rqt->enabled = true;
2369
2370         kvfree(in);
2371         return err;
2372 }
2373
2374 void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt)
2375 {
2376         rqt->enabled = false;
2377         mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn);
2378 }
2379
2380 int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv)
2381 {
2382         struct mlx5e_rqt *rqt = &priv->indir_rqt;
2383         int err;
2384
2385         err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, rqt);
2386         if (err)
2387                 mlx5_core_warn(priv->mdev, "create indirect rqts failed, %d\n", err);
2388         return err;
2389 }
2390
2391 int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
2392 {
2393         struct mlx5e_rqt *rqt;
2394         int err;
2395         int ix;
2396
2397         for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
2398                 rqt = &priv->direct_tir[ix].rqt;
2399                 err = mlx5e_create_rqt(priv, 1 /* size */, rqt);
2400                 if (err)
2401                         goto err_destroy_rqts;
2402         }
2403
2404         return 0;
2405
2406 err_destroy_rqts:
2407         mlx5_core_warn(priv->mdev, "create direct rqts failed, %d\n", err);
2408         for (ix--; ix >= 0; ix--)
2409                 mlx5e_destroy_rqt(priv, &priv->direct_tir[ix].rqt);
2410
2411         return err;
2412 }
2413
2414 void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv)
2415 {
2416         int i;
2417
2418         for (i = 0; i < priv->profile->max_nch(priv->mdev); i++)
2419                 mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
2420 }
2421
2422 static int mlx5e_rx_hash_fn(int hfunc)
2423 {
2424         return (hfunc == ETH_RSS_HASH_TOP) ?
2425                MLX5_RX_HASH_FN_TOEPLITZ :
2426                MLX5_RX_HASH_FN_INVERTED_XOR8;
2427 }
2428
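     /* Reverse the lowest @size bits of @a: bit i of the result is bit
      * (size - 1 - i) of the input.
      */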
2429 int mlx5e_bits_invert(unsigned long a, int size)
2430 {
2431         int inv = 0;
2432         int i;
2433
2434         for (i = 0; i < size; i++)
2435                 inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
2436
2437         return inv;
2438 }
2439
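     /* Fill the RQT entries: for RSS, each entry goes through the user
      * indirection table (bit-reversed when the XOR hash is used) and maps to
      * a channel RQ; otherwise every entry points to the single given RQN.
      */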
2440 static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, int sz,
2441                                 struct mlx5e_redirect_rqt_param rrp, void *rqtc)
2442 {
2443         int i;
2444
2445         for (i = 0; i < sz; i++) {
2446                 u32 rqn;
2447
2448                 if (rrp.is_rss) {
2449                         int ix = i;
2450
2451                         if (rrp.rss.hfunc == ETH_RSS_HASH_XOR)
2452                                 ix = mlx5e_bits_invert(i, ilog2(sz));
2453
2454                         ix = priv->channels.params.indirection_rqt[ix];
2455                         rqn = rrp.rss.channels->c[ix]->rq.rqn;
2456                 } else {
2457                         rqn = rrp.rqn;
2458                 }
2459                 MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
2460         }
2461 }
2462
2463 int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
2464                        struct mlx5e_redirect_rqt_param rrp)
2465 {
2466         struct mlx5_core_dev *mdev = priv->mdev;
2467         void *rqtc;
2468         int inlen;
2469         u32 *in;
2470         int err;
2471
2472         inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
2473         in = kvzalloc(inlen, GFP_KERNEL);
2474         if (!in)
2475                 return -ENOMEM;
2476
2477         rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
2478
2479         MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
2480         MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
2481         mlx5e_fill_rqt_rqns(priv, sz, rrp, rqtc);
2482         err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);
2483
2484         kvfree(in);
2485         return err;
2486 }
2487
2488 static u32 mlx5e_get_direct_rqn(struct mlx5e_priv *priv, int ix,
2489                                 struct mlx5e_redirect_rqt_param rrp)
2490 {
2491         if (!rrp.is_rss)
2492                 return rrp.rqn;
2493
2494         if (ix >= rrp.rss.channels->num)
2495                 return priv->drop_rq.rqn;
2496
2497         return rrp.rss.channels->c[ix]->rq.rqn;
2498 }
2499
2500 static void mlx5e_redirect_rqts(struct mlx5e_priv *priv,
2501                                 struct mlx5e_redirect_rqt_param rrp)
2502 {
2503         u32 rqtn;
2504         int ix;
2505
2506         if (priv->indir_rqt.enabled) {
2507                 /* RSS RQ table */
2508                 rqtn = priv->indir_rqt.rqtn;
2509                 mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
2510         }
2511
2512         for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
2513                 struct mlx5e_redirect_rqt_param direct_rrp = {
2514                         .is_rss = false,
2515                         {
2516                                 .rqn    = mlx5e_get_direct_rqn(priv, ix, rrp)
2517                         },
2518                 };
2519
2520                 /* Direct RQ Tables */
2521                 if (!priv->direct_tir[ix].rqt.enabled)
2522                         continue;
2523
2524                 rqtn = priv->direct_tir[ix].rqt.rqtn;
2525                 mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
2526         }
2527 }
2528
2529 static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv,
2530                                             struct mlx5e_channels *chs)
2531 {
2532         struct mlx5e_redirect_rqt_param rrp = {
2533                 .is_rss        = true,
2534                 {
2535                         .rss = {
2536                                 .channels  = chs,
2537                                 .hfunc     = chs->params.rss_hfunc,
2538                         }
2539                 },
2540         };
2541
2542         mlx5e_redirect_rqts(priv, rrp);
2543 }
2544
2545 static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv)
2546 {
2547         struct mlx5e_redirect_rqt_param drop_rrp = {
2548                 .is_rss = false,
2549                 {
2550                         .rqn = priv->drop_rq.rqn,
2551                 },
2552         };
2553
2554         mlx5e_redirect_rqts(priv, drop_rrp);
2555 }
2556
2557 static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc)
2558 {
2559         if (!params->lro_en)
2560                 return;
2561
2562 #define ROUGH_MAX_L2_L3_HDR_SZ 256
2563
2564         MLX5_SET(tirc, tirc, lro_enable_mask,
2565                  MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
2566                  MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
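             /* lro_max_ip_payload_size is given to HW in units of 256 bytes,
              * hence the shift by 8 below.
              */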
2567         MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
2568                  (params->lro_wqe_sz - ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
2569         MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout);
2570 }
2571
2572 void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
2573                                     enum mlx5e_traffic_types tt,
2574                                     void *tirc, bool inner)
2575 {
2576         void *hfso = inner ? MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner) :
2577                              MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
2578
2579 #define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
2580                                  MLX5_HASH_FIELD_SEL_DST_IP)
2581
2582 #define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
2583                                  MLX5_HASH_FIELD_SEL_DST_IP   |\
2584                                  MLX5_HASH_FIELD_SEL_L4_SPORT |\
2585                                  MLX5_HASH_FIELD_SEL_L4_DPORT)
2586
2587 #define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
2588                                  MLX5_HASH_FIELD_SEL_DST_IP   |\
2589                                  MLX5_HASH_FIELD_SEL_IPSEC_SPI)
2590
2591         MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(params->rss_hfunc));
2592         if (params->rss_hfunc == ETH_RSS_HASH_TOP) {
2593                 void *rss_key = MLX5_ADDR_OF(tirc, tirc,
2594                                              rx_hash_toeplitz_key);
2595                 size_t len = MLX5_FLD_SZ_BYTES(tirc,
2596                                                rx_hash_toeplitz_key);
2597
2598                 MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
2599                 memcpy(rss_key, params->toeplitz_hash_key, len);
2600         }
2601
2602         switch (tt) {
2603         case MLX5E_TT_IPV4_TCP:
2604                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2605                          MLX5_L3_PROT_TYPE_IPV4);
2606                 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2607                          MLX5_L4_PROT_TYPE_TCP);
2608                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2609                          MLX5_HASH_IP_L4PORTS);
2610                 break;
2611
2612         case MLX5E_TT_IPV6_TCP:
2613                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2614                          MLX5_L3_PROT_TYPE_IPV6);
2615                 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2616                          MLX5_L4_PROT_TYPE_TCP);
2617                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2618                          MLX5_HASH_IP_L4PORTS);
2619                 break;
2620
2621         case MLX5E_TT_IPV4_UDP:
2622                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2623                          MLX5_L3_PROT_TYPE_IPV4);
2624                 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2625                          MLX5_L4_PROT_TYPE_UDP);
2626                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2627                          MLX5_HASH_IP_L4PORTS);
2628                 break;
2629
2630         case MLX5E_TT_IPV6_UDP:
2631                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2632                          MLX5_L3_PROT_TYPE_IPV6);
2633                 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2634                          MLX5_L4_PROT_TYPE_UDP);
2635                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2636                          MLX5_HASH_IP_L4PORTS);
2637                 break;
2638
2639         case MLX5E_TT_IPV4_IPSEC_AH:
2640                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2641                          MLX5_L3_PROT_TYPE_IPV4);
2642                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2643                          MLX5_HASH_IP_IPSEC_SPI);
2644                 break;
2645
2646         case MLX5E_TT_IPV6_IPSEC_AH:
2647                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2648                          MLX5_L3_PROT_TYPE_IPV6);
2649                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2650                          MLX5_HASH_IP_IPSEC_SPI);
2651                 break;
2652
2653         case MLX5E_TT_IPV4_IPSEC_ESP:
2654                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2655                          MLX5_L3_PROT_TYPE_IPV4);
2656                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2657                          MLX5_HASH_IP_IPSEC_SPI);
2658                 break;
2659
2660         case MLX5E_TT_IPV6_IPSEC_ESP:
2661                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2662                          MLX5_L3_PROT_TYPE_IPV6);
2663                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2664                          MLX5_HASH_IP_IPSEC_SPI);
2665                 break;
2666
2667         case MLX5E_TT_IPV4:
2668                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2669                          MLX5_L3_PROT_TYPE_IPV4);
2670                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2671                          MLX5_HASH_IP);
2672                 break;
2673
2674         case MLX5E_TT_IPV6:
2675                 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2676                          MLX5_L3_PROT_TYPE_IPV6);
2677                 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2678                          MLX5_HASH_IP);
2679                 break;
2680         default:
2681                 WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
2682         }
2683 }
2684
2685 static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
2686 {
2687         struct mlx5_core_dev *mdev = priv->mdev;
2688
2689         void *in;
2690         void *tirc;
2691         int inlen;
2692         int err;
2693         int tt;
2694         int ix;
2695
2696         inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
2697         in = kvzalloc(inlen, GFP_KERNEL);
2698         if (!in)
2699                 return -ENOMEM;
2700
2701         MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
2702         tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
2703
2704         mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
2705
2706         for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
2707                 err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
2708                                            inlen);
2709                 if (err)
2710                         goto free_in;
2711         }
2712
2713         for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
2714                 err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
2715                                            in, inlen);
2716                 if (err)
2717                         goto free_in;
2718         }
2719
2720 free_in:
2721         kvfree(in);
2722
2723         return err;
2724 }
2725
2726 static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv,
2727                                             enum mlx5e_traffic_types tt,
2728                                             u32 *tirc)
2729 {
2730         MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
2731
2732         mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
2733
2734         MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
2735         MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
2736         MLX5_SET(tirc, tirc, tunneled_offload_en, 0x1);
2737
2738         mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, true);
2739 }
2740
2741 static int mlx5e_set_mtu(struct mlx5_core_dev *mdev,
2742                          struct mlx5e_params *params, u16 mtu)
2743 {
2744         u16 hw_mtu = MLX5E_SW2HW_MTU(params, mtu);
2745         int err;
2746
2747         err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
2748         if (err)
2749                 return err;
2750
2751         /* Update vport context MTU */
2752         mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
2753         return 0;
2754 }
2755
2756 static void mlx5e_query_mtu(struct mlx5_core_dev *mdev,
2757                             struct mlx5e_params *params, u16 *mtu)
2758 {
2759         u16 hw_mtu = 0;
2760         int err;
2761
2762         err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
2763         if (err || !hw_mtu) /* fallback to port oper mtu */
2764                 mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
2765
2766         *mtu = MLX5E_HW2SW_MTU(params, hw_mtu);
2767 }
2768
2769 static int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
2770 {
2771         struct mlx5e_params *params = &priv->channels.params;
2772         struct net_device *netdev = priv->netdev;
2773         struct mlx5_core_dev *mdev = priv->mdev;
2774         u16 mtu;
2775         int err;
2776
2777         err = mlx5e_set_mtu(mdev, params, params->sw_mtu);
2778         if (err)
2779                 return err;
2780
2781         mlx5e_query_mtu(mdev, params, &mtu);
2782         if (mtu != params->sw_mtu)
2783                 netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
2784                             __func__, mtu, params->sw_mtu);
2785
2786         params->sw_mtu = mtu;
2787         return 0;
2788 }
2789
2790 static void mlx5e_netdev_set_tcs(struct net_device *netdev)
2791 {
2792         struct mlx5e_priv *priv = netdev_priv(netdev);
2793         int nch = priv->channels.params.num_channels;
2794         int ntc = priv->channels.params.num_tc;
2795         int tc;
2796
2797         netdev_reset_tc(netdev);
2798
2799         if (ntc == 1)
2800                 return;
2801
2802         netdev_set_num_tc(netdev, ntc);
2803
2804         /* Map all netdev TCs to queue offset 0;
2805          * we have our own UP-to-TXQ mapping for QoS.
2806          */
2807         for (tc = 0; tc < ntc; tc++)
2808                 netdev_set_tc_queue(netdev, tc, nch, 0);
2809 }
2810
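     /* TXQ index layout: TX queues are grouped by TC, max_nch queues per TC,
      * so txq_ix = channel_ix + tc * max_nch (must match mlx5e_open_sqs()).
      */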
2811 static void mlx5e_build_tc2txq_maps(struct mlx5e_priv *priv)
2812 {
2813         int max_nch = priv->profile->max_nch(priv->mdev);
2814         int i, tc;
2815
2816         for (i = 0; i < max_nch; i++)
2817                 for (tc = 0; tc < priv->profile->max_tc; tc++)
2818                         priv->channel_tc2txq[i][tc] = i + tc * max_nch;
2819 }
2820
2821 static void mlx5e_build_tx2sq_maps(struct mlx5e_priv *priv)
2822 {
2823         struct mlx5e_channel *c;
2824         struct mlx5e_txqsq *sq;
2825         int i, tc;
2826
2827         for (i = 0; i < priv->channels.num; i++) {
2828                 c = priv->channels.c[i];
2829                 for (tc = 0; tc < c->num_tc; tc++) {
2830                         sq = &c->sq[tc];
2831                         priv->txq2sq[sq->txq_ix] = sq;
2832                 }
2833         }
2834 }
2835
2836 void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
2837 {
2838         int num_txqs = priv->channels.num * priv->channels.params.num_tc;
2839         struct net_device *netdev = priv->netdev;
2840
2841         mlx5e_netdev_set_tcs(netdev);
2842         netif_set_real_num_tx_queues(netdev, num_txqs);
2843         netif_set_real_num_rx_queues(netdev, priv->channels.num);
2844
2845         mlx5e_build_tx2sq_maps(priv);
2846         mlx5e_activate_channels(&priv->channels);
2847         netif_tx_start_all_queues(priv->netdev);
2848
2849         if (MLX5_ESWITCH_MANAGER(priv->mdev))
2850                 mlx5e_add_sqs_fwd_rules(priv);
2851
2852         mlx5e_wait_channels_min_rx_wqes(&priv->channels);
2853         mlx5e_redirect_rqts_to_channels(priv, &priv->channels);
2854 }
2855
2856 void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
2857 {
2858         mlx5e_redirect_rqts_to_drop(priv);
2859
2860         if (MLX5_ESWITCH_MANAGER(priv->mdev))
2861                 mlx5e_remove_sqs_fwd_rules(priv);
2862
2863         /* FIXME: This is a W/A only for the TX timeout watchdog false alarm
2864          * raised when polling for inactive TX queues.
2865          */
2866         netif_tx_stop_all_queues(priv->netdev);
2867         netif_tx_disable(priv->netdev);
2868         mlx5e_deactivate_channels(&priv->channels);
2869 }
2870
2871 void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
2872                                 struct mlx5e_channels *new_chs,
2873                                 mlx5e_fp_hw_modify hw_modify)
2874 {
2875         struct net_device *netdev = priv->netdev;
2876         int new_num_txqs = new_chs->num * new_chs->params.num_tc;
2877         int carrier_ok;
2878
2880         carrier_ok = netif_carrier_ok(netdev);
2881         netif_carrier_off(netdev);
2882
2883         if (new_num_txqs < netdev->real_num_tx_queues)
2884                 netif_set_real_num_tx_queues(netdev, new_num_txqs);
2885
2886         mlx5e_deactivate_priv_channels(priv);
2887         mlx5e_close_channels(&priv->channels);
2888
2889         priv->channels = *new_chs;
2890
2891         /* New channels are ready to roll, modify HW settings if needed */
2892         if (hw_modify)
2893                 hw_modify(priv);
2894
2895         mlx5e_refresh_tirs(priv, false);
2896         mlx5e_activate_priv_channels(priv);
2897
2898         /* bring the carrier back up if needed */
2899         if (carrier_ok)
2900                 netif_carrier_on(netdev);
2901 }
2902
2903 void mlx5e_timestamp_init(struct mlx5e_priv *priv)
2904 {
2905         priv->tstamp.tx_type   = HWTSTAMP_TX_OFF;
2906         priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
2907 }
2908
2909 int mlx5e_open_locked(struct net_device *netdev)
2910 {
2911         struct mlx5e_priv *priv = netdev_priv(netdev);
2912         int err;
2913
2914         set_bit(MLX5E_STATE_OPENED, &priv->state);
2915
2916         err = mlx5e_open_channels(priv, &priv->channels);
2917         if (err)
2918                 goto err_clear_state_opened_flag;
2919
2920         mlx5e_refresh_tirs(priv, false);
2921         mlx5e_activate_priv_channels(priv);
2922         if (priv->profile->update_carrier)
2923                 priv->profile->update_carrier(priv);
2924
2925         if (priv->profile->update_stats)
2926                 queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
2927
2928         return 0;
2929
2930 err_clear_state_opened_flag:
2931         clear_bit(MLX5E_STATE_OPENED, &priv->state);
2932         return err;
2933 }
2934
2935 int mlx5e_open(struct net_device *netdev)
2936 {
2937         struct mlx5e_priv *priv = netdev_priv(netdev);
2938         int err;
2939
2940         mutex_lock(&priv->state_lock);
2941         err = mlx5e_open_locked(netdev);
2942         if (!err)
2943                 mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
2944         mutex_unlock(&priv->state_lock);
2945
2946         if (mlx5e_vxlan_allowed(priv->mdev))
2947                 udp_tunnel_get_rx_info(netdev);
2948
2949         return err;
2950 }
2951
2952 int mlx5e_close_locked(struct net_device *netdev)
2953 {
2954         struct mlx5e_priv *priv = netdev_priv(netdev);
2955
2956         /* May already be CLOSED in case a previous configuration operation
2957          * (e.g. an RX/TX queue size change) that involves close & open failed.
2958          */
2959         if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
2960                 return 0;
2961
2962         clear_bit(MLX5E_STATE_OPENED, &priv->state);
2963
2964         netif_carrier_off(priv->netdev);
2965         mlx5e_deactivate_priv_channels(priv);
2966         mlx5e_close_channels(&priv->channels);
2967
2968         return 0;
2969 }
2970
2971 int mlx5e_close(struct net_device *netdev)
2972 {
2973         struct mlx5e_priv *priv = netdev_priv(netdev);
2974         int err;
2975
2976         if (!netif_device_present(netdev))
2977                 return -ENODEV;
2978
2979         mutex_lock(&priv->state_lock);
2980         mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN);
2981         err = mlx5e_close_locked(netdev);
2982         mutex_unlock(&priv->state_lock);
2983
2984         return err;
2985 }
2986
2987 static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
2988                                struct mlx5e_rq *rq,
2989                                struct mlx5e_rq_param *param)
2990 {
2991         void *rqc = param->rqc;
2992         void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
2993         int err;
2994
2995         param->wq.db_numa_node = param->wq.buf_numa_node;
2996
2997         err = mlx5_wq_cyc_create(mdev, &param->wq, rqc_wq, &rq->wqe.wq,
2998                                  &rq->wq_ctrl);
2999         if (err)
3000                 return err;
3001
3002         /* Mark as unused given "Drop-RQ" packets never reach XDP */
3003         xdp_rxq_info_unused(&rq->xdp_rxq);
3004
3005         rq->mdev = mdev;
3006
3007         return 0;
3008 }
3009
3010 static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
3011                                struct mlx5e_cq *cq,
3012                                struct mlx5e_cq_param *param)
3013 {
3014         param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
3015         param->wq.db_numa_node  = dev_to_node(&mdev->pdev->dev);
3016
3017         return mlx5e_alloc_cq_common(mdev, param, cq);
3018 }
3019
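     /* The drop RQ is a minimal RQ with no NAPI context; it backs the RQ
      * tables while the real channels are closed, so that stray packets are
      * dropped (and counted via drop_rq_q_counter) in hardware.
      */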
3020 static int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
3021                               struct mlx5e_rq *drop_rq)
3022 {
3023         struct mlx5_core_dev *mdev = priv->mdev;
3024         struct mlx5e_cq_param cq_param = {};
3025         struct mlx5e_rq_param rq_param = {};
3026         struct mlx5e_cq *cq = &drop_rq->cq;
3027         int err;
3028
3029         mlx5e_build_drop_rq_param(priv, &rq_param);
3030
3031         err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
3032         if (err)
3033                 return err;
3034
3035         err = mlx5e_create_cq(cq, &cq_param);
3036         if (err)
3037                 goto err_free_cq;
3038
3039         err = mlx5e_alloc_drop_rq(mdev, drop_rq, &rq_param);
3040         if (err)
3041                 goto err_destroy_cq;
3042
3043         err = mlx5e_create_rq(drop_rq, &rq_param);
3044         if (err)
3045                 goto err_free_rq;
3046
3047         err = mlx5e_modify_rq_state(drop_rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
3048         if (err)
3049                 mlx5_core_warn(priv->mdev, "modify_rq_state failed, rx_if_down_packets won't be counted %d\n", err);
3050
3051         return 0;
3052
3053 err_free_rq:
3054         mlx5e_free_rq(drop_rq);
3055
3056 err_destroy_cq:
3057         mlx5e_destroy_cq(cq);
3058
3059 err_free_cq:
3060         mlx5e_free_cq(cq);
3061
3062         return err;
3063 }
3064
3065 static void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
3066 {
3067         mlx5e_destroy_rq(drop_rq);
3068         mlx5e_free_rq(drop_rq);
3069         mlx5e_destroy_cq(&drop_rq->cq);
3070         mlx5e_free_cq(&drop_rq->cq);
3071 }
3072
3073 int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
3074                      u32 underlay_qpn, u32 *tisn)
3075 {
3076         u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
3077         void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
3078
3079         MLX5_SET(tisc, tisc, prio, tc << 1);
3080         MLX5_SET(tisc, tisc, underlay_qpn, underlay_qpn);
3081         MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);
3082
3083         if (mlx5_lag_is_lacp_owner(mdev))
3084                 MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
3085
3086         return mlx5_core_create_tis(mdev, in, sizeof(in), tisn);
3087 }
3088
3089 void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
3090 {
3091         mlx5_core_destroy_tis(mdev, tisn);
3092 }
3093
3094 int mlx5e_create_tises(struct mlx5e_priv *priv)
3095 {
3096         int err;
3097         int tc;
3098
3099         for (tc = 0; tc < priv->profile->max_tc; tc++) {
3100                 err = mlx5e_create_tis(priv->mdev, tc, 0, &priv->tisn[tc]);
3101                 if (err)
3102                         goto err_close_tises;
3103         }
3104
3105         return 0;
3106
3107 err_close_tises:
3108         for (tc--; tc >= 0; tc--)
3109                 mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
3110
3111         return err;
3112 }
3113
3114 void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
3115 {
3116         int tc;
3117
3118         for (tc = 0; tc < priv->profile->max_tc; tc++)
3119                 mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
3120 }
3121
3122 static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv,
3123                                       enum mlx5e_traffic_types tt,
3124                                       u32 *tirc)
3125 {
3126         MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
3127
3128         mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
3129
3130         MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
3131         MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
3132         mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false);
3133 }
3134
3135 static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc)
3136 {
3137         MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
3138
3139         mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
3140
3141         MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
3142         MLX5_SET(tirc, tirc, indirect_table, rqtn);
3143         MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
3144 }
3145
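/* Create the RSS (indirect) TIRs, one per traffic type. When the device
 * supports an inner flow table for tunneled traffic, a second set of TIRs
 * hashing on the inner headers is created as well; the error path unwinds
 * both sets.
 */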
3146 int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
3147 {
3148         struct mlx5e_tir *tir;
3149         void *tirc;
3150         int inlen;
3151         int i = 0;
3152         int err;
3153         u32 *in;
3154         int tt;
3155
3156         inlen = MLX5_ST_SZ_BYTES(create_tir_in);
3157         in = kvzalloc(inlen, GFP_KERNEL);
3158         if (!in)
3159                 return -ENOMEM;
3160
3161         for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
3162                 memset(in, 0, inlen);
3163                 tir = &priv->indir_tir[tt];
3164                 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
3165                 mlx5e_build_indir_tir_ctx(priv, tt, tirc);
3166                 err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
3167                 if (err) {
3168                         mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err);
3169                         goto err_destroy_inner_tirs;
3170                 }
3171         }
3172
3173         if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
3174                 goto out;
3175
3176         for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) {
3177                 memset(in, 0, inlen);
3178                 tir = &priv->inner_indir_tir[i];
3179                 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
3180                 mlx5e_build_inner_indir_tir_ctx(priv, i, tirc);
3181                 err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
3182                 if (err) {
3183                         mlx5_core_warn(priv->mdev, "create inner indirect tirs failed, %d\n", err);
3184                         goto err_destroy_inner_tirs;
3185                 }
3186         }
3187
3188 out:
3189         kvfree(in);
3190
3191         return 0;
3192
3193 err_destroy_inner_tirs:
3194         for (i--; i >= 0; i--)
3195                 mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
3196
3197         for (tt--; tt >= 0; tt--)
3198                 mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);
3199
3200         kvfree(in);
3201
3202         return err;
3203 }
3204
3205 int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
3206 {
3207         int nch = priv->profile->max_nch(priv->mdev);
3208         struct mlx5e_tir *tir;
3209         void *tirc;
3210         int inlen;
3211         int err;
3212         u32 *in;
3213         int ix;
3214
3215         inlen = MLX5_ST_SZ_BYTES(create_tir_in);
3216         in = kvzalloc(inlen, GFP_KERNEL);
3217         if (!in)
3218                 return -ENOMEM;
3219
3220         for (ix = 0; ix < nch; ix++) {
3221                 memset(in, 0, inlen);
3222                 tir = &priv->direct_tir[ix];
3223                 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
3224                 mlx5e_build_direct_tir_ctx(priv, priv->direct_tir[ix].rqt.rqtn, tirc);
3225                 err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
3226                 if (err)
3227                         goto err_destroy_ch_tirs;
3228         }
3229
3230         kvfree(in);
3231
3232         return 0;
3233
3234 err_destroy_ch_tirs:
3235         mlx5_core_warn(priv->mdev, "create direct tirs failed, %d\n", err);
3236         for (ix--; ix >= 0; ix--)
3237                 mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]);
3238
3239         kvfree(in);
3240
3241         return err;
3242 }
3243
3244 void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
3245 {
3246         int i;
3247
3248         for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
3249                 mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
3250
3251         if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
3252                 return;
3253
3254         for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
3255                 mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
3256 }
3257
3258 void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
3259 {
3260         int nch = priv->profile->max_nch(priv->mdev);
3261         int i;
3262
3263         for (i = 0; i < nch; i++)
3264                 mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]);
3265 }
3266
3267 static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable)
3268 {
3269         int err = 0;
3270         int i;
3271
3272         for (i = 0; i < chs->num; i++) {
3273                 err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable);
3274                 if (err)
3275                         return err;
3276         }
3277
3278         return 0;
3279 }
3280
3281 static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
3282 {
3283         int err = 0;
3284         int i;
3285
3286         for (i = 0; i < chs->num; i++) {
3287                 err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd);
3288                 if (err)
3289                         return err;
3290         }
3291
3292         return 0;
3293 }
3294
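/* mqprio offload: the requested number of TCs must be either 0 (reset to a
 * single TC) or exactly MLX5E_MAX_NUM_TC. If the interface is closed, only
 * the stored parameters are updated; otherwise a new set of channels is
 * opened and swapped in.
 */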
3295 static int mlx5e_setup_tc_mqprio(struct net_device *netdev,
3296                                  struct tc_mqprio_qopt *mqprio)
3297 {
3298         struct mlx5e_priv *priv = netdev_priv(netdev);
3299         struct mlx5e_channels new_channels = {};
3300         u8 tc = mqprio->num_tc;
3301         int err = 0;
3302
3303         mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
3304
3305         if (tc && tc != MLX5E_MAX_NUM_TC)
3306                 return -EINVAL;
3307
3308         mutex_lock(&priv->state_lock);
3309
3310         new_channels.params = priv->channels.params;
3311         new_channels.params.num_tc = tc ? tc : 1;
3312
3313         if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
3314                 priv->channels.params = new_channels.params;
3315                 goto out;
3316         }
3317
3318         err = mlx5e_open_channels(priv, &new_channels);
3319         if (err)
3320                 goto out;
3321
3322         priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
3323                                     new_channels.params.num_tc);
3324         mlx5e_switch_priv_channels(priv, &new_channels, NULL);
3325 out:
3326         mutex_unlock(&priv->state_lock);
3327         return err;
3328 }
3329
3330 #ifdef CONFIG_MLX5_ESWITCH
3331 static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
3332                                      struct tc_cls_flower_offload *cls_flower,
3333                                      int flags)
3334 {
3335         switch (cls_flower->command) {
3336         case TC_CLSFLOWER_REPLACE:
3337                 return mlx5e_configure_flower(priv, cls_flower, flags);
3338         case TC_CLSFLOWER_DESTROY:
3339                 return mlx5e_delete_flower(priv, cls_flower, flags);
3340         case TC_CLSFLOWER_STATS:
3341                 return mlx5e_stats_flower(priv, cls_flower, flags);
3342         default:
3343                 return -EOPNOTSUPP;
3344         }
3345 }
3346
3347 static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3348                                    void *cb_priv)
3349 {
3350         struct mlx5e_priv *priv = cb_priv;
3351
3352         if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
3353                 return -EOPNOTSUPP;
3354
3355         switch (type) {
3356         case TC_SETUP_CLSFLOWER:
3357                 return mlx5e_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS);
3358         default:
3359                 return -EOPNOTSUPP;
3360         }
3361 }
3362
3363 static int mlx5e_setup_tc_block(struct net_device *dev,
3364                                 struct tc_block_offload *f)
3365 {
3366         struct mlx5e_priv *priv = netdev_priv(dev);
3367
3368         if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
3369                 return -EOPNOTSUPP;
3370
3371         switch (f->command) {
3372         case TC_BLOCK_BIND:
3373                 return tcf_block_cb_register(f->block, mlx5e_setup_tc_block_cb,
3374                                              priv, priv);
3375         case TC_BLOCK_UNBIND:
3376                 tcf_block_cb_unregister(f->block, mlx5e_setup_tc_block_cb,
3377                                         priv);
3378                 return 0;
3379         default:
3380                 return -EOPNOTSUPP;
3381         }
3382 }
3383 #endif
3384
3385 static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
3386                           void *type_data)
3387 {
3388         switch (type) {
3389 #ifdef CONFIG_MLX5_ESWITCH
3390         case TC_SETUP_BLOCK:
3391                 return mlx5e_setup_tc_block(dev, type_data);
3392 #endif
3393         case TC_SETUP_QDISC_MQPRIO:
3394                 return mlx5e_setup_tc_mqprio(dev, type_data);
3395         default:
3396                 return -EOPNOTSUPP;
3397         }
3398 }
3399
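/* ndo_get_stats64. Uplink representors report the 802.3 physical port
 * counters; other netdevs report the software ring counters. The error
 * counters are derived from the 802.3 and RFC 2863 port counter groups.
 */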
3400 static void
3401 mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
3402 {
3403         struct mlx5e_priv *priv = netdev_priv(dev);
3404         struct mlx5e_sw_stats *sstats = &priv->stats.sw;
3405         struct mlx5e_vport_stats *vstats = &priv->stats.vport;
3406         struct mlx5e_pport_stats *pstats = &priv->stats.pport;
3407
3408         if (mlx5e_is_uplink_rep(priv)) {
3409                 stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
3410                 stats->rx_bytes   = PPORT_802_3_GET(pstats, a_octets_received_ok);
3411                 stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
3412                 stats->tx_bytes   = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
3413         } else {
3414                 mlx5e_grp_sw_update_stats(priv);
3415                 stats->rx_packets = sstats->rx_packets;
3416                 stats->rx_bytes   = sstats->rx_bytes;
3417                 stats->tx_packets = sstats->tx_packets;
3418                 stats->tx_bytes   = sstats->tx_bytes;
3419                 stats->tx_dropped = sstats->tx_queue_dropped;
3420         }
3421
3422         stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
3423
3424         stats->rx_length_errors =
3425                 PPORT_802_3_GET(pstats, a_in_range_length_errors) +
3426                 PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
3427                 PPORT_802_3_GET(pstats, a_frame_too_long_errors);
3428         stats->rx_crc_errors =
3429                 PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
3430         stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
3431         stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
3432         stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
3433                            stats->rx_frame_errors;
3434         stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;
3435
3436         /* vport multicast also counts packets that are dropped due to steering
3437          * or rx out of buffer
3438          */
3439         stats->multicast =
3440                 VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
3441 }
3442
3443 static void mlx5e_set_rx_mode(struct net_device *dev)
3444 {
3445         struct mlx5e_priv *priv = netdev_priv(dev);
3446
3447         queue_work(priv->wq, &priv->set_rx_mode_work);
3448 }
3449
3450 static int mlx5e_set_mac(struct net_device *netdev, void *addr)
3451 {
3452         struct mlx5e_priv *priv = netdev_priv(netdev);
3453         struct sockaddr *saddr = addr;
3454
3455         if (!is_valid_ether_addr(saddr->sa_data))
3456                 return -EADDRNOTAVAIL;
3457
3458         netif_addr_lock_bh(netdev);
3459         ether_addr_copy(netdev->dev_addr, saddr->sa_data);
3460         netif_addr_unlock_bh(netdev);
3461
3462         queue_work(priv->wq, &priv->set_rx_mode_work);
3463
3464         return 0;
3465 }
3466
3467 #define MLX5E_SET_FEATURE(features, feature, enable)    \
3468         do {                                            \
3469                 if (enable)                             \
3470                         *features |= feature;           \
3471                 else                                    \
3472                         *features &= ~feature;          \
3473         } while (0)
3474
3475 typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
3476
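/* Toggle LRO. LRO requires striding RQ. A channel reset can be skipped when
 * the interface is closed, or when the striding-RQ linear-SKB decision is
 * unaffected; in that case only the parameters and the TIR LRO context are
 * updated.
 */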
3477 static int set_feature_lro(struct net_device *netdev, bool enable)
3478 {
3479         struct mlx5e_priv *priv = netdev_priv(netdev);
3480         struct mlx5_core_dev *mdev = priv->mdev;
3481         struct mlx5e_channels new_channels = {};
3482         struct mlx5e_params *old_params;
3483         int err = 0;
3484         bool reset;
3485
3486         mutex_lock(&priv->state_lock);
3487
3488         old_params = &priv->channels.params;
3489         if (enable && !MLX5E_GET_PFLAG(old_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
3490                 netdev_warn(netdev, "can't set LRO with legacy RQ\n");
3491                 err = -EINVAL;
3492                 goto out;
3493         }
3494
3495         reset = test_bit(MLX5E_STATE_OPENED, &priv->state);
3496
3497         new_channels.params = *old_params;
3498         new_channels.params.lro_en = enable;
3499
3500         if (old_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
3501                 if (mlx5e_rx_mpwqe_is_linear_skb(mdev, old_params) ==
3502                     mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params))
3503                         reset = false;
3504         }
3505
3506         if (!reset) {
3507                 *old_params = new_channels.params;
3508                 err = mlx5e_modify_tirs_lro(priv);
3509                 goto out;
3510         }
3511
3512         err = mlx5e_open_channels(priv, &new_channels);
3513         if (err)
3514                 goto out;
3515
3516         mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_modify_tirs_lro);
3517 out:
3518         mutex_unlock(&priv->state_lock);
3519         return err;
3520 }
3521
3522 static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
3523 {
3524         struct mlx5e_priv *priv = netdev_priv(netdev);
3525
3526         if (enable)
3527                 mlx5e_enable_cvlan_filter(priv);
3528         else
3529                 mlx5e_disable_cvlan_filter(priv);
3530
3531         return 0;
3532 }
3533
3534 static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
3535 {
3536         struct mlx5e_priv *priv = netdev_priv(netdev);
3537
3538         if (!enable && mlx5e_tc_num_filters(priv)) {
3539                 netdev_err(netdev,
3540                            "Active offloaded tc filters, can't turn hw_tc_offload off\n");
3541                 return -EINVAL;
3542         }
3543
3544         return 0;
3545 }
3546
3547 static int set_feature_rx_all(struct net_device *netdev, bool enable)
3548 {
3549         struct mlx5e_priv *priv = netdev_priv(netdev);
3550         struct mlx5_core_dev *mdev = priv->mdev;
3551
3552         return mlx5_set_port_fcs(mdev, !enable);
3553 }
3554
3555 static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
3556 {
3557         struct mlx5e_priv *priv = netdev_priv(netdev);
3558         int err;
3559
3560         mutex_lock(&priv->state_lock);
3561
3562         priv->channels.params.scatter_fcs_en = enable;
3563         err = mlx5e_modify_channels_scatter_fcs(&priv->channels, enable);
3564         if (err)
3565                 priv->channels.params.scatter_fcs_en = !enable;
3566
3567         mutex_unlock(&priv->state_lock);
3568
3569         return err;
3570 }
3571
3572 static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
3573 {
3574         struct mlx5e_priv *priv = netdev_priv(netdev);
3575         int err = 0;
3576
3577         mutex_lock(&priv->state_lock);
3578
3579         priv->channels.params.vlan_strip_disable = !enable;
3580         if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
3581                 goto unlock;
3582
3583         err = mlx5e_modify_channels_vsd(&priv->channels, !enable);
3584         if (err)
3585                 priv->channels.params.vlan_strip_disable = enable;
3586
3587 unlock:
3588         mutex_unlock(&priv->state_lock);
3589
3590         return err;
3591 }
3592
3593 #ifdef CONFIG_RFS_ACCEL
3594 static int set_feature_arfs(struct net_device *netdev, bool enable)
3595 {
3596         struct mlx5e_priv *priv = netdev_priv(netdev);
3597         int err;
3598
3599         if (enable)
3600                 err = mlx5e_arfs_enable(priv);
3601         else
3602                 err = mlx5e_arfs_disable(priv);
3603
3604         return err;
3605 }
3606 #endif
3607
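/* Apply a single feature change: call the handler only if the requested state
 * differs from the current netdev->features, and on success record the new
 * state in *features (the running set tracked by mlx5e_set_features()).
 */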
3608 static int mlx5e_handle_feature(struct net_device *netdev,
3609                                 netdev_features_t *features,
3610                                 netdev_features_t wanted_features,
3611                                 netdev_features_t feature,
3612                                 mlx5e_feature_handler feature_handler)
3613 {
3614         netdev_features_t changes = wanted_features ^ netdev->features;
3615         bool enable = !!(wanted_features & feature);
3616         int err;
3617
3618         if (!(changes & feature))
3619                 return 0;
3620
3621         err = feature_handler(netdev, enable);
3622         if (err) {
3623                 netdev_err(netdev, "%s feature %pNF failed, err %d\n",
3624                            enable ? "Enable" : "Disable", &feature, err);
3625                 return err;
3626         }
3627
3628         MLX5E_SET_FEATURE(features, feature, enable);
3629         return 0;
3630 }
3631
3632 static int mlx5e_set_features(struct net_device *netdev,
3633                               netdev_features_t features)
3634 {
3635         netdev_features_t oper_features = netdev->features;
3636         int err = 0;
3637
3638 #define MLX5E_HANDLE_FEATURE(feature, handler) \
3639         mlx5e_handle_feature(netdev, &oper_features, features, feature, handler)
3640
3641         err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
3642         err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
3643                                     set_feature_cvlan_filter);
3644         err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters);
3645         err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
3646         err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
3647         err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
3648 #ifdef CONFIG_RFS_ACCEL
3649         err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs);
3650 #endif
3651
3652         if (err) {
3653                 netdev->features = oper_features;
3654                 return -EINVAL;
3655         }
3656
3657         return 0;
3658 }
3659
3660 static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
3661                                             netdev_features_t features)
3662 {
3663         struct mlx5e_priv *priv = netdev_priv(netdev);
3664         struct mlx5e_params *params;
3665
3666         mutex_lock(&priv->state_lock);
3667         params = &priv->channels.params;
3668         if (!bitmap_empty(priv->fs.vlan.active_svlans, VLAN_N_VID)) {
3669                 /* HW strips the outer C-tag header; this is a problem
3670                  * for S-tag traffic.
3671                  */
3672                 features &= ~NETIF_F_HW_VLAN_CTAG_RX;
3673                 if (!params->vlan_strip_disable)
3674                         netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n");
3675         }
3676         if (!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
3677                 features &= ~NETIF_F_LRO;
3678                 if (params->lro_en)
3679                         netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n");
3680         }
3681
3682         mutex_unlock(&priv->state_lock);
3683
3684         return features;
3685 }
3686
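/* MTU change. A full channel reset is needed only when the interface is open,
 * LRO is off, and (for striding RQ) the number of packets per WQE would
 * change; otherwise the new MTU is applied to the existing channels.
 */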
3687 int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
3688                      change_hw_mtu_cb set_mtu_cb)
3689 {
3690         struct mlx5e_priv *priv = netdev_priv(netdev);
3691         struct mlx5e_channels new_channels = {};
3692         struct mlx5e_params *params;
3693         int err = 0;
3694         bool reset;
3695
3696         mutex_lock(&priv->state_lock);
3697
3698         params = &priv->channels.params;
3699
3700         reset = !params->lro_en;
3701         reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);
3702
3703         new_channels.params = *params;
3704         new_channels.params.sw_mtu = new_mtu;
3705
3706         if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
3707                 u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params);
3708                 u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params);
3709
3710                 reset = reset && (ppw_old != ppw_new);
3711         }
3712
3713         if (!reset) {
3714                 params->sw_mtu = new_mtu;
3715                 set_mtu_cb(priv);
3716                 netdev->mtu = params->sw_mtu;
3717                 goto out;
3718         }
3719
3720         err = mlx5e_open_channels(priv, &new_channels);
3721         if (err)
3722                 goto out;
3723
3724         mlx5e_switch_priv_channels(priv, &new_channels, set_mtu_cb);
3725         netdev->mtu = new_channels.params.sw_mtu;
3726
3727 out:
3728         mutex_unlock(&priv->state_lock);
3729         return err;
3730 }
3731
3732 static int mlx5e_change_nic_mtu(struct net_device *netdev, int new_mtu)
3733 {
3734         return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu);
3735 }
3736
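/* SIOCSHWTSTAMP: requires the device_frequency_khz capability. Enabling RX
 * timestamping disables CQE compression (the two are incompatible), while
 * HWTSTAMP_FILTER_NONE restores the administrative default.
 */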
3737 int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
3738 {
3739         struct hwtstamp_config config;
3740         int err;
3741
3742         if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
3743                 return -EOPNOTSUPP;
3744
3745         if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
3746                 return -EFAULT;
3747
3748         /* TX HW timestamp */
3749         switch (config.tx_type) {
3750         case HWTSTAMP_TX_OFF:
3751         case HWTSTAMP_TX_ON:
3752                 break;
3753         default:
3754                 return -ERANGE;
3755         }
3756
3757         mutex_lock(&priv->state_lock);
3758         /* RX HW timestamp */
3759         switch (config.rx_filter) {
3760         case HWTSTAMP_FILTER_NONE:
3761                 /* Reset CQE compression to Admin default */
3762                 mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def);
3763                 break;
3764         case HWTSTAMP_FILTER_ALL:
3765         case HWTSTAMP_FILTER_SOME:
3766         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
3767         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
3768         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
3769         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
3770         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
3771         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
3772         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
3773         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
3774         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
3775         case HWTSTAMP_FILTER_PTP_V2_EVENT:
3776         case HWTSTAMP_FILTER_PTP_V2_SYNC:
3777         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
3778         case HWTSTAMP_FILTER_NTP_ALL:
3779                 /* Disable CQE compression */
3780                 netdev_warn(priv->netdev, "Disabling cqe compression\n");
3781                 err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
3782                 if (err) {
3783                         netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
3784                         mutex_unlock(&priv->state_lock);
3785                         return err;
3786                 }
3787                 config.rx_filter = HWTSTAMP_FILTER_ALL;
3788                 break;
3789         default:
3790                 mutex_unlock(&priv->state_lock);
3791                 return -ERANGE;
3792         }
3793
3794         memcpy(&priv->tstamp, &config, sizeof(config));
3795         mutex_unlock(&priv->state_lock);
3796
3797         return copy_to_user(ifr->ifr_data, &config,
3798                             sizeof(config)) ? -EFAULT : 0;
3799 }
3800
3801 int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
3802 {
3803         struct hwtstamp_config *cfg = &priv->tstamp;
3804
3805         if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
3806                 return -EOPNOTSUPP;
3807
3808         return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? -EFAULT : 0;
3809 }
3810
3811 static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3812 {
3813         struct mlx5e_priv *priv = netdev_priv(dev);
3814
3815         switch (cmd) {
3816         case SIOCSHWTSTAMP:
3817                 return mlx5e_hwstamp_set(priv, ifr);
3818         case SIOCGHWTSTAMP:
3819                 return mlx5e_hwstamp_get(priv, ifr);
3820         default:
3821                 return -EOPNOTSUPP;
3822         }
3823 }
3824
3825 #ifdef CONFIG_MLX5_ESWITCH
3826 static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
3827 {
3828         struct mlx5e_priv *priv = netdev_priv(dev);
3829         struct mlx5_core_dev *mdev = priv->mdev;
3830
3831         return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
3832 }
3833
3834 static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
3835                              __be16 vlan_proto)
3836 {
3837         struct mlx5e_priv *priv = netdev_priv(dev);
3838         struct mlx5_core_dev *mdev = priv->mdev;
3839
3840         if (vlan_proto != htons(ETH_P_8021Q))
3841                 return -EPROTONOSUPPORT;
3842
3843         return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
3844                                            vlan, qos);
3845 }
3846
3847 static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
3848 {
3849         struct mlx5e_priv *priv = netdev_priv(dev);
3850         struct mlx5_core_dev *mdev = priv->mdev;
3851
3852         return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
3853 }
3854
3855 static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
3856 {
3857         struct mlx5e_priv *priv = netdev_priv(dev);
3858         struct mlx5_core_dev *mdev = priv->mdev;
3859
3860         return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
3861 }
3862
3863 static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
3864                              int max_tx_rate)
3865 {
3866         struct mlx5e_priv *priv = netdev_priv(dev);
3867         struct mlx5_core_dev *mdev = priv->mdev;
3868
3869         return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
3870                                            max_tx_rate, min_tx_rate);
3871 }
3872
3873 static int mlx5_vport_link2ifla(u8 esw_link)
3874 {
3875         switch (esw_link) {
3876         case MLX5_ESW_VPORT_ADMIN_STATE_DOWN:
3877                 return IFLA_VF_LINK_STATE_DISABLE;
3878         case MLX5_ESW_VPORT_ADMIN_STATE_UP:
3879                 return IFLA_VF_LINK_STATE_ENABLE;
3880         }
3881         return IFLA_VF_LINK_STATE_AUTO;
3882 }
3883
3884 static int mlx5_ifla_link2vport(u8 ifla_link)
3885 {
3886         switch (ifla_link) {
3887         case IFLA_VF_LINK_STATE_DISABLE:
3888                 return MLX5_ESW_VPORT_ADMIN_STATE_DOWN;
3889         case IFLA_VF_LINK_STATE_ENABLE:
3890                 return MLX5_ESW_VPORT_ADMIN_STATE_UP;
3891         }
3892         return MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
3893 }
3894
3895 static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
3896                                    int link_state)
3897 {
3898         struct mlx5e_priv *priv = netdev_priv(dev);
3899         struct mlx5_core_dev *mdev = priv->mdev;
3900
3901         return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
3902                                             mlx5_ifla_link2vport(link_state));
3903 }
3904
3905 static int mlx5e_get_vf_config(struct net_device *dev,
3906                                int vf, struct ifla_vf_info *ivi)
3907 {
3908         struct mlx5e_priv *priv = netdev_priv(dev);
3909         struct mlx5_core_dev *mdev = priv->mdev;
3910         int err;
3911
3912         err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
3913         if (err)
3914                 return err;
3915         ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
3916         return 0;
3917 }
3918
3919 static int mlx5e_get_vf_stats(struct net_device *dev,
3920                               int vf, struct ifla_vf_stats *vf_stats)
3921 {
3922         struct mlx5e_priv *priv = netdev_priv(dev);
3923         struct mlx5_core_dev *mdev = priv->mdev;
3924
3925         return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
3926                                             vf_stats);
3927 }
3928 #endif
3929
3930 static void mlx5e_add_vxlan_port(struct net_device *netdev,
3931                                  struct udp_tunnel_info *ti)
3932 {
3933         struct mlx5e_priv *priv = netdev_priv(netdev);
3934
3935         if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3936                 return;
3937
3938         if (!mlx5e_vxlan_allowed(priv->mdev))
3939                 return;
3940
3941         mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
3942 }
3943
3944 static void mlx5e_del_vxlan_port(struct net_device *netdev,
3945                                  struct udp_tunnel_info *ti)
3946 {
3947         struct mlx5e_priv *priv = netdev_priv(netdev);
3948
3949         if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3950                 return;
3951
3952         if (!mlx5e_vxlan_allowed(priv->mdev))
3953                 return;
3954
3955         mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0);
3956 }
3957
3958 static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
3959                                                      struct sk_buff *skb,
3960                                                      netdev_features_t features)
3961 {
3962         unsigned int offset = 0;
3963         struct udphdr *udph;
3964         u8 proto;
3965         u16 port;
3966
3967         switch (vlan_get_protocol(skb)) {
3968         case htons(ETH_P_IP):
3969                 proto = ip_hdr(skb)->protocol;
3970                 break;
3971         case htons(ETH_P_IPV6):
3972                 proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
3973                 break;
3974         default:
3975                 goto out;
3976         }
3977
3978         switch (proto) {
3979         case IPPROTO_GRE:
3980                 return features;
3981         case IPPROTO_UDP:
3982                 udph = udp_hdr(skb);
3983                 port = be16_to_cpu(udph->dest);
3984
3985                 /* Check whether the UDP dport is offloaded by HW */
3986                 if (mlx5e_vxlan_lookup_port(priv, port))
3987                         return features;
3988         }
3989
3990 out:
3991         /* Disable CSUM and GSO if the udp dport is not offloaded by HW */
3992         return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3993 }
3994
3995 static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
3996                                               struct net_device *netdev,
3997                                               netdev_features_t features)
3998 {
3999         struct mlx5e_priv *priv = netdev_priv(netdev);
4000
4001         features = vlan_features_check(skb, features);
4002         features = vxlan_features_check(skb, features);
4003
4004 #ifdef CONFIG_MLX5_EN_IPSEC
4005         if (mlx5e_ipsec_feature_check(skb, netdev, features))
4006                 return features;
4007 #endif
4008
4009         /* Check whether HW can offload csum/GSO for this tunneled packet */
4010         if (skb->encapsulation &&
4011             (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
4012                 return mlx5e_tunnel_features_check(priv, skb, features);
4013
4014         return features;
4015 }
4016
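/* TX timeout recovery heuristic: poll the SQ's EQ with interrupts disabled.
 * If any EQEs are found, the interrupt was probably lost; count an eq_rearm
 * event and return true so the caller can skip reopening the channels.
 */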
4017 static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev,
4018                                         struct mlx5e_txqsq *sq)
4019 {
4020         struct mlx5_eq *eq = sq->cq.mcq.eq;
4021         u32 eqe_count;
4022
4023         netdev_err(dev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n",
4024                    eq->eqn, eq->cons_index, eq->irqn);
4025
4026         eqe_count = mlx5_eq_poll_irq_disabled(eq);
4027         if (!eqe_count)
4028                 return false;
4029
4030         netdev_err(dev, "Recover %d eqes on EQ 0x%x\n", eqe_count, eq->eqn);
4031         sq->channel->stats->eq_rearm++;
4032         return true;
4033 }
4034
4035 static void mlx5e_tx_timeout_work(struct work_struct *work)
4036 {
4037         struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
4038                                                tx_timeout_work);
4039         struct net_device *dev = priv->netdev;
4040         bool reopen_channels = false;
4041         int i, err;
4042
4043         rtnl_lock();
4044         mutex_lock(&priv->state_lock);
4045
4046         if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
4047                 goto unlock;
4048
4049         for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) {
4050                 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, i);
4051                 struct mlx5e_txqsq *sq = priv->txq2sq[i];
4052
4053                 if (!netif_xmit_stopped(dev_queue))
4054                         continue;
4055
4056                 netdev_err(dev,
4057                            "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n",
4058                            i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
4059                            jiffies_to_usecs(jiffies - dev_queue->trans_start));
4060
4061                 /* If we recovered a lost interrupt, the TX timeout is most
4062                  * likely resolved; skip reopening the channels.
4063                  */
4064                 if (!mlx5e_tx_timeout_eq_recover(dev, sq)) {
4065                         clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
4066                         reopen_channels = true;
4067                 }
4068         }
4069
4070         if (!reopen_channels)
4071                 goto unlock;
4072
4073         mlx5e_close_locked(dev);
4074         err = mlx5e_open_locked(dev);
4075         if (err)
4076                 netdev_err(priv->netdev,
4077                            "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
4078                            err);
4079
4080 unlock:
4081         mutex_unlock(&priv->state_lock);
4082         rtnl_unlock();
4083 }
4084
4085 static void mlx5e_tx_timeout(struct net_device *dev)
4086 {
4087         struct mlx5e_priv *priv = netdev_priv(dev);
4088
4089         netdev_err(dev, "TX timeout detected\n");
4090         queue_work(priv->wq, &priv->tx_timeout_work);
4091 }
4092
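/* Attach/detach/replace the XDP program. XDP is mutually exclusive with LRO
 * and with IPSec offload. A full channel reset is needed only when switching
 * between "program" and "no program"; replacing one program with another is
 * done in place, per RQ, with the channels kept open.
 */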
4093 static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
4094 {
4095         struct mlx5e_priv *priv = netdev_priv(netdev);
4096         struct bpf_prog *old_prog;
4097         int err = 0;
4098         bool reset, was_opened;
4099         int i;
4100
4101         mutex_lock(&priv->state_lock);
4102
4103         if ((netdev->features & NETIF_F_LRO) && prog) {
4104                 netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
4105                 err = -EINVAL;
4106                 goto unlock;
4107         }
4108
4109         if ((netdev->features & NETIF_F_HW_ESP) && prog) {
4110                 netdev_warn(netdev, "can't set XDP with IPSec offload\n");
4111                 err = -EINVAL;
4112                 goto unlock;
4113         }
4114
4115         was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
4116         /* no need for full reset when exchanging programs */
4117         reset = (!priv->channels.params.xdp_prog || !prog);
4118
4119         if (was_opened && reset)
4120                 mlx5e_close_locked(netdev);
4121         if (was_opened && !reset) {
4122                 /* num_channels is invariant here, so we can take the
4123                  * batched reference right upfront.
4124                  */
4125                 prog = bpf_prog_add(prog, priv->channels.num);
4126                 if (IS_ERR(prog)) {
4127                         err = PTR_ERR(prog);
4128                         goto unlock;
4129                 }
4130         }
4131
4132         /* Exchange programs. We keep the extra prog reference we got from
4133          * the caller, since we cannot fail from this point onwards.
4134          */
4135         old_prog = xchg(&priv->channels.params.xdp_prog, prog);
4136         if (old_prog)
4137                 bpf_prog_put(old_prog);
4138
4139         if (reset) /* change RQ type according to priv->xdp_prog */
4140                 mlx5e_set_rq_type(priv->mdev, &priv->channels.params);
4141
4142         if (was_opened && reset)
4143                 mlx5e_open_locked(netdev);
4144
4145         if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
4146                 goto unlock;
4147
4148         /* When exchanging programs without a reset, update the reference
4149          * counts on behalf of the channels' RQs here.
4150          */
4151         for (i = 0; i < priv->channels.num; i++) {
4152                 struct mlx5e_channel *c = priv->channels.c[i];
4153
4154                 clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
4155                 napi_synchronize(&c->napi);
4156                 /* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */
4157
4158                 old_prog = xchg(&c->rq.xdp_prog, prog);
4159
4160                 set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
4161                 /* napi_schedule in case we have missed anything */
4162                 napi_schedule(&c->napi);
4163
4164                 if (old_prog)
4165                         bpf_prog_put(old_prog);
4166         }
4167
4168 unlock:
4169         mutex_unlock(&priv->state_lock);
4170         return err;
4171 }
4172
4173 static u32 mlx5e_xdp_query(struct net_device *dev)
4174 {
4175         struct mlx5e_priv *priv = netdev_priv(dev);
4176         const struct bpf_prog *xdp_prog;
4177         u32 prog_id = 0;
4178
4179         mutex_lock(&priv->state_lock);
4180         xdp_prog = priv->channels.params.xdp_prog;
4181         if (xdp_prog)
4182                 prog_id = xdp_prog->aux->id;
4183         mutex_unlock(&priv->state_lock);
4184
4185         return prog_id;
4186 }
4187
4188 static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
4189 {
4190         switch (xdp->command) {
4191         case XDP_SETUP_PROG:
4192                 return mlx5e_xdp_set(dev, xdp->prog);
4193         case XDP_QUERY_PROG:
4194                 xdp->prog_id = mlx5e_xdp_query(dev);
4195                 xdp->prog_attached = !!xdp->prog_id;
4196                 return 0;
4197         default:
4198                 return -EINVAL;
4199         }
4200 }
4201
4202 #ifdef CONFIG_NET_POLL_CONTROLLER
4203 /* Fake "interrupt" called by netpoll (e.g. netconsole) to send skbs without
4204  * re-enabling interrupts.
4205  */
4206 static void mlx5e_netpoll(struct net_device *dev)
4207 {
4208         struct mlx5e_priv *priv = netdev_priv(dev);
4209         struct mlx5e_channels *chs = &priv->channels;
4210
4211         int i;
4212
4213         for (i = 0; i < chs->num; i++)
4214                 napi_schedule(&chs->c[i]->napi);
4215 }
4216 #endif
4217
4218 static const struct net_device_ops mlx5e_netdev_ops = {
4219         .ndo_open                = mlx5e_open,
4220         .ndo_stop                = mlx5e_close,
4221         .ndo_start_xmit          = mlx5e_xmit,
4222         .ndo_setup_tc            = mlx5e_setup_tc,
4223         .ndo_select_queue        = mlx5e_select_queue,
4224         .ndo_get_stats64         = mlx5e_get_stats,
4225         .ndo_set_rx_mode         = mlx5e_set_rx_mode,
4226         .ndo_set_mac_address     = mlx5e_set_mac,
4227         .ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
4228         .ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
4229         .ndo_set_features        = mlx5e_set_features,
4230         .ndo_fix_features        = mlx5e_fix_features,
4231         .ndo_change_mtu          = mlx5e_change_nic_mtu,
4232         .ndo_do_ioctl            = mlx5e_ioctl,
4233         .ndo_set_tx_maxrate      = mlx5e_set_tx_maxrate,
4234         .ndo_udp_tunnel_add      = mlx5e_add_vxlan_port,
4235         .ndo_udp_tunnel_del      = mlx5e_del_vxlan_port,
4236         .ndo_features_check      = mlx5e_features_check,
4237 #ifdef CONFIG_RFS_ACCEL
4238         .ndo_rx_flow_steer       = mlx5e_rx_flow_steer,
4239 #endif
4240         .ndo_tx_timeout          = mlx5e_tx_timeout,
4241         .ndo_bpf                 = mlx5e_xdp,
4242 #ifdef CONFIG_NET_POLL_CONTROLLER
4243         .ndo_poll_controller     = mlx5e_netpoll,
4244 #endif
4245 #ifdef CONFIG_MLX5_ESWITCH
4246         /* SRIOV E-Switch NDOs */
4247         .ndo_set_vf_mac          = mlx5e_set_vf_mac,
4248         .ndo_set_vf_vlan         = mlx5e_set_vf_vlan,
4249         .ndo_set_vf_spoofchk     = mlx5e_set_vf_spoofchk,
4250         .ndo_set_vf_trust        = mlx5e_set_vf_trust,
4251         .ndo_set_vf_rate         = mlx5e_set_vf_rate,
4252         .ndo_get_vf_config       = mlx5e_get_vf_config,
4253         .ndo_set_vf_link_state   = mlx5e_set_vf_link_state,
4254         .ndo_get_vf_stats        = mlx5e_get_vf_stats,
4255         .ndo_has_offload_stats   = mlx5e_has_offload_stats,
4256         .ndo_get_offload_stats   = mlx5e_get_offload_stats,
4257 #endif
4258 };
4259
4260 static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
4261 {
4262         if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
4263                 return -EOPNOTSUPP;
4264         if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
4265             !MLX5_CAP_GEN(mdev, nic_flow_table) ||
4266             !MLX5_CAP_ETH(mdev, csum_cap) ||
4267             !MLX5_CAP_ETH(mdev, max_lso_cap) ||
4268             !MLX5_CAP_ETH(mdev, vlan_cap) ||
4269             !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
4270             MLX5_CAP_FLOWTABLE(mdev,
4271                                flow_table_properties_nic_receive.max_ft_level)
4272                                < 3) {
4273                 mlx5_core_warn(mdev,
4274                                "Not creating net device, some required device capabilities are missing\n");
4275                 return -EOPNOTSUPP;
4276         }
4277         if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
4278                 mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
4279         if (!MLX5_CAP_GEN(mdev, cq_moderation))
4280                 mlx5_core_warn(mdev, "CQ moderation is not supported\n");
4281
4282         return 0;
4283 }
4284
4285 void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
4286                                    int num_channels)
4287 {
4288         int i;
4289
4290         for (i = 0; i < len; i++)
4291                 indirection_rqt[i] = i % num_channels;
4292 }
4293
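/* Heuristic: the PCI link is considered "slow" when the maximum port speed
 * exceeds the available PCI bandwidth by more than MLX5E_SLOW_PCI_RATIO.
 */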
4294 static bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
4295 {
4296         u32 link_speed = 0;
4297         u32 pci_bw = 0;
4298
4299         mlx5e_port_max_linkspeed(mdev, &link_speed);
4300         pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL);
4301         mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n",
4302                            link_speed, pci_bw);
4303
4304 #define MLX5E_SLOW_PCI_RATIO (2)
4305
4306         return link_speed && pci_bw &&
4307                 link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
4308 }
4309
4310 static struct net_dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
4311 {
4312         struct net_dim_cq_moder moder;
4313
4314         moder.cq_period_mode = cq_period_mode;
4315         moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
4316         moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
4317         if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
4318                 moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;
4319
4320         return moder;
4321 }
4322
4323 static struct net_dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
4324 {
4325         struct net_dim_cq_moder moder;
4326
4327         moder.cq_period_mode = cq_period_mode;
4328         moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
4329         moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
4330         if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
4331                 moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
4332
4333         return moder;
4334 }
4335
4336 static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
4337 {
4338         return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ?
4339                 NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE :
4340                 NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
4341 }
4342
4343 void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
4344 {
4345         if (params->tx_dim_enabled) {
4346                 u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
4347
4348                 params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode);
4349         } else {
4350                 params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
4351         }
4352
4353         MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
4354                         params->tx_cq_moderation.cq_period_mode ==
4355                                 MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
4356 }
4357
4358 void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
4359 {
4360         if (params->rx_dim_enabled) {
4361                 u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
4362
4363                 params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode);
4364         } else {
4365                 params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
4366         }
4367
4368         MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
4369                         params->rx_cq_moderation.cq_period_mode ==
4370                                 MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
4371 }
4372
4373 static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
4374 {
4375         int i;
4376
4377         /* The supported periods are organized in ascending order */
4378         for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
4379                 if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
4380                         break;
4381
4382         return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
4383 }
4384
4385 void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
4386                             struct mlx5e_params *params,
4387                             u16 max_channels, u16 mtu)
4388 {
4389         u8 rx_cq_period_mode;
4390
4391         params->sw_mtu = mtu;
4392         params->hard_mtu = MLX5E_ETH_HARD_MTU;
4393         params->num_channels = max_channels;
4394         params->num_tc       = 1;
4395
4396         /* SQ */
4397         params->log_sq_size = is_kdump_kernel() ?
4398                 MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
4399                 MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
4400
4401         /* set CQE compression */
4402         params->rx_cqe_compress_def = false;
4403         if (MLX5_CAP_GEN(mdev, cqe_compression) &&
4404             MLX5_CAP_GEN(mdev, vport_group_manager))
4405                 params->rx_cqe_compress_def = slow_pci_heuristic(mdev);
4406
4407         MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
4408
4409         /* RQ */
4410         /* Prefer Striding RQ, unless any of the following holds:
4411          * - Striding RQ configuration is not possible/supported.
4412          * - Slow PCI heuristic.
4413          * - Legacy RQ would use linear SKB while Striding RQ would use non-linear.
4414          */
4415         if (!slow_pci_heuristic(mdev) &&
4416             mlx5e_striding_rq_possible(mdev, params) &&
4417             (mlx5e_rx_mpwqe_is_linear_skb(mdev, params) ||
4418              !mlx5e_rx_is_linear_skb(mdev, params)))
4419                 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
4420         mlx5e_set_rq_type(mdev, params);
4421         mlx5e_init_rq_type_params(mdev, params);
4422
4423         /* HW LRO */
4424
4425         /* TODO: && MLX5_CAP_ETH(mdev, lro_cap) */
4426         if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
4427                 if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
4428                         params->lro_en = !slow_pci_heuristic(mdev);
4429         params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
4430
4431         /* CQ moderation params */
4432         rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
4433                         MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
4434                         MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
4435         params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
4436         params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
4437         mlx5e_set_rx_cq_mode_params(params, rx_cq_period_mode);
4438         mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
4439
4440         /* TX inline */
4441         params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev);
4442
4443         /* RSS */
4444         params->rss_hfunc = ETH_RSS_HASH_XOR;
4445         netdev_rss_key_fill(params->toeplitz_hash_key, sizeof(params->toeplitz_hash_key));
4446         mlx5e_build_default_indir_rqt(params->indirection_rqt,
4447                                       MLX5E_INDIR_RQT_SIZE, max_channels);
4448 }
4449
4450 static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
4451                                         struct net_device *netdev,
4452                                         const struct mlx5e_profile *profile,
4453                                         void *ppriv)
4454 {
4455         struct mlx5e_priv *priv = netdev_priv(netdev);
4456
4457         priv->mdev        = mdev;
4458         priv->netdev      = netdev;
4459         priv->profile     = profile;
4460         priv->ppriv       = ppriv;
4461         priv->msglevel    = MLX5E_MSG_LEVEL;
4462         priv->max_opened_tc = 1;
4463
4464         mlx5e_build_nic_params(mdev, &priv->channels.params,
4465                                profile->max_nch(mdev), netdev->mtu);
4466
4467         mutex_init(&priv->state_lock);
4468
4469         INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
4470         INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
4471         INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
4472         INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
4473
4474         mlx5e_timestamp_init(priv);
4475 }
4476
4477 static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
4478 {
4479         struct mlx5e_priv *priv = netdev_priv(netdev);
4480
4481         mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
4482         if (is_zero_ether_addr(netdev->dev_addr) &&
4483             !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
4484                 eth_hw_addr_random(netdev);
4485                 mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
4486         }
4487 }
4488
4489 #if IS_ENABLED(CONFIG_MLX5_ESWITCH)
4490 static const struct switchdev_ops mlx5e_switchdev_ops = {
4491         .switchdev_port_attr_get        = mlx5e_attr_get,
4492 };
4493 #endif
4494
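/* Set up the netdev: ops, ethtool/dcbnl hooks, and the feature flags allowed
 * by the device capabilities (VLAN offloads, LRO, tunnel GSO, FCS handling,
 * TC/aRFS steering), then query or randomize the MAC address.
 */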
4495 static void mlx5e_build_nic_netdev(struct net_device *netdev)
4496 {
4497         struct mlx5e_priv *priv = netdev_priv(netdev);
4498         struct mlx5_core_dev *mdev = priv->mdev;
4499         bool fcs_supported;
4500         bool fcs_enabled;
4501
4502         SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
4503
4504         netdev->netdev_ops = &mlx5e_netdev_ops;
4505
4506 #ifdef CONFIG_MLX5_CORE_EN_DCB
4507         if (MLX5_CAP_GEN(mdev, vport_group_manager) && MLX5_CAP_GEN(mdev, qos))
4508                 netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
4509 #endif
4510
4511         netdev->watchdog_timeo    = 15 * HZ;
4512
4513         netdev->ethtool_ops       = &mlx5e_ethtool_ops;
4514
4515         netdev->vlan_features    |= NETIF_F_SG;
4516         netdev->vlan_features    |= NETIF_F_IP_CSUM;
4517         netdev->vlan_features    |= NETIF_F_IPV6_CSUM;
4518         netdev->vlan_features    |= NETIF_F_GRO;
4519         netdev->vlan_features    |= NETIF_F_TSO;
4520         netdev->vlan_features    |= NETIF_F_TSO6;
4521         netdev->vlan_features    |= NETIF_F_RXCSUM;
4522         netdev->vlan_features    |= NETIF_F_RXHASH;
4523
4524         netdev->hw_enc_features  |= NETIF_F_HW_VLAN_CTAG_TX;
4525         netdev->hw_enc_features  |= NETIF_F_HW_VLAN_CTAG_RX;
4526
4527         if (!!MLX5_CAP_ETH(mdev, lro_cap) &&
4528             mlx5e_check_fragmented_striding_rq_cap(mdev))
4529                 netdev->vlan_features    |= NETIF_F_LRO;
4530
4531         netdev->hw_features       = netdev->vlan_features;
4532         netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_TX;
4533         netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_RX;
4534         netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_FILTER;
4535         netdev->hw_features      |= NETIF_F_HW_VLAN_STAG_TX;
4536
4537         if (mlx5e_vxlan_allowed(mdev) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
4538                 netdev->hw_features     |= NETIF_F_GSO_PARTIAL;
4539                 netdev->hw_enc_features |= NETIF_F_IP_CSUM;
4540                 netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
4541                 netdev->hw_enc_features |= NETIF_F_TSO;
4542                 netdev->hw_enc_features |= NETIF_F_TSO6;
4543                 netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL;
4544         }
4545
4546         if (mlx5e_vxlan_allowed(mdev)) {
4547                 netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL |
4548                                            NETIF_F_GSO_UDP_TUNNEL_CSUM;
4549                 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
4550                                            NETIF_F_GSO_UDP_TUNNEL_CSUM;
4551                 netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
4552         }
4553
4554         if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
4555                 netdev->hw_features     |= NETIF_F_GSO_GRE |
4556                                            NETIF_F_GSO_GRE_CSUM;
4557                 netdev->hw_enc_features |= NETIF_F_GSO_GRE |
4558                                            NETIF_F_GSO_GRE_CSUM;
4559                 netdev->gso_partial_features |= NETIF_F_GSO_GRE |
4560                                                 NETIF_F_GSO_GRE_CSUM;
4561         }
4562
4563         mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
4564
4565         if (fcs_supported)
4566                 netdev->hw_features |= NETIF_F_RXALL;
4567
4568         if (MLX5_CAP_ETH(mdev, scatter_fcs))
4569                 netdev->hw_features |= NETIF_F_RXFCS;
4570
4571         netdev->features          = netdev->hw_features;
4572         if (!priv->channels.params.lro_en)
4573                 netdev->features  &= ~NETIF_F_LRO;
4574
4575         if (fcs_enabled)
4576                 netdev->features  &= ~NETIF_F_RXALL;
4577
4578         if (!priv->channels.params.scatter_fcs_en)
4579                 netdev->features  &= ~NETIF_F_RXFCS;
4580
4581 #define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
4582         if (FT_CAP(flow_modify_en) &&
4583             FT_CAP(modify_root) &&
4584             FT_CAP(identified_miss_table_mode) &&
4585             FT_CAP(flow_table_modify)) {
4586                 netdev->hw_features      |= NETIF_F_HW_TC;
4587 #ifdef CONFIG_RFS_ACCEL
4588                 netdev->hw_features      |= NETIF_F_NTUPLE;
4589 #endif
4590         }
4591
4592         netdev->features         |= NETIF_F_HIGHDMA;
4593         netdev->features         |= NETIF_F_HW_VLAN_STAG_FILTER;
4594
4595         netdev->priv_flags       |= IFF_UNICAST_FLT;
4596
4597         mlx5e_set_netdev_dev_addr(netdev);
4598
4599 #if IS_ENABLED(CONFIG_MLX5_ESWITCH)
4600         if (MLX5_ESWITCH_MANAGER(mdev))
4601                 netdev->switchdev_ops = &mlx5e_switchdev_ops;
4602 #endif
4603
4604         mlx5e_ipsec_build_netdev(priv);
4605         mlx5e_tls_build_netdev(priv);
4606 }
4607
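/* Allocate the netdev queue counters (priv->q_counter and priv->drop_rq_q_counter).
 * Allocation failures are not fatal: the handle is left at 0 and the counter is
 * simply not used.
 */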
4608 static void mlx5e_create_q_counters(struct mlx5e_priv *priv)
4609 {
4610         struct mlx5_core_dev *mdev = priv->mdev;
4611         int err;
4612
4613         err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
4614         if (err) {
4615                 mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
4616                 priv->q_counter = 0;
4617         }
4618
4619         err = mlx5_core_alloc_q_counter(mdev, &priv->drop_rq_q_counter);
4620         if (err) {
4621                 mlx5_core_warn(mdev, "alloc drop RQ counter failed, %d\n", err);
4622                 priv->drop_rq_q_counter = 0;
4623         }
4624 }
4625
4626 static void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
4627 {
4628         if (priv->q_counter)
4629                 mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
4630
4631         if (priv->drop_rq_q_counter)
4632                 mlx5_core_dealloc_q_counter(priv->mdev, priv->drop_rq_q_counter);
4633 }
4634
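/* Profile .init: build the driver private context, initialize the IPSec and TLS
 * offload state, then set up the netdev features, the tc-to-txq maps and the
 * VXLAN state.
 */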
4635 static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
4636                            struct net_device *netdev,
4637                            const struct mlx5e_profile *profile,
4638                            void *ppriv)
4639 {
4640         struct mlx5e_priv *priv = netdev_priv(netdev);
4641         int err;
4642
4643         mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv);
4644         err = mlx5e_ipsec_init(priv);
4645         if (err)
4646                 mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
4647         err = mlx5e_tls_init(priv);
4648         if (err)
4649                 mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
4650         mlx5e_build_nic_netdev(netdev);
4651         mlx5e_build_tc2txq_maps(priv);
4652         mlx5e_vxlan_init(priv);
4653 }
4654
4655 static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
4656 {
4657         mlx5e_tls_cleanup(priv);
4658         mlx5e_ipsec_cleanup(priv);
4659         mlx5e_vxlan_cleanup(priv);
4660 }
4661
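/* Profile .init_rx: create the RX objects in dependency order (RQTs, TIRs,
 * flow steering, TC offload), unwinding in reverse order on failure.
 */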
4662 static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
4663 {
4664         struct mlx5_core_dev *mdev = priv->mdev;
4665         int err;
4666
4667         err = mlx5e_create_indirect_rqt(priv);
4668         if (err)
4669                 return err;
4670
4671         err = mlx5e_create_direct_rqts(priv);
4672         if (err)
4673                 goto err_destroy_indirect_rqts;
4674
4675         err = mlx5e_create_indirect_tirs(priv);
4676         if (err)
4677                 goto err_destroy_direct_rqts;
4678
4679         err = mlx5e_create_direct_tirs(priv);
4680         if (err)
4681                 goto err_destroy_indirect_tirs;
4682
4683         err = mlx5e_create_flow_steering(priv);
4684         if (err) {
4685                 mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
4686                 goto err_destroy_direct_tirs;
4687         }
4688
4689         err = mlx5e_tc_nic_init(priv);
4690         if (err)
4691                 goto err_destroy_flow_steering;
4692
4693         return 0;
4694
4695 err_destroy_flow_steering:
4696         mlx5e_destroy_flow_steering(priv);
4697 err_destroy_direct_tirs:
4698         mlx5e_destroy_direct_tirs(priv);
4699 err_destroy_indirect_tirs:
4700         mlx5e_destroy_indirect_tirs(priv);
4701 err_destroy_direct_rqts:
4702         mlx5e_destroy_direct_rqts(priv);
4703 err_destroy_indirect_rqts:
4704         mlx5e_destroy_rqt(priv, &priv->indir_rqt);
4705         return err;
4706 }
4707
4708 static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
4709 {
4710         mlx5e_tc_nic_cleanup(priv);
4711         mlx5e_destroy_flow_steering(priv);
4712         mlx5e_destroy_direct_tirs(priv);
4713         mlx5e_destroy_indirect_tirs(priv);
4714         mlx5e_destroy_direct_rqts(priv);
4715         mlx5e_destroy_rqt(priv, &priv->indir_rqt);
4716 }
4717
4718 static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
4719 {
4720         int err;
4721
4722         err = mlx5e_create_tises(priv);
4723         if (err) {
4724                 mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
4725                 return err;
4726         }
4727
4728 #ifdef CONFIG_MLX5_CORE_EN_DCB
4729         mlx5e_dcbnl_initialize(priv);
4730 #endif
4731         return 0;
4732 }
4733
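/* Profile .enable: init the L2 address, set the MTU boundaries, join the LAG,
 * enable async events, register vport representors when acting as e-switch
 * manager and, for an already registered netdev, restore the RX mode and
 * re-attach the interface under the rtnl lock.
 */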
4734 static void mlx5e_nic_enable(struct mlx5e_priv *priv)
4735 {
4736         struct net_device *netdev = priv->netdev;
4737         struct mlx5_core_dev *mdev = priv->mdev;
4738         u16 max_mtu;
4739
4740         mlx5e_init_l2_addr(priv);
4741
4742         /* Mark the link as currently not needed by the driver */
4743         if (!netif_running(netdev))
4744                 mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
4745
4746         /* MTU range: 68 - hw-specific max */
4747         netdev->min_mtu = ETH_MIN_MTU;
4748         mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
4749         netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
4750         mlx5e_set_dev_port_mtu(priv);
4751
4752         mlx5_lag_add(mdev, netdev);
4753
4754         mlx5e_enable_async_events(priv);
4755
4756         if (MLX5_ESWITCH_MANAGER(priv->mdev))
4757                 mlx5e_register_vport_reps(priv);
4758
4759         if (netdev->reg_state != NETREG_REGISTERED)
4760                 return;
4761 #ifdef CONFIG_MLX5_CORE_EN_DCB
4762         mlx5e_dcbnl_init_app(priv);
4763 #endif
4764
4765         queue_work(priv->wq, &priv->set_rx_mode_work);
4766
4767         rtnl_lock();
4768         if (netif_running(netdev))
4769                 mlx5e_open(netdev);
4770         netif_device_attach(netdev);
4771         rtnl_unlock();
4772 }
4773
4774 static void mlx5e_nic_disable(struct mlx5e_priv *priv)
4775 {
4776         struct mlx5_core_dev *mdev = priv->mdev;
4777
4778 #ifdef CONFIG_MLX5_CORE_EN_DCB
4779         if (priv->netdev->reg_state == NETREG_REGISTERED)
4780                 mlx5e_dcbnl_delete_app(priv);
4781 #endif
4782
4783         rtnl_lock();
4784         if (netif_running(priv->netdev))
4785                 mlx5e_close(priv->netdev);
4786         netif_device_detach(priv->netdev);
4787         rtnl_unlock();
4788
4789         queue_work(priv->wq, &priv->set_rx_mode_work);
4790
4791         if (MLX5_ESWITCH_MANAGER(priv->mdev))
4792                 mlx5e_unregister_vport_reps(priv);
4793
4794         mlx5e_disable_async_events(priv);
4795         mlx5_lag_remove(mdev);
4796 }
4797
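/* Profile callbacks for the regular mlx5e NIC netdev */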
4798 static const struct mlx5e_profile mlx5e_nic_profile = {
4799         .init              = mlx5e_nic_init,
4800         .cleanup           = mlx5e_nic_cleanup,
4801         .init_rx           = mlx5e_init_nic_rx,
4802         .cleanup_rx        = mlx5e_cleanup_nic_rx,
4803         .init_tx           = mlx5e_init_nic_tx,
4804         .cleanup_tx        = mlx5e_cleanup_nic_tx,
4805         .enable            = mlx5e_nic_enable,
4806         .disable           = mlx5e_nic_disable,
4807         .update_stats      = mlx5e_update_ndo_stats,
4808         .max_nch           = mlx5e_get_max_num_channels,
4809         .update_carrier    = mlx5e_update_carrier,
4810         .rx_handlers.handle_rx_cqe       = mlx5e_handle_rx_cqe,
4811         .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
4812         .max_tc            = MLX5E_MAX_NUM_TC,
4813 };
4814
4815 /* mlx5e generic netdev management API (TODO: move to en_common.c) */
4816
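/* Allocate an etherdev sized for the profile's maximum channels and TCs, run the
 * profile init and create the driver workqueue.
 */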
4817 struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
4818                                        const struct mlx5e_profile *profile,
4819                                        void *ppriv)
4820 {
4821         int nch = profile->max_nch(mdev);
4822         struct net_device *netdev;
4823         struct mlx5e_priv *priv;
4824
4825         netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
4826                                     nch * profile->max_tc,
4827                                     nch);
4828         if (!netdev) {
4829                 mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
4830                 return NULL;
4831         }
4832
4833 #ifdef CONFIG_RFS_ACCEL
4834         netdev->rx_cpu_rmap = mdev->rmap;
4835 #endif
4836
4837         profile->init(mdev, netdev, profile, ppriv);
4838
4839         netif_carrier_off(netdev);
4840
4841         priv = netdev_priv(netdev);
4842
4843         priv->wq = create_singlethread_workqueue("mlx5e");
4844         if (!priv->wq)
4845                 goto err_cleanup_nic;
4846
4847         return netdev;
4848
4849 err_cleanup_nic:
4850         if (profile->cleanup)
4851                 profile->cleanup(priv);
4852         free_netdev(netdev);
4853
4854         return NULL;
4855 }
4856
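/* Create the HW resources behind the netdev for the bound profile: TX objects,
 * queue counters, the drop RQ and the RX objects, then invoke the profile's
 * enable hook.
 */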
4857 int mlx5e_attach_netdev(struct mlx5e_priv *priv)
4858 {
4859         struct mlx5_core_dev *mdev = priv->mdev;
4860         const struct mlx5e_profile *profile;
4861         int err;
4862
4863         profile = priv->profile;
4864         clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
4865
4866         err = profile->init_tx(priv);
4867         if (err)
4868                 goto out;
4869
4870         mlx5e_create_q_counters(priv);
4871
4872         err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
4873         if (err) {
4874                 mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
4875                 goto err_destroy_q_counters;
4876         }
4877
4878         err = profile->init_rx(priv);
4879         if (err)
4880                 goto err_close_drop_rq;
4881
4882         if (profile->enable)
4883                 profile->enable(priv);
4884
4885         return 0;
4886
4887 err_close_drop_rq:
4888         mlx5e_close_drop_rq(&priv->drop_rq);
4889
4890 err_destroy_q_counters:
4891         mlx5e_destroy_q_counters(priv);
4892         profile->cleanup_tx(priv);
4893
4894 out:
4895         return err;
4896 }
4897
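/* Teardown counterpart of mlx5e_attach_netdev(): disable the profile, flush
 * pending work and destroy the RX/TX HW resources.
 */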
4898 void mlx5e_detach_netdev(struct mlx5e_priv *priv)
4899 {
4900         const struct mlx5e_profile *profile = priv->profile;
4901
4902         set_bit(MLX5E_STATE_DESTROYING, &priv->state);
4903
4904         if (profile->disable)
4905                 profile->disable(priv);
4906         flush_workqueue(priv->wq);
4907
4908         profile->cleanup_rx(priv);
4909         mlx5e_close_drop_rq(&priv->drop_rq);
4910         mlx5e_destroy_q_counters(priv);
4911         profile->cleanup_tx(priv);
4912         cancel_delayed_work_sync(&priv->update_stats_work);
4913 }
4914
4915 void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
4916 {
4917         const struct mlx5e_profile *profile = priv->profile;
4918         struct net_device *netdev = priv->netdev;
4919
4920         destroy_workqueue(priv->wq);
4921         if (profile->cleanup)
4922                 profile->cleanup(priv);
4923         free_netdev(netdev);
4924 }
4925
4926 /* The scope of mlx5e_attach and mlx5e_detach should be limited to creating and
4927  * destroying hardware contexts and connecting them to the current netdev.
4928  */
4929 static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
4930 {
4931         struct mlx5e_priv *priv = vpriv;
4932         struct net_device *netdev = priv->netdev;
4933         int err;
4934
4935         if (netif_device_present(netdev))
4936                 return 0;
4937
4938         err = mlx5e_create_mdev_resources(mdev);
4939         if (err)
4940                 return err;
4941
4942         err = mlx5e_attach_netdev(priv);
4943         if (err) {
4944                 mlx5e_destroy_mdev_resources(mdev);
4945                 return err;
4946         }
4947
4948         return 0;
4949 }
4950
4951 static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
4952 {
4953         struct mlx5e_priv *priv = vpriv;
4954         struct net_device *netdev = priv->netdev;
4955
4956         if (!netif_device_present(netdev))
4957                 return;
4958
4959         mlx5e_detach_netdev(priv);
4960         mlx5e_destroy_mdev_resources(mdev);
4961 }
4962
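/* mlx5_interface .add: allocate the representor private data when acting as
 * e-switch manager, create and attach the NIC netdev and register it with the
 * networking stack.
 */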
4963 static void *mlx5e_add(struct mlx5_core_dev *mdev)
4964 {
4965         struct net_device *netdev;
4966         void *rpriv = NULL;
4967         void *priv;
4968         int err;
4969
4970         err = mlx5e_check_required_hca_cap(mdev);
4971         if (err)
4972                 return NULL;
4973
4974 #ifdef CONFIG_MLX5_ESWITCH
4975         if (MLX5_ESWITCH_MANAGER(mdev)) {
4976                 rpriv = mlx5e_alloc_nic_rep_priv(mdev);
4977                 if (!rpriv) {
4978                         mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n");
4979                         return NULL;
4980                 }
4981         }
4982 #endif
4983
4984         netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, rpriv);
4985         if (!netdev) {
4986                 mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
4987                 goto err_free_rpriv;
4988         }
4989
4990         priv = netdev_priv(netdev);
4991
4992         err = mlx5e_attach(mdev, priv);
4993         if (err) {
4994                 mlx5_core_err(mdev, "mlx5e_attach failed, %d\n", err);
4995                 goto err_destroy_netdev;
4996         }
4997
4998         err = register_netdev(netdev);
4999         if (err) {
5000                 mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
5001                 goto err_detach;
5002         }
5003
5004 #ifdef CONFIG_MLX5_CORE_EN_DCB
5005         mlx5e_dcbnl_init_app(priv);
5006 #endif
5007         return priv;
5008
5009 err_detach:
5010         mlx5e_detach(mdev, priv);
5011 err_destroy_netdev:
5012         mlx5e_destroy_netdev(priv);
5013 err_free_rpriv:
5014         kfree(rpriv);
5015         return NULL;
5016 }
5017
5018 static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
5019 {
5020         struct mlx5e_priv *priv = vpriv;
5021         void *ppriv = priv->ppriv;
5022
5023 #ifdef CONFIG_MLX5_CORE_EN_DCB
5024         mlx5e_dcbnl_delete_app(priv);
5025 #endif
5026         unregister_netdev(priv->netdev);
5027         mlx5e_detach(mdev, vpriv);
5028         mlx5e_destroy_netdev(priv);
5029         kfree(ppriv);
5030 }
5031
5032 static void *mlx5e_get_netdev(void *vpriv)
5033 {
5034         struct mlx5e_priv *priv = vpriv;
5035
5036         return priv->netdev;
5037 }
5038
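/* Callbacks registered with the mlx5 core driver for the Ethernet protocol */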
5039 static struct mlx5_interface mlx5e_interface = {
5040         .add       = mlx5e_add,
5041         .remove    = mlx5e_remove,
5042         .attach    = mlx5e_attach,
5043         .detach    = mlx5e_detach,
5044         .event     = mlx5e_async_event,
5045         .protocol  = MLX5_INTERFACE_PROTOCOL_ETH,
5046         .get_dev   = mlx5e_get_netdev,
5047 };
5048
5049 void mlx5e_init(void)
5050 {
5051         mlx5e_ipsec_build_inverse_table();
5052         mlx5e_build_ptys2ethtool_map();
5053         mlx5_register_interface(&mlx5e_interface);
5054 }
5055
5056 void mlx5e_cleanup(void)
5057 {
5058         mlx5_unregister_interface(&mlx5e_interface);
5059 }