net/mlx5e: Enable reporting checksum unnecessary also for L3 packets
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/prefetch.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <net/busy_poll.h>
#include <net/ip6_checksum.h>
#include <net/page_pool.h>
#include <net/inet_ecn.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
#include "en_rep.h"
#include "ipoib/ipoib.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/tls_rxtx.h"
#include "lib/clock.h"
#include "en/xdp.h"

static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
{
        return config->rx_filter == HWTSTAMP_FILTER_ALL;
}

static inline void mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cqcc,
                                       void *data)
{
        u32 ci = mlx5_cqwq_ctr2ix(&cq->wq, cqcc);

        memcpy(data, mlx5_cqwq_get_wqe(&cq->wq, ci), sizeof(struct mlx5_cqe64));
}

static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
                                         struct mlx5e_cq *cq, u32 cqcc)
{
        mlx5e_read_cqe_slot(cq, cqcc, &cq->title);
        cq->decmprs_left        = be32_to_cpu(cq->title.byte_cnt);
        cq->decmprs_wqe_counter = be16_to_cpu(cq->title.wqe_counter);
        rq->stats->cqe_compress_blks++;
}

static inline void mlx5e_read_mini_arr_slot(struct mlx5e_cq *cq, u32 cqcc)
{
        mlx5e_read_cqe_slot(cq, cqcc, cq->mini_arr);
        cq->mini_arr_idx = 0;
}

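/* Rewrite the ownership bit of the n CQE slots consumed during
 * decompression, starting at cqcc, so that ownership-based validity
 * checks keep working; the bit is flipped for entries past a ring wrap.
 */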
static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n)
{
        struct mlx5_cqwq *wq = &cq->wq;

        u8  op_own = mlx5_cqwq_get_ctr_wrap_cnt(wq, cqcc) & 1;
        u32 ci     = mlx5_cqwq_ctr2ix(wq, cqcc);
        u32 wq_sz  = mlx5_cqwq_get_size(wq);
        u32 ci_top = min_t(u32, wq_sz, ci + n);

        for (; ci < ci_top; ci++, n--) {
                struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci);

                cqe->op_own = op_own;
        }

        if (unlikely(ci == wq_sz)) {
                op_own = !op_own;
                for (ci = 0; ci < n; ci++) {
                        struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci);

                        cqe->op_own = op_own;
                }
        }
}

static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
                                        struct mlx5e_cq *cq, u32 cqcc)
{
        cq->title.byte_cnt     = cq->mini_arr[cq->mini_arr_idx].byte_cnt;
        cq->title.check_sum    = cq->mini_arr[cq->mini_arr_idx].checksum;
        cq->title.op_own      &= 0xf0;
        cq->title.op_own      |= 0x01 & (cqcc >> cq->wq.fbc.log_sz);
        cq->title.wqe_counter  = cpu_to_be16(cq->decmprs_wqe_counter);

        if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
                cq->decmprs_wqe_counter +=
                        mpwrq_get_cqe_consumed_strides(&cq->title);
        else
                cq->decmprs_wqe_counter =
                        mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, cq->decmprs_wqe_counter + 1);
}

static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
                                                struct mlx5e_cq *cq, u32 cqcc)
{
        mlx5e_decompress_cqe(rq, cq, cqcc);
        cq->title.rss_hash_type   = 0;
        cq->title.rss_hash_result = 0;
}

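/* Expand up to budget_rem mini CQEs into the title CQE, dispatching each
 * one via rq->handle_rx_cqe(), reading further mini-array slots as
 * needed. Returns the number of CQEs consumed from the compression block.
 */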
static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
                                             struct mlx5e_cq *cq,
                                             int update_owner_only,
                                             int budget_rem)
{
        u32 cqcc = cq->wq.cc + update_owner_only;
        u32 cqe_count;
        u32 i;

        cqe_count = min_t(u32, cq->decmprs_left, budget_rem);

        for (i = update_owner_only; i < cqe_count;
             i++, cq->mini_arr_idx++, cqcc++) {
                if (cq->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE)
                        mlx5e_read_mini_arr_slot(cq, cqcc);

                mlx5e_decompress_cqe_no_hash(rq, cq, cqcc);
                rq->handle_rx_cqe(rq, &cq->title);
        }
        mlx5e_cqes_update_owner(cq, cq->wq.cc, cqcc - cq->wq.cc);
        cq->wq.cc = cqcc;
        cq->decmprs_left -= cqe_count;
        rq->stats->cqe_compress_pkts += cqe_count;

        return cqe_count;
}

static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
                                              struct mlx5e_cq *cq,
                                              int budget_rem)
{
        mlx5e_read_title_slot(rq, cq, cq->wq.cc);
        mlx5e_read_mini_arr_slot(cq, cq->wq.cc + 1);
        mlx5e_decompress_cqe(rq, cq, cq->wq.cc);
        rq->handle_rx_cqe(rq, &cq->title);
        cq->mini_arr_idx++;

        return mlx5e_decompress_cqes_cont(rq, cq, 1, budget_rem) - 1;
}

static inline bool mlx5e_page_is_reserved(struct page *page)
{
        return page_is_pfmemalloc(page) || page_to_nid(page) != numa_mem_id();
}

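/* Try to stash a page in the per-RQ page cache for later reuse. Fails
 * when the cache ring is full or the page is pfmemalloc/remote-node
 * (see mlx5e_page_is_reserved), in which case the caller releases it.
 */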
static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
                                      struct mlx5e_dma_info *dma_info)
{
        struct mlx5e_page_cache *cache = &rq->page_cache;
        u32 tail_next = (cache->tail + 1) & (MLX5E_CACHE_SIZE - 1);
        struct mlx5e_rq_stats *stats = rq->stats;

        if (tail_next == cache->head) {
                stats->cache_full++;
                return false;
        }

        if (unlikely(mlx5e_page_is_reserved(dma_info->page))) {
                stats->cache_waive++;
                return false;
        }

        cache->page_cache[cache->tail] = *dma_info;
        cache->tail = tail_next;
        return true;
}

static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq,
                                      struct mlx5e_dma_info *dma_info)
{
        struct mlx5e_page_cache *cache = &rq->page_cache;
        struct mlx5e_rq_stats *stats = rq->stats;

        if (unlikely(cache->head == cache->tail)) {
                stats->cache_empty++;
                return false;
        }

        if (page_ref_count(cache->page_cache[cache->head].page) != 1) {
                stats->cache_busy++;
                return false;
        }

        *dma_info = cache->page_cache[cache->head];
        cache->head = (cache->head + 1) & (MLX5E_CACHE_SIZE - 1);
        stats->cache_reuse++;

        dma_sync_single_for_device(rq->pdev, dma_info->addr,
                                   PAGE_SIZE,
                                   DMA_FROM_DEVICE);
        return true;
}

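/* Get a DMA-mapped page for RX: first try the per-RQ cache, then fall
 * back to the page pool and DMA-map the fresh page for device access.
 */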
static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
                                          struct mlx5e_dma_info *dma_info)
{
        if (mlx5e_rx_cache_get(rq, dma_info))
                return 0;

        dma_info->page = page_pool_dev_alloc_pages(rq->page_pool);
        if (unlikely(!dma_info->page))
                return -ENOMEM;

        dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0,
                                      PAGE_SIZE, rq->buff.map_dir);
        if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
                put_page(dma_info->page);
                dma_info->page = NULL;
                return -ENOMEM;
        }

        return 0;
}

void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info)
{
        dma_unmap_page(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir);
}

void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
                        bool recycle)
{
        if (likely(recycle)) {
                if (mlx5e_rx_cache_put(rq, dma_info))
                        return;

                mlx5e_page_dma_unmap(rq, dma_info);
                page_pool_recycle_direct(rq->page_pool, dma_info->page);
        } else {
                mlx5e_page_dma_unmap(rq, dma_info);
                put_page(dma_info->page);
        }
}

static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
                                    struct mlx5e_wqe_frag_info *frag)
{
        int err = 0;

        if (!frag->offset)
                /* On first frag (offset == 0), replenish page (dma_info actually).
                 * Other frags that point to the same dma_info (with a different
                 * offset) should just use the new one without replenishing again
                 * by themselves.
                 */
                err = mlx5e_page_alloc_mapped(rq, frag->di);

        return err;
}

static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
                                     struct mlx5e_wqe_frag_info *frag,
                                     bool recycle)
{
        if (frag->last_in_page)
                mlx5e_page_release(rq, frag->di, recycle);
}

static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
{
        return &rq->wqe.frags[ix << rq->wqe.info.log_num_frags];
}

static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
                              u16 ix)
{
        struct mlx5e_wqe_frag_info *frag = get_frag(rq, ix);
        int err;
        int i;

        for (i = 0; i < rq->wqe.info.num_frags; i++, frag++) {
                err = mlx5e_get_rx_frag(rq, frag);
                if (unlikely(err))
                        goto free_frags;

                wqe->data[i].addr = cpu_to_be64(frag->di->addr +
                                                frag->offset + rq->buff.headroom);
        }

        return 0;

free_frags:
        while (--i >= 0)
                mlx5e_put_rx_frag(rq, --frag, true);

        return err;
}

static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
                                     struct mlx5e_wqe_frag_info *wi,
                                     bool recycle)
{
        int i;

        for (i = 0; i < rq->wqe.info.num_frags; i++, wi++)
                mlx5e_put_rx_frag(rq, wi, recycle);
}

void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
{
        struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix);

        mlx5e_free_rx_wqe(rq, wi, false);
}

static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk)
{
        struct mlx5_wq_cyc *wq = &rq->wqe.wq;
        int err;
        int i;

        for (i = 0; i < wqe_bulk; i++) {
                struct mlx5e_rx_wqe_cyc *wqe = mlx5_wq_cyc_get_wqe(wq, ix + i);

                err = mlx5e_alloc_rx_wqe(rq, wqe, ix + i);
                if (unlikely(err))
                        goto free_wqes;
        }

        return 0;

free_wqes:
        while (--i >= 0)
                mlx5e_dealloc_rx_wqe(rq, ix + i);

        return err;
}

static inline void
mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
                   struct mlx5e_dma_info *di, u32 frag_offset, u32 len,
                   unsigned int truesize)
{
        dma_sync_single_for_cpu(rq->pdev,
                                di->addr + frag_offset,
                                len, DMA_FROM_DEVICE);
        page_ref_inc(di->page);
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                        di->page, frag_offset, len, truesize);
}

static inline void
mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb,
                      struct mlx5e_dma_info *dma_info,
                      int offset_from, int offset_to, u32 headlen)
{
        const void *from = page_address(dma_info->page) + offset_from;
        /* Aligning len to sizeof(long) optimizes memcpy performance */
        unsigned int len = ALIGN(headlen, sizeof(long));

        dma_sync_single_for_cpu(pdev, dma_info->addr + offset_from, len,
                                DMA_FROM_DEVICE);
        skb_copy_to_linear_data_offset(skb, offset_to, from, len);
}

static inline void
mlx5e_copy_skb_header_mpwqe(struct device *pdev,
                            struct sk_buff *skb,
                            struct mlx5e_dma_info *dma_info,
                            u32 offset, u32 headlen)
{
        u16 headlen_pg = min_t(u32, headlen, PAGE_SIZE - offset);

        mlx5e_copy_skb_header(pdev, skb, dma_info, offset, 0, headlen_pg);

        if (unlikely(offset + headlen > PAGE_SIZE)) {
                dma_info++;
                mlx5e_copy_skb_header(pdev, skb, dma_info, 0, headlen_pg,
                                      headlen - headlen_pg);
        }
}

static void
mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, bool recycle)
{
        const bool no_xdp_xmit =
                bitmap_empty(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
        struct mlx5e_dma_info *dma_info = wi->umr.dma_info;
        int i;

        for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++)
                if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
                        mlx5e_page_release(rq, &dma_info[i], recycle);
}

static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq)
{
        struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
        struct mlx5e_rx_wqe_ll *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);

        rq->mpwqe.umr_in_progress = false;

        mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));

        /* ensure wqes are visible to device before updating doorbell record */
        dma_wmb();

        mlx5_wq_ll_update_db_record(wq);
}

static inline u16 mlx5e_icosq_wrap_cnt(struct mlx5e_icosq *sq)
{
        return sq->pc >> MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
}

static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq,
                                              struct mlx5_wq_cyc *wq,
                                              u16 pi, u16 frag_pi)
{
        struct mlx5e_sq_wqe_info *edge_wi, *wi = &sq->db.ico_wqe[pi];
        u8 nnops = mlx5_wq_cyc_get_frag_size(wq) - frag_pi;

        edge_wi = wi + nnops;

        /* fill sq frag edge with nops to avoid wqe wrapping two pages */
        for (; wi < edge_wi; wi++) {
                wi->opcode = MLX5_OPCODE_NOP;
                mlx5e_post_nop(wq, sq->sqn, &sq->pc);
        }
}

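/* Post a UMR WQE on the ICOSQ to map the pages backing one multi-packet
 * RX WQE. The RX WQE itself is made visible to hardware only once the
 * UMR completion arrives (see mlx5e_post_rx_mpwqe()).
 */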
static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
        struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
        struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0];
        struct mlx5e_icosq *sq = &rq->channel->icosq;
        struct mlx5_wq_cyc *wq = &sq->wq;
        struct mlx5e_umr_wqe *umr_wqe;
        u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1);
        u16 pi, frag_pi;
        int err;
        int i;

        pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
        frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);

        if (unlikely(frag_pi + MLX5E_UMR_WQEBBS > mlx5_wq_cyc_get_frag_size(wq))) {
                mlx5e_fill_icosq_frag_edge(sq, wq, pi, frag_pi);
                pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
        }

        umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
        if (unlikely(mlx5e_icosq_wrap_cnt(sq) < 2))
                memcpy(umr_wqe, &rq->mpwqe.umr_wqe,
                       offsetof(struct mlx5e_umr_wqe, inline_mtts));

        for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) {
                err = mlx5e_page_alloc_mapped(rq, dma_info);
                if (unlikely(err))
                        goto err_unmap;
                umr_wqe->inline_mtts[i].ptag = cpu_to_be64(dma_info->addr | MLX5_EN_WR);
        }

        bitmap_zero(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
        wi->consumed_strides = 0;

        rq->mpwqe.umr_in_progress = true;

        umr_wqe->ctrl.opmod_idx_opcode =
                cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
                            MLX5_OPCODE_UMR);
        umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset);

        sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
        sq->pc += MLX5E_UMR_WQEBBS;
        mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &umr_wqe->ctrl);

        return 0;

err_unmap:
        while (--i >= 0) {
                dma_info--;
                mlx5e_page_release(rq, dma_info, true);
        }
        rq->stats->buff_alloc_err++;

        return err;
}

void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
        struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
        /* Don't recycle, this function is called on rq/netdev close */
        mlx5e_free_rx_mpwqe(rq, wi, false);
}

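/* Refill the legacy (cyclic) RQ in bulks of wqe_bulk WQEs and ring the
 * doorbell once. Returns true only when a buffer allocation failed, so
 * the caller knows to retry on the next NAPI poll.
 */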
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
{
        struct mlx5_wq_cyc *wq = &rq->wqe.wq;
        u8 wqe_bulk;
        int err;

        if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
                return false;

        wqe_bulk = rq->wqe.info.wqe_bulk;

        if (mlx5_wq_cyc_missing(wq) < wqe_bulk)
                return false;

        do {
                u16 head = mlx5_wq_cyc_get_head(wq);

                err = mlx5e_alloc_rx_wqes(rq, head, wqe_bulk);
                if (unlikely(err)) {
                        rq->stats->buff_alloc_err++;
                        break;
                }

                mlx5_wq_cyc_push_n(wq, wqe_bulk);
        } while (mlx5_wq_cyc_missing(wq) >= wqe_bulk);

        /* ensure wqes are visible to device before updating doorbell record */
        dma_wmb();

        mlx5_wq_cyc_update_db_record(wq);

        return !!err;
}

static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq,
                                             struct mlx5e_icosq *sq,
                                             struct mlx5e_rq *rq,
                                             struct mlx5_cqe64 *cqe)
{
        struct mlx5_wq_cyc *wq = &sq->wq;
        u16 ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
        struct mlx5e_sq_wqe_info *icowi = &sq->db.ico_wqe[ci];

        mlx5_cqwq_pop(&cq->wq);

        if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) {
                netdev_WARN_ONCE(cq->channel->netdev,
                                 "Bad OP in ICOSQ CQE: 0x%x\n", cqe->op_own);
                return;
        }

        if (likely(icowi->opcode == MLX5_OPCODE_UMR)) {
                mlx5e_post_rx_mpwqe(rq);
                return;
        }

        if (unlikely(icowi->opcode != MLX5_OPCODE_NOP))
                netdev_WARN_ONCE(cq->channel->netdev,
                                 "Bad OPCODE in ICOSQ WQE info: 0x%x\n", icowi->opcode);
}

static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
{
        struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
        struct mlx5_cqe64 *cqe;

        if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
                return;

        cqe = mlx5_cqwq_get_cqe(&cq->wq);
        if (likely(!cqe))
                return;

        /* by design, there's only a single cqe */
        mlx5e_poll_ico_single_cqe(cq, sq, rq, cqe);

        mlx5_cqwq_update_db_record(&cq->wq);
}

bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
{
        struct mlx5_wq_ll *wq = &rq->mpwqe.wq;

        if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
                return false;

        mlx5e_poll_ico_cq(&rq->channel->icosq.cq, rq);

        if (mlx5_wq_ll_is_full(wq))
                return false;

        if (!rq->mpwqe.umr_in_progress)
                mlx5e_alloc_rx_mpwqe(rq, wq->head);
        else
                rq->stats->congst_umr += mlx5_wq_ll_missing(wq) > 2;

        return false;
}

static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
{
        u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
        u8 tcp_ack     = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
                         (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);

        tcp->check                      = 0;
        tcp->psh                        = get_cqe_lro_tcppsh(cqe);

        if (tcp_ack) {
                tcp->ack                = 1;
                tcp->ack_seq            = cqe->lro_ack_seq_num;
                tcp->window             = cqe->lro_tcp_win;
        }
}

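/* Rebuild the IP and TCP headers of an LRO-aggregated SKB: fix total
 * length, TTL/hop limit and TCP flags from the CQE, and recompute the
 * checksums that hardware coalescing left stale.
 */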
static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
                                 u32 cqe_bcnt)
{
        struct ethhdr   *eth = (struct ethhdr *)(skb->data);
        struct tcphdr   *tcp;
        int network_depth = 0;
        __wsum check;
        __be16 proto;
        u16 tot_len;
        void *ip_p;

        proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);

        tot_len = cqe_bcnt - network_depth;
        ip_p = skb->data + network_depth;

        if (proto == htons(ETH_P_IP)) {
                struct iphdr *ipv4 = ip_p;

                tcp = ip_p + sizeof(struct iphdr);
                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

                ipv4->ttl               = cqe->lro_min_ttl;
                ipv4->tot_len           = cpu_to_be16(tot_len);
                ipv4->check             = 0;
                ipv4->check             = ip_fast_csum((unsigned char *)ipv4,
                                                       ipv4->ihl);

                mlx5e_lro_update_tcp_hdr(cqe, tcp);
                check = csum_partial(tcp, tcp->doff * 4,
                                     csum_unfold((__force __sum16)cqe->check_sum));
                /* Almost done, don't forget the pseudo header */
                tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr,
                                               tot_len - sizeof(struct iphdr),
                                               IPPROTO_TCP, check);
        } else {
                u16 payload_len = tot_len - sizeof(struct ipv6hdr);
                struct ipv6hdr *ipv6 = ip_p;

                tcp = ip_p + sizeof(struct ipv6hdr);
                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;

                ipv6->hop_limit         = cqe->lro_min_ttl;
                ipv6->payload_len       = cpu_to_be16(payload_len);

                mlx5e_lro_update_tcp_hdr(cqe, tcp);
                check = csum_partial(tcp, tcp->doff * 4,
                                     csum_unfold((__force __sum16)cqe->check_sum));
                /* Almost done, don't forget the pseudo header */
                tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len,
                                             IPPROTO_TCP, check);
        }
}

static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
                                      struct sk_buff *skb)
{
        u8 cht = cqe->rss_hash_type;
        int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 :
                 (cht & CQE_RSS_HTYPE_IP) ? PKT_HASH_TYPE_L3 :
                                            PKT_HASH_TYPE_NONE;
        skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
}

static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth,
                                        __be16 *proto)
{
        *proto = ((struct ethhdr *)skb->data)->h_proto;
        *proto = __vlan_get_protocol(skb, *proto, network_depth);
        return (*proto == htons(ETH_P_IP) || *proto == htons(ETH_P_IPV6));
}

static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
{
        int network_depth = 0;
        __be16 proto;
        void *ip;
        int rc;

        if (unlikely(!is_last_ethertype_ip(skb, &network_depth, &proto)))
                return;

        ip = skb->data + network_depth;
        rc = ((proto == htons(ETH_P_IP)) ? IP_ECN_set_ce((struct iphdr *)ip) :
                                         IP6_ECN_set_ce(skb, (struct ipv6hdr *)ip));

        rq->stats->ecn_mark += !!rc;
}

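/* Extract the Ethernet FCS from the end of the SKB, handling the cases
 * where the 4 FCS bytes sit in the linear part, entirely in the last
 * frag, or straddle two buffers. Used to adjust CHECKSUM_COMPLETE when
 * the FCS is kept (NETIF_F_RXFCS).
 */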
static __be32 mlx5e_get_fcs(struct sk_buff *skb)
{
        int last_frag_sz, bytes_in_prev, nr_frags;
        u8 *fcs_p1, *fcs_p2;
        skb_frag_t *last_frag;
        __be32 fcs_bytes;

        if (!skb_is_nonlinear(skb))
                return *(__be32 *)(skb->data + skb->len - ETH_FCS_LEN);

        nr_frags = skb_shinfo(skb)->nr_frags;
        last_frag = &skb_shinfo(skb)->frags[nr_frags - 1];
        last_frag_sz = skb_frag_size(last_frag);

        /* If all FCS data is in last frag */
        if (last_frag_sz >= ETH_FCS_LEN)
                return *(__be32 *)(skb_frag_address(last_frag) +
                                   last_frag_sz - ETH_FCS_LEN);

        fcs_p2 = (u8 *)skb_frag_address(last_frag);
        bytes_in_prev = ETH_FCS_LEN - last_frag_sz;

        /* Find where the other part of the FCS is - Linear or another frag */
        if (nr_frags == 1) {
                fcs_p1 = skb_tail_pointer(skb);
        } else {
                skb_frag_t *prev_frag = &skb_shinfo(skb)->frags[nr_frags - 2];

                fcs_p1 = skb_frag_address(prev_frag) +
                            skb_frag_size(prev_frag);
        }
        fcs_p1 -= bytes_in_prev;

        memcpy(&fcs_bytes, fcs_p1, bytes_in_prev);
        memcpy(((u8 *)&fcs_bytes) + bytes_in_prev, fcs_p2, last_frag_sz);

        return fcs_bytes;
}

static u8 get_ip_proto(struct sk_buff *skb, __be16 proto)
{
        void *ip_p = skb->data + sizeof(struct ethhdr);

        return (proto == htons(ETH_P_IP)) ? ((struct iphdr *)ip_p)->protocol :
                                            ((struct ipv6hdr *)ip_p)->nexthdr;
}

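/* Set the RX checksum status. LRO SKBs are always CHECKSUM_UNNECESSARY;
 * plain IP packets use CHECKSUM_COMPLETE from the CQE checksum, adjusted
 * for VLAN headers and, if kept, the FCS; SCTP and non-IP packets fall
 * back to the hardware L3/L4 validation bits for CHECKSUM_UNNECESSARY
 * (including L3-only packets with no L4 header), or CHECKSUM_NONE when
 * validation failed.
 */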
static inline void mlx5e_handle_csum(struct net_device *netdev,
                                     struct mlx5_cqe64 *cqe,
                                     struct mlx5e_rq *rq,
                                     struct sk_buff *skb,
                                     bool   lro)
{
        struct mlx5e_rq_stats *stats = rq->stats;
        int network_depth = 0;
        __be16 proto;

        if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
                goto csum_none;

        if (lro) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                stats->csum_unnecessary++;
                return;
        }

        if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
                if (unlikely(get_ip_proto(skb, proto) == IPPROTO_SCTP))
                        goto csum_unnecessary;

                skb->ip_summed = CHECKSUM_COMPLETE;
                skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
                if (network_depth > ETH_HLEN)
                        /* CQE csum is calculated from the IP header and does
                         * not cover VLAN headers (if present). This will add
                         * the checksum manually.
                         */
                        skb->csum = csum_partial(skb->data + ETH_HLEN,
                                                 network_depth - ETH_HLEN,
                                                 skb->csum);
                if (unlikely(netdev->features & NETIF_F_RXFCS))
                        skb->csum = csum_add(skb->csum,
                                             (__force __wsum)mlx5e_get_fcs(skb));
                stats->csum_complete++;
                return;
        }

csum_unnecessary:
        if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
                   ((cqe->hds_ip_ext & CQE_L4_OK) ||
                    (get_cqe_l4_hdr_type(cqe) == CQE_L4_HDR_TYPE_NONE)))) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                if (cqe_is_tunneled(cqe)) {
                        skb->csum_level = 1;
                        skb->encapsulation = 1;
                        stats->csum_unnecessary_inner++;
                        return;
                }
                stats->csum_unnecessary++;
                return;
        }
csum_none:
        skb->ip_summed = CHECKSUM_NONE;
        stats->csum_none++;
}

#define MLX5E_CE_BIT_MASK 0x80

static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
                                      u32 cqe_bcnt,
                                      struct mlx5e_rq *rq,
                                      struct sk_buff *skb)
{
        u8 lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
        struct mlx5e_rq_stats *stats = rq->stats;
        struct net_device *netdev = rq->netdev;

        skb->mac_len = ETH_HLEN;

#ifdef CONFIG_MLX5_EN_TLS
        mlx5e_tls_handle_rx_skb(netdev, skb, &cqe_bcnt);
#endif

        if (lro_num_seg > 1) {
                mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
                skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
                /* Subtract one since we already counted this as one
                 * "regular" packet in mlx5e_complete_rx_cqe()
                 */
                stats->packets += lro_num_seg - 1;
                stats->lro_packets++;
                stats->lro_bytes += cqe_bcnt;
        }

        if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
                skb_hwtstamps(skb)->hwtstamp =
                                mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));

        skb_record_rx_queue(skb, rq->ix);

        if (likely(netdev->features & NETIF_F_RXHASH))
                mlx5e_skb_set_hash(cqe, skb);

        if (cqe_has_vlan(cqe)) {
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                       be16_to_cpu(cqe->vlan_info));
                stats->removed_vlan_packets++;
        }

        skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;

        mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
        /* checking CE bit in cqe - MSB in ml_path field */
        if (unlikely(cqe->ml_path & MLX5E_CE_BIT_MASK))
                mlx5e_enable_ecn(rq, skb);

        skb->protocol = eth_type_trans(skb, netdev);
}

static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
                                         struct mlx5_cqe64 *cqe,
                                         u32 cqe_bcnt,
                                         struct sk_buff *skb)
{
        struct mlx5e_rq_stats *stats = rq->stats;

        stats->packets++;
        stats->bytes += cqe_bcnt;
        mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
}

static inline
struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
                                       u32 frag_size, u16 headroom,
                                       u32 cqe_bcnt)
{
        struct sk_buff *skb = build_skb(va, frag_size);

        if (unlikely(!skb)) {
                rq->stats->buff_alloc_err++;
                return NULL;
        }

        skb_reserve(skb, headroom);
        skb_put(skb, cqe_bcnt);

        return skb;
}

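/* Build an SKB for a packet that fits in a single page fragment: run XDP
 * first, and only if the packet was not consumed wrap the existing
 * buffer with build_skb() (no copy) and take a page reference so the
 * page can be recycled later.
 */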
struct sk_buff *
mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
                          struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
{
        struct mlx5e_dma_info *di = wi->di;
        u16 rx_headroom = rq->buff.headroom;
        struct sk_buff *skb;
        void *va, *data;
        bool consumed;
        u32 frag_size;

        va             = page_address(di->page) + wi->offset;
        data           = va + rx_headroom;
        frag_size      = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);

        dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset,
                                      frag_size, DMA_FROM_DEVICE);
        prefetchw(va); /* xdp_frame data area */
        prefetch(data);

        if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
                rq->stats->wqe_err++;
                return NULL;
        }

        rcu_read_lock();
        consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt);
        rcu_read_unlock();
        if (consumed)
                return NULL; /* page/packet was consumed by XDP */

        skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt);
        if (unlikely(!skb))
                return NULL;

        /* queue up for recycling/reuse */
        page_ref_inc(di->page);

        return skb;
}

struct sk_buff *
mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
                             struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
{
        struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
        struct mlx5e_wqe_frag_info *head_wi = wi;
        u16 headlen      = min_t(u32, MLX5E_RX_MAX_HEAD, cqe_bcnt);
        u16 frag_headlen = headlen;
        u16 byte_cnt     = cqe_bcnt - headlen;
        struct sk_buff *skb;

        if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
                rq->stats->wqe_err++;
                return NULL;
        }

        /* XDP is not supported in this configuration, as incoming packets
         * might spread among multiple pages.
         */
        skb = napi_alloc_skb(rq->cq.napi,
                             ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
        if (unlikely(!skb)) {
                rq->stats->buff_alloc_err++;
                return NULL;
        }

        prefetchw(skb->data);

        while (byte_cnt) {
                u16 frag_consumed_bytes =
                        min_t(u16, frag_info->frag_size - frag_headlen, byte_cnt);

                mlx5e_add_skb_frag(rq, skb, wi->di, wi->offset + frag_headlen,
                                   frag_consumed_bytes, frag_info->frag_stride);
                byte_cnt -= frag_consumed_bytes;
                frag_headlen = 0;
                frag_info++;
                wi++;
        }

        /* copy header */
        mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, head_wi->offset,
                              0, headlen);
        /* skb linear part was allocated with headlen and aligned to long */
        skb->tail += headlen;
        skb->len  += headlen;

        return skb;
}

void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
        struct mlx5_wq_cyc *wq = &rq->wqe.wq;
        struct mlx5e_wqe_frag_info *wi;
        struct sk_buff *skb;
        u32 cqe_bcnt;
        u16 ci;

        ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
        wi       = get_frag(rq, ci);
        cqe_bcnt = be32_to_cpu(cqe->byte_cnt);

        skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt);
        if (!skb) {
                /* probably for XDP */
                if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
                        /* do not return page to cache,
                         * it will be returned on XDP_TX completion.
                         */
                        goto wq_cyc_pop;
                }
                goto free_wqe;
        }

        mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
        napi_gro_receive(rq->cq.napi, skb);

free_wqe:
        mlx5e_free_rx_wqe(rq, wi, true);
wq_cyc_pop:
        mlx5_wq_cyc_pop(wq);
}

#ifdef CONFIG_MLX5_ESWITCH
void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
        struct net_device *netdev = rq->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_rep_priv *rpriv  = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5_wq_cyc *wq = &rq->wqe.wq;
        struct mlx5e_wqe_frag_info *wi;
        struct sk_buff *skb;
        u32 cqe_bcnt;
        u16 ci;

        ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
        wi       = get_frag(rq, ci);
        cqe_bcnt = be32_to_cpu(cqe->byte_cnt);

        skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt);
        if (!skb) {
                /* probably for XDP */
                if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
                        /* do not return page to cache,
                         * it will be returned on XDP_TX completion.
                         */
                        goto wq_cyc_pop;
                }
                goto free_wqe;
        }

        mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);

        if (rep->vlan && skb_vlan_tag_present(skb))
                skb_vlan_pop(skb);

        napi_gro_receive(rq->cq.napi, skb);

free_wqe:
        mlx5e_free_rx_wqe(rq, wi, true);
wq_cyc_pop:
        mlx5_wq_cyc_pop(wq);
}
#endif

struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
                                   u16 cqe_bcnt, u32 head_offset, u32 page_idx)
{
        u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
        struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
        u32 frag_offset    = head_offset + headlen;
        u32 byte_cnt       = cqe_bcnt - headlen;
        struct mlx5e_dma_info *head_di = di;
        struct sk_buff *skb;

        skb = napi_alloc_skb(rq->cq.napi,
                             ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
        if (unlikely(!skb)) {
                rq->stats->buff_alloc_err++;
                return NULL;
        }

        prefetchw(skb->data);

        if (unlikely(frag_offset >= PAGE_SIZE)) {
                di++;
                frag_offset -= PAGE_SIZE;
        }

        while (byte_cnt) {
                u32 pg_consumed_bytes =
                        min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
                unsigned int truesize =
                        ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));

                mlx5e_add_skb_frag(rq, skb, di, frag_offset,
                                   pg_consumed_bytes, truesize);
                byte_cnt -= pg_consumed_bytes;
                frag_offset = 0;
                di++;
        }
        /* copy header */
        mlx5e_copy_skb_header_mpwqe(rq->pdev, skb, head_di,
                                    head_offset, headlen);
        /* skb linear part was allocated with headlen and aligned to long */
        skb->tail += headlen;
        skb->len  += headlen;

        return skb;
}

struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
                                u16 cqe_bcnt, u32 head_offset, u32 page_idx)
{
        struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
        u16 rx_headroom = rq->buff.headroom;
        u32 cqe_bcnt32 = cqe_bcnt;
        struct sk_buff *skb;
        void *va, *data;
        u32 frag_size;
        bool consumed;

        va             = page_address(di->page) + head_offset;
        data           = va + rx_headroom;
        frag_size      = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);

        dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset,
                                      frag_size, DMA_FROM_DEVICE);
        prefetchw(va); /* xdp_frame data area */
        prefetch(data);

        rcu_read_lock();
        consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt32);
        rcu_read_unlock();
        if (consumed) {
                if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
                        __set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
                return NULL; /* page/packet was consumed by XDP */
        }

        skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32);
        if (unlikely(!skb))
                return NULL;

        /* queue up for recycling/reuse */
        page_ref_inc(di->page);

        return skb;
}

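/* Completion handler for striding (multi-packet) RQ CQEs: account the
 * consumed strides, drop filler CQEs, build and deliver the SKB, and
 * pop the WQE once all of its strides have been consumed.
 */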
void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
        u16 cstrides       = mpwrq_get_cqe_consumed_strides(cqe);
        u16 wqe_id         = be16_to_cpu(cqe->wqe_id);
        struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id];
        u16 stride_ix      = mpwrq_get_cqe_stride_index(cqe);
        u32 wqe_offset     = stride_ix << rq->mpwqe.log_stride_sz;
        u32 head_offset    = wqe_offset & (PAGE_SIZE - 1);
        u32 page_idx       = wqe_offset >> PAGE_SHIFT;
        struct mlx5e_rx_wqe_ll *wqe;
        struct mlx5_wq_ll *wq;
        struct sk_buff *skb;
        u16 cqe_bcnt;

        wi->consumed_strides += cstrides;

        if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
                rq->stats->wqe_err++;
                goto mpwrq_cqe_out;
        }

        if (unlikely(mpwrq_is_filler_cqe(cqe))) {
                struct mlx5e_rq_stats *stats = rq->stats;

                stats->mpwqe_filler_cqes++;
                stats->mpwqe_filler_strides += cstrides;
                goto mpwrq_cqe_out;
        }

        cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);

        skb = rq->mpwqe.skb_from_cqe_mpwrq(rq, wi, cqe_bcnt, head_offset,
                                           page_idx);
        if (!skb)
                goto mpwrq_cqe_out;

        mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
        napi_gro_receive(rq->cq.napi, skb);

mpwrq_cqe_out:
        if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
                return;

        wq  = &rq->mpwqe.wq;
        wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
        mlx5e_free_rx_mpwqe(rq, wi, true);
        mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
}

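/* NAPI RX poll: drain up to budget CQEs, decompressing compressed CQE
 * blocks on the fly, then flush any pending XDP TX doorbell and
 * XDP_REDIRECT maps before updating the CQ doorbell record.
 */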
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
{
        struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
        struct mlx5e_xdpsq *xdpsq = &rq->xdpsq;
        struct mlx5_cqe64 *cqe;
        int work_done = 0;

        if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
                return 0;

        if (cq->decmprs_left)
                work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);

        cqe = mlx5_cqwq_get_cqe(&cq->wq);
        if (!cqe) {
                /* Decompression above may have consumed CQEs; flush the
                 * doorbells for that work instead of returning early.
                 */
                if (unlikely(work_done))
                        goto out;
                return 0;
        }

        do {
                if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
                        work_done +=
                                mlx5e_decompress_cqes_start(rq, cq,
                                                            budget - work_done);
                        continue;
                }

                mlx5_cqwq_pop(&cq->wq);

                rq->handle_rx_cqe(rq, cqe);
        } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));

out:
        if (xdpsq->doorbell) {
                mlx5e_xmit_xdp_doorbell(xdpsq);
                xdpsq->doorbell = false;
        }

        if (xdpsq->redirect_flush) {
                xdp_do_flush_map();
                xdpsq->redirect_flush = false;
        }

        mlx5_cqwq_update_db_record(&cq->wq);

        /* ensure cq space is freed before enabling more cqes */
        wmb();

        return work_done;
}

#ifdef CONFIG_MLX5_CORE_IPOIB

#define MLX5_IB_GRH_DGID_OFFSET 24
#define MLX5_GID_SIZE           16

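/* IPoIB variant of CQE completion: resolve the child netdev from the
 * QPN, derive the packet type from the GRH DGID, strip the GRH and
 * prepend the IPoIB pseudo header before handing the SKB up the stack.
 */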
static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
                                         struct mlx5_cqe64 *cqe,
                                         u32 cqe_bcnt,
                                         struct sk_buff *skb)
{
        struct hwtstamp_config *tstamp;
        struct mlx5e_rq_stats *stats;
        struct net_device *netdev;
        struct mlx5e_priv *priv;
        char *pseudo_header;
        u32 qpn;
        u8 *dgid;
        u8 g;

        qpn = be32_to_cpu(cqe->sop_drop_qpn) & 0xffffff;
        netdev = mlx5i_pkey_get_netdev(rq->netdev, qpn);

        /* No mapping present, cannot process SKB. This might happen if a child
         * interface is going down while having unprocessed CQEs on parent RQ
         */
        if (unlikely(!netdev)) {
                /* TODO: add drop counters support */
                skb->dev = NULL;
                pr_warn_once("Unable to map QPN %u to dev - dropping skb\n", qpn);
                return;
        }

        priv = mlx5i_epriv(netdev);
        tstamp = &priv->tstamp;
        stats = &priv->channel_stats[rq->ix].rq;

        g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
        dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
        if ((!g) || dgid[0] != 0xff)
                skb->pkt_type = PACKET_HOST;
        else if (memcmp(dgid, netdev->broadcast + 4, MLX5_GID_SIZE) == 0)
                skb->pkt_type = PACKET_BROADCAST;
        else
                skb->pkt_type = PACKET_MULTICAST;

        /* TODO: IB/ipoib: Allow mcast packets from other VFs
         * 68996a6e760e5c74654723eeb57bf65628ae87f4
         */

        skb_pull(skb, MLX5_IB_GRH_BYTES);

        skb->protocol = *((__be16 *)(skb->data));

        skb->ip_summed = CHECKSUM_COMPLETE;
        skb->csum = csum_unfold((__force __sum16)cqe->check_sum);

        if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
                skb_hwtstamps(skb)->hwtstamp =
                                mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));

        skb_record_rx_queue(skb, rq->ix);

        if (likely(netdev->features & NETIF_F_RXHASH))
                mlx5e_skb_set_hash(cqe, skb);

        /* 20 bytes of ipoib header and 4 for encap existing */
        pseudo_header = skb_push(skb, MLX5_IPOIB_PSEUDO_LEN);
        memset(pseudo_header, 0, MLX5_IPOIB_PSEUDO_LEN);
        skb_reset_mac_header(skb);
        skb_pull(skb, MLX5_IPOIB_HARD_LEN);

        skb->dev = netdev;

        stats->csum_complete++;
        stats->packets++;
        stats->bytes += cqe_bcnt;
}

void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
        struct mlx5_wq_cyc *wq = &rq->wqe.wq;
        struct mlx5e_wqe_frag_info *wi;
        struct sk_buff *skb;
        u32 cqe_bcnt;
        u16 ci;

        ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
        wi       = get_frag(rq, ci);
        cqe_bcnt = be32_to_cpu(cqe->byte_cnt);

        skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt);
        if (!skb)
                goto wq_free_wqe;

        mlx5i_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
        if (unlikely(!skb->dev)) {
                dev_kfree_skb_any(skb);
                goto wq_free_wqe;
        }
        napi_gro_receive(rq->cq.napi, skb);

wq_free_wqe:
        mlx5e_free_rx_wqe(rq, wi, true);
        mlx5_wq_cyc_pop(wq);
}

#endif /* CONFIG_MLX5_CORE_IPOIB */

#ifdef CONFIG_MLX5_EN_IPSEC

void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
        struct mlx5_wq_cyc *wq = &rq->wqe.wq;
        struct mlx5e_wqe_frag_info *wi;
        struct sk_buff *skb;
        u32 cqe_bcnt;
        u16 ci;

        ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
        wi       = get_frag(rq, ci);
        cqe_bcnt = be32_to_cpu(cqe->byte_cnt);

        skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt);
        if (unlikely(!skb)) {
                /* a DROP, save the page-reuse checks */
                mlx5e_free_rx_wqe(rq, wi, true);
                goto wq_cyc_pop;
        }
        skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb, &cqe_bcnt);
        if (unlikely(!skb)) {
                mlx5e_free_rx_wqe(rq, wi, true);
                goto wq_cyc_pop;
        }

        mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
        napi_gro_receive(rq->cq.napi, skb);

        mlx5e_free_rx_wqe(rq, wi, true);
wq_cyc_pop:
        mlx5_wq_cyc_pop(wq);
}

#endif /* CONFIG_MLX5_EN_IPSEC */