drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
1 /*
2  * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32
33 #include <linux/prefetch.h>
34 #include <linux/ip.h>
35 #include <linux/ipv6.h>
36 #include <linux/tcp.h>
37 #include <linux/bpf_trace.h>
38 #include <net/busy_poll.h>
39 #include <net/ip6_checksum.h>
40 #include "en.h"
41 #include "en_tc.h"
42 #include "eswitch.h"
43 #include "en_rep.h"
44 #include "ipoib/ipoib.h"
45 #include "en_accel/ipsec_rxtx.h"
46 #include "lib/clock.h"
47
48 static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
49 {
50         return config->rx_filter == HWTSTAMP_FILTER_ALL;
51 }
52
53 static inline void mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cqcc,
54                                        void *data)
55 {
56         u32 ci = cqcc & cq->wq.fbc.sz_m1;
57
58         memcpy(data, mlx5_cqwq_get_wqe(&cq->wq, ci), sizeof(struct mlx5_cqe64));
59 }
60
61 static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
62                                          struct mlx5e_cq *cq, u32 cqcc)
63 {
64         mlx5e_read_cqe_slot(cq, cqcc, &cq->title);
65         cq->decmprs_left        = be32_to_cpu(cq->title.byte_cnt);
66         cq->decmprs_wqe_counter = be16_to_cpu(cq->title.wqe_counter);
67         rq->stats.cqe_compress_blks++;
68 }
69
70 static inline void mlx5e_read_mini_arr_slot(struct mlx5e_cq *cq, u32 cqcc)
71 {
72         mlx5e_read_cqe_slot(cq, cqcc, cq->mini_arr);
73         cq->mini_arr_idx = 0;
74 }
75
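/* Fix up the ownership (op_own) bit of the n CQE slots that were consumed
 * while decompressing a CQE session, handling wrap-around of the CQ ring.
 */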
76 static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n)
77 {
78         struct mlx5_frag_buf_ctrl *fbc = &cq->wq.fbc;
79         u8 op_own = (cqcc >> fbc->log_sz) & 1;
80         u32 wq_sz = 1 << fbc->log_sz;
81         u32 ci = cqcc & fbc->sz_m1;
82         u32 ci_top = min_t(u32, wq_sz, ci + n);
83
84         for (; ci < ci_top; ci++, n--) {
85                 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci);
86
87                 cqe->op_own = op_own;
88         }
89
90         if (unlikely(ci == wq_sz)) {
91                 op_own = !op_own;
92                 for (ci = 0; ci < n; ci++) {
93                         struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci);
94
95                         cqe->op_own = op_own;
96                 }
97         }
98 }
99
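/* Expand the current mini CQE into the title CQE: take the byte count and
 * checksum from the mini array, recompute the ownership bit for cqcc, and
 * advance the WQE counter (by the consumed strides for a striding RQ, by
 * one slot otherwise).
 */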
100 static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
101                                         struct mlx5e_cq *cq, u32 cqcc)
102 {
103         cq->title.byte_cnt     = cq->mini_arr[cq->mini_arr_idx].byte_cnt;
104         cq->title.check_sum    = cq->mini_arr[cq->mini_arr_idx].checksum;
105         cq->title.op_own      &= 0xf0;
106         cq->title.op_own      |= 0x01 & (cqcc >> cq->wq.fbc.log_sz);
107         cq->title.wqe_counter  = cpu_to_be16(cq->decmprs_wqe_counter);
108
109         if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
110                 cq->decmprs_wqe_counter +=
111                         mpwrq_get_cqe_consumed_strides(&cq->title);
112         else
113                 cq->decmprs_wqe_counter =
114                         (cq->decmprs_wqe_counter + 1) & rq->wq.sz_m1;
115 }
116
117 static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
118                                                 struct mlx5e_cq *cq, u32 cqcc)
119 {
120         mlx5e_decompress_cqe(rq, cq, cqcc);
121         cq->title.rss_hash_type   = 0;
122         cq->title.rss_hash_result = 0;
123 }
124
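/* Continue an open CQE-compression session: expand up to budget_rem mini
 * CQEs, dispatch each one through rq->handle_rx_cqe(), then fix up the
 * ownership bits of the consumed CQE slots and advance the CQ consumer
 * counter.  Returns the number of mini CQEs consumed from the session.
 */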
125 static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
126                                              struct mlx5e_cq *cq,
127                                              int update_owner_only,
128                                              int budget_rem)
129 {
130         u32 cqcc = cq->wq.cc + update_owner_only;
131         u32 cqe_count;
132         u32 i;
133
134         cqe_count = min_t(u32, cq->decmprs_left, budget_rem);
135
136         for (i = update_owner_only; i < cqe_count;
137              i++, cq->mini_arr_idx++, cqcc++) {
138                 if (cq->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE)
139                         mlx5e_read_mini_arr_slot(cq, cqcc);
140
141                 mlx5e_decompress_cqe_no_hash(rq, cq, cqcc);
142                 rq->handle_rx_cqe(rq, &cq->title);
143         }
144         mlx5e_cqes_update_owner(cq, cq->wq.cc, cqcc - cq->wq.cc);
145         cq->wq.cc = cqcc;
146         cq->decmprs_left -= cqe_count;
147         rq->stats.cqe_compress_pkts += cqe_count;
148
149         return cqe_count;
150 }
151
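/* Start handling a compressed CQE session: read the title CQE and the first
 * mini CQE array, handle the title entry, then continue with
 * mlx5e_decompress_cqes_cont().  Returns the number of CQEs handled, not
 * counting the title CQE, which the caller's poll loop accounts for itself.
 */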
152 static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
153                                               struct mlx5e_cq *cq,
154                                               int budget_rem)
155 {
156         mlx5e_read_title_slot(rq, cq, cq->wq.cc);
157         mlx5e_read_mini_arr_slot(cq, cq->wq.cc + 1);
158         mlx5e_decompress_cqe(rq, cq, cq->wq.cc);
159         rq->handle_rx_cqe(rq, &cq->title);
160         cq->mini_arr_idx++;
161
162         return mlx5e_decompress_cqes_cont(rq, cq, 1, budget_rem) - 1;
163 }
164
165 #define RQ_PAGE_SIZE(rq) ((1 << (rq)->buff.page_order) << PAGE_SHIFT)
166
167 static inline bool mlx5e_page_is_reserved(struct page *page)
168 {
169         return page_is_pfmemalloc(page) || page_to_nid(page) != numa_mem_id();
170 }
171
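/* The RQ page cache is a small ring of recently released pages.  Pushing
 * fails when the ring is full or the page is unsuitable for reuse
 * (pfmemalloc page, or a page from a remote NUMA node); popping fails when
 * the ring is empty or the head page is still referenced elsewhere.
 */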
172 static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
173                                       struct mlx5e_dma_info *dma_info)
174 {
175         struct mlx5e_page_cache *cache = &rq->page_cache;
176         u32 tail_next = (cache->tail + 1) & (MLX5E_CACHE_SIZE - 1);
177
178         if (tail_next == cache->head) {
179                 rq->stats.cache_full++;
180                 return false;
181         }
182
183         if (unlikely(mlx5e_page_is_reserved(dma_info->page))) {
184                 rq->stats.cache_waive++;
185                 return false;
186         }
187
188         cache->page_cache[cache->tail] = *dma_info;
189         cache->tail = tail_next;
190         return true;
191 }
192
193 static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq,
194                                       struct mlx5e_dma_info *dma_info)
195 {
196         struct mlx5e_page_cache *cache = &rq->page_cache;
197
198         if (unlikely(cache->head == cache->tail)) {
199                 rq->stats.cache_empty++;
200                 return false;
201         }
202
203         if (page_ref_count(cache->page_cache[cache->head].page) != 1) {
204                 rq->stats.cache_busy++;
205                 return false;
206         }
207
208         *dma_info = cache->page_cache[cache->head];
209         cache->head = (cache->head + 1) & (MLX5E_CACHE_SIZE - 1);
210         rq->stats.cache_reuse++;
211
212         dma_sync_single_for_device(rq->pdev, dma_info->addr,
213                                    RQ_PAGE_SIZE(rq),
214                                    DMA_FROM_DEVICE);
215         return true;
216 }
217
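/* Get a DMA-mapped page for an RX buffer: try the RQ page cache first and
 * fall back to allocating and mapping a fresh page of rq->buff.page_order.
 */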
218 static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
219                                           struct mlx5e_dma_info *dma_info)
220 {
221         if (mlx5e_rx_cache_get(rq, dma_info))
222                 return 0;
223
224         dma_info->page = dev_alloc_pages(rq->buff.page_order);
225         if (unlikely(!dma_info->page))
226                 return -ENOMEM;
227
228         dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0,
229                                       RQ_PAGE_SIZE(rq), rq->buff.map_dir);
230         if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
231                 put_page(dma_info->page);
232                 dma_info->page = NULL;
233                 return -ENOMEM;
234         }
235
236         return 0;
237 }
238
239 void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
240                         bool recycle)
241 {
242         if (likely(recycle) && mlx5e_rx_cache_put(rq, dma_info))
243                 return;
244
245         dma_unmap_page(rq->pdev, dma_info->addr, RQ_PAGE_SIZE(rq),
246                        rq->buff.map_dir);
247         put_page(dma_info->page);
248 }
249
250 static inline bool mlx5e_page_reuse(struct mlx5e_rq *rq,
251                                     struct mlx5e_wqe_frag_info *wi)
252 {
253         return rq->wqe.page_reuse && wi->di.page &&
254                 (wi->offset + rq->wqe.frag_sz <= RQ_PAGE_SIZE(rq)) &&
255                 !mlx5e_page_is_reserved(wi->di.page);
256 }
257
258 static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
259 {
260         struct mlx5e_wqe_frag_info *wi = &rq->wqe.frag_info[ix];
261
262         /* check whether a page already exists; if so, it can be reused */
263         if (!wi->di.page) {
264                 if (unlikely(mlx5e_page_alloc_mapped(rq, &wi->di)))
265                         return -ENOMEM;
266                 wi->offset = 0;
267         }
268
269         wqe->data.addr = cpu_to_be64(wi->di.addr + wi->offset + rq->buff.headroom);
270         return 0;
271 }
272
273 static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
274                                      struct mlx5e_wqe_frag_info *wi)
275 {
276         mlx5e_page_release(rq, &wi->di, true);
277         wi->di.page = NULL;
278 }
279
280 static inline void mlx5e_free_rx_wqe_reuse(struct mlx5e_rq *rq,
281                                            struct mlx5e_wqe_frag_info *wi)
282 {
283         if (mlx5e_page_reuse(rq, wi)) {
284                 rq->stats.page_reuse++;
285                 return;
286         }
287
288         mlx5e_free_rx_wqe(rq, wi);
289 }
290
291 void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
292 {
293         struct mlx5e_wqe_frag_info *wi = &rq->wqe.frag_info[ix];
294
295         if (wi->di.page)
296                 mlx5e_free_rx_wqe(rq, wi);
297 }
298
299 static inline void mlx5e_add_skb_frag_mpwqe(struct mlx5e_rq *rq,
300                                             struct sk_buff *skb,
301                                             struct mlx5e_dma_info *di,
302                                             u32 frag_offset, u32 len)
303 {
304         unsigned int truesize = ALIGN(len, BIT(rq->mpwqe.log_stride_sz));
305
306         dma_sync_single_for_cpu(rq->pdev,
307                                 di->addr + frag_offset,
308                                 len, DMA_FROM_DEVICE);
309         page_ref_inc(di->page);
310         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
311                         di->page, frag_offset, len, truesize);
312 }
313
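/* Copy the packet headers into the skb linear part.  The copy length is
 * rounded up to sizeof(long) for memcpy efficiency, and the headers may
 * straddle a page boundary, in which case the remainder is copied from the
 * next page of the MPWQE buffer.
 */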
314 static inline void
315 mlx5e_copy_skb_header_mpwqe(struct device *pdev,
316                             struct sk_buff *skb,
317                             struct mlx5e_dma_info *dma_info,
318                             u32 offset, u32 headlen)
319 {
320         u16 headlen_pg = min_t(u32, headlen, PAGE_SIZE - offset);
321         unsigned int len;
322
323         /* Aligning len to sizeof(long) optimizes memcpy performance */
324         len = ALIGN(headlen_pg, sizeof(long));
325         dma_sync_single_for_cpu(pdev, dma_info->addr + offset, len,
326                                 DMA_FROM_DEVICE);
327         skb_copy_to_linear_data(skb, page_address(dma_info->page) + offset, len);
328
329         if (unlikely(offset + headlen > PAGE_SIZE)) {
330                 dma_info++;
331                 headlen_pg = len;
332                 len = ALIGN(headlen - headlen_pg, sizeof(long));
333                 dma_sync_single_for_cpu(pdev, dma_info->addr, len,
334                                         DMA_FROM_DEVICE);
335                 skb_copy_to_linear_data_offset(skb, headlen_pg,
336                                                page_address(dma_info->page),
337                                                len);
338         }
339 }
340
341 void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
342 {
343         const bool no_xdp_xmit =
344                 bitmap_empty(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
345         struct mlx5e_dma_info *dma_info = wi->umr.dma_info;
346         int i;
347
348         for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++)
349                 if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
350                         mlx5e_page_release(rq, &dma_info[i], true);
351 }
352
353 static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq)
354 {
355         struct mlx5_wq_ll *wq = &rq->wq;
356         struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
357
358         rq->mpwqe.umr_in_progress = false;
359
360         mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
361
362         /* ensure wqes are visible to device before updating doorbell record */
363         dma_wmb();
364
365         mlx5_wq_ll_update_db_record(wq);
366 }
367
368 static inline u16 mlx5e_icosq_wrap_cnt(struct mlx5e_icosq *sq)
369 {
370         return sq->pc >> MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
371 }
372
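/* Allocate the pages of one multi-packet WQE and post a UMR WQE on the
 * channel's ICOSQ to map them.  The RX WQE itself is only pushed to the
 * hardware once the UMR completion is polled (mlx5e_poll_ico_cq ->
 * mlx5e_post_rx_mpwqe).
 */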
373 static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
374 {
375         struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
376         struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0];
377         struct mlx5e_icosq *sq = &rq->channel->icosq;
378         struct mlx5_wq_cyc *wq = &sq->wq;
379         struct mlx5e_umr_wqe *umr_wqe;
380         u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1);
381         int err;
382         u16 pi;
383         int i;
384
385         /* fill sq edge with nops to avoid wqe wrap around */
386         while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
387                 sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
388                 mlx5e_post_nop(wq, sq->sqn, &sq->pc);
389         }
390
391         umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
392         if (unlikely(mlx5e_icosq_wrap_cnt(sq) < 2))
393                 memcpy(umr_wqe, &rq->mpwqe.umr_wqe,
394                        offsetof(struct mlx5e_umr_wqe, inline_mtts));
395
396         for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) {
397                 err = mlx5e_page_alloc_mapped(rq, dma_info);
398                 if (unlikely(err))
399                         goto err_unmap;
400                 umr_wqe->inline_mtts[i].ptag = cpu_to_be64(dma_info->addr | MLX5_EN_WR);
401         }
402
403         bitmap_zero(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
404         wi->consumed_strides = 0;
405
406         rq->mpwqe.umr_in_progress = true;
407
408         umr_wqe->ctrl.opmod_idx_opcode =
409                 cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
410                             MLX5_OPCODE_UMR);
411         umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset);
412
413         sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
414         sq->pc += MLX5E_UMR_WQEBBS;
415         mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &umr_wqe->ctrl);
416
417         return 0;
418
419 err_unmap:
420         while (--i >= 0) {
421                 dma_info--;
422                 mlx5e_page_release(rq, dma_info, true);
423         }
424         rq->stats.buff_alloc_err++;
425
426         return err;
427 }
428
429 void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
430 {
431         struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
432
433         mlx5e_free_rx_mpwqe(rq, wi);
434 }
435
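/* Refill the legacy (linked-list) RQ: allocate and post receive WQEs until
 * the queue is full, then ring the doorbell.  Returns true if the refill
 * stopped early because of an allocation error.
 */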
436 bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
437 {
438         struct mlx5_wq_ll *wq = &rq->wq;
439         int err;
440
441         if (unlikely(!MLX5E_TEST_BIT(rq->state, MLX5E_RQ_STATE_ENABLED)))
442                 return false;
443
444         if (mlx5_wq_ll_is_full(wq))
445                 return false;
446
447         do {
448                 struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
449
450                 err = mlx5e_alloc_rx_wqe(rq, wqe, wq->head);
451                 if (unlikely(err)) {
452                         rq->stats.buff_alloc_err++;
453                         break;
454                 }
455
456                 mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
457         } while (!mlx5_wq_ll_is_full(wq));
458
459         /* ensure wqes are visible to device before updating doorbell record */
460         dma_wmb();
461
462         mlx5_wq_ll_update_db_record(wq);
463
464         return !!err;
465 }
466
467 static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq,
468                                              struct mlx5e_icosq *sq,
469                                              struct mlx5e_rq *rq,
470                                              struct mlx5_cqe64 *cqe)
471 {
472         struct mlx5_wq_cyc *wq = &sq->wq;
473         u16 ci = be16_to_cpu(cqe->wqe_counter) & wq->sz_m1;
474         struct mlx5e_sq_wqe_info *icowi = &sq->db.ico_wqe[ci];
475
476         mlx5_cqwq_pop(&cq->wq);
477
478         if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) {
479                 netdev_WARN_ONCE(cq->channel->netdev,
480                                  "Bad OP in ICOSQ CQE: 0x%x\n", cqe->op_own);
481                 return;
482         }
483
484         if (likely(icowi->opcode == MLX5_OPCODE_UMR)) {
485                 mlx5e_post_rx_mpwqe(rq);
486                 return;
487         }
488
489         if (unlikely(icowi->opcode != MLX5_OPCODE_NOP))
490                 netdev_WARN_ONCE(cq->channel->netdev,
491                                  "Bad OPCODE in ICOSQ WQE info: 0x%x\n", icowi->opcode);
492 }
493
494 static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
495 {
496         struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
497         struct mlx5_cqe64 *cqe;
498
499         if (unlikely(!MLX5E_TEST_BIT(sq->state, MLX5E_SQ_STATE_ENABLED)))
500                 return;
501
502         cqe = mlx5_cqwq_get_cqe(&cq->wq);
503         if (likely(!cqe))
504                 return;
505
506         /* by design, there's only a single cqe */
507         mlx5e_poll_ico_single_cqe(cq, sq, rq, cqe);
508
509         mlx5_cqwq_update_db_record(&cq->wq);
510 }
511
512 bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
513 {
514         struct mlx5_wq_ll *wq = &rq->wq;
515
516         if (unlikely(!MLX5E_TEST_BIT(rq->state, MLX5E_RQ_STATE_ENABLED)))
517                 return false;
518
519         mlx5e_poll_ico_cq(&rq->channel->icosq.cq, rq);
520
521         if (mlx5_wq_ll_is_full(wq))
522                 return false;
523
524         if (!rq->mpwqe.umr_in_progress)
525                 mlx5e_alloc_rx_mpwqe(rq, wq->head);
526
527         return false;
528 }
529
530 static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
531 {
532         u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
533         u8 tcp_ack     = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
534                          (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
535
536         tcp->check                      = 0;
537         tcp->psh                        = get_cqe_lro_tcppsh(cqe);
538
539         if (tcp_ack) {
540                 tcp->ack                = 1;
541                 tcp->ack_seq            = cqe->lro_ack_seq_num;
542                 tcp->window             = cqe->lro_tcp_win;
543         }
544 }
545
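/* Rebuild the IP and TCP headers of a hardware-LRO aggregated packet: fix
 * the total/payload length and TTL/hop limit, recompute the IPv4 header
 * checksum, and derive the TCP checksum from the CQE checksum plus the
 * pseudo header.
 */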
546 static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
547                                  u32 cqe_bcnt)
548 {
549         struct ethhdr   *eth = (struct ethhdr *)(skb->data);
550         struct tcphdr   *tcp;
551         int network_depth = 0;
552         __wsum check;
553         __be16 proto;
554         u16 tot_len;
555         void *ip_p;
556
557         proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
558
559         tot_len = cqe_bcnt - network_depth;
560         ip_p = skb->data + network_depth;
561
562         if (proto == htons(ETH_P_IP)) {
563                 struct iphdr *ipv4 = ip_p;
564
565                 tcp = ip_p + sizeof(struct iphdr);
566                 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
567
568                 ipv4->ttl               = cqe->lro_min_ttl;
569                 ipv4->tot_len           = cpu_to_be16(tot_len);
570                 ipv4->check             = 0;
571                 ipv4->check             = ip_fast_csum((unsigned char *)ipv4,
572                                                        ipv4->ihl);
573
574                 mlx5e_lro_update_tcp_hdr(cqe, tcp);
575                 check = csum_partial(tcp, tcp->doff * 4,
576                                      csum_unfold((__force __sum16)cqe->check_sum));
577                 /* Almost done, don't forget the pseudo header */
578                 tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr,
579                                                tot_len - sizeof(struct iphdr),
580                                                IPPROTO_TCP, check);
581         } else {
582                 u16 payload_len = tot_len - sizeof(struct ipv6hdr);
583                 struct ipv6hdr *ipv6 = ip_p;
584
585                 tcp = ip_p + sizeof(struct ipv6hdr);
586                 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
587
588                 ipv6->hop_limit         = cqe->lro_min_ttl;
589                 ipv6->payload_len       = cpu_to_be16(payload_len);
590
591                 mlx5e_lro_update_tcp_hdr(cqe, tcp);
592                 check = csum_partial(tcp, tcp->doff * 4,
593                                      csum_unfold((__force __sum16)cqe->check_sum));
594                 /* Almost done, don't forget the pseudo header */
595                 tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len,
596                                              IPPROTO_TCP, check);
597         }
598 }
599
600 static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
601                                       struct sk_buff *skb)
602 {
603         u8 cht = cqe->rss_hash_type;
604         int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 :
605                  (cht & CQE_RSS_HTYPE_IP) ? PKT_HASH_TYPE_L3 :
606                                             PKT_HASH_TYPE_NONE;
607         skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
608 }
609
610 static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth)
611 {
612         __be16 ethertype = ((struct ethhdr *)skb->data)->h_proto;
613
614         ethertype = __vlan_get_protocol(skb, ethertype, network_depth);
615         return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
616 }
617
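/* Return the 4-byte Ethernet FCS found at the tail of the skb.  With
 * scatter-gather the FCS may be split between the last fragment and the
 * preceding fragment (or the linear part), in which case it is stitched
 * together from the two pieces.
 */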
618 static __be32 mlx5e_get_fcs(struct sk_buff *skb)
619 {
620         int last_frag_sz, bytes_in_prev, nr_frags;
621         u8 *fcs_p1, *fcs_p2;
622         skb_frag_t *last_frag;
623         __be32 fcs_bytes;
624
625         if (!skb_is_nonlinear(skb))
626                 return *(__be32 *)(skb->data + skb->len - ETH_FCS_LEN);
627
628         nr_frags = skb_shinfo(skb)->nr_frags;
629         last_frag = &skb_shinfo(skb)->frags[nr_frags - 1];
630         last_frag_sz = skb_frag_size(last_frag);
631
632         /* If all of the FCS is in the last frag */
633         if (last_frag_sz >= ETH_FCS_LEN)
634                 return *(__be32 *)(skb_frag_address(last_frag) +
635                                    last_frag_sz - ETH_FCS_LEN);
636
637         fcs_p2 = (u8 *)skb_frag_address(last_frag);
638         bytes_in_prev = ETH_FCS_LEN - last_frag_sz;
639
640         /* Find where the other part of the FCS is - Linear or another frag */
641         if (nr_frags == 1) {
642                 fcs_p1 = skb_tail_pointer(skb);
643         } else {
644                 skb_frag_t *prev_frag = &skb_shinfo(skb)->frags[nr_frags - 2];
645
646                 fcs_p1 = skb_frag_address(prev_frag) +
647                             skb_frag_size(prev_frag);
648         }
649         fcs_p1 -= bytes_in_prev;
650
651         memcpy(&fcs_bytes, fcs_p1, bytes_in_prev);
652         memcpy(((u8 *)&fcs_bytes) + bytes_in_prev, fcs_p2, last_frag_sz);
653
654         return fcs_bytes;
655 }
656
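/* Set the skb checksum status: LRO packets are CHECKSUM_UNNECESSARY by
 * definition; plain IP packets use CHECKSUM_COMPLETE based on the CQE
 * checksum (compensating for VLAN headers and, with RXFCS, the FCS bytes);
 * otherwise fall back to the CQE L3/L4 OK bits, or CHECKSUM_NONE.
 */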
657 static inline void mlx5e_handle_csum(struct net_device *netdev,
658                                      struct mlx5_cqe64 *cqe,
659                                      struct mlx5e_rq *rq,
660                                      struct sk_buff *skb,
661                                      bool   lro)
662 {
663         int network_depth = 0;
664
665         if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
666                 goto csum_none;
667
668         if (lro) {
669                 skb->ip_summed = CHECKSUM_UNNECESSARY;
670                 rq->stats.csum_unnecessary++;
671                 return;
672         }
673
674         if (likely(is_last_ethertype_ip(skb, &network_depth))) {
675                 skb->ip_summed = CHECKSUM_COMPLETE;
676                 skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
677                 if (network_depth > ETH_HLEN)
678                         /* CQE csum is calculated from the IP header and does
679                          * not cover VLAN headers (if present). This will add
680                          * the checksum manually.
681                          */
682                         skb->csum = csum_partial(skb->data + ETH_HLEN,
683                                                  network_depth - ETH_HLEN,
684                                                  skb->csum);
685                 if (unlikely(netdev->features & NETIF_F_RXFCS))
686                         skb->csum = csum_add(skb->csum,
687                                              (__force __wsum)mlx5e_get_fcs(skb));
688                 rq->stats.csum_complete++;
689                 return;
690         }
691
692         if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
693                    (cqe->hds_ip_ext & CQE_L4_OK))) {
694                 skb->ip_summed = CHECKSUM_UNNECESSARY;
695                 if (cqe_is_tunneled(cqe)) {
696                         skb->csum_level = 1;
697                         skb->encapsulation = 1;
698                         rq->stats.csum_unnecessary_inner++;
699                         return;
700                 }
701                 rq->stats.csum_unnecessary++;
702                 return;
703         }
704 csum_none:
705         skb->ip_summed = CHECKSUM_NONE;
706         rq->stats.csum_none++;
707 }
708
709 static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
710                                       u32 cqe_bcnt,
711                                       struct mlx5e_rq *rq,
712                                       struct sk_buff *skb)
713 {
714         struct net_device *netdev = rq->netdev;
715         int lro_num_seg;
716
717         skb->mac_len = ETH_HLEN;
718         lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
719         if (lro_num_seg > 1) {
720                 mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
721                 skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
722                 /* Subtract one since we already counted this as one
723                  * "regular" packet in mlx5e_complete_rx_cqe()
724                  */
725                 rq->stats.packets += lro_num_seg - 1;
726                 rq->stats.lro_packets++;
727                 rq->stats.lro_bytes += cqe_bcnt;
728         }
729
730         if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
731                 skb_hwtstamps(skb)->hwtstamp =
732                                 mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));
733
734         skb_record_rx_queue(skb, rq->ix);
735
736         if (likely(netdev->features & NETIF_F_RXHASH))
737                 mlx5e_skb_set_hash(cqe, skb);
738
739         if (cqe_has_vlan(cqe)) {
740                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
741                                        be16_to_cpu(cqe->vlan_info));
742                 rq->stats.removed_vlan_packets++;
743         }
744
745         skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
746
747         mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
748         skb->protocol = eth_type_trans(skb, netdev);
749 }
750
751 static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
752                                          struct mlx5_cqe64 *cqe,
753                                          u32 cqe_bcnt,
754                                          struct sk_buff *skb)
755 {
756         rq->stats.packets++;
757         rq->stats.bytes += cqe_bcnt;
758         mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
759 }
760
761 static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
762 {
763         struct mlx5_wq_cyc *wq = &sq->wq;
764         struct mlx5e_tx_wqe *wqe;
765         u16 pi = (sq->pc - 1) & wq->sz_m1; /* last pi */
766
767         wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
768
769         mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &wqe->ctrl);
770 }
771
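/* Transmit an XDP_TX packet: build a SEND WQE on the channel's XDP SQ that
 * points at the RX page (optionally inlining the first MLX5E_XDP_MIN_INLINE
 * bytes), and hand the page reference over to the SQ.  The doorbell is
 * deferred and rung once per NAPI poll.
 */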
772 static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
773                                         struct mlx5e_dma_info *di,
774                                         const struct xdp_buff *xdp)
775 {
776         struct mlx5e_xdpsq       *sq   = &rq->xdpsq;
777         struct mlx5_wq_cyc       *wq   = &sq->wq;
778         u16                       pi   = sq->pc & wq->sz_m1;
779         struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
780
781         struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
782         struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
783         struct mlx5_wqe_data_seg *dseg;
784
785         ptrdiff_t data_offset = xdp->data - xdp->data_hard_start;
786         dma_addr_t dma_addr  = di->addr + data_offset;
787         unsigned int dma_len = xdp->data_end - xdp->data;
788
789         prefetchw(wqe);
790
791         if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || rq->hw_mtu < dma_len)) {
792                 rq->stats.xdp_drop++;
793                 return false;
794         }
795
796         if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1))) {
797                 if (sq->db.doorbell) {
798                         /* SQ is full, ring doorbell */
799                         mlx5e_xmit_xdp_doorbell(sq);
800                         sq->db.doorbell = false;
801                 }
802                 rq->stats.xdp_tx_full++;
803                 return false;
804         }
805
806         dma_sync_single_for_device(sq->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE);
807
808         cseg->fm_ce_se = 0;
809
810         dseg = (struct mlx5_wqe_data_seg *)eseg + 1;
811
812         /* copy the inline part if required */
813         if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
814                 memcpy(eseg->inline_hdr.start, xdp->data, MLX5E_XDP_MIN_INLINE);
815                 eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
816                 dma_len  -= MLX5E_XDP_MIN_INLINE;
817                 dma_addr += MLX5E_XDP_MIN_INLINE;
818                 dseg++;
819         }
820
821         /* write the dma part */
822         dseg->addr       = cpu_to_be64(dma_addr);
823         dseg->byte_count = cpu_to_be32(dma_len);
824
825         cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
826
827         /* Move the page reference to the SQ's responsibility,
828          * and mark the RQ so the page is not put back into the page cache.
829          */
830         __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
831         sq->db.di[pi] = *di;
832         sq->pc++;
833
834         sq->db.doorbell = true;
835
836         rq->stats.xdp_tx++;
837         return true;
838 }
839
840 /* returns true if packet was consumed by xdp */
841 static inline bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
842                                    struct mlx5e_dma_info *di,
843                                    void *va, u16 *rx_headroom, u32 *len)
844 {
845         const struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
846         struct xdp_buff xdp;
847         u32 act;
848
849         if (!prog)
850                 return false;
851
852         xdp.data = va + *rx_headroom;
853         xdp_set_data_meta_invalid(&xdp);
854         xdp.data_end = xdp.data + *len;
855         xdp.data_hard_start = va;
856         xdp.rxq = &rq->xdp_rxq;
857
858         act = bpf_prog_run_xdp(prog, &xdp);
859         switch (act) {
860         case XDP_PASS:
861                 *rx_headroom = xdp.data - xdp.data_hard_start;
862                 *len = xdp.data_end - xdp.data;
863                 return false;
864         case XDP_TX:
865                 if (unlikely(!mlx5e_xmit_xdp_frame(rq, di, &xdp)))
866                         trace_xdp_exception(rq->netdev, prog, act);
867                 return true;
868         default:
869                 bpf_warn_invalid_xdp_action(act); /* fall through */
870         case XDP_ABORTED:
871                 trace_xdp_exception(rq->netdev, prog, act); /* fall through */
872         case XDP_DROP:
873                 rq->stats.xdp_drop++;
874                 return true;
875         }
876 }
877
878 static inline
879 struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
880                                        u32 frag_size, u16 headroom,
881                                        u32 cqe_bcnt)
882 {
883         struct sk_buff *skb = build_skb(va, frag_size);
884
885         if (unlikely(!skb)) {
886                 rq->stats.buff_alloc_err++;
887                 return NULL;
888         }
889
890         skb_reserve(skb, headroom);
891         skb_put(skb, cqe_bcnt);
892
893         return skb;
894 }
895
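/* Build an skb for a legacy RQ completion: sync the fragment for the CPU,
 * let XDP inspect or consume it, then build a linear skb directly on top of
 * the RX page and take an extra page reference for later reuse.
 */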
896 static inline
897 struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
898                              struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
899 {
900         struct mlx5e_dma_info *di = &wi->di;
901         u16 rx_headroom = rq->buff.headroom;
902         struct sk_buff *skb;
903         void *va, *data;
904         bool consumed;
905         u32 frag_size;
906
907         va             = page_address(di->page) + wi->offset;
908         data           = va + rx_headroom;
909         frag_size      = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
910
911         dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset,
912                                       frag_size, DMA_FROM_DEVICE);
913         prefetch(data);
914         wi->offset += frag_size;
915
916         if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
917                 rq->stats.wqe_err++;
918                 return NULL;
919         }
920
921         rcu_read_lock();
922         consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt);
923         rcu_read_unlock();
924         if (consumed)
925                 return NULL; /* page/packet was consumed by XDP */
926
927         skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt);
928         if (unlikely(!skb))
929                 return NULL;
930
931         /* queue up for recycling/reuse */
932         page_ref_inc(di->page);
933
934         return skb;
935 }
936
937 void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
938 {
939         struct mlx5e_wqe_frag_info *wi;
940         struct mlx5e_rx_wqe *wqe;
941         __be16 wqe_counter_be;
942         struct sk_buff *skb;
943         u16 wqe_counter;
944         u32 cqe_bcnt;
945
946         wqe_counter_be = cqe->wqe_counter;
947         wqe_counter    = be16_to_cpu(wqe_counter_be);
948         wqe            = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
949         wi             = &rq->wqe.frag_info[wqe_counter];
950         cqe_bcnt       = be32_to_cpu(cqe->byte_cnt);
951
952         skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt);
953         if (!skb) {
954                 /* probably for XDP */
955                 if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
956                         wi->di.page = NULL;
957                         /* do not return page to cache, it will be returned on XDP_TX completion */
958                         goto wq_ll_pop;
959                 }
960                 /* probably an XDP_DROP, save the page-reuse checks */
961                 mlx5e_free_rx_wqe(rq, wi);
962                 goto wq_ll_pop;
963         }
964
965         mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
966         napi_gro_receive(rq->cq.napi, skb);
967
968         mlx5e_free_rx_wqe_reuse(rq, wi);
969 wq_ll_pop:
970         mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
971                        &wqe->next.next_wqe_index);
972 }
973
974 #ifdef CONFIG_MLX5_ESWITCH
975 void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
976 {
977         struct net_device *netdev = rq->netdev;
978         struct mlx5e_priv *priv = netdev_priv(netdev);
979         struct mlx5e_rep_priv *rpriv  = priv->ppriv;
980         struct mlx5_eswitch_rep *rep = rpriv->rep;
981         struct mlx5e_wqe_frag_info *wi;
982         struct mlx5e_rx_wqe *wqe;
983         struct sk_buff *skb;
984         __be16 wqe_counter_be;
985         u16 wqe_counter;
986         u32 cqe_bcnt;
987
988         wqe_counter_be = cqe->wqe_counter;
989         wqe_counter    = be16_to_cpu(wqe_counter_be);
990         wqe            = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
991         wi             = &rq->wqe.frag_info[wqe_counter];
992         cqe_bcnt       = be32_to_cpu(cqe->byte_cnt);
993
994         skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt);
995         if (!skb) {
996                 if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
997                         wi->di.page = NULL;
998                         /* do not return page to cache, it will be returned on XDP_TX completion */
999                         goto wq_ll_pop;
1000                 }
1001                 /* probably an XDP_DROP, save the page-reuse checks */
1002                 mlx5e_free_rx_wqe(rq, wi);
1003                 goto wq_ll_pop;
1004         }
1005
1006         mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1007
1008         if (rep->vlan && skb_vlan_tag_present(skb))
1009                 skb_vlan_pop(skb);
1010
1011         napi_gro_receive(rq->cq.napi, skb);
1012
1013         mlx5e_free_rx_wqe_reuse(rq, wi);
1014 wq_ll_pop:
1015         mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
1016                        &wqe->next.next_wqe_index);
1017 }
1018 #endif
1019
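/* Build a non-linear skb for a striding-RQ completion: copy up to
 * MLX5_MPWRQ_SMALL_PACKET_THRESHOLD bytes of headers into the linear part
 * and attach the rest of the payload as page fragments, crossing MPWQE
 * pages as needed.
 */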
1020 struct sk_buff *
1021 mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
1022                                    u16 cqe_bcnt, u32 head_offset, u32 page_idx)
1023 {
1024         u16 headlen = min_t(u16, MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, cqe_bcnt);
1025         struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
1026         u32 frag_offset    = head_offset + headlen;
1027         u32 byte_cnt       = cqe_bcnt - headlen;
1028         struct mlx5e_dma_info *head_di = di;
1029         struct sk_buff *skb;
1030
1031         skb = napi_alloc_skb(rq->cq.napi,
1032                              ALIGN(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, sizeof(long)));
1033         if (unlikely(!skb)) {
1034                 rq->stats.buff_alloc_err++;
1035                 return NULL;
1036         }
1037
1038         prefetchw(skb->data);
1039
1040         if (unlikely(frag_offset >= PAGE_SIZE)) {
1041                 di++;
1042                 frag_offset -= PAGE_SIZE;
1043         }
1044
1045         while (byte_cnt) {
1046                 u32 pg_consumed_bytes =
1047                         min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
1048
1049                 mlx5e_add_skb_frag_mpwqe(rq, skb, di, frag_offset,
1050                                          pg_consumed_bytes);
1051                 byte_cnt -= pg_consumed_bytes;
1052                 frag_offset = 0;
1053                 di++;
1054         }
1055         /* copy header */
1056         mlx5e_copy_skb_header_mpwqe(rq->pdev, skb, head_di,
1057                                     head_offset, headlen);
1058         /* skb linear part was allocated with headlen and aligned to long */
1059         skb->tail += headlen;
1060         skb->len  += headlen;
1061
1062         return skb;
1063 }
1064
1065 struct sk_buff *
1066 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
1067                                 u16 cqe_bcnt, u32 head_offset, u32 page_idx)
1068 {
1069         struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx];
1070         u16 rx_headroom = rq->buff.headroom;
1071         u32 cqe_bcnt32 = cqe_bcnt;
1072         struct sk_buff *skb;
1073         void *va, *data;
1074         u32 frag_size;
1075         bool consumed;
1076
1077         va             = page_address(di->page) + head_offset;
1078         data           = va + rx_headroom;
1079         frag_size      = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
1080
1081         dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset,
1082                                       frag_size, DMA_FROM_DEVICE);
1083         prefetch(data);
1084
1085         rcu_read_lock();
1086         consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt32);
1087         rcu_read_unlock();
1088         if (consumed) {
1089                 if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
1090                         __set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
1091                 return NULL; /* page/packet was consumed by XDP */
1092         }
1093
1094         skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32);
1095         if (unlikely(!skb))
1096                 return NULL;
1097
1098         /* queue up for recycling/reuse */
1099         page_ref_inc(di->page);
1100
1101         return skb;
1102 }
1103
1104 void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1105 {
1106         u16 cstrides       = mpwrq_get_cqe_consumed_strides(cqe);
1107         u16 wqe_id         = be16_to_cpu(cqe->wqe_id);
1108         struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id];
1109         u16 stride_ix      = mpwrq_get_cqe_stride_index(cqe);
1110         u32 wqe_offset     = stride_ix << rq->mpwqe.log_stride_sz;
1111         u32 head_offset    = wqe_offset & (PAGE_SIZE - 1);
1112         u32 page_idx       = wqe_offset >> PAGE_SHIFT;
1113         struct mlx5e_rx_wqe *wqe;
1114         struct sk_buff *skb;
1115         u16 cqe_bcnt;
1116
1117         wi->consumed_strides += cstrides;
1118
1119         if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
1120                 rq->stats.wqe_err++;
1121                 goto mpwrq_cqe_out;
1122         }
1123
1124         if (unlikely(mpwrq_is_filler_cqe(cqe))) {
1125                 rq->stats.mpwqe_filler++;
1126                 goto mpwrq_cqe_out;
1127         }
1128
1129         cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
1130
1131         skb = rq->mpwqe.skb_from_cqe_mpwrq(rq, wi, cqe_bcnt, head_offset,
1132                                            page_idx);
1133         if (!skb)
1134                 goto mpwrq_cqe_out;
1135
1136         mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1137         napi_gro_receive(rq->cq.napi, skb);
1138
1139 mpwrq_cqe_out:
1140         if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
1141                 return;
1142
1143         wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_id);
1144         mlx5e_free_rx_mpwqe(rq, wi);
1145         mlx5_wq_ll_pop(&rq->wq, cqe->wqe_id, &wqe->next.next_wqe_index);
1146 }
1147
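/* RX NAPI poll: drain up to 'budget' CQEs, expanding compressed CQE
 * sessions on the fly and dispatching each completion through
 * rq->handle_rx_cqe().  Any pending XDP_TX doorbell is rung once at the
 * end, before the CQ doorbell record is updated.
 */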
1148 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
1149 {
1150         struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
1151         struct mlx5e_xdpsq *xdpsq;
1152         struct mlx5_cqe64 *cqe;
1153         int work_done = 0;
1154
1155         if (unlikely(!MLX5E_TEST_BIT(rq->state, MLX5E_RQ_STATE_ENABLED)))
1156                 return 0;
1157
1158         if (cq->decmprs_left)
1159                 work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);
1160
1161         cqe = mlx5_cqwq_get_cqe(&cq->wq);
1162         if (!cqe)
1163                 return 0;
1164
1165         xdpsq = &rq->xdpsq;
1166
1167         do {
1168                 if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
1169                         work_done +=
1170                                 mlx5e_decompress_cqes_start(rq, cq,
1171                                                             budget - work_done);
1172                         continue;
1173                 }
1174
1175                 mlx5_cqwq_pop(&cq->wq);
1176
1177                 rq->handle_rx_cqe(rq, cqe);
1178         } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
1179
1180         if (xdpsq->db.doorbell) {
1181                 mlx5e_xmit_xdp_doorbell(xdpsq);
1182                 xdpsq->db.doorbell = false;
1183         }
1184
1185         mlx5_cqwq_update_db_record(&cq->wq);
1186
1187         /* ensure cq space is freed before enabling more cqes */
1188         wmb();
1189
1190         return work_done;
1191 }
1192
1193 bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
1194 {
1195         struct mlx5e_xdpsq *sq;
1196         struct mlx5_cqe64 *cqe;
1197         struct mlx5e_rq *rq;
1198         u16 sqcc;
1199         int i;
1200
1201         sq = container_of(cq, struct mlx5e_xdpsq, cq);
1202
1203         if (unlikely(!MLX5E_TEST_BIT(sq->state, MLX5E_SQ_STATE_ENABLED)))
1204                 return false;
1205
1206         cqe = mlx5_cqwq_get_cqe(&cq->wq);
1207         if (!cqe)
1208                 return false;
1209
1210         rq = container_of(sq, struct mlx5e_rq, xdpsq);
1211
1212         /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
1213          * otherwise a cq overrun may occur
1214          */
1215         sqcc = sq->cc;
1216
1217         i = 0;
1218         do {
1219                 u16 wqe_counter;
1220                 bool last_wqe;
1221
1222                 mlx5_cqwq_pop(&cq->wq);
1223
1224                 wqe_counter = be16_to_cpu(cqe->wqe_counter);
1225
1226                 do {
1227                         struct mlx5e_dma_info *di;
1228                         u16 ci;
1229
1230                         last_wqe = (sqcc == wqe_counter);
1231
1232                         ci = sqcc & sq->wq.sz_m1;
1233                         di = &sq->db.di[ci];
1234
1235                         sqcc++;
1236                         /* Recycle RX page */
1237                         mlx5e_page_release(rq, di, true);
1238                 } while (!last_wqe);
1239         } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
1240
1241         mlx5_cqwq_update_db_record(&cq->wq);
1242
1243         /* ensure cq space is freed before enabling more cqes */
1244         wmb();
1245
1246         sq->cc = sqcc;
1247         return (i == MLX5E_TX_CQ_POLL_BUDGET);
1248 }
1249
1250 void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
1251 {
1252         struct mlx5e_rq *rq = container_of(sq, struct mlx5e_rq, xdpsq);
1253         struct mlx5e_dma_info *di;
1254         u16 ci;
1255
1256         while (sq->cc != sq->pc) {
1257                 ci = sq->cc & sq->wq.sz_m1;
1258                 di = &sq->db.di[ci];
1259                 sq->cc++;
1260
1261                 mlx5e_page_release(rq, di, false);
1262         }
1263 }
1264
1265 #ifdef CONFIG_MLX5_CORE_IPOIB
1266
1267 #define MLX5_IB_GRH_DGID_OFFSET 24
1268 #define MLX5_GID_SIZE           16
1269
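/* IPoIB completion handling: resolve the destination child netdev from the
 * CQE QPN, classify the packet type from the GRH DGID, strip the GRH, and
 * build the zeroed IPoIB pseudo header expected by the stack.
 */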
1270 static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
1271                                          struct mlx5_cqe64 *cqe,
1272                                          u32 cqe_bcnt,
1273                                          struct sk_buff *skb)
1274 {
1275         struct hwtstamp_config *tstamp;
1276         struct net_device *netdev;
1277         struct mlx5e_priv *priv;
1278         char *pseudo_header;
1279         u32 qpn;
1280         u8 *dgid;
1281         u8 g;
1282
1283         qpn = be32_to_cpu(cqe->sop_drop_qpn) & 0xffffff;
1284         netdev = mlx5i_pkey_get_netdev(rq->netdev, qpn);
1285
1286         /* No mapping present, cannot process SKB. This might happen if a child
1287          * interface is going down while having unprocessed CQEs on parent RQ
1288          */
1289         if (unlikely(!netdev)) {
1290                 /* TODO: add drop counters support */
1291                 skb->dev = NULL;
1292                 pr_warn_once("Unable to map QPN %u to dev - dropping skb\n", qpn);
1293                 return;
1294         }
1295
1296         priv = mlx5i_epriv(netdev);
1297         tstamp = &priv->tstamp;
1298
1299         g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
1300         dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
1301         if ((!g) || dgid[0] != 0xff)
1302                 skb->pkt_type = PACKET_HOST;
1303         else if (memcmp(dgid, netdev->broadcast + 4, MLX5_GID_SIZE) == 0)
1304                 skb->pkt_type = PACKET_BROADCAST;
1305         else
1306                 skb->pkt_type = PACKET_MULTICAST;
1307
1308         /* TODO: IB/ipoib: Allow mcast packets from other VFs
1309          * 68996a6e760e5c74654723eeb57bf65628ae87f4
1310          */
1311
1312         skb_pull(skb, MLX5_IB_GRH_BYTES);
1313
1314         skb->protocol = *((__be16 *)(skb->data));
1315
1316         skb->ip_summed = CHECKSUM_COMPLETE;
1317         skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
1318
1319         if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
1320                 skb_hwtstamps(skb)->hwtstamp =
1321                                 mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));
1322
1323         skb_record_rx_queue(skb, rq->ix);
1324
1325         if (likely(netdev->features & NETIF_F_RXHASH))
1326                 mlx5e_skb_set_hash(cqe, skb);
1327
1328         /* 20 bytes of IPoIB pseudo header; the 4-byte encap header already exists in the data */
1329         pseudo_header = skb_push(skb, MLX5_IPOIB_PSEUDO_LEN);
1330         memset(pseudo_header, 0, MLX5_IPOIB_PSEUDO_LEN);
1331         skb_reset_mac_header(skb);
1332         skb_pull(skb, MLX5_IPOIB_HARD_LEN);
1333
1334         skb->dev = netdev;
1335
1336         rq->stats.csum_complete++;
1337         rq->stats.packets++;
1338         rq->stats.bytes += cqe_bcnt;
1339 }
1340
1341 void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1342 {
1343         struct mlx5e_wqe_frag_info *wi;
1344         struct mlx5e_rx_wqe *wqe;
1345         __be16 wqe_counter_be;
1346         struct sk_buff *skb;
1347         u16 wqe_counter;
1348         u32 cqe_bcnt;
1349
1350         wqe_counter_be = cqe->wqe_counter;
1351         wqe_counter    = be16_to_cpu(wqe_counter_be);
1352         wqe            = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
1353         wi             = &rq->wqe.frag_info[wqe_counter];
1354         cqe_bcnt       = be32_to_cpu(cqe->byte_cnt);
1355
1356         skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt);
1357         if (!skb)
1358                 goto wq_free_wqe;
1359
1360         mlx5i_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1361         if (unlikely(!skb->dev)) {
1362                 dev_kfree_skb_any(skb);
1363                 goto wq_free_wqe;
1364         }
1365         napi_gro_receive(rq->cq.napi, skb);
1366
1367 wq_free_wqe:
1368         mlx5e_free_rx_wqe_reuse(rq, wi);
1369         mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
1370                        &wqe->next.next_wqe_index);
1371 }
1372
1373 #endif /* CONFIG_MLX5_CORE_IPOIB */
1374
1375 #ifdef CONFIG_MLX5_EN_IPSEC
1376
1377 void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1378 {
1379         struct mlx5e_wqe_frag_info *wi;
1380         struct mlx5e_rx_wqe *wqe;
1381         __be16 wqe_counter_be;
1382         struct sk_buff *skb;
1383         u16 wqe_counter;
1384         u32 cqe_bcnt;
1385
1386         wqe_counter_be = cqe->wqe_counter;
1387         wqe_counter    = be16_to_cpu(wqe_counter_be);
1388         wqe            = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
1389         wi             = &rq->wqe.frag_info[wqe_counter];
1390         cqe_bcnt       = be32_to_cpu(cqe->byte_cnt);
1391
1392         skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt);
1393         if (unlikely(!skb)) {
1394                 /* a DROP, save the page-reuse checks */
1395                 mlx5e_free_rx_wqe(rq, wi);
1396                 goto wq_ll_pop;
1397         }
1398         skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb);
1399         if (unlikely(!skb)) {
1400                 mlx5e_free_rx_wqe(rq, wi);
1401                 goto wq_ll_pop;
1402         }
1403
1404         mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1405         napi_gro_receive(rq->cq.napi, skb);
1406
1407         mlx5e_free_rx_wqe_reuse(rq, wi);
1408 wq_ll_pop:
1409         mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
1410                        &wqe->next.next_wqe_index);
1411 }
1412
1413 #endif /* CONFIG_MLX5_EN_IPSEC */