drivers/infiniband/hw/i40iw/i40iw_puda.c
1 /*******************************************************************************
2 *
3 * Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses.  You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenFabrics.org BSD license below:
10 *
11 *   Redistribution and use in source and binary forms, with or
12 *   without modification, are permitted provided that the following
13 *   conditions are met:
14 *
15 *    - Redistributions of source code must retain the above
16 *       copyright notice, this list of conditions and the following
17 *       disclaimer.
18 *
19 *    - Redistributions in binary form must reproduce the above
20 *       copyright notice, this list of conditions and the following
21 *       disclaimer in the documentation and/or other materials
22 *       provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 *
33 *******************************************************************************/
34
35 #include "i40iw_osdep.h"
36 #include "i40iw_register.h"
37 #include "i40iw_status.h"
38 #include "i40iw_hmc.h"
39
40 #include "i40iw_d.h"
41 #include "i40iw_type.h"
42 #include "i40iw_p.h"
43 #include "i40iw_puda.h"
44
45 static void i40iw_ieq_receive(struct i40iw_sc_vsi *vsi,
46                               struct i40iw_puda_buf *buf);
47 static void i40iw_ieq_tx_compl(struct i40iw_sc_vsi *vsi, void *sqwrid);
48 static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx);
49 static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc
50                                                       *rsrc, bool initial);
51 static void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp);
52 /**
53  * i40iw_puda_get_listbuf - get buffer from puda list
54  * @list: list to use for buffers (ILQ or IEQ)
55  */
56 static struct i40iw_puda_buf *i40iw_puda_get_listbuf(struct list_head *list)
57 {
58         struct i40iw_puda_buf *buf = NULL;
59
60         if (!list_empty(list)) {
61                 buf = (struct i40iw_puda_buf *)list->next;
62                 list_del((struct list_head *)&buf->list);
63         }
64         return buf;
65 }
66
67 /**
68  * i40iw_puda_get_bufpool - return buffer from resource
69  * @rsrc: resource to use for buffer
70  */
71 struct i40iw_puda_buf *i40iw_puda_get_bufpool(struct i40iw_puda_rsrc *rsrc)
72 {
73         struct i40iw_puda_buf *buf = NULL;
74         struct list_head *list = &rsrc->bufpool;
75         unsigned long   flags;
76
77         spin_lock_irqsave(&rsrc->bufpool_lock, flags);
78         buf = i40iw_puda_get_listbuf(list);
79         if (buf)
80                 rsrc->avail_buf_count--;
81         else
82                 rsrc->stats_buf_alloc_fail++;
83         spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
84         return buf;
85 }
86
87 /**
88  * i40iw_puda_ret_bufpool - return buffer to rsrc list
89  * @rsrc: resource to use for buffer
90  * @buf: buffer to return to resource
91  */
92 void i40iw_puda_ret_bufpool(struct i40iw_puda_rsrc *rsrc,
93                             struct i40iw_puda_buf *buf)
94 {
95         unsigned long   flags;
96
97         spin_lock_irqsave(&rsrc->bufpool_lock, flags);
98         list_add(&buf->list, &rsrc->bufpool);
99         spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
100         rsrc->avail_buf_count++;
101 }
102
103 /**
104  * i40iw_puda_post_recvbuf - set wqe for rcv buffer
105  * @rsrc: resource ptr
106  * @wqe_idx: wqe index to use
107  * @buf: puda buffer for rcv q
108  * @initial: flag if during init time
109  */
110 static void i40iw_puda_post_recvbuf(struct i40iw_puda_rsrc *rsrc, u32 wqe_idx,
111                                     struct i40iw_puda_buf *buf, bool initial)
112 {
113         u64 *wqe;
114         struct i40iw_sc_qp *qp = &rsrc->qp;
115         u64 offset24 = 0;
116
117         qp->qp_uk.rq_wrid_array[wqe_idx] = (uintptr_t)buf;
118         wqe = qp->qp_uk.rq_base[wqe_idx].elem;
119         i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
120                     "%s: wqe_idx= %d buf = %p wqe = %p\n", __func__,
121                     wqe_idx, buf, wqe);
122         if (!initial)
123                 get_64bit_val(wqe, 24, &offset24);
124
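        /* flip the wqe valid bit from its previous state so hardware sees the entry as newly posted; on the initial post it is simply set */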
125         offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);
126         set_64bit_val(wqe, 24, offset24);
127
128         set_64bit_val(wqe, 0, buf->mem.pa);
129         set_64bit_val(wqe, 8,
130                       LS_64(buf->mem.size, I40IWQPSQ_FRAG_LEN));
131         set_64bit_val(wqe, 24, offset24);
132 }
133
134 /**
135  * i40iw_puda_replenish_rq - post rcv buffers
136  * @rsrc: resource to use for buffer
137  * @initial: flag if during init time
138  */
139 static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc *rsrc,
140                                                       bool initial)
141 {
142         u32 i;
143         u32 invalid_cnt = rsrc->rxq_invalid_cnt;
144         struct i40iw_puda_buf *buf = NULL;
145
146         for (i = 0; i < invalid_cnt; i++) {
147                 buf = i40iw_puda_get_bufpool(rsrc);
148                 if (!buf)
149                         return I40IW_ERR_list_empty;
150                 i40iw_puda_post_recvbuf(rsrc, rsrc->rx_wqe_idx, buf,
151                                         initial);
152                 rsrc->rx_wqe_idx =
153                     ((rsrc->rx_wqe_idx + 1) % rsrc->rq_size);
154                 rsrc->rxq_invalid_cnt--;
155         }
156         return 0;
157 }
158
159 /**
160  * i40iw_puda_alloc_buf - allocate mem for buffer
161  * @dev: iwarp device
162  * @length: length of buffer
163  */
164 static struct i40iw_puda_buf *i40iw_puda_alloc_buf(struct i40iw_sc_dev *dev,
165                                                    u32 length)
166 {
167         struct i40iw_puda_buf *buf = NULL;
168         struct i40iw_virt_mem buf_mem;
169         enum i40iw_status_code ret;
170
171         ret = i40iw_allocate_virt_mem(dev->hw, &buf_mem,
172                                       sizeof(struct i40iw_puda_buf));
173         if (ret) {
174                 i40iw_debug(dev, I40IW_DEBUG_PUDA,
175                             "%s: error mem for buf\n", __func__);
176                 return NULL;
177         }
178         buf = (struct i40iw_puda_buf *)buf_mem.va;
179         ret = i40iw_allocate_dma_mem(dev->hw, &buf->mem, length, 1);
180         if (ret) {
181                 i40iw_debug(dev, I40IW_DEBUG_PUDA,
182                             "%s: error dma mem for buf\n", __func__);
183                 i40iw_free_virt_mem(dev->hw, &buf_mem);
184                 return NULL;
185         }
186         buf->buf_mem.va = buf_mem.va;
187         buf->buf_mem.size = buf_mem.size;
188         return buf;
189 }
190
191 /**
192  * i40iw_puda_dele_buf - free buffer back to system
193  * @dev: iwarp device
194  * @buf: buffer to free
195  */
196 static void i40iw_puda_dele_buf(struct i40iw_sc_dev *dev,
197                                 struct i40iw_puda_buf *buf)
198 {
199         i40iw_free_dma_mem(dev->hw, &buf->mem);
200         i40iw_free_virt_mem(dev->hw, &buf->buf_mem);
201 }
202
203 /**
204  * i40iw_puda_get_next_send_wqe - return next wqe for processing
205  * @qp: puda qp for wqe
206  * @wqe_idx: wqe index for caller
207  */
208 static u64 *i40iw_puda_get_next_send_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx)
209 {
210         u64 *wqe = NULL;
211         enum i40iw_status_code ret_code = 0;
212
213         *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
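        /* wqe index 0 means the sq ring has wrapped; flip the sq polarity used for wqe valid bits */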
214         if (!*wqe_idx)
215                 qp->swqe_polarity = !qp->swqe_polarity;
216         I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
217         if (ret_code)
218                 return wqe;
219         wqe = qp->sq_base[*wqe_idx].elem;
220
221         return wqe;
222 }
223
224 /**
225  * i40iw_puda_poll_info - poll cq for completion
226  * @cq: cq for poll
227  * @info: info return for successful completion
228  */
229 static enum i40iw_status_code i40iw_puda_poll_info(struct i40iw_sc_cq *cq,
230                                                    struct i40iw_puda_completion_info *info)
231 {
232         u64 qword0, qword2, qword3;
233         u64 *cqe;
234         u64 comp_ctx;
235         bool valid_bit;
236         u32 major_err, minor_err;
237         bool error;
238
239         cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&cq->cq_uk);
240         get_64bit_val(cqe, 24, &qword3);
241         valid_bit = (bool)RS_64(qword3, I40IW_CQ_VALID);
242
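        /* a cqe is owned by software only when its valid bit matches the cq's current polarity */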
243         if (valid_bit != cq->cq_uk.polarity)
244                 return I40IW_ERR_QUEUE_EMPTY;
245
246         i40iw_debug_buf(cq->dev, I40IW_DEBUG_PUDA, "PUDA CQE", cqe, 32);
247         error = (bool)RS_64(qword3, I40IW_CQ_ERROR);
248         if (error) {
249                 i40iw_debug(cq->dev, I40IW_DEBUG_PUDA, "%s receive error\n", __func__);
250                 major_err = (u32)(RS_64(qword3, I40IW_CQ_MAJERR));
251                 minor_err = (u32)(RS_64(qword3, I40IW_CQ_MINERR));
252                 info->compl_error = major_err << 16 | minor_err;
253                 return I40IW_ERR_CQ_COMPL_ERROR;
254         }
255
256         get_64bit_val(cqe, 0, &qword0);
257         get_64bit_val(cqe, 16, &qword2);
258
259         info->q_type = (u8)RS_64(qword3, I40IW_CQ_SQ);
260         info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID);
261
262         get_64bit_val(cqe, 8, &comp_ctx);
263         info->qp = (struct i40iw_qp_uk *)(unsigned long)comp_ctx;
264         info->wqe_idx = (u32)RS_64(qword3, I40IW_CQ_WQEIDX);
265
266         if (info->q_type == I40IW_CQE_QTYPE_RQ) {
267                 info->vlan_valid = (bool)RS_64(qword3, I40IW_VLAN_TAG_VALID);
268                 info->l4proto = (u8)RS_64(qword2, I40IW_UDA_L4PROTO);
269                 info->l3proto = (u8)RS_64(qword2, I40IW_UDA_L3PROTO);
270                 info->payload_len = (u16)RS_64(qword0, I40IW_UDA_PAYLOADLEN);
271         }
272
273         return 0;
274 }
275
276 /**
277  * i40iw_puda_poll_completion - processes completion for cq
278  * @dev: iwarp device
279  * @cq: cq getting interrupt
280  * @compl_err: return any completion err
281  */
282 enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
283                                                   struct i40iw_sc_cq *cq, u32 *compl_err)
284 {
285         struct i40iw_qp_uk *qp;
286         struct i40iw_cq_uk *cq_uk = &cq->cq_uk;
287         struct i40iw_puda_completion_info info;
288         enum i40iw_status_code ret = 0;
289         struct i40iw_puda_buf *buf;
290         struct i40iw_puda_rsrc *rsrc;
291         void *sqwrid;
292         u8 cq_type = cq->cq_type;
293         unsigned long   flags;
294
295         if ((cq_type == I40IW_CQ_TYPE_ILQ) || (cq_type == I40IW_CQ_TYPE_IEQ)) {
296                 rsrc = (cq_type == I40IW_CQ_TYPE_ILQ) ? cq->vsi->ilq : cq->vsi->ieq;
297         } else {
298                 i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s qp_type error\n", __func__);
299                 return I40IW_ERR_BAD_PTR;
300         }
301         memset(&info, 0, sizeof(info));
302         ret = i40iw_puda_poll_info(cq, &info);
303         *compl_err = info.compl_error;
304         if (ret == I40IW_ERR_QUEUE_EMPTY)
305                 return ret;
306         if (ret)
307                 goto done;
308
309         qp = info.qp;
310         if (!qp || !rsrc) {
311                 ret = I40IW_ERR_BAD_PTR;
312                 goto done;
313         }
314
315         if (qp->qp_id != rsrc->qp_id) {
316                 ret = I40IW_ERR_BAD_PTR;
317                 goto done;
318         }
319
320         if (info.q_type == I40IW_CQE_QTYPE_RQ) {
321                 buf = (struct i40iw_puda_buf *)(uintptr_t)qp->rq_wrid_array[info.wqe_idx];
322                 /* Get all the tcpip information in the buf header */
323                 ret = i40iw_puda_get_tcpip_info(&info, buf);
324                 if (ret) {
325                         rsrc->stats_rcvd_pkt_err++;
326                         if (cq_type == I40IW_CQ_TYPE_ILQ) {
327                                 i40iw_ilq_putback_rcvbuf(&rsrc->qp,
328                                                          info.wqe_idx);
329                         } else {
330                                 i40iw_puda_ret_bufpool(rsrc, buf);
331                                 i40iw_puda_replenish_rq(rsrc, false);
332                         }
333                         goto done;
334                 }
335
336                 rsrc->stats_pkt_rcvd++;
337                 rsrc->compl_rxwqe_idx = info.wqe_idx;
338                 i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s RQ completion\n", __func__);
339                 rsrc->receive(rsrc->vsi, buf);
340                 if (cq_type == I40IW_CQ_TYPE_ILQ)
341                         i40iw_ilq_putback_rcvbuf(&rsrc->qp, info.wqe_idx);
342                 else
343                         i40iw_puda_replenish_rq(rsrc, false);
344
345         } else {
346                 i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s SQ completion\n", __func__);
347                 sqwrid = (void *)(uintptr_t)qp->sq_wrtrk_array[info.wqe_idx].wrid;
348                 I40IW_RING_SET_TAIL(qp->sq_ring, info.wqe_idx);
349                 rsrc->xmit_complete(rsrc->vsi, sqwrid);
350                 spin_lock_irqsave(&rsrc->bufpool_lock, flags);
351                 rsrc->tx_wqe_avail_cnt++;
352                 spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
353                 if (!list_empty(&rsrc->vsi->ilq->txpend))
354                         i40iw_puda_send_buf(rsrc->vsi->ilq, NULL);
355         }
356
357 done:
358         I40IW_RING_MOVE_HEAD(cq_uk->cq_ring, ret);
359         if (I40IW_RING_GETCURRENT_HEAD(cq_uk->cq_ring) == 0)
360                 cq_uk->polarity = !cq_uk->polarity;
361         /* update cq tail in cq shadow memory also */
362         I40IW_RING_MOVE_TAIL(cq_uk->cq_ring);
363         set_64bit_val(cq_uk->shadow_area, 0,
364                       I40IW_RING_GETCURRENT_HEAD(cq_uk->cq_ring));
365         return 0;
366 }
367
368 /**
369  * i40iw_puda_send - complete send wqe for transmit
370  * @qp: puda qp for send
371  * @info: buffer information for transmit
372  */
373 enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
374                                        struct i40iw_puda_send_info *info)
375 {
376         u64 *wqe;
377         u32 iplen, l4len;
378         u64 header[2];
379         u32 wqe_idx;
380         u8 iipt;
381
382         /* tcp header length (l4len) in 32-bit words */
383         l4len = info->tcplen >> 2;
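        /* ip header length is also given in 32-bit words: 5 (20 bytes) for ipv4, 10 (40 bytes) for ipv6 */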
384         if (info->ipv4) {
385                 iipt = 3;
386                 iplen = 5;
387         } else {
388                 iipt = 1;
389                 iplen = 10;
390         }
391
392         wqe = i40iw_puda_get_next_send_wqe(&qp->qp_uk, &wqe_idx);
393         if (!wqe)
394                 return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
395         qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid = (uintptr_t)info->scratch;
396         /* Third line of WQE descriptor */
397         /* maclen is in words */
398         header[0] = LS_64((info->maclen >> 1), I40IW_UDA_QPSQ_MACLEN) |
399                     LS_64(iplen, I40IW_UDA_QPSQ_IPLEN) | LS_64(1, I40IW_UDA_QPSQ_L4T) |
400                     LS_64(iipt, I40IW_UDA_QPSQ_IIPT) |
401                     LS_64(l4len, I40IW_UDA_QPSQ_L4LEN);
402         /* Fourth line of WQE descriptor */
403         header[1] = LS_64(I40IW_OP_TYPE_SEND, I40IW_UDA_QPSQ_OPCODE) |
404                     LS_64(1, I40IW_UDA_QPSQ_SIGCOMPL) |
405                     LS_64(info->doloopback, I40IW_UDA_QPSQ_DOLOOPBACK) |
406                     LS_64(qp->qp_uk.swqe_polarity, I40IW_UDA_QPSQ_VALID);
407
408         set_64bit_val(wqe, 0, info->paddr);
409         set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN));
410         set_64bit_val(wqe, 16, header[0]);
411
412         /* Ensure all data is written before writing valid bit */
413         wmb();
414         set_64bit_val(wqe, 24, header[1]);
415
416         i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32);
417         i40iw_qp_post_wr(&qp->qp_uk);
418         return 0;
419 }
420
421 /**
422  * i40iw_puda_send_buf - transmit puda buffer
423  * @rsrc: resource to use for buffer
424  * @buf: puda buffer to transmit
425  */
426 void i40iw_puda_send_buf(struct i40iw_puda_rsrc *rsrc, struct i40iw_puda_buf *buf)
427 {
428         struct i40iw_puda_send_info info;
429         enum i40iw_status_code ret = 0;
430         unsigned long   flags;
431
432         spin_lock_irqsave(&rsrc->bufpool_lock, flags);
433         /* if no wqe available or not from a completion and we have
434          * pending buffers, we must queue new buffer
435          */
436         if (!rsrc->tx_wqe_avail_cnt || (buf && !list_empty(&rsrc->txpend))) {
437                 list_add_tail(&buf->list, &rsrc->txpend);
438                 spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
439                 rsrc->stats_sent_pkt_q++;
440                 if (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ)
441                         i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
442                                     "%s: adding to txpend\n", __func__);
443                 return;
444         }
445         rsrc->tx_wqe_avail_cnt--;
446         /* if we are coming from a completion and have pending buffers
447          * then get one from pending list
448          */
449         if (!buf) {
450                 buf = i40iw_puda_get_listbuf(&rsrc->txpend);
451                 if (!buf)
452                         goto done;
453         }
454
455         info.scratch = (void *)buf;
456         info.paddr = buf->mem.pa;
457         info.len = buf->totallen;
458         info.tcplen = buf->tcphlen;
459         info.maclen = buf->maclen;
460         info.ipv4 = buf->ipv4;
461         info.doloopback = (rsrc->type == I40IW_PUDA_RSRC_TYPE_IEQ);
462
463         ret = i40iw_puda_send(&rsrc->qp, &info);
464         if (ret) {
465                 rsrc->tx_wqe_avail_cnt++;
466                 rsrc->stats_sent_pkt_q++;
467                 list_add(&buf->list, &rsrc->txpend);
468                 if (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ)
469                         i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
470                                     "%s: adding to puda_send\n", __func__);
471         } else {
472                 rsrc->stats_pkt_sent++;
473         }
474 done:
475         spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
476 }
477
478 /**
479  * i40iw_puda_qp_setctx - during init, set qp's context
480  * @rsrc: qp's resource
481  */
482 static void i40iw_puda_qp_setctx(struct i40iw_puda_rsrc *rsrc)
483 {
484         struct i40iw_sc_qp *qp = &rsrc->qp;
485         u64 *qp_ctx = qp->hw_host_ctx;
486
487         set_64bit_val(qp_ctx, 8, qp->sq_pa);
488         set_64bit_val(qp_ctx, 16, qp->rq_pa);
489
490         set_64bit_val(qp_ctx, 24,
491                       LS_64(qp->hw_rq_size, I40IWQPC_RQSIZE) |
492                       LS_64(qp->hw_sq_size, I40IWQPC_SQSIZE));
493
494         set_64bit_val(qp_ctx, 48, LS_64(1514, I40IWQPC_SNDMSS));
495         set_64bit_val(qp_ctx, 56, 0);
496         set_64bit_val(qp_ctx, 64, 1);
497
498         set_64bit_val(qp_ctx, 136,
499                       LS_64(rsrc->cq_id, I40IWQPC_TXCQNUM) |
500                       LS_64(rsrc->cq_id, I40IWQPC_RXCQNUM));
501
502         set_64bit_val(qp_ctx, 160, LS_64(1, I40IWQPC_PRIVEN));
503
504         set_64bit_val(qp_ctx, 168,
505                       LS_64((uintptr_t)qp, I40IWQPC_QPCOMPCTX));
506
507         set_64bit_val(qp_ctx, 176,
508                       LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) |
509                       LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) |
510                       LS_64(qp->qs_handle, I40IWQPC_QSHANDLE));
511
512         i40iw_debug_buf(rsrc->dev, I40IW_DEBUG_PUDA, "PUDA QP CONTEXT",
513                         qp_ctx, I40IW_QP_CTX_SIZE);
514 }
515
516 /**
517  * i40iw_puda_qp_wqe - setup wqe for qp create
518  * @dev: iwarp device
519  */
520 static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
521 {
522         struct i40iw_sc_cqp *cqp;
523         u64 *wqe;
524         u64 header;
525         struct i40iw_ccq_cqe_info compl_info;
526         enum i40iw_status_code status = 0;
527
528         cqp = dev->cqp;
529         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);
530         if (!wqe)
531                 return I40IW_ERR_RING_FULL;
532
533         set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
534         set_64bit_val(wqe, 40, qp->shadow_area_pa);
535         header = qp->qp_uk.qp_id |
536                  LS_64(I40IW_CQP_OP_CREATE_QP, I40IW_CQPSQ_OPCODE) |
537                  LS_64(I40IW_QP_TYPE_UDA, I40IW_CQPSQ_QP_QPTYPE) |
538                  LS_64(1, I40IW_CQPSQ_QP_CQNUMVALID) |
539                  LS_64(2, I40IW_CQPSQ_QP_NEXTIWSTATE) |
540                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
541
542         set_64bit_val(wqe, 24, header);
543
544         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_PUDA, "PUDA CQE", wqe, 32);
545         i40iw_sc_cqp_post_sq(cqp);
546         status = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
547                                                     I40IW_CQP_OP_CREATE_QP,
548                                                     &compl_info);
549         return status;
550 }
551
552 /**
553  * i40iw_puda_qp_create - create qp for resource
554  * @rsrc: resource to use for buffer
555  */
556 static enum i40iw_status_code i40iw_puda_qp_create(struct i40iw_puda_rsrc *rsrc)
557 {
558         struct i40iw_sc_qp *qp = &rsrc->qp;
559         struct i40iw_qp_uk *ukqp = &qp->qp_uk;
560         enum i40iw_status_code ret = 0;
561         u32 sq_size, rq_size, t_size;
562         struct i40iw_dma_mem *mem;
563
564         sq_size = rsrc->sq_size * I40IW_QP_WQE_MIN_SIZE;
565         rq_size = rsrc->rq_size * I40IW_QP_WQE_MIN_SIZE;
566         t_size = (sq_size + rq_size + (I40IW_SHADOW_AREA_SIZE << 3) +
567                   I40IW_QP_CTX_SIZE);
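        /* one contiguous dma region laid out as sq wqes, rq wqes, shadow area, then qp context */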
568         /* Get page aligned memory */
569         ret =
570             i40iw_allocate_dma_mem(rsrc->dev->hw, &rsrc->qpmem, t_size,
571                                    I40IW_HW_PAGE_SIZE);
572         if (ret) {
573                 i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, "%s: error dma mem\n", __func__);
574                 return ret;
575         }
576
577         mem = &rsrc->qpmem;
578         memset(mem->va, 0, t_size);
579         qp->hw_sq_size = i40iw_get_encoded_wqe_size(rsrc->sq_size, false);
580         qp->hw_rq_size = i40iw_get_encoded_wqe_size(rsrc->rq_size, false);
581         qp->pd = &rsrc->sc_pd;
582         qp->qp_type = I40IW_QP_TYPE_UDA;
583         qp->dev = rsrc->dev;
584         qp->back_qp = (void *)rsrc;
585         qp->sq_pa = mem->pa;
586         qp->rq_pa = qp->sq_pa + sq_size;
587         qp->vsi = rsrc->vsi;
588         ukqp->sq_base = mem->va;
589         ukqp->rq_base = &ukqp->sq_base[rsrc->sq_size];
590         ukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem;
591         qp->shadow_area_pa = qp->rq_pa + rq_size;
592         qp->hw_host_ctx = ukqp->shadow_area + I40IW_SHADOW_AREA_SIZE;
593         qp->hw_host_ctx_pa =
594                 qp->shadow_area_pa + (I40IW_SHADOW_AREA_SIZE << 3);
595         ukqp->qp_id = rsrc->qp_id;
596         ukqp->sq_wrtrk_array = rsrc->sq_wrtrk_array;
597         ukqp->rq_wrid_array = rsrc->rq_wrid_array;
598
599         ukqp->qp_id = rsrc->qp_id;
600         ukqp->sq_size = rsrc->sq_size;
601         ukqp->rq_size = rsrc->rq_size;
602
603         I40IW_RING_INIT(ukqp->sq_ring, ukqp->sq_size);
604         I40IW_RING_INIT(ukqp->initial_ring, ukqp->sq_size);
605         I40IW_RING_INIT(ukqp->rq_ring, ukqp->rq_size);
606
607         if (qp->pd->dev->is_pf)
608                 ukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
609                                                     I40E_PFPE_WQEALLOC);
610         else
611                 ukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
612                                                     I40E_VFPE_WQEALLOC1);
613
614         qp->user_pri = 0;
615         i40iw_qp_add_qos(qp);
616         i40iw_puda_qp_setctx(rsrc);
617         if (rsrc->ceq_valid)
618                 ret = i40iw_cqp_qp_create_cmd(rsrc->dev, qp);
619         else
620                 ret = i40iw_puda_qp_wqe(rsrc->dev, qp);
621         if (ret)
622                 i40iw_free_dma_mem(rsrc->dev->hw, &rsrc->qpmem);
623         return ret;
624 }
625
626 /**
627  * i40iw_puda_cq_wqe - setup wqe for cq create
628  * @rsrc: resource for cq
629  * @dev: iwarp device
630 static enum i40iw_status_code i40iw_puda_cq_wqe(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq)
631 {
632         u64 *wqe;
633         struct i40iw_sc_cqp *cqp;
634         u64 header;
635         struct i40iw_ccq_cqe_info compl_info;
636         enum i40iw_status_code status = 0;
637
638         cqp = dev->cqp;
639         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);
640         if (!wqe)
641                 return I40IW_ERR_RING_FULL;
642
643         set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
644         set_64bit_val(wqe, 8, RS_64_1(cq, 1));
645         set_64bit_val(wqe, 16,
646                       LS_64(cq->shadow_read_threshold,
647                             I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
648         set_64bit_val(wqe, 32, cq->cq_pa);
649
650         set_64bit_val(wqe, 40, cq->shadow_area_pa);
651
652         header = cq->cq_uk.cq_id |
653             LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
654             LS_64(1, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
655             LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) |
656             LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) |
657             LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
658         set_64bit_val(wqe, 24, header);
659
660         i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE",
661                         wqe, I40IW_CQP_WQE_SIZE * 8);
662
663         i40iw_sc_cqp_post_sq(dev->cqp);
664         status = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
665                                                  I40IW_CQP_OP_CREATE_CQ,
666                                                  &compl_info);
667         return status;
668 }
669
670 /**
671  * i40iw_puda_cq_create - create cq for resource
672  * @rsrc: resource for which cq to create
673  */
674 static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc)
675 {
676         struct i40iw_sc_dev *dev = rsrc->dev;
677         struct i40iw_sc_cq *cq = &rsrc->cq;
678         enum i40iw_status_code ret = 0;
679         u32 tsize, cqsize;
680         struct i40iw_dma_mem *mem;
681         struct i40iw_cq_init_info info;
682         struct i40iw_cq_uk_init_info *init_info = &info.cq_uk_init_info;
683
684         cq->vsi = rsrc->vsi;
685         cqsize = rsrc->cq_size * (sizeof(struct i40iw_cqe));
686         tsize = cqsize + sizeof(struct i40iw_cq_shadow_area);
687         ret = i40iw_allocate_dma_mem(dev->hw, &rsrc->cqmem, tsize,
688                                      I40IW_CQ0_ALIGNMENT);
689         if (ret)
690                 return ret;
691
692         mem = &rsrc->cqmem;
693         memset(&info, 0, sizeof(info));
694         info.dev = dev;
695         info.type = (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ) ?
696                          I40IW_CQ_TYPE_ILQ : I40IW_CQ_TYPE_IEQ;
697         info.shadow_read_threshold = rsrc->cq_size >> 2;
698         info.ceq_id_valid = true;
699         info.cq_base_pa = mem->pa;
700         info.shadow_area_pa = mem->pa + cqsize;
701         init_info->cq_base = mem->va;
702         init_info->shadow_area = (u64 *)((u8 *)mem->va + cqsize);
703         init_info->cq_size = rsrc->cq_size;
704         init_info->cq_id = rsrc->cq_id;
705         info.ceqe_mask = true;
706         info.ceq_id_valid = true;
707         ret = dev->iw_priv_cq_ops->cq_init(cq, &info);
708         if (ret)
709                 goto error;
710         if (rsrc->ceq_valid)
711                 ret = i40iw_cqp_cq_create_cmd(dev, cq);
712         else
713                 ret = i40iw_puda_cq_wqe(dev, cq);
714 error:
715         if (ret)
716                 i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
717         return ret;
718 }
719
720 /**
721  * i40iw_puda_free_qp - free qp for resource
722  * @rsrc: resource for which qp to free
723  */
724 static void i40iw_puda_free_qp(struct i40iw_puda_rsrc *rsrc)
725 {
726         enum i40iw_status_code ret;
727         struct i40iw_ccq_cqe_info compl_info;
728         struct i40iw_sc_dev *dev = rsrc->dev;
729
730         if (rsrc->ceq_valid) {
731                 i40iw_cqp_qp_destroy_cmd(dev, &rsrc->qp);
732                 return;
733         }
734
735         ret = dev->iw_priv_qp_ops->qp_destroy(&rsrc->qp,
736                         0, false, true, true);
737         if (ret)
738                 i40iw_debug(dev, I40IW_DEBUG_PUDA,
739                             "%s error puda qp destroy wqe\n",
740                             __func__);
741
742         if (!ret) {
743                 ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
744                                 I40IW_CQP_OP_DESTROY_QP,
745                                 &compl_info);
746                 if (ret)
747                         i40iw_debug(dev, I40IW_DEBUG_PUDA,
748                                     "%s error puda qp destroy failed\n",
749                                     __func__);
750         }
751 }
752
753 /**
754  * i40iw_puda_free_cq - free cq for resource
755  * @rsrc: resource for which cq to free
756  */
757 static void i40iw_puda_free_cq(struct i40iw_puda_rsrc *rsrc)
758 {
759         enum i40iw_status_code ret;
760         struct i40iw_ccq_cqe_info compl_info;
761         struct i40iw_sc_dev *dev = rsrc->dev;
762
763         if (rsrc->ceq_valid) {
764                 i40iw_cqp_cq_destroy_cmd(dev, &rsrc->cq);
765                 return;
766         }
767         ret = dev->iw_priv_cq_ops->cq_destroy(&rsrc->cq, 0, true);
768
769         if (ret)
770                 i40iw_debug(dev, I40IW_DEBUG_PUDA,
771                             "%s error ieq cq destroy\n",
772                             __func__);
773
774         if (!ret) {
775                 ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
776                                 I40IW_CQP_OP_DESTROY_CQ,
777                                 &compl_info);
778                 if (ret)
779                         i40iw_debug(dev, I40IW_DEBUG_PUDA,
780                                     "%s error ieq qp destroy done\n",
781                                     __func__);
782         }
783 }
784
785 /**
786  * i40iw_puda_dele_resources - delete all resources during close
787  * @vsi: pointer to the vsi structure
788  * @type: type of resource to delete
789  * @reset: true if reset chip
790  */
791 void i40iw_puda_dele_resources(struct i40iw_sc_vsi *vsi,
792                                enum puda_resource_type type,
793                                bool reset)
794 {
795         struct i40iw_sc_dev *dev = vsi->dev;
796         struct i40iw_puda_rsrc *rsrc;
797         struct i40iw_puda_buf *buf = NULL;
798         struct i40iw_puda_buf *nextbuf = NULL;
799         struct i40iw_virt_mem *vmem;
800
801         switch (type) {
802         case I40IW_PUDA_RSRC_TYPE_ILQ:
803                 rsrc = vsi->ilq;
804                 vmem = &vsi->ilq_mem;
805                 break;
806         case I40IW_PUDA_RSRC_TYPE_IEQ:
807                 rsrc = vsi->ieq;
808                 vmem = &vsi->ieq_mem;
809                 break;
810         default:
811                 i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s: error resource type = 0x%x\n",
812                             __func__, type);
813                 return;
814         }
815
816         switch (rsrc->completion) {
817         case PUDA_HASH_CRC_COMPLETE:
818                 i40iw_free_hash_desc(rsrc->hash_desc);
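                /* fallthrough */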
819         case PUDA_QP_CREATED:
820                 if (!reset)
821                         i40iw_puda_free_qp(rsrc);
822
823                 i40iw_free_dma_mem(dev->hw, &rsrc->qpmem);
824                 /* fallthrough */
825         case PUDA_CQ_CREATED:
826                 if (!reset)
827                         i40iw_puda_free_cq(rsrc);
828
829                 i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
830                 break;
831         default:
832                 i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, "%s error no resources\n", __func__);
833                 break;
834         }
835         /* Free all allocated puda buffers for both tx and rx */
836         buf = rsrc->alloclist;
837         while (buf) {
838                 nextbuf = buf->next;
839                 i40iw_puda_dele_buf(dev, buf);
840                 buf = nextbuf;
841                 rsrc->alloc_buf_count--;
842         }
843         i40iw_free_virt_mem(dev->hw, vmem);
844 }
845
846 /**
847  * i40iw_puda_allocbufs - allocate buffers for resource
848  * @rsrc: resource for buffer allocation
849  * @count: number of buffers to create
850  */
851 static enum i40iw_status_code i40iw_puda_allocbufs(struct i40iw_puda_rsrc *rsrc,
852                                                    u32 count)
853 {
854         u32 i;
855         struct i40iw_puda_buf *buf;
856         struct i40iw_puda_buf *nextbuf;
857
858         for (i = 0; i < count; i++) {
859                 buf = i40iw_puda_alloc_buf(rsrc->dev, rsrc->buf_size);
860                 if (!buf) {
861                         rsrc->stats_buf_alloc_fail++;
862                         return I40IW_ERR_NO_MEMORY;
863                 }
864                 i40iw_puda_ret_bufpool(rsrc, buf);
865                 rsrc->alloc_buf_count++;
866                 if (!rsrc->alloclist) {
867                         rsrc->alloclist = buf;
868                 } else {
869                         nextbuf = rsrc->alloclist;
870                         rsrc->alloclist = buf;
871                         buf->next = nextbuf;
872                 }
873         }
874         rsrc->avail_buf_count = rsrc->alloc_buf_count;
875         return 0;
876 }
877
878 /**
879  * i40iw_puda_create_rsrc - create resource (ilq or ieq)
880  * @vsi: pointer to the vsi structure
881  * @info: resource information
882  */
883 enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_vsi *vsi,
884                                               struct i40iw_puda_rsrc_info *info)
885 {
886         struct i40iw_sc_dev *dev = vsi->dev;
887         enum i40iw_status_code ret = 0;
888         struct i40iw_puda_rsrc *rsrc;
889         u32 pudasize;
890         u32 sqwridsize, rqwridsize;
891         struct i40iw_virt_mem *vmem;
892
893         info->count = 1;
894         pudasize = sizeof(struct i40iw_puda_rsrc);
895         sqwridsize = info->sq_size * sizeof(struct i40iw_sq_uk_wr_trk_info);
896         rqwridsize = info->rq_size * 8;
897         switch (info->type) {
898         case I40IW_PUDA_RSRC_TYPE_ILQ:
899                 vmem = &vsi->ilq_mem;
900                 break;
901         case I40IW_PUDA_RSRC_TYPE_IEQ:
902                 vmem = &vsi->ieq_mem;
903                 break;
904         default:
905                 return I40IW_NOT_SUPPORTED;
906         }
907         ret =
908             i40iw_allocate_virt_mem(dev->hw, vmem,
909                                     pudasize + sqwridsize + rqwridsize);
910         if (ret)
911                 return ret;
912         rsrc = (struct i40iw_puda_rsrc *)vmem->va;
913         spin_lock_init(&rsrc->bufpool_lock);
914         if (info->type == I40IW_PUDA_RSRC_TYPE_ILQ) {
915                 vsi->ilq = (struct i40iw_puda_rsrc *)vmem->va;
916                 vsi->ilq_count = info->count;
917                 rsrc->receive = info->receive;
918                 rsrc->xmit_complete = info->xmit_complete;
919         } else {
920                 vmem = &vsi->ieq_mem;
921                 vsi->ieq_count = info->count;
922                 vsi->ieq = (struct i40iw_puda_rsrc *)vmem->va;
923                 rsrc->receive = i40iw_ieq_receive;
924                 rsrc->xmit_complete = i40iw_ieq_tx_compl;
925         }
926
927         rsrc->ceq_valid = info->ceq_valid;
928         rsrc->type = info->type;
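        /* the sq wrid tracking array and rq wrid array are carved out of the same virtual allocation, right after the rsrc struct */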
929         rsrc->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)((u8 *)vmem->va + pudasize);
930         rsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize);
931         /* Initialize all ieq lists */
932         INIT_LIST_HEAD(&rsrc->bufpool);
933         INIT_LIST_HEAD(&rsrc->txpend);
934
935         rsrc->tx_wqe_avail_cnt = info->sq_size - 1;
936         dev->iw_pd_ops->pd_init(dev, &rsrc->sc_pd, info->pd_id, -1);
937         rsrc->qp_id = info->qp_id;
938         rsrc->cq_id = info->cq_id;
939         rsrc->sq_size = info->sq_size;
940         rsrc->rq_size = info->rq_size;
941         rsrc->cq_size = info->rq_size + info->sq_size;
942         rsrc->buf_size = info->buf_size;
943         rsrc->dev = dev;
944         rsrc->vsi = vsi;
945
946         ret = i40iw_puda_cq_create(rsrc);
947         if (!ret) {
948                 rsrc->completion = PUDA_CQ_CREATED;
949                 ret = i40iw_puda_qp_create(rsrc);
950         }
951         if (ret) {
952                 i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error qp_create\n", __func__);
953                 goto error;
954         }
955         rsrc->completion = PUDA_QP_CREATED;
956
957         ret = i40iw_puda_allocbufs(rsrc, info->tx_buf_cnt + info->rq_size);
958         if (ret) {
959                 i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error alloc_buf\n", __func__);
960                 goto error;
961         }
962
963         rsrc->rxq_invalid_cnt = info->rq_size;
964         ret = i40iw_puda_replenish_rq(rsrc, true);
965         if (ret)
966                 goto error;
967
968         if (info->type == I40IW_PUDA_RSRC_TYPE_IEQ) {
969                 if (!i40iw_init_hash_desc(&rsrc->hash_desc)) {
970                         rsrc->check_crc = true;
971                         rsrc->completion = PUDA_HASH_CRC_COMPLETE;
972                         ret = 0;
973                 }
974         }
975
976         dev->ccq_ops->ccq_arm(&rsrc->cq);
977         return ret;
978  error:
979         i40iw_puda_dele_resources(vsi, info->type, false);
980
981         return ret;
982 }
983
984 /**
985  * i40iw_ilq_putback_rcvbuf - ilq buffer to put back on rq
986  * @qp: ilq's qp resource
987  * @wqe_idx:  wqe index of completed rcvbuf
988  */
989 static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx)
990 {
991         u64 *wqe;
992         u64 offset24;
993
994         wqe = qp->qp_uk.rq_base[wqe_idx].elem;
995         get_64bit_val(wqe, 24, &offset24);
996         offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);
997         set_64bit_val(wqe, 24, offset24);
998 }
999
1000 /**
1001  * i40iw_ieq_get_fpdu_length - given mpa payload length, return total fpdu length
1002  * @length: length of mpa payload
1003  */
1004 static u16 i40iw_ieq_get_fpdu_length(u16 length)
1005 {
1006         u16 fpdu_len;
1007
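        /* add mpa framing overhead (length field and crc) and round up to a 4-byte boundary */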
1008         fpdu_len = length + I40IW_IEQ_MPA_FRAMING;
1009         fpdu_len = (fpdu_len + 3) & 0xfffffffc;
1010         return fpdu_len;
1011 }
1012
1013 /**
1014  * i40iw_ieq_copy_to_txbuf - copy data from rcv buf to tx buf
1015  * @buf: rcv buffer with partial fpdu
1016  * @txbuf: tx buffer for sending back
1017  * @buf_offset: rcv buffer offset to copy from
1018  * @txbuf_offset: offset in tx buf to copy to
1019  * @length: length of data to copy
1020  */
1021 static void i40iw_ieq_copy_to_txbuf(struct i40iw_puda_buf *buf,
1022                                     struct i40iw_puda_buf *txbuf,
1023                                     u16 buf_offset, u32 txbuf_offset,
1024                                     u32 length)
1025 {
1026         void *mem1 = (u8 *)buf->mem.va + buf_offset;
1027         void *mem2 = (u8 *)txbuf->mem.va + txbuf_offset;
1028
1029         memcpy(mem2, mem1, length);
1030 }
1031
1032 /**
1033  * i40iw_ieq_setup_tx_buf - setup tx buffer for partial handling
1034  * @buf: receive buffer with partial fpdu
1035  * @txbuf: buffer to prepare
1036  */
1037 static void i40iw_ieq_setup_tx_buf(struct i40iw_puda_buf *buf,
1038                                    struct i40iw_puda_buf *txbuf)
1039 {
1040         txbuf->maclen = buf->maclen;
1041         txbuf->tcphlen = buf->tcphlen;
1042         txbuf->ipv4 = buf->ipv4;
1043         txbuf->hdrlen = buf->hdrlen;
1044         i40iw_ieq_copy_to_txbuf(buf, txbuf, 0, 0, buf->hdrlen);
1045 }
1046
1047 /**
1048  * i40iw_ieq_check_first_buf - align first rcv buffer with first partial sequence number
1049  * @buf: receive exception buffer
1050  * @fps: first partial sequence number
1051  */
1052 static void i40iw_ieq_check_first_buf(struct i40iw_puda_buf *buf, u32 fps)
1053 {
1054         u32 offset;
1055
1056         if (buf->seqnum < fps) {
1057                 offset = fps - buf->seqnum;
1058                 if (offset > buf->datalen)
1059                         return;
1060                 buf->data += offset;
1061                 buf->datalen -= (u16)offset;
1062                 buf->seqnum = fps;
1063         }
1064 }
1065
1066 /**
1067  * i40iw_ieq_compl_pfpdu - write txbuf with full fpdu
1068  * @ieq: ieq resource
1069  * @rxlist: ieq's received buffer list
1070  * @pbufl: temporary list of buffers for fpdu
1071  * @txbuf: tx buffer for fpdu
1072  * @fpdu_len: total length of fpdu
1073  */
1074 static void  i40iw_ieq_compl_pfpdu(struct i40iw_puda_rsrc *ieq,
1075                                    struct list_head *rxlist,
1076                                    struct list_head *pbufl,
1077                                    struct i40iw_puda_buf *txbuf,
1078                                    u16 fpdu_len)
1079 {
1080         struct i40iw_puda_buf *buf;
1081         u32 nextseqnum;
1082         u16 txoffset, bufoffset;
1083
1084         buf = i40iw_puda_get_listbuf(pbufl);
1085         if (!buf)
1086                 return;
1087         nextseqnum = buf->seqnum + fpdu_len;
1088         txbuf->totallen = buf->hdrlen + fpdu_len;
1089         txbuf->data = (u8 *)txbuf->mem.va + buf->hdrlen;
1090         i40iw_ieq_setup_tx_buf(buf, txbuf);
1091
1092         txoffset = buf->hdrlen;
1093         bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
1094
1095         do {
1096                 if (buf->datalen >= fpdu_len) {
1097                         /* copied full fpdu */
1098                         i40iw_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset, fpdu_len);
1099                         buf->datalen -= fpdu_len;
1100                         buf->data += fpdu_len;
1101                         buf->seqnum = nextseqnum;
1102                         break;
1103                 }
1104                 /* copy partial fpdu */
1105                 i40iw_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset, buf->datalen);
1106                 txoffset += buf->datalen;
1107                 fpdu_len -= buf->datalen;
1108                 i40iw_puda_ret_bufpool(ieq, buf);
1109                 buf = i40iw_puda_get_listbuf(pbufl);
1110                 if (!buf)
1111                         return;
1112                 bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
1113         } while (1);
1114
1115         /* last buffer on the list */
1116         if (buf->datalen)
1117                 list_add(&buf->list, rxlist);
1118         else
1119                 i40iw_puda_ret_bufpool(ieq, buf);
1120 }
1121
1122 /**
1123  * i40iw_ieq_create_pbufl - create buffer list for single fpdu
1124  * @rxlist: resource list for receive ieq buffers
1125  * @pbufl: temp. list of buffers for fpdu
1126  * @buf: first receive buffer
1127  * @fpdu_len: total length of fpdu
1128  */
1129 static enum i40iw_status_code i40iw_ieq_create_pbufl(
1130                                                      struct i40iw_pfpdu *pfpdu,
1131                                                      struct list_head *rxlist,
1132                                                      struct list_head *pbufl,
1133                                                      struct i40iw_puda_buf *buf,
1134                                                      u16 fpdu_len)
1135 {
1136         enum i40iw_status_code status = 0;
1137         struct i40iw_puda_buf *nextbuf;
1138         u32     nextseqnum;
1139         u16 plen = fpdu_len - buf->datalen;
1140         bool done = false;
1141
1142         nextseqnum = buf->seqnum + buf->datalen;
1143         do {
1144                 nextbuf = i40iw_puda_get_listbuf(rxlist);
1145                 if (!nextbuf) {
1146                         status = I40IW_ERR_list_empty;
1147                         break;
1148                 }
1149                 list_add_tail(&nextbuf->list, pbufl);
1150                 if (nextbuf->seqnum != nextseqnum) {
1151                         pfpdu->bad_seq_num++;
1152                         status = I40IW_ERR_SEQ_NUM;
1153                         break;
1154                 }
1155                 if (nextbuf->datalen >= plen) {
1156                         done = true;
1157                 } else {
1158                         plen -= nextbuf->datalen;
1159                         nextseqnum = nextbuf->seqnum + nextbuf->datalen;
1160                 }
1161
1162         } while (!done);
1163
1164         return status;
1165 }
1166
1167 /**
1168  * i40iw_ieq_handle_partial - process partial fpdu buffer
1169  * @ieq: ieq resource
1170  * @pfpdu: partial management per user qp
1171  * @buf: receive buffer
1172  * @fpdu_len: fpdu len in the buffer
1173  */
1174 static enum i40iw_status_code i40iw_ieq_handle_partial(struct i40iw_puda_rsrc *ieq,
1175                                                        struct i40iw_pfpdu *pfpdu,
1176                                                        struct i40iw_puda_buf *buf,
1177                                                        u16 fpdu_len)
1178 {
1179         enum i40iw_status_code status = 0;
1180         u8 *crcptr;
1181         u32 mpacrc;
1182         u32 seqnum = buf->seqnum;
1183         struct list_head pbufl; /* partial buffer list */
1184         struct i40iw_puda_buf *txbuf = NULL;
1185         struct list_head *rxlist = &pfpdu->rxlist;
1186
1187         INIT_LIST_HEAD(&pbufl);
1188         list_add(&buf->list, &pbufl);
1189
1190         status = i40iw_ieq_create_pbufl(pfpdu, rxlist, &pbufl, buf, fpdu_len);
1191         if (status)
1192                 goto error;
1193
1194         txbuf = i40iw_puda_get_bufpool(ieq);
1195         if (!txbuf) {
1196                 pfpdu->no_tx_bufs++;
1197                 status = I40IW_ERR_NO_TXBUFS;
1198                 goto error;
1199         }
1200
1201         i40iw_ieq_compl_pfpdu(ieq, rxlist, &pbufl, txbuf, fpdu_len);
1202         i40iw_ieq_update_tcpip_info(txbuf, fpdu_len, seqnum);
1203         crcptr = txbuf->data + fpdu_len - 4;
1204         mpacrc = *(u32 *)crcptr;
1205         if (ieq->check_crc) {
1206                 status = i40iw_ieq_check_mpacrc(ieq->hash_desc, txbuf->data,
1207                                                 (fpdu_len - 4), mpacrc);
1208                 if (status) {
1209                         i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
1210                                     "%s: error bad crc\n", __func__);
1211                         goto error;
1212                 }
1213         }
1214
1215         i40iw_debug_buf(ieq->dev, I40IW_DEBUG_IEQ, "IEQ TX BUFFER",
1216                         txbuf->mem.va, txbuf->totallen);
1217         i40iw_puda_send_buf(ieq, txbuf);
1218         pfpdu->rcv_nxt = seqnum + fpdu_len;
1219         return status;
1220  error:
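        /* move the gathered buffers back to the head of the rx list, taking from the tail of pbufl so their original order is preserved */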
1221         while (!list_empty(&pbufl)) {
1222                 buf = (struct i40iw_puda_buf *)(pbufl.prev);
1223                 list_del(&buf->list);
1224                 list_add(&buf->list, rxlist);
1225         }
1226         if (txbuf)
1227                 i40iw_puda_ret_bufpool(ieq, txbuf);
1228         return status;
1229 }
1230
1231 /**
1232  * i40iw_ieq_process_buf - process buffer rcvd for ieq
1233  * @ieq: ieq resource
1234  * @pfpdu: partial management per user qp
1235  * @buf: receive buffer
1236  */
1237 static enum i40iw_status_code i40iw_ieq_process_buf(struct i40iw_puda_rsrc *ieq,
1238                                                     struct i40iw_pfpdu *pfpdu,
1239                                                     struct i40iw_puda_buf *buf)
1240 {
1241         u16 fpdu_len = 0;
1242         u16 datalen = buf->datalen;
1243         u8 *datap = buf->data;
1244         u8 *crcptr;
1245         u16 ioffset = 0;
1246         u32 mpacrc;
1247         u32 seqnum = buf->seqnum;
1248         u16 length = 0;
1249         u16 full = 0;
1250         bool partial = false;
1251         struct i40iw_puda_buf *txbuf;
1252         struct list_head *rxlist = &pfpdu->rxlist;
1253         enum i40iw_status_code ret = 0;
1254         enum i40iw_status_code status = 0;
1255
1256         ioffset = (u16)(buf->data - (u8 *)buf->mem.va);
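        /* walk the payload, consuming complete fpdus; each fpdu starts with the 2-byte mpa length field */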
1257         while (datalen) {
1258                 fpdu_len = i40iw_ieq_get_fpdu_length(ntohs(*(__be16 *)datap));
1259                 if (fpdu_len > pfpdu->max_fpdu_data) {
1260                         i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
1261                                     "%s: error bad fpdu_len\n", __func__);
1262                         status = I40IW_ERR_MPA_CRC;
1263                         list_add(&buf->list, rxlist);
1264                         return status;
1265                 }
1266
1267                 if (datalen < fpdu_len) {
1268                         partial = true;
1269                         break;
1270                 }
1271                 crcptr = datap + fpdu_len - 4;
1272                 mpacrc = *(u32 *)crcptr;
1273                 if (ieq->check_crc)
1274                         ret = i40iw_ieq_check_mpacrc(ieq->hash_desc,
1275                                                      datap, fpdu_len - 4, mpacrc);
1276                 if (ret) {
1277                         status = I40IW_ERR_MPA_CRC;
1278                         list_add(&buf->list, rxlist);
1279                         return status;
1280                 }
1281                 full++;
1282                 pfpdu->fpdu_processed++;
1283                 datap += fpdu_len;
1284                 length += fpdu_len;
1285                 datalen -= fpdu_len;
1286         }
1287         if (full) {
1288                 /* copy full pdu's in the txbuf and send them out */
1289                 txbuf = i40iw_puda_get_bufpool(ieq);
1290                 if (!txbuf) {
1291                         pfpdu->no_tx_bufs++;
1292                         status = I40IW_ERR_NO_TXBUFS;
1293                         list_add(&buf->list, rxlist);
1294                         return status;
1295                 }
1296                 /* modify txbuf's buffer header */
1297                 i40iw_ieq_setup_tx_buf(buf, txbuf);
1298                 /* copy full fpdu's to new buffer */
1299                 i40iw_ieq_copy_to_txbuf(buf, txbuf, ioffset, buf->hdrlen,
1300                                         length);
1301                 txbuf->totallen = buf->hdrlen + length;
1302
1303                 i40iw_ieq_update_tcpip_info(txbuf, length, buf->seqnum);
1304                 i40iw_puda_send_buf(ieq, txbuf);
1305
1306                 if (!datalen) {
1307                         pfpdu->rcv_nxt = buf->seqnum + length;
1308                         i40iw_puda_ret_bufpool(ieq, buf);
1309                         return status;
1310                 }
1311                 buf->data = datap;
1312                 buf->seqnum = seqnum + length;
1313                 buf->datalen = datalen;
1314                 pfpdu->rcv_nxt = buf->seqnum;
1315         }
1316         if (partial)
1317                 status = i40iw_ieq_handle_partial(ieq, pfpdu, buf, fpdu_len);
1318
1319         return status;
1320 }
1321
1322 /**
1323  * i40iw_ieq_process_fpdus - process fpdu buffers on the qp's rxlist
1324  * @qp: qp with pending partial fpdus
1325  * @ieq: ieq resource
1326  */
1327 static void i40iw_ieq_process_fpdus(struct i40iw_sc_qp *qp,
1328                                     struct i40iw_puda_rsrc *ieq)
1329 {
1330         struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
1331         struct list_head *rxlist = &pfpdu->rxlist;
1332         struct i40iw_puda_buf *buf;
1333         enum i40iw_status_code status;
1334
1335         do {
1336                 if (list_empty(rxlist))
1337                         break;
1338                 buf = i40iw_puda_get_listbuf(rxlist);
1339                 if (!buf) {
1340                         i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
1341                                     "%s: error no buf\n", __func__);
1342                         break;
1343                 }
1344                 if (buf->seqnum != pfpdu->rcv_nxt) {
1345                         /* This could be an out of order or missing packet */
1346                         pfpdu->out_of_order++;
1347                         list_add(&buf->list, rxlist);
1348                         break;
1349                 }
1350                 /* keep processing buffers from the head of the list */
1351                 status = i40iw_ieq_process_buf(ieq, pfpdu, buf);
1352                 if (status == I40IW_ERR_MPA_CRC) {
1353                         pfpdu->mpa_crc_err = true;
1354                         while (!list_empty(rxlist)) {
1355                                 buf = i40iw_puda_get_listbuf(rxlist);
1356                                 i40iw_puda_ret_bufpool(ieq, buf);
1357                                 pfpdu->crc_err++;
1358                         }
1359                         /* create CQP for AE */
1360                         i40iw_ieq_mpa_crc_ae(ieq->dev, qp);
1361                 }
1362         } while (!status);
1363 }
1364
1365 /**
1366  * i40iw_ieq_handle_exception - handle qp's exception
1367  * @ieq: ieq resource
1368  * @qp: qp receiving exception
1369  * @buf: receive buffer
1370  */
1371 static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
1372                                        struct i40iw_sc_qp *qp,
1373                                        struct i40iw_puda_buf *buf)
1374 {
1375         struct i40iw_puda_buf *tmpbuf = NULL;
1376         struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
1377         u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx;
1378         u32 rcv_wnd = hw_host_ctx[23];
1379         /* first partial seq # in q2 */
1380         u32 fps = qp->q2_buf[16];
1381         struct list_head *rxlist = &pfpdu->rxlist;
1382         struct list_head *plist;
1383
1384         pfpdu->total_ieq_bufs++;
1385
1386         if (pfpdu->mpa_crc_err) {
1387                 pfpdu->crc_err++;
1388                 goto error;
1389         }
1390         if (pfpdu->mode && (fps != pfpdu->fps)) {
1391                 /* clean up qp as it is new partial sequence */
1392                 i40iw_ieq_cleanup_qp(ieq, qp);
1393                 i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
1394                             "%s: restarting new partial\n", __func__);
1395                 pfpdu->mode = false;
1396         }
1397
1398         if (!pfpdu->mode) {
1399                 i40iw_debug_buf(ieq->dev, I40IW_DEBUG_IEQ, "Q2 BUFFER", (u64 *)qp->q2_buf, 128);
1400                 /* First_Partial_Sequence_Number check */
1401                 pfpdu->rcv_nxt = fps;
1402                 pfpdu->fps = fps;
1403                 pfpdu->mode = true;
1404                 pfpdu->max_fpdu_data = ieq->vsi->mss;
1405                 pfpdu->pmode_count++;
1406                 INIT_LIST_HEAD(rxlist);
1407                 i40iw_ieq_check_first_buf(buf, fps);
1408         }
1409
1410         if (!(rcv_wnd >= (buf->seqnum - pfpdu->rcv_nxt))) {
1411                 pfpdu->bad_seq_num++;
1412                 goto error;
1413         }
1414
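        /* insert the buffer into the rx list in ascending sequence number order */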
1415         if (!list_empty(rxlist)) {
1416                 tmpbuf = (struct i40iw_puda_buf *)rxlist->next;
1417                 while ((struct list_head *)tmpbuf != rxlist) {
1418                         if ((int)(buf->seqnum - tmpbuf->seqnum) < 0)
1419                                 break;
1420                         plist = &tmpbuf->list;
1421                         tmpbuf = (struct i40iw_puda_buf *)plist->next;
1422                 }
1423                 /* Insert buf before tmpbuf */
1424                 list_add_tail(&buf->list, &tmpbuf->list);
1425         } else {
1426                 list_add_tail(&buf->list, rxlist);
1427         }
1428         i40iw_ieq_process_fpdus(qp, ieq);
1429         return;
1430  error:
1431         i40iw_puda_ret_bufpool(ieq, buf);
1432 }
1433
1434 /**
1435  * i40iw_ieq_receive - received exception buffer
1436  * @vsi: pointer to the vsi structure
1437  * @buf: exception buffer received
1438  */
1439 static void i40iw_ieq_receive(struct i40iw_sc_vsi *vsi,
1440                               struct i40iw_puda_buf *buf)
1441 {
1442         struct i40iw_puda_rsrc *ieq = vsi->ieq;
1443         struct i40iw_sc_qp *qp = NULL;
1444         u32 wqe_idx = ieq->compl_rxwqe_idx;
1445
1446         qp = i40iw_ieq_get_qp(vsi->dev, buf);
1447         if (!qp) {
1448                 ieq->stats_bad_qp_id++;
1449                 i40iw_puda_ret_bufpool(ieq, buf);
1450         } else {
1451                 i40iw_ieq_handle_exception(ieq, qp, buf);
1452         }
1453         /*
1454          * ieq->rx_wqe_idx is used by i40iw_puda_replenish_rq()
1455          * as the wqe_idx at which to start replenishing the rq
1456          */
1457         if (!ieq->rxq_invalid_cnt)
1458                 ieq->rx_wqe_idx = wqe_idx;
1459         ieq->rxq_invalid_cnt++;
1460 }
1461
1462 /**
1463  * i40iw_ieq_tx_compl - put back after sending completed exception buffer
1464  * @vsi: pointer to the vsi structure
1465  * @sqwrid: pointer to puda buffer
1466  */
1467 static void i40iw_ieq_tx_compl(struct i40iw_sc_vsi *vsi, void *sqwrid)
1468 {
1469         struct i40iw_puda_rsrc *ieq = vsi->ieq;
1470         struct i40iw_puda_buf *buf = (struct i40iw_puda_buf *)sqwrid;
1471
1472         i40iw_puda_ret_bufpool(ieq, buf);
1473         if (!list_empty(&ieq->txpend)) {
1474                 buf = i40iw_puda_get_listbuf(&ieq->txpend);
1475                 i40iw_puda_send_buf(ieq, buf);
1476         }
1477 }
1478
1479 /**
1480  * i40iw_ieq_cleanup_qp - qp is being destroyed
1481  * @ieq: ieq resource
1482  * @qp: qp being destroyed, whose pending fpdu buffers are freed
1483  */
1484 static void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp)
1485 {
1486         struct i40iw_puda_buf *buf;
1487         struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
1488         struct list_head *rxlist = &pfpdu->rxlist;
1489
1490         if (!pfpdu->mode)
1491                 return;
1492         while (!list_empty(rxlist)) {
1493                 buf = i40iw_puda_get_listbuf(rxlist);
1494                 i40iw_puda_ret_bufpool(ieq, buf);
1495         }
1496 }