qed*: Utilize FW 8.33.1.0
drivers/net/ethernet/qlogic/qed/qed_iwarp.c
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include "qed_cxt.h"
#include "qed_hw.h"
#include "qed_ll2.h"
#include "qed_rdma.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_ooo.h"

#define QED_IWARP_ORD_DEFAULT           32
#define QED_IWARP_IRD_DEFAULT           32
#define QED_IWARP_MAX_FW_MSS            4120

#define QED_EP_SIG 0xecabcdef

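/* MPA v2 enhanced-negotiation header carried at the start of the MPA
 * private data. ird/ord are big-endian on the wire; their upper bits
 * double as the peer-to-peer/RTR flags defined below.
 */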
struct mpa_v2_hdr {
        __be16 ird;
        __be16 ord;
};

#define MPA_V2_PEER2PEER_MODEL  0x8000
#define MPA_V2_SEND_RTR         0x4000  /* on ird */
#define MPA_V2_READ_RTR         0x4000  /* on ord */
#define MPA_V2_WRITE_RTR        0x8000
#define MPA_V2_IRD_ORD_MASK     0x3FFF

#define MPA_REV2(_mpa_rev) ((_mpa_rev) == MPA_NEGOTIATION_TYPE_ENHANCED)

#define QED_IWARP_INVALID_TCP_CID       0xffffffff
#define QED_IWARP_RCV_WND_SIZE_DEF      (256 * 1024)
#define QED_IWARP_RCV_WND_SIZE_MIN      (0xffff)
#define TIMESTAMP_HEADER_SIZE           (12)
#define QED_IWARP_MAX_FIN_RT_DEFAULT    (2)

#define QED_IWARP_TS_EN                 BIT(0)
#define QED_IWARP_DA_EN                 BIT(1)
#define QED_IWARP_PARAM_CRC_NEEDED      (1)
#define QED_IWARP_PARAM_P2P             (1)

#define QED_IWARP_DEF_MAX_RT_TIME       (0)
#define QED_IWARP_DEF_CWND_FACTOR       (4)
#define QED_IWARP_DEF_KA_MAX_PROBE_CNT  (5)
#define QED_IWARP_DEF_KA_TIMEOUT        (1200000)       /* 20 min */
#define QED_IWARP_DEF_KA_INTERVAL       (1000)          /* 1 sec */

static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
                                 u8 fw_event_code, u16 echo,
                                 union event_ring_data *data,
                                 u8 fw_return_code);

/* Override devinfo with iWARP specific values */
void qed_iwarp_init_devinfo(struct qed_hwfn *p_hwfn)
{
        struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;

        dev->max_inline = IWARP_REQ_MAX_INLINE_DATA_SIZE;
        dev->max_qp = min_t(u32,
                            IWARP_MAX_QPS,
                            p_hwfn->p_rdma_info->num_qps) -
                      QED_IWARP_PREALLOC_CNT;

        dev->max_cq = dev->max_qp;

        dev->max_qp_resp_rd_atomic_resc = QED_IWARP_IRD_DEFAULT;
        dev->max_qp_req_rd_atomic_resc = QED_IWARP_ORD_DEFAULT;
}

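/* Enable TCP searching in the parser so that offloaded iWARP (TCP)
 * traffic is identified and steered to the RDMA engine.
 */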
void qed_iwarp_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_TCP;
        qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
        p_hwfn->b_rdma_enabled_in_prs = true;
}

/* We have two cid maps: one for tcp, which should be used only for passive
 * syn processing and for replacing a pre-allocated ep in the list; the
 * second for active tcp connections and for QPs.
 */
static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid)
{
        cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);

        spin_lock_bh(&p_hwfn->p_rdma_info->lock);

        if (cid < QED_IWARP_PREALLOC_CNT)
                qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
                                    cid);
        else
                qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);

        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

void
qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
                         struct iwarp_init_func_ramrod_data *p_ramrod)
{
        p_ramrod->iwarp.ll2_ooo_q_index =
                RESC_START(p_hwfn, QED_LL2_QUEUE) +
                p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;

        p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT;
}

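/* Allocate a cid from the main map (used for QPs and active-side
 * connections) and make sure the context has an ILT page backing it;
 * the cid is released again if the dynamic ILT allocation fails.
 */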
static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid)
{
        int rc;

        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
        rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
        if (rc) {
                DP_NOTICE(p_hwfn, "Failed in allocating iwarp cid\n");
                return rc;
        }
        *cid += qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);

        rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *cid);
        if (rc)
                qed_iwarp_cid_cleaned(p_hwfn, *cid);

        return rc;
}

static void qed_iwarp_set_tcp_cid(struct qed_hwfn *p_hwfn, u32 cid)
{
        cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);

        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
        qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, cid);
        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

/* This function allocates a cid for passive tcp (called from syn receive).
 * The reason it's separate from the regular cid allocation is that it is
 * assured that these cids already have ilt allocated: they are preallocated
 * to ensure that we won't need to allocate memory during syn processing.
 */
static int qed_iwarp_alloc_tcp_cid(struct qed_hwfn *p_hwfn, u32 *cid)
{
        int rc;

        spin_lock_bh(&p_hwfn->p_rdma_info->lock);

        rc = qed_rdma_bmap_alloc_id(p_hwfn,
                                    &p_hwfn->p_rdma_info->tcp_cid_map, cid);

        spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

        if (rc) {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                           "can't allocate iwarp tcp cid max-count=%d\n",
                           p_hwfn->p_rdma_info->tcp_cid_map.max_count);

                *cid = QED_IWARP_INVALID_TCP_CID;
                return rc;
        }

        *cid += qed_cxt_get_proto_cid_start(p_hwfn,
                                            p_hwfn->p_rdma_info->proto);
        return 0;
}

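/* The shared queue page holds the SQ/RQ PBLs at fixed offsets; the
 * virtual/physical addresses of both PBLs are returned to the upper
 * layer through out_params.
 */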
int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn,
                        struct qed_rdma_qp *qp,
                        struct qed_rdma_create_qp_out_params *out_params)
{
        struct iwarp_create_qp_ramrod_data *p_ramrod;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        u16 physical_queue;
        u32 cid;
        int rc;

        qp->shared_queue = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                              IWARP_SHARED_QUEUE_PAGE_SIZE,
                                              &qp->shared_queue_phys_addr,
                                              GFP_KERNEL);
        if (!qp->shared_queue)
                return -ENOMEM;

        out_params->sq_pbl_virt = (u8 *)qp->shared_queue +
            IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
        out_params->sq_pbl_phys = qp->shared_queue_phys_addr +
            IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
        out_params->rq_pbl_virt = (u8 *)qp->shared_queue +
            IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;
        out_params->rq_pbl_phys = qp->shared_queue_phys_addr +
            IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;

        rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
        if (rc)
                goto err1;

        qp->icid = (u16)cid;

        memset(&init_data, 0, sizeof(init_data));
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.cid = qp->icid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 IWARP_RAMROD_CMD_ID_CREATE_QP,
                                 PROTOCOLID_IWARP, &init_data);
        if (rc)
                goto err2;

        p_ramrod = &p_ent->ramrod.iwarp_create_qp;

        SET_FIELD(p_ramrod->flags,
                  IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN,
                  qp->fmr_and_reserved_lkey);

        SET_FIELD(p_ramrod->flags,
                  IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);

        SET_FIELD(p_ramrod->flags,
                  IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN,
                  qp->incoming_rdma_read_en);

        SET_FIELD(p_ramrod->flags,
                  IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN,
                  qp->incoming_rdma_write_en);

        SET_FIELD(p_ramrod->flags,
                  IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN,
                  qp->incoming_atomic_en);

        SET_FIELD(p_ramrod->flags,
                  IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG, qp->use_srq);

        p_ramrod->pd = qp->pd;
        p_ramrod->sq_num_pages = qp->sq_num_pages;
        p_ramrod->rq_num_pages = qp->rq_num_pages;

        p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
        p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);

        p_ramrod->cq_cid_for_sq =
            cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
        p_ramrod->cq_cid_for_rq =
            cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->rq_cq_id);

        p_ramrod->dpi = cpu_to_le16(qp->dpi);

        physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
        p_ramrod->physical_q0 = cpu_to_le16(physical_queue);
        physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
        p_ramrod->physical_q1 = cpu_to_le16(physical_queue);

        rc = qed_spq_post(p_hwfn, p_ent, NULL);
        if (rc)
                goto err2;

        return rc;

err2:
        qed_iwarp_cid_cleaned(p_hwfn, cid);
err1:
        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                          IWARP_SHARED_QUEUE_PAGE_SIZE,
                          qp->shared_queue, qp->shared_queue_phys_addr);

        return rc;
}

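/* Notify firmware of a QP state change. Only the CLOSING and ERROR
 * transitions are ever sent to firmware; the remaining transitions are
 * tracked in the driver alone (see qed_iwarp_modify_qp()).
 */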
static int qed_iwarp_modify_fw(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
        struct iwarp_modify_qp_ramrod_data *p_ramrod;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        int rc;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qp->icid;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 IWARP_RAMROD_CMD_ID_MODIFY_QP,
                                 p_hwfn->p_rdma_info->proto, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.iwarp_modify_qp;
        SET_FIELD(p_ramrod->flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN,
                  0x1);
        if (qp->iwarp_state == QED_IWARP_QP_STATE_CLOSING)
                p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_CLOSING;
        else
                p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_ERROR;

        rc = qed_spq_post(p_hwfn, p_ent, NULL);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) rc = %d\n", qp->icid, rc);

        return rc;
}

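/* The common qed RDMA code tracks QP states using the RoCE enum; these
 * two helpers translate between it and the iWARP state machine.
 */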
enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state)
{
        switch (state) {
        case QED_ROCE_QP_STATE_RESET:
        case QED_ROCE_QP_STATE_INIT:
        case QED_ROCE_QP_STATE_RTR:
                return QED_IWARP_QP_STATE_IDLE;
        case QED_ROCE_QP_STATE_RTS:
                return QED_IWARP_QP_STATE_RTS;
        case QED_ROCE_QP_STATE_SQD:
                return QED_IWARP_QP_STATE_CLOSING;
        case QED_ROCE_QP_STATE_ERR:
                return QED_IWARP_QP_STATE_ERROR;
        case QED_ROCE_QP_STATE_SQE:
                return QED_IWARP_QP_STATE_TERMINATE;
        default:
                return QED_IWARP_QP_STATE_ERROR;
        }
}

static enum qed_roce_qp_state
qed_iwarp2roce_state(enum qed_iwarp_qp_state state)
{
        switch (state) {
        case QED_IWARP_QP_STATE_IDLE:
                return QED_ROCE_QP_STATE_INIT;
        case QED_IWARP_QP_STATE_RTS:
                return QED_ROCE_QP_STATE_RTS;
        case QED_IWARP_QP_STATE_TERMINATE:
                return QED_ROCE_QP_STATE_SQE;
        case QED_IWARP_QP_STATE_CLOSING:
                return QED_ROCE_QP_STATE_SQD;
        case QED_IWARP_QP_STATE_ERROR:
                return QED_ROCE_QP_STATE_ERR;
        default:
                return QED_ROCE_QP_STATE_ERR;
        }
}

static const char * const iwarp_state_names[] = {
        "IDLE",
        "RTS",
        "TERMINATE",
        "CLOSING",
        "ERROR",
};

int
qed_iwarp_modify_qp(struct qed_hwfn *p_hwfn,
                    struct qed_rdma_qp *qp,
                    enum qed_iwarp_qp_state new_state, bool internal)
{
        enum qed_iwarp_qp_state prev_iw_state;
        bool modify_fw = false;
        int rc = 0;

        /* modify QP can be called from upper-layer or as a result of async
         * RST/FIN... therefore need to protect
         */
        spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
        prev_iw_state = qp->iwarp_state;

        if (prev_iw_state == new_state) {
                spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
                return 0;
        }

        switch (prev_iw_state) {
        case QED_IWARP_QP_STATE_IDLE:
                switch (new_state) {
                case QED_IWARP_QP_STATE_RTS:
                        qp->iwarp_state = QED_IWARP_QP_STATE_RTS;
                        break;
                case QED_IWARP_QP_STATE_ERROR:
                        qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
                        if (!internal)
                                modify_fw = true;
                        break;
                default:
                        break;
                }
                break;
        case QED_IWARP_QP_STATE_RTS:
                switch (new_state) {
                case QED_IWARP_QP_STATE_CLOSING:
                        if (!internal)
                                modify_fw = true;

                        qp->iwarp_state = QED_IWARP_QP_STATE_CLOSING;
                        break;
                case QED_IWARP_QP_STATE_ERROR:
                        if (!internal)
                                modify_fw = true;
                        qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
                        break;
                default:
                        break;
                }
                break;
        case QED_IWARP_QP_STATE_ERROR:
                switch (new_state) {
                case QED_IWARP_QP_STATE_IDLE:
                        qp->iwarp_state = new_state;
                        break;
                case QED_IWARP_QP_STATE_CLOSING:
                        /* could happen due to race... do nothing */
                        break;
                default:
                        rc = -EINVAL;
                }
                break;
        case QED_IWARP_QP_STATE_TERMINATE:
        case QED_IWARP_QP_STATE_CLOSING:
                qp->iwarp_state = new_state;
                break;
        default:
                break;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) %s --> %s%s\n",
                   qp->icid,
                   iwarp_state_names[prev_iw_state],
                   iwarp_state_names[qp->iwarp_state],
                   internal ? " (internal)" : "");

        spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);

        if (modify_fw)
                rc = qed_iwarp_modify_fw(p_hwfn, qp);

        return rc;
}

int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        int rc;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qp->icid;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 IWARP_RAMROD_CMD_ID_DESTROY_QP,
                                 p_hwfn->p_rdma_info->proto, &init_data);
        if (rc)
                return rc;

        rc = qed_spq_post(p_hwfn, p_ent, NULL);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) rc = %d\n", qp->icid, rc);

        return rc;
}

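/* Free an ep and its DMA buffer, optionally unlinking it from the
 * active ep_list first, and detach it from its QP.
 */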
static void qed_iwarp_destroy_ep(struct qed_hwfn *p_hwfn,
                                 struct qed_iwarp_ep *ep,
                                 bool remove_from_active_list)
{
        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                          sizeof(*ep->ep_buffer_virt),
                          ep->ep_buffer_virt, ep->ep_buffer_phys);

        if (remove_from_active_list) {
                spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
                list_del(&ep->list_entry);
                spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
        }

        if (ep->qp)
                ep->qp->ep = NULL;

        kfree(ep);
}

int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
        struct qed_iwarp_ep *ep = qp->ep;
        int wait_count = 0;
        int rc = 0;

        if (qp->iwarp_state != QED_IWARP_QP_STATE_ERROR) {
                rc = qed_iwarp_modify_qp(p_hwfn, qp,
                                         QED_IWARP_QP_STATE_ERROR, false);
                if (rc)
                        return rc;
        }

        /* Make sure ep is closed before returning and freeing memory. */
        if (ep) {
                while (ep->state != QED_IWARP_EP_CLOSED && wait_count++ < 200)
                        msleep(100);

                if (ep->state != QED_IWARP_EP_CLOSED)
                        DP_NOTICE(p_hwfn, "ep state close timeout state=%x\n",
                                  ep->state);

                qed_iwarp_destroy_ep(p_hwfn, ep, false);
        }

        rc = qed_iwarp_fw_destroy(p_hwfn, qp);

        if (qp->shared_queue)
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  IWARP_SHARED_QUEUE_PAGE_SIZE,
                                  qp->shared_queue, qp->shared_queue_phys_addr);

        return rc;
}

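/* Allocate an ep along with the DMA-coherent buffer that firmware uses
 * for the incoming/outgoing MPA private data and for async event output.
 */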
static int
qed_iwarp_create_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep **ep_out)
{
        struct qed_iwarp_ep *ep;
        int rc;

        ep = kzalloc(sizeof(*ep), GFP_KERNEL);
        if (!ep)
                return -ENOMEM;

        ep->state = QED_IWARP_EP_INIT;

        ep->ep_buffer_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                                sizeof(*ep->ep_buffer_virt),
                                                &ep->ep_buffer_phys,
                                                GFP_KERNEL);
        if (!ep->ep_buffer_virt) {
                rc = -ENOMEM;
                goto err;
        }

        ep->sig = QED_EP_SIG;

        *ep_out = ep;

        return 0;

err:
        kfree(ep);
        return rc;
}

static void
qed_iwarp_print_tcp_ramrod(struct qed_hwfn *p_hwfn,
                           struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod)
{
        DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                   "local_mac=%x %x %x, remote_mac=%x %x %x\n",
                   p_tcp_ramrod->tcp.local_mac_addr_lo,
                   p_tcp_ramrod->tcp.local_mac_addr_mid,
                   p_tcp_ramrod->tcp.local_mac_addr_hi,
                   p_tcp_ramrod->tcp.remote_mac_addr_lo,
                   p_tcp_ramrod->tcp.remote_mac_addr_mid,
                   p_tcp_ramrod->tcp.remote_mac_addr_hi);

        if (p_tcp_ramrod->tcp.ip_version == TCP_IPV4) {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                           "local_ip=%pI4h:%x, remote_ip=%pI4h:%x, vlan=%x\n",
                           p_tcp_ramrod->tcp.local_ip,
                           p_tcp_ramrod->tcp.local_port,
                           p_tcp_ramrod->tcp.remote_ip,
                           p_tcp_ramrod->tcp.remote_port,
                           p_tcp_ramrod->tcp.vlan_id);
        } else {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                           "local_ip=%pI6:%x, remote_ip=%pI6:%x, vlan=%x\n",
                           p_tcp_ramrod->tcp.local_ip,
                           p_tcp_ramrod->tcp.local_port,
                           p_tcp_ramrod->tcp.remote_ip,
                           p_tcp_ramrod->tcp.remote_port,
                           p_tcp_ramrod->tcp.vlan_id);
        }

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                   "flow_label=%x, ttl=%x, tos_or_tc=%x, mss=%x, rcv_wnd_scale=%x, connect_mode=%x, flags=%x\n",
                   p_tcp_ramrod->tcp.flow_label,
                   p_tcp_ramrod->tcp.ttl,
                   p_tcp_ramrod->tcp.tos_or_tc,
                   p_tcp_ramrod->tcp.mss,
                   p_tcp_ramrod->tcp.rcv_wnd_scale,
                   p_tcp_ramrod->tcp.connect_mode,
                   p_tcp_ramrod->tcp.flags);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                   "syn_ip_payload_length=%x, lo=%x, hi=%x\n",
                   p_tcp_ramrod->tcp.syn_ip_payload_length,
                   p_tcp_ramrod->tcp.syn_phy_addr_lo,
                   p_tcp_ramrod->tcp.syn_phy_addr_hi);
}

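/* Post the TCP_OFFLOAD ramrod that hands the TCP connection to firmware.
 * Passive connections use callback completion mode, presumably because
 * syn processing runs from a context that must not block; active
 * connections block until the completion arrives.
 */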
static int
qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
        struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
        struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod;
        struct tcp_offload_params_opt2 *tcp;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        dma_addr_t async_output_phys;
        dma_addr_t in_pdata_phys;
        u16 physical_q;
        u8 tcp_flags;
        int rc;
        int i;

        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = ep->tcp_cid;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        if (ep->connect_mode == TCP_CONNECT_PASSIVE)
                init_data.comp_mode = QED_SPQ_MODE_CB;
        else
                init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 IWARP_RAMROD_CMD_ID_TCP_OFFLOAD,
                                 PROTOCOLID_IWARP, &init_data);
        if (rc)
                return rc;

        p_tcp_ramrod = &p_ent->ramrod.iwarp_tcp_offload;

        in_pdata_phys = ep->ep_buffer_phys +
                        offsetof(struct qed_iwarp_ep_memory, in_pdata);
        DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.incoming_ulp_buffer.addr,
                       in_pdata_phys);

        p_tcp_ramrod->iwarp.incoming_ulp_buffer.len =
            cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));

        async_output_phys = ep->ep_buffer_phys +
                            offsetof(struct qed_iwarp_ep_memory, async_output);
        DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.async_eqe_output_buf,
                       async_output_phys);

        p_tcp_ramrod->iwarp.handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
        p_tcp_ramrod->iwarp.handle_for_async.lo = cpu_to_le32(PTR_LO(ep));

        physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
        p_tcp_ramrod->iwarp.physical_q0 = cpu_to_le16(physical_q);
        physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
        p_tcp_ramrod->iwarp.physical_q1 = cpu_to_le16(physical_q);
        p_tcp_ramrod->iwarp.mpa_mode = iwarp_info->mpa_rev;

        tcp = &p_tcp_ramrod->tcp;
        qed_set_fw_mac_addr(&tcp->remote_mac_addr_hi,
                            &tcp->remote_mac_addr_mid,
                            &tcp->remote_mac_addr_lo, ep->remote_mac_addr);
        qed_set_fw_mac_addr(&tcp->local_mac_addr_hi, &tcp->local_mac_addr_mid,
                            &tcp->local_mac_addr_lo, ep->local_mac_addr);

        tcp->vlan_id = cpu_to_le16(ep->cm_info.vlan);

        tcp_flags = p_hwfn->p_rdma_info->iwarp.tcp_flags;
        tcp->flags = 0;
        SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN,
                  !!(tcp_flags & QED_IWARP_TS_EN));

        SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN,
                  !!(tcp_flags & QED_IWARP_DA_EN));

        tcp->ip_version = ep->cm_info.ip_version;

        for (i = 0; i < 4; i++) {
                tcp->remote_ip[i] = cpu_to_le32(ep->cm_info.remote_ip[i]);
                tcp->local_ip[i] = cpu_to_le32(ep->cm_info.local_ip[i]);
        }

        tcp->remote_port = cpu_to_le16(ep->cm_info.remote_port);
        tcp->local_port = cpu_to_le16(ep->cm_info.local_port);
        tcp->mss = cpu_to_le16(ep->mss);
        tcp->flow_label = 0;
        tcp->ttl = 0x40;
        tcp->tos_or_tc = 0;

        tcp->max_rt_time = QED_IWARP_DEF_MAX_RT_TIME;
        tcp->cwnd = cpu_to_le32(QED_IWARP_DEF_CWND_FACTOR * ep->mss);
        tcp->ka_max_probe_cnt = QED_IWARP_DEF_KA_MAX_PROBE_CNT;
        tcp->ka_timeout = QED_IWARP_DEF_KA_TIMEOUT;
        tcp->ka_interval = QED_IWARP_DEF_KA_INTERVAL;

        tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale;
        tcp->connect_mode = ep->connect_mode;

        if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
                tcp->syn_ip_payload_length =
                        cpu_to_le16(ep->syn_ip_payload_length);
                tcp->syn_phy_addr_hi = DMA_HI_LE(ep->syn_phy_addr);
                tcp->syn_phy_addr_lo = DMA_LO_LE(ep->syn_phy_addr);
        }

        qed_iwarp_print_tcp_ramrod(p_hwfn, p_tcp_ramrod);

        rc = qed_spq_post(p_hwfn, p_ent, NULL);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                   "EP(0x%x) Offload completed rc=%d\n", ep->tcp_cid, rc);

        return rc;
}

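/* Handle an incoming MPA request on the passive side: extract the
 * handshake mode and, for enhanced (v2) negotiation, the requested
 * ird/ord and RTR bits from the private data, then deliver a
 * QED_IWARP_EVENT_MPA_REQUEST event to the upper layer.
 */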
static void
qed_iwarp_mpa_received(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
        struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
        struct qed_iwarp_cm_event_params params;
        struct mpa_v2_hdr *mpa_v2;
        union async_output *async_data;
        u16 mpa_ord, mpa_ird;
        u8 mpa_hdr_size = 0;
        u8 mpa_rev;

        async_data = &ep->ep_buffer_virt->async_output;

        mpa_rev = async_data->mpa_request.mpa_handshake_mode;
        DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                   "private_data_len=%x handshake_mode=%x private_data=(%x)\n",
                   async_data->mpa_request.ulp_data_len,
                   mpa_rev, *((u32 *)(ep->ep_buffer_virt->in_pdata)));

        if (mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
                /* Read ord/ird values from private data buffer */
                mpa_v2 = (struct mpa_v2_hdr *)ep->ep_buffer_virt->in_pdata;
                mpa_hdr_size = sizeof(*mpa_v2);

                mpa_ord = ntohs(mpa_v2->ord);
                mpa_ird = ntohs(mpa_v2->ird);

                /* Temporarily store the requested incoming ord/ird in
                 * cm_info; they are replaced with the negotiated values
                 * during accept.
                 */
                ep->cm_info.ord = (u8)min_t(u16,
                                            (mpa_ord & MPA_V2_IRD_ORD_MASK),
                                            QED_IWARP_ORD_DEFAULT);

                ep->cm_info.ird = (u8)min_t(u16,
                                            (mpa_ird & MPA_V2_IRD_ORD_MASK),
                                            QED_IWARP_IRD_DEFAULT);

                /* Peer2Peer negotiation */
                ep->rtr_type = MPA_RTR_TYPE_NONE;
                if (mpa_ird & MPA_V2_PEER2PEER_MODEL) {
                        if (mpa_ord & MPA_V2_WRITE_RTR)
                                ep->rtr_type |= MPA_RTR_TYPE_ZERO_WRITE;

                        if (mpa_ord & MPA_V2_READ_RTR)
                                ep->rtr_type |= MPA_RTR_TYPE_ZERO_READ;

                        if (mpa_ird & MPA_V2_SEND_RTR)
                                ep->rtr_type |= MPA_RTR_TYPE_ZERO_SEND;

                        ep->rtr_type &= iwarp_info->rtr_type;

                        /* if we're left with no match, send our capabilities */
                        if (ep->rtr_type == MPA_RTR_TYPE_NONE)
                                ep->rtr_type = iwarp_info->rtr_type;
                }

                ep->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
        } else {
                ep->cm_info.ord = QED_IWARP_ORD_DEFAULT;
                ep->cm_info.ird = QED_IWARP_IRD_DEFAULT;
                ep->mpa_rev = MPA_NEGOTIATION_TYPE_BASIC;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x rtr:0x%x ulp_data_len = %x mpa_hdr_size = %x\n",
                   mpa_rev, ep->cm_info.ord, ep->cm_info.ird, ep->rtr_type,
                   async_data->mpa_request.ulp_data_len, mpa_hdr_size);

        /* Strip mpa v2 hdr from private data before sending to upper layer */
        ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_hdr_size;

        ep->cm_info.private_data_len = async_data->mpa_request.ulp_data_len -
                                       mpa_hdr_size;

        params.event = QED_IWARP_EVENT_MPA_REQUEST;
        params.cm_info = &ep->cm_info;
        params.ep_context = ep;
        params.status = 0;

        ep->state = QED_IWARP_EP_MPA_REQ_RCVD;
        ep->event_cb(ep->cb_context, &params);
}

static int
qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
        struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod;
        struct qed_iwarp_info *iwarp_info;
        struct qed_sp_init_data init_data;
        dma_addr_t async_output_phys;
        struct qed_spq_entry *p_ent;
        dma_addr_t out_pdata_phys;
        dma_addr_t in_pdata_phys;
        struct qed_rdma_qp *qp;
        bool reject;
        int rc;

        if (!ep)
                return -EINVAL;

        qp = ep->qp;
        reject = !qp;

        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = reject ? ep->tcp_cid : qp->icid;
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;

        if (ep->connect_mode == TCP_CONNECT_ACTIVE)
                init_data.comp_mode = QED_SPQ_MODE_CB;
        else
                init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD,
                                 PROTOCOLID_IWARP, &init_data);
        if (rc)
                return rc;

        p_mpa_ramrod = &p_ent->ramrod.iwarp_mpa_offload;
        out_pdata_phys = ep->ep_buffer_phys +
                         offsetof(struct qed_iwarp_ep_memory, out_pdata);
        DMA_REGPAIR_LE(p_mpa_ramrod->common.outgoing_ulp_buffer.addr,
                       out_pdata_phys);
        p_mpa_ramrod->common.outgoing_ulp_buffer.len =
            cpu_to_le16(ep->cm_info.private_data_len);
        p_mpa_ramrod->common.crc_needed = p_hwfn->p_rdma_info->iwarp.crc_needed;

        p_mpa_ramrod->common.out_rq.ord = ep->cm_info.ord;
        p_mpa_ramrod->common.out_rq.ird = ep->cm_info.ird;

        p_mpa_ramrod->tcp_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
                                            ep->tcp_cid);

        in_pdata_phys = ep->ep_buffer_phys +
                        offsetof(struct qed_iwarp_ep_memory, in_pdata);
        p_mpa_ramrod->tcp_connect_side = ep->connect_mode;
        DMA_REGPAIR_LE(p_mpa_ramrod->incoming_ulp_buffer.addr,
                       in_pdata_phys);
        p_mpa_ramrod->incoming_ulp_buffer.len =
            cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));
        async_output_phys = ep->ep_buffer_phys +
                            offsetof(struct qed_iwarp_ep_memory, async_output);
        DMA_REGPAIR_LE(p_mpa_ramrod->async_eqe_output_buf,
                       async_output_phys);
        p_mpa_ramrod->handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
        p_mpa_ramrod->handle_for_async.lo = cpu_to_le32(PTR_LO(ep));

        if (!reject) {
                DMA_REGPAIR_LE(p_mpa_ramrod->shared_queue_addr,
                               qp->shared_queue_phys_addr);
                p_mpa_ramrod->stats_counter_id =
                    RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + qp->stats_queue;
        } else {
                p_mpa_ramrod->common.reject = 1;
        }

        iwarp_info = &p_hwfn->p_rdma_info->iwarp;
        p_mpa_ramrod->rcv_wnd = iwarp_info->rcv_wnd_size;
        p_mpa_ramrod->mode = ep->mpa_rev;
        SET_FIELD(p_mpa_ramrod->rtr_pref,
                  IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type);

        ep->state = QED_IWARP_EP_MPA_OFFLOADED;
        rc = qed_spq_post(p_hwfn, p_ent, NULL);
        if (!reject)
                ep->cid = qp->icid;     /* Now they're migrated. */

        DP_VERBOSE(p_hwfn,
                   QED_MSG_RDMA,
                   "QP(0x%x) EP(0x%x) MPA Offload rc = %d IRD=0x%x ORD=0x%x rtr_type=%d mpa_rev=%d reject=%d\n",
                   reject ? 0xffff : qp->icid,
                   ep->tcp_cid,
                   rc,
                   ep->cm_info.ird,
                   ep->cm_info.ord, ep->rtr_type, ep->mpa_rev, reject);
        return rc;
}

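/* Recycle an ep into the free list after a rejected or otherwise failed
 * passive connection, re-acquiring a tcp cid for it if the previous one
 * was lost.
 */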
static void
qed_iwarp_return_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
        ep->state = QED_IWARP_EP_INIT;
        if (ep->qp)
                ep->qp->ep = NULL;
        ep->qp = NULL;
        memset(&ep->cm_info, 0, sizeof(ep->cm_info));

        if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
                /* We don't care about the return code; it's ok if tcp_cid
                 * remains invalid... in this case we'll defer allocation
                 */
                qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);
        }
        spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

        list_del(&ep->list_entry);
        list_add_tail(&ep->list_entry,
                      &p_hwfn->p_rdma_info->iwarp.ep_free_list);

        spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
}

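/* Extract the private data the peer sent: for MPA v2, record the
 * negotiated ird/ord from the header and strip it before passing the
 * remaining private data up.
 */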
void
qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
        struct mpa_v2_hdr *mpa_v2_params;
        union async_output *async_data;
        u16 mpa_ird, mpa_ord;
        u8 mpa_data_size = 0;

        if (MPA_REV2(p_hwfn->p_rdma_info->iwarp.mpa_rev)) {
                mpa_v2_params =
                        (struct mpa_v2_hdr *)(ep->ep_buffer_virt->in_pdata);
                mpa_data_size = sizeof(*mpa_v2_params);
                mpa_ird = ntohs(mpa_v2_params->ird);
                mpa_ord = ntohs(mpa_v2_params->ord);

                ep->cm_info.ird = (u8)(mpa_ord & MPA_V2_IRD_ORD_MASK);
                ep->cm_info.ord = (u8)(mpa_ird & MPA_V2_IRD_ORD_MASK);
        }
        async_data = &ep->ep_buffer_virt->async_output;

        ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_data_size;
        ep->cm_info.private_data_len = async_data->mpa_response.ulp_data_len -
                                       mpa_data_size;
}

void
qed_iwarp_mpa_reply_arrived(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
{
        struct qed_iwarp_cm_event_params params;

        if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
                DP_NOTICE(p_hwfn,
                          "MPA reply event not expected on passive side!\n");
                return;
        }

        params.event = QED_IWARP_EVENT_ACTIVE_MPA_REPLY;

        qed_iwarp_parse_private_data(p_hwfn, ep);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
                   ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);

        params.cm_info = &ep->cm_info;
        params.ep_context = ep;
        params.status = 0;

        ep->mpa_reply_processed = true;

        ep->event_cb(ep->cb_context, &params);
}

#define QED_IWARP_CONNECT_MODE_STRING(ep) \
        ((ep)->connect_mode == TCP_CONNECT_PASSIVE) ? "Passive" : "Active"

/* Called as a result of the event:
 * IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE
 */
static void
qed_iwarp_mpa_complete(struct qed_hwfn *p_hwfn,
                       struct qed_iwarp_ep *ep, u8 fw_return_code)
{
        struct qed_iwarp_cm_event_params params;

        if (ep->connect_mode == TCP_CONNECT_ACTIVE)
                params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE;
        else
                params.event = QED_IWARP_EVENT_PASSIVE_COMPLETE;

        if (ep->connect_mode == TCP_CONNECT_ACTIVE && !ep->mpa_reply_processed)
                qed_iwarp_parse_private_data(p_hwfn, ep);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
                   ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);

        params.cm_info = &ep->cm_info;

        params.ep_context = ep;

        ep->state = QED_IWARP_EP_CLOSED;

        switch (fw_return_code) {
        case RDMA_RETURN_OK:
                ep->qp->max_rd_atomic_req = ep->cm_info.ord;
                ep->qp->max_rd_atomic_resp = ep->cm_info.ird;
                qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_RTS,
                                    true);
                ep->state = QED_IWARP_EP_ESTABLISHED;
                params.status = 0;
                break;
        case IWARP_CONN_ERROR_MPA_TIMEOUT:
                DP_NOTICE(p_hwfn, "%s(0x%x) MPA timeout\n",
                          QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
                params.status = -EBUSY;
                break;
        case IWARP_CONN_ERROR_MPA_ERROR_REJECT:
                DP_NOTICE(p_hwfn, "%s(0x%x) MPA Reject\n",
                          QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
                params.status = -ECONNREFUSED;
                break;
        case IWARP_CONN_ERROR_MPA_RST:
                DP_NOTICE(p_hwfn, "%s(0x%x) MPA reset(tcp cid: 0x%x)\n",
                          QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid,
                          ep->tcp_cid);
                params.status = -ECONNRESET;
                break;
        case IWARP_CONN_ERROR_MPA_FIN:
                DP_NOTICE(p_hwfn, "%s(0x%x) MPA received FIN\n",
                          QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
                params.status = -ECONNREFUSED;
                break;
        case IWARP_CONN_ERROR_MPA_INSUF_IRD:
                DP_NOTICE(p_hwfn, "%s(0x%x) MPA insufficient ird\n",
                          QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
                params.status = -ECONNREFUSED;
                break;
        case IWARP_CONN_ERROR_MPA_RTR_MISMATCH:
                DP_NOTICE(p_hwfn, "%s(0x%x) MPA RTR MISMATCH\n",
                          QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
                params.status = -ECONNREFUSED;
                break;
        case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
                DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n",
                          QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
                params.status = -ECONNREFUSED;
                break;
        case IWARP_CONN_ERROR_MPA_LOCAL_ERROR:
                DP_NOTICE(p_hwfn, "%s(0x%x) MPA Local Error\n",
                          QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
                params.status = -ECONNREFUSED;
                break;
        case IWARP_CONN_ERROR_MPA_TERMINATE:
                DP_NOTICE(p_hwfn, "%s(0x%x) MPA TERMINATE\n",
                          QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
                params.status = -ECONNREFUSED;
                break;
        default:
                params.status = -ECONNRESET;
                break;
        }

        ep->event_cb(ep->cb_context, &params);

        /* On the passive side, if there is no associated QP (REJECT) we
         * need to return the ep to the pool; in the regular case we add
         * an element in accept instead of this one.
         * In both cases we need to remove it from the ep_list.
         */
        if (fw_return_code != RDMA_RETURN_OK) {
                ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
                if ((ep->connect_mode == TCP_CONNECT_PASSIVE) &&
                    (!ep->qp)) {        /* Rejected */
                        qed_iwarp_return_ep(p_hwfn, ep);
                } else {
                        spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
                        list_del(&ep->list_entry);
                        spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
                }
        }
}

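/* Build the outgoing MPA v2 header at the start of out_pdata, encoding
 * our ird/ord and the RTR types we are willing to use.
 */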
static void
qed_iwarp_mpa_v2_set_private(struct qed_hwfn *p_hwfn,
                             struct qed_iwarp_ep *ep, u8 *mpa_data_size)
{
        struct mpa_v2_hdr *mpa_v2_params;
        u16 mpa_ird, mpa_ord;

        *mpa_data_size = 0;
        if (MPA_REV2(ep->mpa_rev)) {
                mpa_v2_params =
                    (struct mpa_v2_hdr *)ep->ep_buffer_virt->out_pdata;
                *mpa_data_size = sizeof(*mpa_v2_params);

                mpa_ird = (u16)ep->cm_info.ird;
                mpa_ord = (u16)ep->cm_info.ord;

                if (ep->rtr_type != MPA_RTR_TYPE_NONE) {
                        mpa_ird |= MPA_V2_PEER2PEER_MODEL;

                        if (ep->rtr_type & MPA_RTR_TYPE_ZERO_SEND)
                                mpa_ird |= MPA_V2_SEND_RTR;

                        if (ep->rtr_type & MPA_RTR_TYPE_ZERO_WRITE)
                                mpa_ord |= MPA_V2_WRITE_RTR;

                        if (ep->rtr_type & MPA_RTR_TYPE_ZERO_READ)
                                mpa_ord |= MPA_V2_READ_RTR;
                }

                mpa_v2_params->ird = htons(mpa_ird);
                mpa_v2_params->ord = htons(mpa_ord);

                DP_VERBOSE(p_hwfn,
                           QED_MSG_RDMA,
                           "MPA_NEGOTIATE Header: [%x ord:%x ird] %x ord:%x ird:%x peer2peer:%x rtr_send:%x rtr_write:%x rtr_read:%x\n",
                           mpa_v2_params->ird,
                           mpa_v2_params->ord,
                           *((u32 *)mpa_v2_params),
                           mpa_ord & MPA_V2_IRD_ORD_MASK,
                           mpa_ird & MPA_V2_IRD_ORD_MASK,
                           !!(mpa_ird & MPA_V2_PEER2PEER_MODEL),
                           !!(mpa_ird & MPA_V2_SEND_RTR),
                           !!(mpa_ord & MPA_V2_WRITE_RTR),
                           !!(mpa_ord & MPA_V2_READ_RTR));
        }
}

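/* Active-side connect: allocate a cid and an ep, build the outgoing
 * private data (including the MPA v2 header when applicable) and kick
 * off the TCP offload.
 */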
int qed_iwarp_connect(void *rdma_cxt,
                      struct qed_iwarp_connect_in *iparams,
                      struct qed_iwarp_connect_out *oparams)
{
        struct qed_hwfn *p_hwfn = rdma_cxt;
        struct qed_iwarp_info *iwarp_info;
        struct qed_iwarp_ep *ep;
        u8 mpa_data_size = 0;
        u8 ts_hdr_size = 0;
        u32 cid;
        int rc;

        if ((iparams->cm_info.ord > QED_IWARP_ORD_DEFAULT) ||
            (iparams->cm_info.ird > QED_IWARP_IRD_DEFAULT)) {
                DP_NOTICE(p_hwfn,
                          "QP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
                          iparams->qp->icid, iparams->cm_info.ord,
                          iparams->cm_info.ird);

                return -EINVAL;
        }

        iwarp_info = &p_hwfn->p_rdma_info->iwarp;

        /* Allocate ep object */
        rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
        if (rc)
                return rc;

        rc = qed_iwarp_create_ep(p_hwfn, &ep);
        if (rc)
                goto err;

        ep->tcp_cid = cid;

        spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
        list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
        spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

        ep->qp = iparams->qp;
        ep->qp->ep = ep;
        ether_addr_copy(ep->remote_mac_addr, iparams->remote_mac_addr);
        ether_addr_copy(ep->local_mac_addr, iparams->local_mac_addr);
        memcpy(&ep->cm_info, &iparams->cm_info, sizeof(ep->cm_info));

        ep->cm_info.ord = iparams->cm_info.ord;
        ep->cm_info.ird = iparams->cm_info.ird;

        ep->rtr_type = iwarp_info->rtr_type;
        if (!iwarp_info->peer2peer)
                ep->rtr_type = MPA_RTR_TYPE_NONE;

        if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) && (ep->cm_info.ord == 0))
                ep->cm_info.ord = 1;

        ep->mpa_rev = iwarp_info->mpa_rev;

        qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);

        ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
        ep->cm_info.private_data_len = iparams->cm_info.private_data_len +
                                       mpa_data_size;

        memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
               iparams->cm_info.private_data,
               iparams->cm_info.private_data_len);

        if (p_hwfn->p_rdma_info->iwarp.tcp_flags & QED_IWARP_TS_EN)
                ts_hdr_size = TIMESTAMP_HEADER_SIZE;

        ep->mss = iparams->mss - ts_hdr_size;
        ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);

        ep->event_cb = iparams->event_cb;
        ep->cb_context = iparams->cb_context;
        ep->connect_mode = TCP_CONNECT_ACTIVE;

        oparams->ep_context = ep;

        rc = qed_iwarp_tcp_offload(p_hwfn, ep);

        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x) rc = %d\n",
                   iparams->qp->icid, ep->tcp_cid, rc);

        if (rc) {
                qed_iwarp_destroy_ep(p_hwfn, ep, true);
                goto err;
        }

        return rc;
err:
        qed_iwarp_cid_cleaned(p_hwfn, cid);

        return rc;
}

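/* Take a pre-allocated ep off the free list. Runs in syn-processing
 * context, so apart from a deferred tcp cid retry nothing is allocated
 * here.
 */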
static struct qed_iwarp_ep *qed_iwarp_get_free_ep(struct qed_hwfn *p_hwfn)
{
        struct qed_iwarp_ep *ep = NULL;
        int rc;

        spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

        if (list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
                DP_ERR(p_hwfn, "Ep list is empty\n");
                goto out;
        }

        ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
                              struct qed_iwarp_ep, list_entry);

        /* In some cases we could have failed to allocate a tcp cid when the
         * ep was added from accept / failure... retry now; this is not the
         * common case.
         */
        if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
                rc = qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);

                /* if we fail we could look for another entry with a valid
                 * tcp_cid, but since we don't expect to reach this anyway
                 * it's not worth the handling
                 */
                if (rc) {
                        ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
                        ep = NULL;
                        goto out;
                }
        }

        list_del(&ep->list_entry);

out:
        spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
        return ep;
}

#define QED_IWARP_MAX_CID_CLEAN_TIME  100
#define QED_IWARP_MAX_NO_PROGRESS_CNT 5

/* This function waits for all the bits of a bmap to be cleared; as long as
 * there is progress (i.e. the number of bits left to be cleared decreases)
 * the function continues.
 */
static int
qed_iwarp_wait_cid_map_cleared(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap)
{
        int prev_weight = 0;
        int wait_count = 0;
        int weight = 0;

        weight = bitmap_weight(bmap->bitmap, bmap->max_count);
        prev_weight = weight;

        while (weight) {
                msleep(QED_IWARP_MAX_CID_CLEAN_TIME);

                weight = bitmap_weight(bmap->bitmap, bmap->max_count);

                if (prev_weight == weight) {
                        wait_count++;
                } else {
                        prev_weight = weight;
                        wait_count = 0;
                }

                if (wait_count > QED_IWARP_MAX_NO_PROGRESS_CNT) {
                        DP_NOTICE(p_hwfn,
                                  "%s bitmap wait timed out (%d cids pending)\n",
                                  bmap->name, weight);
                        return -EBUSY;
                }
        }
        return 0;
}

static int qed_iwarp_wait_for_all_cids(struct qed_hwfn *p_hwfn)
{
        int rc;
        int i;

        rc = qed_iwarp_wait_cid_map_cleared(p_hwfn,
                                            &p_hwfn->p_rdma_info->tcp_cid_map);
        if (rc)
                return rc;

        /* Now free the tcp cids from the main cid map */
        for (i = 0; i < QED_IWARP_PREALLOC_CNT; i++)
                qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, i);

        /* Now wait for all cids to be completed */
        return qed_iwarp_wait_cid_map_cleared(p_hwfn,
                                              &p_hwfn->p_rdma_info->cid_map);
}

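/* Teardown helper: drain the ep free list, releasing each ep's tcp cid
 * and DMA buffer.
 */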
static void qed_iwarp_free_prealloc_ep(struct qed_hwfn *p_hwfn)
{
        struct qed_iwarp_ep *ep;

        while (!list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
                spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

                ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
                                      struct qed_iwarp_ep, list_entry);

                if (!ep) {
                        spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
                        break;
                }
                list_del(&ep->list_entry);

                spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);

                if (ep->tcp_cid != QED_IWARP_INVALID_TCP_CID)
                        qed_iwarp_cid_cleaned(p_hwfn, ep->tcp_cid);

                qed_iwarp_destroy_ep(p_hwfn, ep, false);
        }
}

static int qed_iwarp_prealloc_ep(struct qed_hwfn *p_hwfn, bool init)
{
        struct qed_iwarp_ep *ep;
        int rc = 0;
        int count;
        u32 cid;
        int i;

        count = init ? QED_IWARP_PREALLOC_CNT : 1;
        for (i = 0; i < count; i++) {
                rc = qed_iwarp_create_ep(p_hwfn, &ep);
                if (rc)
                        return rc;

                /* During initialization we allocate from the main pool,
                 * afterwards we allocate only from the tcp_cid.
                 */
                if (init) {
                        rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
                        if (rc)
                                goto err;
                        qed_iwarp_set_tcp_cid(p_hwfn, cid);
                } else {
                        /* We don't care about the return code; it's ok if
                         * tcp_cid remains invalid... in this case we'll
                         * defer allocation
                         */
                        qed_iwarp_alloc_tcp_cid(p_hwfn, &cid);
                }

                ep->tcp_cid = cid;

                spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
                list_add_tail(&ep->list_entry,
                              &p_hwfn->p_rdma_info->iwarp.ep_free_list);
                spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
        }

        return rc;

err:
        qed_iwarp_destroy_ep(p_hwfn, ep, false);

        return rc;
}

1416 int qed_iwarp_alloc(struct qed_hwfn *p_hwfn)
1417 {
1418         int rc;
1419
1420         /* Allocate bitmap for tcp cids. These are used by the passive
1421          * side so that it can allocate a pre-acquired tcp cid during dpc,
1422          * without requiring dynamic ilt allocation.
1423          */
1424         rc = qed_rdma_bmap_alloc(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
1425                                  QED_IWARP_PREALLOC_CNT, "TCP_CID");
1426         if (rc) {
1427                 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1428                            "Failed to allocate tcp cid, rc = %d\n", rc);
1429                 return rc;
1430         }
1431
1432         INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_free_list);
1433         spin_lock_init(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1434
1435         rc = qed_iwarp_prealloc_ep(p_hwfn, true);
1436         if (rc)
1437                 return rc;
1438
1439         return qed_ooo_alloc(p_hwfn);
1440 }
1441
1442 void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn)
1443 {
1444         struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
1445
1446         qed_ooo_free(p_hwfn);
1447         qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 1);
1448         kfree(iwarp_info->mpa_bufs);
1449         kfree(iwarp_info->partial_fpdus);
1450         kfree(iwarp_info->mpa_intermediate_buf);
1451 }
1452
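/* Called by the upper RDMA driver to accept a passive-side MPA request.
 * A minimal usage sketch (illustrative only - the caller-side names and the
 * event structure layout are assumptions, not taken from this file):
 *
 *	struct qed_iwarp_accept_in iparams = {
 *		.ep_context = ev->ep_context,	// from the MPA request event
 *		.cb_context = my_ctx,		// hypothetical caller cookie
 *		.qp = my_qp,			// previously created iwarp qp
 *		.private_data = pdata,
 *		.private_data_len = pdata_len,
 *		.ord = QED_IWARP_ORD_DEFAULT,
 *		.ird = QED_IWARP_IRD_DEFAULT,
 *	};
 *	rc = qed_iwarp_accept(rdma_cxt, &iparams);
 */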
1453 int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams)
1454 {
1455         struct qed_hwfn *p_hwfn = rdma_cxt;
1456         struct qed_iwarp_ep *ep;
1457         u8 mpa_data_size = 0;
1458         int rc;
1459
1460         ep = iparams->ep_context;
1461         if (!ep) {
1462                 DP_ERR(p_hwfn, "EP context received in accept is NULL\n");
1463                 return -EINVAL;
1464         }
1465
1466         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
1467                    iparams->qp->icid, ep->tcp_cid);
1468
1469         if ((iparams->ord > QED_IWARP_ORD_DEFAULT) ||
1470             (iparams->ird > QED_IWARP_IRD_DEFAULT)) {
1471                 DP_VERBOSE(p_hwfn,
1472                            QED_MSG_RDMA,
1473                            "QP(0x%x) EP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
1474                            iparams->qp->icid,
1475                            ep->tcp_cid, iparams->ord, iparams->ird);
1476                 return -EINVAL;
1477         }
1478
1479         qed_iwarp_prealloc_ep(p_hwfn, false);
1480
1481         ep->cb_context = iparams->cb_context;
1482         ep->qp = iparams->qp;
1483         ep->qp->ep = ep;
1484
1485         if (ep->mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
1486                 /* Negotiate ord/ird: if upperlayer requested ord larger than
1487                  * ird advertised by remote, we need to decrease our ord
1488                  */
1489                 if (iparams->ord > ep->cm_info.ird)
1490                         iparams->ord = ep->cm_info.ird;
1491
1492                 if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) &&
1493                     (iparams->ird == 0))
1494                         iparams->ird = 1;
1495         }
1496
1497         /* Update cm_info ord/ird to be negotiated values */
1498         ep->cm_info.ord = iparams->ord;
1499         ep->cm_info.ird = iparams->ird;
1500
1501         qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
1502
1503         ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
1504         ep->cm_info.private_data_len = iparams->private_data_len +
1505                                        mpa_data_size;
1506
1507         memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
1508                iparams->private_data, iparams->private_data_len);
1509
1510         rc = qed_iwarp_mpa_offload(p_hwfn, ep);
1511         if (rc)
1512                 qed_iwarp_modify_qp(p_hwfn,
1513                                     iparams->qp, QED_IWARP_QP_STATE_ERROR, 1);
1514
1515         return rc;
1516 }
1517
1518 int qed_iwarp_reject(void *rdma_cxt, struct qed_iwarp_reject_in *iparams)
1519 {
1520         struct qed_hwfn *p_hwfn = rdma_cxt;
1521         struct qed_iwarp_ep *ep;
1522         u8 mpa_data_size = 0;
1523
1524         ep = iparams->ep_context;
1525         if (!ep) {
1526                 DP_ERR(p_hwfn, "EP context received in reject is NULL\n");
1527                 return -EINVAL;
1528         }
1529
1530         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x)\n", ep->tcp_cid);
1531
1532         ep->cb_context = iparams->cb_context;
1533         ep->qp = NULL;
1534
1535         qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
1536
1537         ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
1538         ep->cm_info.private_data_len = iparams->private_data_len +
1539                                        mpa_data_size;
1540
1541         memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
1542                iparams->private_data, iparams->private_data_len);
1543
1544         return qed_iwarp_mpa_offload(p_hwfn, ep);
1545 }
1546
1547 static void
1548 qed_iwarp_print_cm_info(struct qed_hwfn *p_hwfn,
1549                         struct qed_iwarp_cm_info *cm_info)
1550 {
1551         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "ip_version = %d\n",
1552                    cm_info->ip_version);
1553
1554         if (cm_info->ip_version == QED_TCP_IPV4)
1555                 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1556                            "remote_ip %pI4h:%x, local_ip %pI4h:%x vlan=%x\n",
1557                            cm_info->remote_ip, cm_info->remote_port,
1558                            cm_info->local_ip, cm_info->local_port,
1559                            cm_info->vlan);
1560         else
1561                 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1562                            "remote_ip %pI6:%x, local_ip %pI6:%x vlan=%x\n",
1563                            cm_info->remote_ip, cm_info->remote_port,
1564                            cm_info->local_ip, cm_info->local_port,
1565                            cm_info->vlan);
1566
1567         DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1568                    "private_data_len = %x ord = %d, ird = %d\n",
1569                    cm_info->private_data_len, cm_info->ord, cm_info->ird);
1570 }
1571
1572 static int
1573 qed_iwarp_ll2_post_rx(struct qed_hwfn *p_hwfn,
1574                       struct qed_iwarp_ll2_buff *buf, u8 handle)
1575 {
1576         int rc;
1577
1578         rc = qed_ll2_post_rx_buffer(p_hwfn, handle, buf->data_phys_addr,
1579                                     (u16)buf->buff_size, buf, 1);
1580         if (rc) {
1581                 DP_NOTICE(p_hwfn,
1582                           "Failed to repost rx buffer to ll2 rc = %d, handle=%d\n",
1583                           rc, handle);
1584                 dma_free_coherent(&p_hwfn->cdev->pdev->dev, buf->buff_size,
1585                                   buf->data, buf->data_phys_addr);
1586                 kfree(buf);
1587         }
1588
1589         return rc;
1590 }
1591
1592 static bool
1593 qed_iwarp_ep_exists(struct qed_hwfn *p_hwfn, struct qed_iwarp_cm_info *cm_info)
1594 {
1595         struct qed_iwarp_ep *ep = NULL;
1596         bool found = false;
1597
1598         list_for_each_entry(ep,
1599                             &p_hwfn->p_rdma_info->iwarp.ep_list,
1600                             list_entry) {
1601                 if ((ep->cm_info.local_port == cm_info->local_port) &&
1602                     (ep->cm_info.remote_port == cm_info->remote_port) &&
1603                     (ep->cm_info.vlan == cm_info->vlan) &&
1604                     !memcmp(&ep->cm_info.local_ip, cm_info->local_ip,
1605                             sizeof(cm_info->local_ip)) &&
1606                     !memcmp(&ep->cm_info.remote_ip, cm_info->remote_ip,
1607                             sizeof(cm_info->remote_ip))) {
1608                         found = true;
1609                         break;
1610                 }
1611         }
1612
1613         if (found) {
1614                 DP_NOTICE(p_hwfn,
1615                           "SYN received on active connection - dropping\n");
1616                 qed_iwarp_print_cm_info(p_hwfn, cm_info);
1617
1618                 return true;
1619         }
1620
1621         return false;
1622 }
1623
1624 static struct qed_iwarp_listener *
1625 qed_iwarp_get_listener(struct qed_hwfn *p_hwfn,
1626                        struct qed_iwarp_cm_info *cm_info)
1627 {
1628         struct qed_iwarp_listener *listener = NULL;
1629         static const u32 ip_zero[4] = { 0, 0, 0, 0 };
1630         bool found = false;
1631
1632         qed_iwarp_print_cm_info(p_hwfn, cm_info);
1633
1634         list_for_each_entry(listener,
1635                             &p_hwfn->p_rdma_info->iwarp.listen_list,
1636                             list_entry) {
1637                 if (listener->port == cm_info->local_port) {
1638                         if (!memcmp(listener->ip_addr,
1639                                     ip_zero, sizeof(ip_zero))) {
1640                                 found = true;
1641                                 break;
1642                         }
1643
1644                         if (!memcmp(listener->ip_addr,
1645                                     cm_info->local_ip,
1646                                     sizeof(cm_info->local_ip)) &&
1647                             (listener->vlan == cm_info->vlan)) {
1648                                 found = true;
1649                                 break;
1650                         }
1651                 }
1652         }
1653
1654         if (found) {
1655                 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener found = %p\n",
1656                            listener);
1657                 return listener;
1658         }
1659
1660         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener not found\n");
1661         return NULL;
1662 }
1663
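/* Parse the headers of a received SYN: ethernet (with an optional vlan tag),
 * IPv4/IPv6 and TCP. Fills cm_info with the 4-tuple + vlan and returns the
 * mac addresses, the ip payload length and the offset at which the tcp
 * header starts.
 */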
1664 static int
1665 qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
1666                        struct qed_iwarp_cm_info *cm_info,
1667                        void *buf,
1668                        u8 *remote_mac_addr,
1669                        u8 *local_mac_addr,
1670                        int *payload_len, int *tcp_start_offset)
1671 {
1672         struct vlan_ethhdr *vethh;
1673         bool vlan_valid = false;
1674         struct ipv6hdr *ip6h;
1675         struct ethhdr *ethh;
1676         struct tcphdr *tcph;
1677         struct iphdr *iph;
1678         int eth_hlen;
1679         int ip_hlen;
1680         int eth_type;
1681         int i;
1682
1683         ethh = buf;
1684         eth_type = ntohs(ethh->h_proto);
1685         if (eth_type == ETH_P_8021Q) {
1686                 vlan_valid = true;
1687                 vethh = (struct vlan_ethhdr *)ethh;
1688                 cm_info->vlan = ntohs(vethh->h_vlan_TCI) & VLAN_VID_MASK;
1689                 eth_type = ntohs(vethh->h_vlan_encapsulated_proto);
1690         }
1691
1692         eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0);
1693
1694         ether_addr_copy(remote_mac_addr, ethh->h_source);
1695         ether_addr_copy(local_mac_addr, ethh->h_dest);
1696
1697         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_type=%d source mac: %pM\n",
1698                    eth_type, ethh->h_source);
1699
1700         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_hlen=%d destination mac: %pM\n",
1701                    eth_hlen, ethh->h_dest);
1702
1703         iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen);
1704
1705         if (eth_type == ETH_P_IP) {
1706                 cm_info->local_ip[0] = ntohl(iph->daddr);
1707                 cm_info->remote_ip[0] = ntohl(iph->saddr);
1708                 cm_info->ip_version = QED_TCP_IPV4;
1709
1710                 ip_hlen = (iph->ihl) * sizeof(u32);
1711                 *payload_len = ntohs(iph->tot_len) - ip_hlen;
1712         } else if (eth_type == ETH_P_IPV6) {
1713                 ip6h = (struct ipv6hdr *)iph;
1714                 for (i = 0; i < 4; i++) {
1715                         cm_info->local_ip[i] =
1716                             ntohl(ip6h->daddr.in6_u.u6_addr32[i]);
1717                         cm_info->remote_ip[i] =
1718                             ntohl(ip6h->saddr.in6_u.u6_addr32[i]);
1719                 }
1720                 cm_info->ip_version = QED_TCP_IPV6;
1721
1722                 ip_hlen = sizeof(*ip6h);
1723                 *payload_len = ntohs(ip6h->payload_len);
1724         } else {
1725                 DP_NOTICE(p_hwfn, "Unexpected ethertype on ll2 %x\n", eth_type);
1726                 return -EINVAL;
1727         }
1728
1729         tcph = (struct tcphdr *)((u8 *)iph + ip_hlen);
1730
1731         if (!tcph->syn) {
1732                 DP_NOTICE(p_hwfn,
1733                           "Only SYN type packet expected on this ll2 conn, iph->ihl=%d source=%d dest=%d\n",
1734                           iph->ihl, tcph->source, tcph->dest);
1735                 return -EINVAL;
1736         }
1737
1738         cm_info->local_port = ntohs(tcph->dest);
1739         cm_info->remote_port = ntohs(tcph->source);
1740
1741         qed_iwarp_print_cm_info(p_hwfn, cm_info);
1742
1743         *tcp_start_offset = eth_hlen + ip_hlen;
1744
1745         return 0;
1746 }
1747
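/* One fpdu tracking struct is kept per qp; look it up by the connection's
 * cid, relative to the start of the iwarp protocol cid range.
 */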
1748 static struct qed_iwarp_fpdu *qed_iwarp_get_curr_fpdu(struct qed_hwfn *p_hwfn,
1749                                                       u16 cid)
1750 {
1751         struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
1752         struct qed_iwarp_fpdu *partial_fpdu;
1753         u32 idx;
1754
1755         idx = cid - qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_IWARP);
1756         if (idx >= iwarp_info->max_num_partial_fpdus) {
1757                 DP_ERR(p_hwfn, "Invalid cid %x max_num_partial_fpdus=%x\n", cid,
1758                        iwarp_info->max_num_partial_fpdus);
1759                 return NULL;
1760         }
1761
1762         partial_fpdu = &iwarp_info->partial_fpdus[idx];
1763
1764         return partial_fpdu;
1765 }
1766
1767 enum qed_iwarp_mpa_pkt_type {
1768         QED_IWARP_MPA_PKT_PACKED,
1769         QED_IWARP_MPA_PKT_PARTIAL,
1770         QED_IWARP_MPA_PKT_UNALIGNED
1771 };
1772
1773 #define QED_IWARP_INVALID_FPDU_LENGTH 0xffff
1774 #define QED_IWARP_MPA_FPDU_LENGTH_SIZE (2)
1775 #define QED_IWARP_MPA_CRC32_DIGEST_SIZE (4)
1776
1777 /* Pad to multiple of 4 */
1778 #define QED_IWARP_PDU_DATA_LEN_WITH_PAD(data_len) ALIGN(data_len, 4)
1779 #define QED_IWARP_FPDU_LEN_WITH_PAD(_mpa_len)                              \
1780         (QED_IWARP_PDU_DATA_LEN_WITH_PAD((_mpa_len) +                      \
1781                                          QED_IWARP_MPA_FPDU_LENGTH_SIZE) + \
1782                                          QED_IWARP_MPA_CRC32_DIGEST_SIZE)
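/* Worked example (ours, not from the MPA spec): mpa_len = 13 gives
 * 13 + 2 (length field) = 15, padded to 16, plus a 4 byte CRC32 digest,
 * i.e. QED_IWARP_FPDU_LEN_WITH_PAD(13) == 20 bytes on the wire.
 */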
1783
1784 /* An fpdu can be fragmented over a maximum of 3 bds: header, partial mpa, unaligned */
1785 #define QED_IWARP_MAX_BDS_PER_FPDU 3
1786
1787 static const char * const pkt_type_str[] = {
1788         "QED_IWARP_MPA_PKT_PACKED",
1789         "QED_IWARP_MPA_PKT_PARTIAL",
1790         "QED_IWARP_MPA_PKT_UNALIGNED"
1791 };
1792
1793 static int
1794 qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn,
1795                       struct qed_iwarp_fpdu *fpdu,
1796                       struct qed_iwarp_ll2_buff *buf);
1797
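/* Classify the received tcp payload relative to fpdu boundaries:
 * PACKED    - a complete fpdu starts at the head of this payload.
 * PARTIAL   - a new fpdu starts here but extends beyond this segment; in the
 *             special one-byte case only the high byte of the mpa length is
 *             available, and the low byte arrives with the next segment.
 * UNALIGNED - this payload continues an fpdu begun in a previous segment.
 */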
1798 static enum qed_iwarp_mpa_pkt_type
1799 qed_iwarp_mpa_classify(struct qed_hwfn *p_hwfn,
1800                        struct qed_iwarp_fpdu *fpdu,
1801                        u16 tcp_payload_len, u8 *mpa_data)
1802 {
1803         enum qed_iwarp_mpa_pkt_type pkt_type;
1804         u16 mpa_len;
1805
1806         if (fpdu->incomplete_bytes) {
1807                 pkt_type = QED_IWARP_MPA_PKT_UNALIGNED;
1808                 goto out;
1809         }
1810
1811         /* special case of one byte remaining...
1812          * the lower byte will be read in the next packet
1813          */
1814         if (tcp_payload_len == 1) {
1815                 fpdu->fpdu_length = *mpa_data << BITS_PER_BYTE;
1816                 pkt_type = QED_IWARP_MPA_PKT_PARTIAL;
1817                 goto out;
1818         }
1819
1820         mpa_len = ntohs(*((u16 *)(mpa_data)));
1821         fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
1822
1823         if (fpdu->fpdu_length <= tcp_payload_len)
1824                 pkt_type = QED_IWARP_MPA_PKT_PACKED;
1825         else
1826                 pkt_type = QED_IWARP_MPA_PKT_PARTIAL;
1827
1828 out:
1829         DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1830                    "MPA_ALIGN: %s: fpdu_length=0x%x tcp_payload_len:0x%x\n",
1831                    pkt_type_str[pkt_type], fpdu->fpdu_length, tcp_payload_len);
1832
1833         return pkt_type;
1834 }
1835
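/* Capture a new fpdu from the current buffer: record where the packet header
 * and the mpa fragment sit (physical and virtual addresses), and compute
 * incomplete_bytes - how much of the fpdu is still missing. The one-byte
 * header case is marked with QED_IWARP_INVALID_FPDU_LENGTH until the second
 * length byte arrives.
 */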
1836 static void
1837 qed_iwarp_init_fpdu(struct qed_iwarp_ll2_buff *buf,
1838                     struct qed_iwarp_fpdu *fpdu,
1839                     struct unaligned_opaque_data *pkt_data,
1840                     u16 tcp_payload_size, u8 placement_offset)
1841 {
1842         fpdu->mpa_buf = buf;
1843         fpdu->pkt_hdr = buf->data_phys_addr + placement_offset;
1844         fpdu->pkt_hdr_size = pkt_data->tcp_payload_offset;
1845         fpdu->mpa_frag = buf->data_phys_addr + pkt_data->first_mpa_offset;
1846         fpdu->mpa_frag_virt = (u8 *)(buf->data) + pkt_data->first_mpa_offset;
1847
1848         if (tcp_payload_size == 1)
1849                 fpdu->incomplete_bytes = QED_IWARP_INVALID_FPDU_LENGTH;
1850         else if (tcp_payload_size < fpdu->fpdu_length)
1851                 fpdu->incomplete_bytes = fpdu->fpdu_length - tcp_payload_size;
1852         else
1853                 fpdu->incomplete_bytes = 0;     /* complete fpdu */
1854
1855         fpdu->mpa_frag_len = fpdu->fpdu_length - fpdu->incomplete_bytes;
1856 }
1857
1858 static int
1859 qed_iwarp_cp_pkt(struct qed_hwfn *p_hwfn,
1860                  struct qed_iwarp_fpdu *fpdu,
1861                  struct unaligned_opaque_data *pkt_data,
1862                  struct qed_iwarp_ll2_buff *buf, u16 tcp_payload_size)
1863 {
1864         u8 *tmp_buf = p_hwfn->p_rdma_info->iwarp.mpa_intermediate_buf;
1865         int rc;
1866
1867         /* We need to copy the data from the partial packet stored in fpdu
1868          * to the new buf; for this we also need to move the data currently
1869          * placed in the buf. The assumption is that the buffer is big enough
1870          * since fpdu_length <= mss. We use an intermediate buffer since
1871          * we may need to copy the new data to an overlapping location.
1872          */
1873         if ((fpdu->mpa_frag_len + tcp_payload_size) > (u16)buf->buff_size) {
1874                 DP_ERR(p_hwfn,
1875                        "MPA ALIGN: Unexpected: buffer is not large enough for split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n",
1876                        buf->buff_size, fpdu->mpa_frag_len,
1877                        tcp_payload_size, fpdu->incomplete_bytes);
1878                 return -EINVAL;
1879         }
1880
1881         DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1882                    "MPA ALIGN Copying fpdu: [%p, %d] [%p, %d]\n",
1883                    fpdu->mpa_frag_virt, fpdu->mpa_frag_len,
1884                    (u8 *)(buf->data) + pkt_data->first_mpa_offset,
1885                    tcp_payload_size);
1886
1887         memcpy(tmp_buf, fpdu->mpa_frag_virt, fpdu->mpa_frag_len);
1888         memcpy(tmp_buf + fpdu->mpa_frag_len,
1889                (u8 *)(buf->data) + pkt_data->first_mpa_offset,
1890                tcp_payload_size);
1891
1892         rc = qed_iwarp_recycle_pkt(p_hwfn, fpdu, fpdu->mpa_buf);
1893         if (rc)
1894                 return rc;
1895
1896         /* If we managed to post the buffer, copy the data to the new buffer;
1897          * otherwise this will occur in the next round...
1898          */
1899         memcpy((u8 *)(buf->data), tmp_buf,
1900                fpdu->mpa_frag_len + tcp_payload_size);
1901
1902         fpdu->mpa_buf = buf;
1903         /* fpdu->pkt_hdr remains as is */
1904         /* fpdu->mpa_frag is overridden with new buf */
1905         fpdu->mpa_frag = buf->data_phys_addr;
1906         fpdu->mpa_frag_virt = buf->data;
1907         fpdu->mpa_frag_len += tcp_payload_size;
1908
1909         fpdu->incomplete_bytes -= tcp_payload_size;
1910
1911         DP_VERBOSE(p_hwfn,
1912                    QED_MSG_RDMA,
1913                    "MPA ALIGN: split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n",
1914                    buf->buff_size, fpdu->mpa_frag_len, tcp_payload_size,
1915                    fpdu->incomplete_bytes);
1916
1917         return 0;
1918 }
1919
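/* Complete an fpdu length that was split across segments: classify stored
 * the high byte of the mpa length (shifted left by BITS_PER_BYTE), and the
 * low byte is the first byte of the current payload, so OR-ing the two
 * reconstructs mpa_len. One byte of the length field was already consumed,
 * hence incomplete_bytes = fpdu_length - 1.
 */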
1920 static void
1921 qed_iwarp_update_fpdu_length(struct qed_hwfn *p_hwfn,
1922                              struct qed_iwarp_fpdu *fpdu, u8 *mpa_data)
1923 {
1924         u16 mpa_len;
1925
1926         /* Update incomplete packets if needed */
1927         if (fpdu->incomplete_bytes == QED_IWARP_INVALID_FPDU_LENGTH) {
1928                 /* Missing lower byte is now available */
1929                 mpa_len = fpdu->fpdu_length | *mpa_data;
1930                 fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
1931                 fpdu->mpa_frag_len = fpdu->fpdu_length;
1932                 /* one byte of hdr */
1933                 fpdu->incomplete_bytes = fpdu->fpdu_length - 1;
1934                 DP_VERBOSE(p_hwfn,
1935                            QED_MSG_RDMA,
1936                            "MPA_ALIGN: Partial header mpa_len=%x fpdu_length=%x incomplete_bytes=%x\n",
1937                            mpa_len, fpdu->fpdu_length, fpdu->incomplete_bytes);
1938         }
1939 }
1940
1941 #define QED_IWARP_IS_RIGHT_EDGE(_curr_pkt) \
1942         (GET_FIELD((_curr_pkt)->flags,     \
1943                    UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE))
1944
1945 /* This function is used to recycle a buffer using the ll2 drop option. It
1946  * relies on the tx ordering to ensure that all buffers posted to tx before
1947  * this one were completed. The buffer sent here is passed as a cookie to the
1948  * tx completion function and can then be reposted to the rx chain when done.
1949  * The flow that requires this is the one where an FPDU splits over more than
1950  * 3 tcp segments: the driver needs to re-post an rx buffer instead of the
1951  * one received, but it can't simply repost the buffer it copied from, since
1952  * that buffer may have been a packed FPDU partially posted to FW, and the
1953  * driver must ensure FW is done with it first.
1954  */
1955 static int
1956 qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn,
1957                       struct qed_iwarp_fpdu *fpdu,
1958                       struct qed_iwarp_ll2_buff *buf)
1959 {
1960         struct qed_ll2_tx_pkt_info tx_pkt;
1961         u8 ll2_handle;
1962         int rc;
1963
1964         memset(&tx_pkt, 0, sizeof(tx_pkt));
1965         tx_pkt.num_of_bds = 1;
1966         tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP;
1967         tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2;
1968         tx_pkt.first_frag = fpdu->pkt_hdr;
1969         tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
1970         buf->piggy_buf = NULL;
1971         tx_pkt.cookie = buf;
1972
1973         ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
1974
1975         rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
1976         if (rc)
1977                 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1978                            "Can't drop packet rc=%d\n", rc);
1979
1980         DP_VERBOSE(p_hwfn,
1981                    QED_MSG_RDMA,
1982                    "MPA_ALIGN: send drop tx packet [%lx, 0x%x], buf=%p, rc=%d\n",
1983                    (unsigned long int)tx_pkt.first_frag,
1984                    tx_pkt.first_frag_len, buf, rc);
1985
1986         return rc;
1987 }
1988
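/* Transmit just the packet header on the "aligned right trimmed" tx queue
 * (conveyed through the overloaded vlan field). Used when the right edge of
 * the tcp window was reached but the fpdu is still incomplete, so FW is
 * notified without any payload being forwarded.
 */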
1989 static int
1990 qed_iwarp_win_right_edge(struct qed_hwfn *p_hwfn, struct qed_iwarp_fpdu *fpdu)
1991 {
1992         struct qed_ll2_tx_pkt_info tx_pkt;
1993         u8 ll2_handle;
1994         int rc;
1995
1996         memset(&tx_pkt, 0, sizeof(tx_pkt));
1997         tx_pkt.num_of_bds = 1;
1998         tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
1999         tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2;
2000
2001         tx_pkt.first_frag = fpdu->pkt_hdr;
2002         tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
2003         tx_pkt.enable_ip_cksum = true;
2004         tx_pkt.enable_l4_cksum = true;
2005         tx_pkt.calc_ip_len = true;
2006         /* vlan overload with enum iwarp_ll2_tx_queues */
2007         tx_pkt.vlan = IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE;
2008
2009         ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
2010
2011         rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
2012         if (rc)
2013                 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2014                            "Can't send right edge rc=%d\n", rc);
2015         DP_VERBOSE(p_hwfn,
2016                    QED_MSG_RDMA,
2017                    "MPA_ALIGN: Sent right edge FPDU num_bds=%d [%lx, 0x%x], rc=%d\n",
2018                    tx_pkt.num_of_bds,
2019                    (unsigned long int)tx_pkt.first_frag,
2020                    tx_pkt.first_frag_len, rc);
2021
2022         return rc;
2023 }
2024
2025 static int
2026 qed_iwarp_send_fpdu(struct qed_hwfn *p_hwfn,
2027                     struct qed_iwarp_fpdu *fpdu,
2028                     struct unaligned_opaque_data *curr_pkt,
2029                     struct qed_iwarp_ll2_buff *buf,
2030                     u16 tcp_payload_size, enum qed_iwarp_mpa_pkt_type pkt_type)
2031 {
2032         struct qed_ll2_tx_pkt_info tx_pkt;
2033         u8 ll2_handle;
2034         int rc;
2035
2036         memset(&tx_pkt, 0, sizeof(tx_pkt));
2037
2038         /* An unaligned packet means it's split over two tcp segments. So the
2039          * complete packet requires 3 bds, one for the header, one for the
2040          * part of the fpdu of the first tcp segment, and the last fragment
2041          * will point to the remainder of the fpdu. A packed pdu requires only
2042          * two bds, one for the header and one for the data.
2043          */
2044         tx_pkt.num_of_bds = (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED) ? 3 : 2;
2045         tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
2046         tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2; /* offset in words */
2047
2048         /* Send the mpa_buf only with the last fpdu (in case of packed) */
2049         if (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED ||
2050             tcp_payload_size <= fpdu->fpdu_length)
2051                 tx_pkt.cookie = fpdu->mpa_buf;
2052
2053         tx_pkt.first_frag = fpdu->pkt_hdr;
2054         tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
2055         tx_pkt.enable_ip_cksum = true;
2056         tx_pkt.enable_l4_cksum = true;
2057         tx_pkt.calc_ip_len = true;
2058         /* vlan overload with enum iwarp_ll2_tx_queues */
2059         tx_pkt.vlan = IWARP_LL2_ALIGNED_TX_QUEUE;
2060
2061         /* special case of unaligned packet and not packed, need to send
2062          * both buffers as cookie to release.
2063          */
2064         if (tcp_payload_size == fpdu->incomplete_bytes)
2065                 fpdu->mpa_buf->piggy_buf = buf;
2066
2067         ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
2068
2069         /* Set first fragment to header */
2070         rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
2071         if (rc)
2072                 goto out;
2073
2074         /* Set second fragment to first part of packet */
2075         rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, ll2_handle,
2076                                                fpdu->mpa_frag,
2077                                                fpdu->mpa_frag_len);
2078         if (rc)
2079                 goto out;
2080
2081         if (!fpdu->incomplete_bytes)
2082                 goto out;
2083
2084         /* Set third fragment to second part of the packet */
2085         rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn,
2086                                                ll2_handle,
2087                                                buf->data_phys_addr +
2088                                                curr_pkt->first_mpa_offset,
2089                                                fpdu->incomplete_bytes);
2090 out:
2091         DP_VERBOSE(p_hwfn,
2092                    QED_MSG_RDMA,
2093                    "MPA_ALIGN: Sent FPDU num_bds=%d first_frag_len=%x, mpa_frag_len=0x%x, incomplete_bytes:0x%x rc=%d\n",
2094                    tx_pkt.num_of_bds,
2095                    tx_pkt.first_frag_len,
2096                    fpdu->mpa_frag_len,
2097                    fpdu->incomplete_bytes, rc);
2098
2099         return rc;
2100 }
2101
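/* Unpack the two 32-bit opaque words of an ll2 completion into the
 * unaligned_opaque_data struct, rebase first_mpa_offset so that it is
 * relative to the start of the buffer (tcp payload offset included) and
 * convert the cid to host byte order.
 */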
2102 static void
2103 qed_iwarp_mpa_get_data(struct qed_hwfn *p_hwfn,
2104                        struct unaligned_opaque_data *curr_pkt,
2105                        u32 opaque_data0, u32 opaque_data1)
2106 {
2107         u64 opaque_data;
2108
2109         opaque_data = HILO_64(opaque_data1, opaque_data0);
2110         *curr_pkt = *((struct unaligned_opaque_data *)&opaque_data);
2111
2112         curr_pkt->first_mpa_offset = curr_pkt->tcp_payload_offset +
2113                                      le16_to_cpu(curr_pkt->first_mpa_offset);
2114         curr_pkt->cid = le32_to_cpu(curr_pkt->cid);
2115 }
2116
2117 /* This function is called when an unaligned or incomplete MPA packet arrives;
2118  * the driver needs to align the packet, perhaps using previous data, and send
2119  * it down to FW once it is aligned.
2120  */
2121 static int
2122 qed_iwarp_process_mpa_pkt(struct qed_hwfn *p_hwfn,
2123                           struct qed_iwarp_ll2_mpa_buf *mpa_buf)
2124 {
2125         struct unaligned_opaque_data *curr_pkt = &mpa_buf->data;
2126         struct qed_iwarp_ll2_buff *buf = mpa_buf->ll2_buf;
2127         enum qed_iwarp_mpa_pkt_type pkt_type;
2128         struct qed_iwarp_fpdu *fpdu;
2129         int rc = -EINVAL;
2130         u8 *mpa_data;
2131
2132         fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, curr_pkt->cid & 0xffff);
2133         if (!fpdu) { /* something corrupt with cid, post rx back */
2134                 DP_ERR(p_hwfn, "Invalid cid, drop and post back to rx cid=%x\n",
2135                        curr_pkt->cid);
2136                 goto err;
2137         }
2138
2139         do {
2140                 mpa_data = ((u8 *)(buf->data) + curr_pkt->first_mpa_offset);
2141
2142                 pkt_type = qed_iwarp_mpa_classify(p_hwfn, fpdu,
2143                                                   mpa_buf->tcp_payload_len,
2144                                                   mpa_data);
2145
2146                 switch (pkt_type) {
2147                 case QED_IWARP_MPA_PKT_PARTIAL:
2148                         qed_iwarp_init_fpdu(buf, fpdu,
2149                                             curr_pkt,
2150                                             mpa_buf->tcp_payload_len,
2151                                             mpa_buf->placement_offset);
2152
2153                         if (!QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) {
2154                                 mpa_buf->tcp_payload_len = 0;
2155                                 break;
2156                         }
2157
2158                         rc = qed_iwarp_win_right_edge(p_hwfn, fpdu);
2159
2160                         if (rc) {
2161                                 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2162                                            "Can't send FPDU:reset rc=%d\n", rc);
2163                                 memset(fpdu, 0, sizeof(*fpdu));
2164                                 break;
2165                         }
2166
2167                         mpa_buf->tcp_payload_len = 0;
2168                         break;
2169                 case QED_IWARP_MPA_PKT_PACKED:
2170                         qed_iwarp_init_fpdu(buf, fpdu,
2171                                             curr_pkt,
2172                                             mpa_buf->tcp_payload_len,
2173                                             mpa_buf->placement_offset);
2174
2175                         rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf,
2176                                                  mpa_buf->tcp_payload_len,
2177                                                  pkt_type);
2178                         if (rc) {
2179                                 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2180                                            "Can't send FPDU:reset rc=%d\n", rc);
2181                                 memset(fpdu, 0, sizeof(*fpdu));
2182                                 break;
2183                         }
2184
2185                         mpa_buf->tcp_payload_len -= fpdu->fpdu_length;
2186                         curr_pkt->first_mpa_offset += fpdu->fpdu_length;
2187                         break;
2188                 case QED_IWARP_MPA_PKT_UNALIGNED:
2189                         qed_iwarp_update_fpdu_length(p_hwfn, fpdu, mpa_data);
2190                         if (mpa_buf->tcp_payload_len < fpdu->incomplete_bytes) {
2191                                 /* special handling of fpdu split over more
2192                                  * than 2 segments
2193                                  */
2194                                 if (QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) {
2195                                         rc = qed_iwarp_win_right_edge(p_hwfn,
2196                                                                       fpdu);
2197                                         /* packet will be re-processed later */
2198                                         if (rc)
2199                                                 return rc;
2200                                 }
2201
2202                                 rc = qed_iwarp_cp_pkt(p_hwfn, fpdu, curr_pkt,
2203                                                       buf,
2204                                                       mpa_buf->tcp_payload_len);
2205                                 if (rc) /* packet will be re-processed later */
2206                                         return rc;
2207
2208                                 mpa_buf->tcp_payload_len = 0;
2209                                 break;
2210                         }
2211
2212                         rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf,
2213                                                  mpa_buf->tcp_payload_len,
2214                                                  pkt_type);
2215                         if (rc) {
2216                                 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2217                                            "Can't send FPDU:delay rc=%d\n", rc);
2218                                 /* don't reset fpdu -> we need it for next
2219                                  * classify
2220                                  */
2221                                 break;
2222                         }
2223
2224                         mpa_buf->tcp_payload_len -= fpdu->incomplete_bytes;
2225                         curr_pkt->first_mpa_offset += fpdu->incomplete_bytes;
2226                         /* The framed PDU was sent - no more incomplete bytes */
2227                         fpdu->incomplete_bytes = 0;
2228                         break;
2229                 }
2230         } while (mpa_buf->tcp_payload_len && !rc);
2231
2232         return rc;
2233
2234 err:
2235         qed_iwarp_ll2_post_rx(p_hwfn,
2236                               buf,
2237                               p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle);
2238         return rc;
2239 }
2240
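/* Drain the pending list of mpa buffers. -EBUSY means tx resources ran out;
 * the buffer then stays on the pending list and processing resumes from the
 * tx completion callback. Any other error is logged and stops the loop.
 */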
2241 static void qed_iwarp_process_pending_pkts(struct qed_hwfn *p_hwfn)
2242 {
2243         struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2244         struct qed_iwarp_ll2_mpa_buf *mpa_buf = NULL;
2245         int rc;
2246
2247         while (!list_empty(&iwarp_info->mpa_buf_pending_list)) {
2248                 mpa_buf = list_first_entry(&iwarp_info->mpa_buf_pending_list,
2249                                            struct qed_iwarp_ll2_mpa_buf,
2250                                            list_entry);
2251
2252                 rc = qed_iwarp_process_mpa_pkt(p_hwfn, mpa_buf);
2253
2254                 /* busy means break and continue processing later, don't
2255                  * remove the buf from the pending list.
2256                  */
2257                 if (rc == -EBUSY)
2258                         break;
2259
2260                 list_del(&mpa_buf->list_entry);
2261                 list_add_tail(&mpa_buf->list_entry, &iwarp_info->mpa_buf_list);
2262
2263                 if (rc) {       /* different error, don't continue */
2264                         DP_NOTICE(p_hwfn, "process pkts failed rc=%d\n", rc);
2265                         break;
2266                 }
2267         }
2268 }
2269
2270 static void
2271 qed_iwarp_ll2_comp_mpa_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
2272 {
2273         struct qed_iwarp_ll2_mpa_buf *mpa_buf;
2274         struct qed_iwarp_info *iwarp_info;
2275         struct qed_hwfn *p_hwfn = cxt;
2276
2277         iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2278         mpa_buf = list_first_entry_or_null(&iwarp_info->mpa_buf_list,
2279                                            struct qed_iwarp_ll2_mpa_buf, list_entry);
2280         if (!mpa_buf) {
2281                 DP_ERR(p_hwfn, "No free mpa buf\n");
2282                 goto err;
2283         }
2284
2285         list_del(&mpa_buf->list_entry);
2286         qed_iwarp_mpa_get_data(p_hwfn, &mpa_buf->data,
2287                                data->opaque_data_0, data->opaque_data_1);
2288
2289         DP_VERBOSE(p_hwfn,
2290                    QED_MSG_RDMA,
2291                    "LL2 MPA CompRx payload_len:0x%x\tfirst_mpa_offset:0x%x\ttcp_payload_offset:0x%x\tflags:0x%x\tcid:0x%x\n",
2292                    data->length.packet_length, mpa_buf->data.first_mpa_offset,
2293                    mpa_buf->data.tcp_payload_offset, mpa_buf->data.flags,
2294                    mpa_buf->data.cid);
2295
2296         mpa_buf->ll2_buf = data->cookie;
2297         mpa_buf->tcp_payload_len = data->length.packet_length -
2298                                    mpa_buf->data.first_mpa_offset;
2299         mpa_buf->data.first_mpa_offset += data->u.placement_offset;
2300         mpa_buf->placement_offset = data->u.placement_offset;
2301
2302         list_add_tail(&mpa_buf->list_entry, &iwarp_info->mpa_buf_pending_list);
2303
2304         qed_iwarp_process_pending_pkts(p_hwfn);
2305         return;
2306 err:
2307         qed_iwarp_ll2_post_rx(p_hwfn, data->cookie,
2308                               iwarp_info->ll2_mpa_handle);
2309 }
2310
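/* Rx completion on the SYN ll2 connection. Validates the packet and parses
 * its 4-tuple, then looks for a matching listener. Without one, the SYN is
 * posted back to the device on the loopback tx queue (presumably letting FW
 * generate the reject); with one, a free ep is bound to the connection and
 * tcp offload is started.
 */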
2311 static void
2312 qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
2313 {
2314         struct qed_iwarp_ll2_buff *buf = data->cookie;
2315         struct qed_iwarp_listener *listener;
2316         struct qed_ll2_tx_pkt_info tx_pkt;
2317         struct qed_iwarp_cm_info cm_info;
2318         struct qed_hwfn *p_hwfn = cxt;
2319         u8 remote_mac_addr[ETH_ALEN];
2320         u8 local_mac_addr[ETH_ALEN];
2321         struct qed_iwarp_ep *ep;
2322         int tcp_start_offset;
2323         u8 ts_hdr_size = 0;
2324         u8 ll2_syn_handle;
2325         int payload_len;
2326         u32 hdr_size;
2327         int rc;
2328
2329         memset(&cm_info, 0, sizeof(cm_info));
2330         ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle;
2331
2332         /* Check if packet was received with errors... */
2333         if (data->err_flags) {
2334                 DP_NOTICE(p_hwfn, "Error received on SYN packet: 0x%x\n",
2335                           data->err_flags);
2336                 goto err;
2337         }
2338
2339         if (GET_FIELD(data->parse_flags,
2340                       PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED) &&
2341             GET_FIELD(data->parse_flags, PARSING_AND_ERR_FLAGS_L4CHKSMERROR)) {
2342                 DP_NOTICE(p_hwfn, "Syn packet received with checksum error\n");
2343                 goto err;
2344         }
2345
2346         rc = qed_iwarp_parse_rx_pkt(p_hwfn, &cm_info, (u8 *)(buf->data) +
2347                                     data->u.placement_offset, remote_mac_addr,
2348                                     local_mac_addr, &payload_len,
2349                                     &tcp_start_offset);
2350         if (rc)
2351                 goto err;
2352
2353         /* Check if there is a listener for this 4-tuple+vlan */
2354         listener = qed_iwarp_get_listener(p_hwfn, &cm_info);
2355         if (!listener) {
2356                 DP_VERBOSE(p_hwfn,
2357                            QED_MSG_RDMA,
2358                            "SYN received on tuple not listened on parse_flags=%d packet len=%d\n",
2359                            data->parse_flags, data->length.packet_length);
2360
2361                 memset(&tx_pkt, 0, sizeof(tx_pkt));
2362                 tx_pkt.num_of_bds = 1;
2363                 tx_pkt.vlan = data->vlan;
2364
2365                 if (GET_FIELD(data->parse_flags,
2366                               PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
2367                         SET_FIELD(tx_pkt.bd_flags,
2368                                   CORE_TX_BD_DATA_VLAN_INSERTION, 1);
2369
2370                 tx_pkt.l4_hdr_offset_w = (data->length.packet_length) >> 2;
2371                 tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
2372                 tx_pkt.first_frag = buf->data_phys_addr +
2373                                     data->u.placement_offset;
2374                 tx_pkt.first_frag_len = data->length.packet_length;
2375                 tx_pkt.cookie = buf;
2376
2377                 rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_syn_handle,
2378                                                &tx_pkt, true);
2379
2380                 if (rc) {
2381                         DP_NOTICE(p_hwfn,
2382                                   "Can't post SYN back to chip rc=%d\n", rc);
2383                         goto err;
2384                 }
2385                 return;
2386         }
2387
2388         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Received syn on listening port\n");
2389         /* There may be an open ep on this connection if this is a syn
2390          * retransmit... need to make sure there isn't...
2391          */
2392         if (qed_iwarp_ep_exists(p_hwfn, &cm_info))
2393                 goto err;
2394
2395         ep = qed_iwarp_get_free_ep(p_hwfn);
2396         if (!ep)
2397                 goto err;
2398
2399         spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2400         list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
2401         spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2402
2403         ether_addr_copy(ep->remote_mac_addr, remote_mac_addr);
2404         ether_addr_copy(ep->local_mac_addr, local_mac_addr);
2405
2406         memcpy(&ep->cm_info, &cm_info, sizeof(ep->cm_info));
2407
2408         if (p_hwfn->p_rdma_info->iwarp.tcp_flags & QED_IWARP_TS_EN)
2409                 ts_hdr_size = TIMESTAMP_HEADER_SIZE;
2410
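        /* 40 = IPv4(20) + TCP(20) header bytes, 60 = IPv6(40) + TCP(20) */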
2411         hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 40 : 60) +
2412                    ts_hdr_size;
2413         ep->mss = p_hwfn->p_rdma_info->iwarp.max_mtu - hdr_size;
2414         ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);
2415
2416         ep->event_cb = listener->event_cb;
2417         ep->cb_context = listener->cb_context;
2418         ep->connect_mode = TCP_CONNECT_PASSIVE;
2419
2420         ep->syn = buf;
2421         ep->syn_ip_payload_length = (u16)payload_len;
2422         ep->syn_phy_addr = buf->data_phys_addr + data->u.placement_offset +
2423                            tcp_start_offset;
2424
2425         rc = qed_iwarp_tcp_offload(p_hwfn, ep);
2426         if (rc) {
2427                 qed_iwarp_return_ep(p_hwfn, ep);
2428                 goto err;
2429         }
2430
2431         return;
2432 err:
2433         qed_iwarp_ll2_post_rx(p_hwfn, buf, ll2_syn_handle);
2434 }
2435
2436 static void qed_iwarp_ll2_rel_rx_pkt(void *cxt, u8 connection_handle,
2437                                      void *cookie, dma_addr_t rx_buf_addr,
2438                                      bool b_last_packet)
2439 {
2440         struct qed_iwarp_ll2_buff *buffer = cookie;
2441         struct qed_hwfn *p_hwfn = cxt;
2442
2443         dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
2444                           buffer->data, buffer->data_phys_addr);
2445         kfree(buffer);
2446 }
2447
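/* Tx completion: the cookie is the original rx buffer (plus, possibly, a
 * piggybacked second buffer), and can now be reposted to the rx chain. For
 * the mpa connection, pending mpa packets that were waiting for tx
 * resources are then given another chance to be processed.
 */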
2448 static void qed_iwarp_ll2_comp_tx_pkt(void *cxt, u8 connection_handle,
2449                                       void *cookie, dma_addr_t first_frag_addr,
2450                                       bool b_last_fragment, bool b_last_packet)
2451 {
2452         struct qed_iwarp_ll2_buff *buffer = cookie;
2453         struct qed_iwarp_ll2_buff *piggy;
2454         struct qed_hwfn *p_hwfn = cxt;
2455
2456         if (!buffer)            /* can happen in packed mpa unaligned... */
2457                 return;
2458
2459         /* this was originally an rx packet, post it back */
2460         piggy = buffer->piggy_buf;
2461         if (piggy) {
2462                 buffer->piggy_buf = NULL;
2463                 qed_iwarp_ll2_post_rx(p_hwfn, piggy, connection_handle);
2464         }
2465
2466         qed_iwarp_ll2_post_rx(p_hwfn, buffer, connection_handle);
2467
2468         if (connection_handle == p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle)
2469                 qed_iwarp_process_pending_pkts(p_hwfn);
2472 }
2473
2474 static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle,
2475                                      void *cookie, dma_addr_t first_frag_addr,
2476                                      bool b_last_fragment, bool b_last_packet)
2477 {
2478         struct qed_iwarp_ll2_buff *buffer = cookie;
2479         struct qed_hwfn *p_hwfn = cxt;
2480
2481         if (!buffer)
2482                 return;
2483
2484         if (buffer->piggy_buf) {
2485                 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
2486                                   buffer->piggy_buf->buff_size,
2487                                   buffer->piggy_buf->data,
2488                                   buffer->piggy_buf->data_phys_addr);
2489
2490                 kfree(buffer->piggy_buf);
2491         }
2492
2493         dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
2494                           buffer->data, buffer->data_phys_addr);
2495
2496         kfree(buffer);
2497 }
2498
2499 /* The only slowpath for iwarp ll2 is unalign flush. When this completion
2500  * is received, need to reset the FPDU.
2501  */
2502 static void
2503 qed_iwarp_ll2_slowpath(void *cxt,
2504                        u8 connection_handle,
2505                        u32 opaque_data_0, u32 opaque_data_1)
2506 {
2507         struct unaligned_opaque_data unalign_data;
2508         struct qed_hwfn *p_hwfn = cxt;
2509         struct qed_iwarp_fpdu *fpdu;
2510
2511         qed_iwarp_mpa_get_data(p_hwfn, &unalign_data,
2512                                opaque_data_0, opaque_data_1);
2513
2514         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "(0x%x) Flush fpdu\n",
2515                    unalign_data.cid);
2516
2517         fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, (u16)unalign_data.cid);
2518         if (fpdu)
2519                 memset(fpdu, 0, sizeof(*fpdu));
2520 }
2521
2522 static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2523 {
2524         struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2525         int rc = 0;
2526
2527         if (iwarp_info->ll2_syn_handle != QED_IWARP_HANDLE_INVAL) {
2528                 rc = qed_ll2_terminate_connection(p_hwfn,
2529                                                   iwarp_info->ll2_syn_handle);
2530                 if (rc)
2531                         DP_INFO(p_hwfn, "Failed to terminate syn connection\n");
2532
2533                 qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_syn_handle);
2534                 iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
2535         }
2536
2537         if (iwarp_info->ll2_ooo_handle != QED_IWARP_HANDLE_INVAL) {
2538                 rc = qed_ll2_terminate_connection(p_hwfn,
2539                                                   iwarp_info->ll2_ooo_handle);
2540                 if (rc)
2541                         DP_INFO(p_hwfn, "Failed to terminate ooo connection\n");
2542
2543                 qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_ooo_handle);
2544                 iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL;
2545         }
2546
2547         if (iwarp_info->ll2_mpa_handle != QED_IWARP_HANDLE_INVAL) {
2548                 rc = qed_ll2_terminate_connection(p_hwfn,
2549                                                   iwarp_info->ll2_mpa_handle);
2550                 if (rc)
2551                         DP_INFO(p_hwfn, "Failed to terminate mpa connection\n");
2552
2553                 qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_mpa_handle);
2554                 iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL;
2555         }
2556
2557         qed_llh_remove_mac_filter(p_hwfn,
2558                                   p_ptt, p_hwfn->p_rdma_info->iwarp.mac_addr);
2559         return rc;
2560 }
2561
2562 static int
2563 qed_iwarp_ll2_alloc_buffers(struct qed_hwfn *p_hwfn,
2564                             int num_rx_bufs, int buff_size, u8 ll2_handle)
2565 {
2566         struct qed_iwarp_ll2_buff *buffer;
2567         int rc = 0;
2568         int i;
2569
2570         for (i = 0; i < num_rx_bufs; i++) {
2571                 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
2572                 if (!buffer) {
2573                         rc = -ENOMEM;
2574                         break;
2575                 }
2576
2577                 buffer->data = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
2578                                                   buff_size,
2579                                                   &buffer->data_phys_addr,
2580                                                   GFP_KERNEL);
2581                 if (!buffer->data) {
2582                         kfree(buffer);
2583                         rc = -ENOMEM;
2584                         break;
2585                 }
2586
2587                 buffer->buff_size = buff_size;
2588                 rc = qed_iwarp_ll2_post_rx(p_hwfn, buffer, ll2_handle);
2589                 if (rc)
2590                         /* buffers will be deallocated by qed_ll2 */
2591                         break;
2592         }
2593         return rc;
2594 }
2595
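/* Rx buffer sizing: a max-mtu frame plus the ethernet header, up to two
 * vlan tags and 2 bytes of alignment, rounded up to a whole cache line.
 * The extra ETH_CACHE_LINE_SIZE term appears to leave room for the rx
 * placement offset (our reading - the constant is not documented here).
 */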
2596 #define QED_IWARP_MAX_BUF_SIZE(mtu)                                  \
2597         ALIGN((mtu) + ETH_HLEN + 2 * VLAN_HLEN + 2 + ETH_CACHE_LINE_SIZE, \
2598                 ETH_CACHE_LINE_SIZE)
2599
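/* Bring up the three ll2 connections iwarp relies on: a SYN connection for
 * incoming connection requests, an OOO connection for out-of-order tcp
 * segments, and the unaligned MPA connection used to re-align fpdus. Also
 * allocates the fpdu tracking array, the intermediate copy buffer and the
 * mpa buffer lists used by the alignment flow.
 */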
2600 static int
2601 qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
2602                     struct qed_rdma_start_in_params *params,
2603                     struct qed_ptt *p_ptt)
2604 {
2605         struct qed_iwarp_info *iwarp_info;
2606         struct qed_ll2_acquire_data data;
2607         struct qed_ll2_cbs cbs;
2608         u32 mpa_buff_size;
2609         u16 n_ooo_bufs;
2610         int rc = 0;
2611         int i;
2612
2613         iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2614         iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
2615         iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL;
2616         iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL;
2617
2618         iwarp_info->max_mtu = params->max_mtu;
2619
2620         ether_addr_copy(p_hwfn->p_rdma_info->iwarp.mac_addr, params->mac_addr);
2621
2622         rc = qed_llh_add_mac_filter(p_hwfn, p_ptt, params->mac_addr);
2623         if (rc)
2624                 return rc;
2625
2626         /* Start SYN connection */
2627         cbs.rx_comp_cb = qed_iwarp_ll2_comp_syn_pkt;
2628         cbs.rx_release_cb = qed_iwarp_ll2_rel_rx_pkt;
2629         cbs.tx_comp_cb = qed_iwarp_ll2_comp_tx_pkt;
2630         cbs.tx_release_cb = qed_iwarp_ll2_rel_tx_pkt;
2631         cbs.cookie = p_hwfn;
2632
2633         memset(&data, 0, sizeof(data));
2634         data.input.conn_type = QED_LL2_TYPE_IWARP;
2635         data.input.mtu = QED_IWARP_MAX_SYN_PKT_SIZE;
2636         data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE;
2637         data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE;
2638         data.input.tx_max_bds_per_packet = 1;   /* will never be fragmented */
2639         data.input.tx_tc = PKT_LB_TC;
2640         data.input.tx_dest = QED_LL2_TX_DEST_LB;
2641         data.p_connection_handle = &iwarp_info->ll2_syn_handle;
2642         data.cbs = &cbs;
2643
2644         rc = qed_ll2_acquire_connection(p_hwfn, &data);
2645         if (rc) {
2646                 DP_NOTICE(p_hwfn, "Failed to acquire LL2 connection\n");
2647                 qed_llh_remove_mac_filter(p_hwfn, p_ptt, params->mac_addr);
2648                 return rc;
2649         }
2650
2651         rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_syn_handle);
2652         if (rc) {
2653                 DP_NOTICE(p_hwfn, "Failed to establish LL2 connection\n");
2654                 goto err;
2655         }
2656
2657         rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
2658                                          QED_IWARP_LL2_SYN_RX_SIZE,
2659                                          QED_IWARP_MAX_SYN_PKT_SIZE,
2660                                          iwarp_info->ll2_syn_handle);
2661         if (rc)
2662                 goto err;
2663
2664         /* Start OOO connection */
2665         data.input.conn_type = QED_LL2_TYPE_OOO;
2666         data.input.mtu = params->max_mtu;
2667
2668         n_ooo_bufs = (QED_IWARP_MAX_OOO * QED_IWARP_RCV_WND_SIZE_DEF) /
2669                      iwarp_info->max_mtu;
2670         n_ooo_bufs = min_t(u32, n_ooo_bufs, QED_IWARP_LL2_OOO_MAX_RX_SIZE);
2671
2672         data.input.rx_num_desc = n_ooo_bufs;
2673         data.input.rx_num_ooo_buffers = n_ooo_bufs;
2674
2675         data.input.tx_max_bds_per_packet = 1;   /* will never be fragmented */
2676         data.input.tx_num_desc = QED_IWARP_LL2_OOO_DEF_TX_SIZE;
2677         data.p_connection_handle = &iwarp_info->ll2_ooo_handle;
2678
2679         rc = qed_ll2_acquire_connection(p_hwfn, &data);
2680         if (rc)
2681                 goto err;
2682
2683         rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_ooo_handle);
2684         if (rc)
2685                 goto err;
2686
2687         /* Start Unaligned MPA connection */
2688         cbs.rx_comp_cb = qed_iwarp_ll2_comp_mpa_pkt;
2689         cbs.slowpath_cb = qed_iwarp_ll2_slowpath;
2690
2691         memset(&data, 0, sizeof(data));
2692         data.input.conn_type = QED_LL2_TYPE_IWARP;
2693         data.input.mtu = params->max_mtu;
2694         /* FW requires that once a packet arrives OOO, it must have at
2695          * least 2 rx buffers available on the unaligned connection
2696          * for handling the case that it is a partial fpdu.
2697          */
2698         data.input.rx_num_desc = n_ooo_bufs * 2;
2699         data.input.tx_num_desc = data.input.rx_num_desc;
2700         data.input.tx_max_bds_per_packet = QED_IWARP_MAX_BDS_PER_FPDU;
2701         data.p_connection_handle = &iwarp_info->ll2_mpa_handle;
2702         data.input.secondary_queue = true;
2703         data.cbs = &cbs;
2704
2705         rc = qed_ll2_acquire_connection(p_hwfn, &data);
2706         if (rc)
2707                 goto err;
2708
2709         rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_mpa_handle);
2710         if (rc)
2711                 goto err;
2712
2713         mpa_buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu);
2714         rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
2715                                          data.input.rx_num_desc,
2716                                          mpa_buff_size,
2717                                          iwarp_info->ll2_mpa_handle);
2718         if (rc)
2719                 goto err;
2720
2721         iwarp_info->partial_fpdus = kcalloc((u16)p_hwfn->p_rdma_info->num_qps,
2722                                             sizeof(*iwarp_info->partial_fpdus),
2723                                             GFP_KERNEL);
        if (!iwarp_info->partial_fpdus) {
                rc = -ENOMEM;
                goto err;
        }
2726
2727         iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps;
2728
2729         iwarp_info->mpa_intermediate_buf = kzalloc(mpa_buff_size, GFP_KERNEL);
2730         if (!iwarp_info->mpa_intermediate_buf) {
                     rc = -ENOMEM;
2731                 goto err;
             }
2732
2733         /* The mpa_bufs array holds pending Rx packets received on the mpa
2734          * ll2 connection that have no room on the tx ring and require
2735          * later processing. We can't fail to allocate such a struct at
2736          * runtime, so we allocate enough entries to cover all Rx packets.
2737          */
2738         iwarp_info->mpa_bufs = kcalloc(data.input.rx_num_desc,
2739                                        sizeof(*iwarp_info->mpa_bufs),
2740                                        GFP_KERNEL);
2741         if (!iwarp_info->mpa_bufs) {
                     rc = -ENOMEM;
2742                 goto err;
             }
2743
2744         INIT_LIST_HEAD(&iwarp_info->mpa_buf_pending_list);
2745         INIT_LIST_HEAD(&iwarp_info->mpa_buf_list);
2746         for (i = 0; i < data.input.rx_num_desc; i++)
2747                 list_add_tail(&iwarp_info->mpa_bufs[i].list_entry,
2748                               &iwarp_info->mpa_buf_list);
2749         return rc;
2750 err:
2751         qed_iwarp_ll2_stop(p_hwfn, p_ptt);
2752
2753         return rc;
2754 }
2755
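     /* Initialize iWARP-specific state: TCP options, receive window scaling,
      * MPA negotiation parameters and the EP/listener lists; then register
      * the async event callback and bring up the LL2 connections.
      */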
2756 int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
2757                     struct qed_rdma_start_in_params *params)
2758 {
2759         struct qed_iwarp_info *iwarp_info;
2760         u32 rcv_wnd_size;
2761
2762         iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2763
2764         iwarp_info->tcp_flags = QED_IWARP_TS_EN;
2765         rcv_wnd_size = QED_IWARP_RCV_WND_SIZE_DEF;
2766
2767         /* A scale of 0 corresponds to ilog2(QED_IWARP_RCV_WND_SIZE_MIN) */
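             /* e.g. for the 256KB default: scale = ilog2(256K) - ilog2(0xffff)
              * = 18 - 15 = 3, and rcv_wnd_size becomes 256K >> 3 = 32K,
              * advertised with a TCP window scale of 3.
              */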
2768         iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) -
2769             ilog2(QED_IWARP_RCV_WND_SIZE_MIN);
2770         iwarp_info->rcv_wnd_size = rcv_wnd_size >> iwarp_info->rcv_wnd_scale;
2771         iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED;
2772         iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
2773
2774         iwarp_info->peer2peer = QED_IWARP_PARAM_P2P;
2775
2776         iwarp_info->rtr_type =  MPA_RTR_TYPE_ZERO_SEND |
2777                                 MPA_RTR_TYPE_ZERO_WRITE |
2778                                 MPA_RTR_TYPE_ZERO_READ;
2779
2780         spin_lock_init(&p_hwfn->p_rdma_info->iwarp.qp_lock);
2781         INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_list);
2782         INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.listen_list);
2783
2784         qed_spq_register_async_cb(p_hwfn, PROTOCOLID_IWARP,
2785                                   qed_iwarp_async_event);
2786         qed_ooo_setup(p_hwfn);
2787
2788         return qed_iwarp_ll2_start(p_hwfn, params, p_ptt);
2789 }
2790
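     /* Tear down iWARP: wait for all connection CIDs to be released before
      * unregistering the async callback and stopping the LL2 connections.
      */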
2791 int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2792 {
2793         int rc;
2794
2795         qed_iwarp_free_prealloc_ep(p_hwfn);
2796         rc = qed_iwarp_wait_for_all_cids(p_hwfn);
2797         if (rc)
2798                 return rc;
2799
2800         qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_IWARP);
2801
2802         return qed_iwarp_ll2_stop(p_hwfn, p_ptt);
2803 }
2804
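     /* Handle the async completion of a Close Connection ramrod: move the QP
      * to the error state, unlink the EP and report a CLOSE event upward.
      */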
2805 void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn,
2806                            struct qed_iwarp_ep *ep, u8 fw_return_code)
2807 {
2808         struct qed_iwarp_cm_event_params params;
2809
2810         qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_ERROR, true);
2811
2812         params.event = QED_IWARP_EVENT_CLOSE;
2813         params.ep_context = ep;
2814         params.cm_info = &ep->cm_info;
2815         params.status = (fw_return_code == IWARP_QP_IN_ERROR_GOOD_CLOSE) ?
2816                          0 : -ECONNRESET;
2817
2818         ep->state = QED_IWARP_EP_CLOSED;
2819         spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2820         list_del(&ep->list_entry);
2821         spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2822
2823         ep->event_cb(ep->cb_context, &params);
2824 }
2825
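     /* Map a FW exception code to the corresponding CM event and, when one
      * exists, forward it to the upper layer via the EP event callback.
      */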
2826 void qed_iwarp_exception_received(struct qed_hwfn *p_hwfn,
2827                                   struct qed_iwarp_ep *ep, int fw_ret_code)
2828 {
2829         struct qed_iwarp_cm_event_params params;
2830         bool event_cb = false;
2831
2832         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x) fw_ret_code=%d\n",
2833                    ep->cid, fw_ret_code);
2834
2835         switch (fw_ret_code) {
2836         case IWARP_EXCEPTION_DETECTED_LLP_CLOSED:
2837                 params.status = 0;
2838                 params.event = QED_IWARP_EVENT_DISCONNECT;
2839                 event_cb = true;
2840                 break;
2841         case IWARP_EXCEPTION_DETECTED_LLP_RESET:
2842                 params.status = -ECONNRESET;
2843                 params.event = QED_IWARP_EVENT_DISCONNECT;
2844                 event_cb = true;
2845                 break;
2846         case IWARP_EXCEPTION_DETECTED_RQ_EMPTY:
2847                 params.event = QED_IWARP_EVENT_RQ_EMPTY;
2848                 event_cb = true;
2849                 break;
2850         case IWARP_EXCEPTION_DETECTED_IRQ_FULL:
2851                 params.event = QED_IWARP_EVENT_IRQ_FULL;
2852                 event_cb = true;
2853                 break;
2854         case IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT:
2855                 params.event = QED_IWARP_EVENT_LLP_TIMEOUT;
2856                 event_cb = true;
2857                 break;
2858         case IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR:
2859                 params.event = QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR;
2860                 event_cb = true;
2861                 break;
2862         case IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW:
2863                 params.event = QED_IWARP_EVENT_CQ_OVERFLOW;
2864                 event_cb = true;
2865                 break;
2866         case IWARP_EXCEPTION_DETECTED_LOCAL_CATASTROPHIC:
2867                 params.event = QED_IWARP_EVENT_QP_CATASTROPHIC;
2868                 event_cb = true;
2869                 break;
2870         case IWARP_EXCEPTION_DETECTED_LOCAL_ACCESS_ERROR:
2871                 params.event = QED_IWARP_EVENT_LOCAL_ACCESS_ERROR;
2872                 event_cb = true;
2873                 break;
2874         case IWARP_EXCEPTION_DETECTED_REMOTE_OPERATION_ERROR:
2875                 params.event = QED_IWARP_EVENT_REMOTE_OPERATION_ERROR;
2876                 event_cb = true;
2877                 break;
2878         case IWARP_EXCEPTION_DETECTED_TERMINATE_RECEIVED:
2879                 params.event = QED_IWARP_EVENT_TERMINATE_RECEIVED;
2880                 event_cb = true;
2881                 break;
2882         default:
2883                 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2884                            "Unhandled exception received...fw_ret_code=%d\n",
2885                            fw_ret_code);
2886                 break;
2887         }
2888
2889         if (event_cb) {
2890                 params.ep_context = ep;
2891                 params.cm_info = &ep->cm_info;
2892                 ep->event_cb(ep->cb_context, &params);
2893         }
2894 }
2895
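     /* Handle a failed TCP connect: translate the FW return code into an
      * errno. A passive EP is quietly recycled; an active EP reports
      * ACTIVE_COMPLETE with the failure status to the upper layer.
      */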
2896 static void
2897 qed_iwarp_tcp_connect_unsuccessful(struct qed_hwfn *p_hwfn,
2898                                    struct qed_iwarp_ep *ep, u8 fw_return_code)
2899 {
2900         struct qed_iwarp_cm_event_params params;
2901
2902         memset(&params, 0, sizeof(params));
2903         params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE;
2904         params.ep_context = ep;
2905         params.cm_info = &ep->cm_info;
2906         ep->state = QED_IWARP_EP_CLOSED;
2907
2908         switch (fw_return_code) {
2909         case IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET:
2910                 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2911                            "%s(0x%x) TCP connect got invalid packet\n",
2912                            QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2913                 params.status = -ECONNRESET;
2914                 break;
2915         case IWARP_CONN_ERROR_TCP_CONNECTION_RST:
2916                 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2917                            "%s(0x%x) TCP Connection Reset\n",
2918                            QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2919                 params.status = -ECONNRESET;
2920                 break;
2921         case IWARP_CONN_ERROR_TCP_CONNECT_TIMEOUT:
2922                 DP_NOTICE(p_hwfn, "%s(0x%x) TCP timeout\n",
2923                           QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2924                 params.status = -EBUSY;
2925                 break;
2926         case IWARP_CONN_ERROR_MPA_NOT_SUPPORTED_VER:
2927                 DP_NOTICE(p_hwfn, "%s(0x%x) MPA version not supported\n",
2928                           QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2929                 params.status = -ECONNREFUSED;
2930                 break;
2931         case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
2932                 DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n",
2933                           QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2934                 params.status = -ECONNRESET;
2935                 break;
2936         default:
2937                 DP_ERR(p_hwfn,
2938                        "%s(0x%x) Unexpected return code tcp connect: %d\n",
2939                        QED_IWARP_CONNECT_MODE_STRING(ep),
2940                        ep->tcp_cid, fw_return_code);
2941                 params.status = -ECONNRESET;
2942                 break;
2943         }
2944
2945         if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
2946                 ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
2947                 qed_iwarp_return_ep(p_hwfn, ep);
2948         } else {
2949                 ep->event_cb(ep->cb_context, &params);
2950                 spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2951                 list_del(&ep->list_entry);
2952                 spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2953         }
2954 }
2955
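     /* Async completion of the TCP 3-way handshake. On the passive side the
      * SYN buffer is returned to the LL2 Rx ring and MPA negotiation begins;
      * on the active side the MPA offload ramrod is sent. Failures are routed
      * to qed_iwarp_tcp_connect_unsuccessful().
      */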
2956 void
2957 qed_iwarp_connect_complete(struct qed_hwfn *p_hwfn,
2958                            struct qed_iwarp_ep *ep, u8 fw_return_code)
2959 {
2960         u8 ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle;
2961
2962         if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
2963                 /* Done with the SYN packet, post back to ll2 rx */
2964                 qed_iwarp_ll2_post_rx(p_hwfn, ep->syn, ll2_syn_handle);
2965
2966                 ep->syn = NULL;
2967
2968                 /* If the connect failed, the upper layer doesn't know about it */
2969                 if (fw_return_code == RDMA_RETURN_OK)
2970                         qed_iwarp_mpa_received(p_hwfn, ep);
2971                 else
2972                         qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep,
2973                                                            fw_return_code);
2974         } else {
2975                 if (fw_return_code == RDMA_RETURN_OK)
2976                         qed_iwarp_mpa_offload(p_hwfn, ep);
2977                 else
2978                         qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep,
2979                                                            fw_return_code);
2980         }
2981 }
2982
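     /* Validate the EP signature before acting on an async event, guarding
      * against a stale or corrupted FW handle.
      */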
2983 static inline bool
2984 qed_iwarp_check_ep_ok(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
2985 {
2986         if (!ep || (ep->sig != QED_EP_SIG)) {
2987                 DP_ERR(p_hwfn, "Async event received on invalid ep=%p\n", ep);
2988                 return false;
2989         }
2990
2991         return true;
2992 }
2993
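     /* Dispatch async events arriving on the iWARP event ring. The FW handle
      * carries the EP pointer, except for CID_CLEANED events where the low
      * dword carries the CID itself.
      */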
2994 static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
2995                                  u8 fw_event_code, u16 echo,
2996                                  union event_ring_data *data,
2997                                  u8 fw_return_code)
2998 {
2999         struct regpair *fw_handle = &data->rdma_data.async_handle;
3000         struct qed_iwarp_ep *ep = NULL;
3001         u16 cid;
3002
3003         ep = (struct qed_iwarp_ep *)(uintptr_t)HILO_64(fw_handle->hi,
3004                                                        fw_handle->lo);
3005
3006         switch (fw_event_code) {
3007         case IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE:
3008                 /* Async completion after TCP 3-way handshake */
3009                 if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3010                         return -EINVAL;
3011                 DP_VERBOSE(p_hwfn,
3012                            QED_MSG_RDMA,
3013                            "EP(0x%x) IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE fw_ret_code=%d\n",
3014                            ep->tcp_cid, fw_return_code);
3015                 qed_iwarp_connect_complete(p_hwfn, ep, fw_return_code);
3016                 break;
3017         case IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED:
3018                 if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3019                         return -EINVAL;
3020                 DP_VERBOSE(p_hwfn,
3021                            QED_MSG_RDMA,
3022                            "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED fw_ret_code=%d\n",
3023                            ep->cid, fw_return_code);
3024                 qed_iwarp_exception_received(p_hwfn, ep, fw_return_code);
3025                 break;
3026         case IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE:
3027                 /* Async completion for Close Connection ramrod */
3028                 if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3029                         return -EINVAL;
3030                 DP_VERBOSE(p_hwfn,
3031                            QED_MSG_RDMA,
3032                            "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE fw_ret_code=%d\n",
3033                            ep->cid, fw_return_code);
3034                 qed_iwarp_qp_in_error(p_hwfn, ep, fw_return_code);
3035                 break;
3036         case IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED:
3037                 /* Async event for active side only */
3038                 if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3039                         return -EINVAL;
3040                 DP_VERBOSE(p_hwfn,
3041                            QED_MSG_RDMA,
3042                            "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_MPA_REPLY_ARRIVED fw_ret_code=%d\n",
3043                            ep->cid, fw_return_code);
3044                 qed_iwarp_mpa_reply_arrived(p_hwfn, ep);
3045                 break;
3046         case IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE:
3047                 if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3048                         return -EINVAL;
3049                 DP_VERBOSE(p_hwfn,
3050                            QED_MSG_RDMA,
3051                            "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE fw_ret_code=%d\n",
3052                            ep->cid, fw_return_code);
3053                 qed_iwarp_mpa_complete(p_hwfn, ep, fw_return_code);
3054                 break;
3055         case IWARP_EVENT_TYPE_ASYNC_CID_CLEANED:
3056                 cid = (u16)le32_to_cpu(fw_handle->lo);
3057                 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
3058                            "(0x%x)IWARP_EVENT_TYPE_ASYNC_CID_CLEANED\n", cid);
3059                 qed_iwarp_cid_cleaned(p_hwfn, cid);
3060
3061                 break;
3062         case IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW:
3063                 DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW\n");
3064
3065                 p_hwfn->p_rdma_info->events.affiliated_event(
3066                         p_hwfn->p_rdma_info->events.context,
3067                         QED_IWARP_EVENT_CQ_OVERFLOW,
3068                         (void *)fw_handle);
3069                 break;
3070         default:
3071                 DP_ERR(p_hwfn, "Received unexpected async iwarp event %d\n",
3072                        fw_event_code);
3073                 return -EINVAL;
3074         }
3075         return 0;
3076 }
3077
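     /* Register a listener for incoming iWARP connection requests: record
      * the IP/port/vlan and callback, and add the listener to the iWARP
      * listen_list so arriving SYN packets can be matched against it.
      */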
3078 int
3079 qed_iwarp_create_listen(void *rdma_cxt,
3080                         struct qed_iwarp_listen_in *iparams,
3081                         struct qed_iwarp_listen_out *oparams)
3082 {
3083         struct qed_hwfn *p_hwfn = rdma_cxt;
3084         struct qed_iwarp_listener *listener;
3085
3086         listener = kzalloc(sizeof(*listener), GFP_KERNEL);
3087         if (!listener)
3088                 return -ENOMEM;
3089
3090         listener->ip_version = iparams->ip_version;
3091         memcpy(listener->ip_addr, iparams->ip_addr, sizeof(listener->ip_addr));
3092         listener->port = iparams->port;
3093         listener->vlan = iparams->vlan;
3094
3095         listener->event_cb = iparams->event_cb;
3096         listener->cb_context = iparams->cb_context;
3097         listener->max_backlog = iparams->max_backlog;
3098         oparams->handle = listener;
3099
3100         spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3101         list_add_tail(&listener->list_entry,
3102                       &p_hwfn->p_rdma_info->iwarp.listen_list);
3103         spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3104
3105         DP_VERBOSE(p_hwfn,
3106                    QED_MSG_RDMA,
3107                    "callback=%p handle=%p ip=%x:%x:%x:%x port=0x%x vlan=0x%x\n",
3108                    listener->event_cb,
3109                    listener,
3110                    listener->ip_addr[0],
3111                    listener->ip_addr[1],
3112                    listener->ip_addr[2],
3113                    listener->ip_addr[3], listener->port, listener->vlan);
3114
3115         return 0;
3116 }
3117
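     /* Remove a listener previously created by qed_iwarp_create_listen() and
      * free it; the handle is the one returned in oparams->handle.
      */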
3118 int qed_iwarp_destroy_listen(void *rdma_cxt, void *handle)
3119 {
3120         struct qed_iwarp_listener *listener = handle;
3121         struct qed_hwfn *p_hwfn = rdma_cxt;
3122
3123         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "handle=%p\n", handle);
3124
3125         spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3126         list_del(&listener->list_entry);
3127         spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3128
3129         kfree(listener);
3130
3131         return 0;
3132 }
3133
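     /* Post the MPA_OFFLOAD_SEND_RTR ramrod on the EP's QP, instructing the
      * FW to transmit the MPA v2 ready-to-receive (RTR) message.
      */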
3134 int qed_iwarp_send_rtr(void *rdma_cxt, struct qed_iwarp_send_rtr_in *iparams)
3135 {
3136         struct qed_hwfn *p_hwfn = rdma_cxt;
3137         struct qed_sp_init_data init_data;
3138         struct qed_spq_entry *p_ent;
3139         struct qed_iwarp_ep *ep;
3140         struct qed_rdma_qp *qp;
3141         int rc;
3142
3143         ep = iparams->ep_context;
3144         if (!ep) {
3145                 DP_ERR(p_hwfn, "Ep Context receive in send_rtr is NULL\n");
3146                 return -EINVAL;
3147         }
3148
3149         qp = ep->qp;
3150
3151         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
3152                    qp->icid, ep->tcp_cid);
3153
3154         memset(&init_data, 0, sizeof(init_data));
3155         init_data.cid = qp->icid;
3156         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
3157         init_data.comp_mode = QED_SPQ_MODE_CB;
3158
3159         rc = qed_sp_init_request(p_hwfn, &p_ent,
3160                                  IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR,
3161                                  PROTOCOLID_IWARP, &init_data);
3162
3163         if (rc)
3164                 return rc;
3165
3166         rc = qed_spq_post(p_hwfn, p_ent, NULL);
3167
3168         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = 0x%x\n", rc);
3169
3170         return rc;
3171 }
3172
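     /* Report the QP state through the common RDMA query path by translating
      * the iWARP state to its RoCE equivalent.
      */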
3173 void
3174 qed_iwarp_query_qp(struct qed_rdma_qp *qp,
3175                    struct qed_rdma_query_qp_out_params *out_params)
3176 {
3177         out_params->state = qed_iwarp2roce_state(qp->iwarp_state);
3178 }