/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/udp.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include <linux/qed/qed_if.h>
#include <linux/qed/qed_rdma_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_cm.h"

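/* Advance the software GSI consumer index, wrapping at the queue depth */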
void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info)
{
	info->gsi_cons = (info->gsi_cons + 1) % info->max_wr;
}

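/* Cache the GSI QP and its send/recv CQs on the device so the LL2
 * completion callbacks can locate them.
 */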
void qedr_store_gsi_qp_cq(struct qedr_dev *dev, struct qedr_qp *qp,
			  struct ib_qp_init_attr *attrs)
{
	dev->gsi_qp_created = 1;
	dev->gsi_sqcq = get_qedr_cq(attrs->send_cq);
	dev->gsi_rqcq = get_qedr_cq(attrs->recv_cq);
	dev->gsi_qp = qp;
}

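/* LL2 TX completion (also registered as the TX release callback): free the
 * DMA-coherent UD header buffer, advance the SQ GSI consumer and, if a
 * completion handler is armed, notify the send CQ.
 */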
void qedr_ll2_complete_tx_packet(void *cxt,
				 u8 connection_handle,
				 void *cookie,
				 dma_addr_t first_frag_addr,
				 bool b_last_fragment, bool b_last_packet)
{
	struct qedr_dev *dev = (struct qedr_dev *)cxt;
	struct qed_roce_ll2_packet *pkt = cookie;
	struct qedr_cq *cq = dev->gsi_sqcq;
	struct qedr_qp *qp = dev->gsi_qp;
	unsigned long flags;

	DP_DEBUG(dev, QEDR_MSG_GSI,
		 "LL2 TX CB: gsi_sqcq=%p, gsi_rqcq=%p, gsi_cons=%d, ibcq_comp=%s\n",
		 dev->gsi_sqcq, dev->gsi_rqcq, qp->sq.gsi_cons,
		 cq->ibcq.comp_handler ? "Yes" : "No");

	dma_free_coherent(&dev->pdev->dev, pkt->header.len, pkt->header.vaddr,
			  pkt->header.baddr);
	kfree(pkt);

	spin_lock_irqsave(&qp->q_lock, flags);
	qedr_inc_sw_gsi_cons(&qp->sq);
	spin_unlock_irqrestore(&qp->q_lock, flags);

	if (cq->ibcq.comp_handler)
		(*cq->ibcq.comp_handler)(&cq->ibcq, cq->ibcq.cq_context);
}

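/* LL2 RX completion: record status, VLAN, data length and source MAC
 * (carried in the LL2 opaque data) for the current RQ entry, advance the
 * RQ GSI consumer and notify the recv CQ.
 */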
void qedr_ll2_complete_rx_packet(void *cxt,
				 struct qed_ll2_comp_rx_data *data)
{
	struct qedr_dev *dev = (struct qedr_dev *)cxt;
	struct qedr_cq *cq = dev->gsi_rqcq;
	struct qedr_qp *qp = dev->gsi_qp;
	unsigned long flags;

	spin_lock_irqsave(&qp->q_lock, flags);

	qp->rqe_wr_id[qp->rq.gsi_cons].rc = data->u.data_length_error ?
		-EINVAL : 0;
	qp->rqe_wr_id[qp->rq.gsi_cons].vlan = data->vlan;
	/* note: this is the data length, i.e. the GRH is excluded */
	qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length =
		data->length.data_length;
	*((u32 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[0]) =
		ntohl(data->opaque_data_0);
	*((u16 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[4]) =
		ntohs((u16)data->opaque_data_1);

	qedr_inc_sw_gsi_cons(&qp->rq);

	spin_unlock_irqrestore(&qp->q_lock, flags);

	if (cq->ibcq.comp_handler)
		(*cq->ibcq.comp_handler)(&cq->ibcq, cq->ibcq.cq_context);
}

void qedr_ll2_release_rx_packet(void *cxt,
				u8 connection_handle,
				void *cookie,
				dma_addr_t rx_buf_addr, bool b_last_packet)
{
	/* Do nothing... */
}

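/* The GSI CQs are serviced by the driver, so release the FW CQ objects
 * that were created for them (the recv CQ may be shared with the send CQ).
 */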
static void qedr_destroy_gsi_cq(struct qedr_dev *dev,
				struct ib_qp_init_attr *attrs)
{
	struct qed_rdma_destroy_cq_in_params iparams;
	struct qed_rdma_destroy_cq_out_params oparams;
	struct qedr_cq *cq;

	cq = get_qedr_cq(attrs->send_cq);
	iparams.icid = cq->icid;
	dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
	dev->ops->common->chain_free(dev->cdev, &cq->pbl);

	cq = get_qedr_cq(attrs->recv_cq);
	/* if a dedicated recv_cq was used, delete it too */
	if (iparams.icid != cq->icid) {
		iparams.icid = cq->icid;
		dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
		dev->ops->common->chain_free(dev->cdev, &cq->pbl);
	}
}

static inline int qedr_check_gsi_qp_attrs(struct qedr_dev *dev,
					  struct ib_qp_init_attr *attrs)
{
	if (attrs->cap.max_recv_sge > QEDR_GSI_MAX_RECV_SGE) {
		DP_ERR(dev,
		       "create gsi qp: failed. max_recv_sge is larger than the max %d>%d\n",
		       attrs->cap.max_recv_sge, QEDR_GSI_MAX_RECV_SGE);
		return -EINVAL;
	}

	if (attrs->cap.max_recv_wr > QEDR_GSI_MAX_RECV_WR) {
		DP_ERR(dev,
		       "create gsi qp: failed. max_recv_wr is too large %d>%d\n",
		       attrs->cap.max_recv_wr, QEDR_GSI_MAX_RECV_WR);
		return -EINVAL;
	}

	if (attrs->cap.max_send_wr > QEDR_GSI_MAX_SEND_WR) {
		DP_ERR(dev,
		       "create gsi qp: failed. max_send_wr is too large %d>%d\n",
		       attrs->cap.max_send_wr, QEDR_GSI_MAX_SEND_WR);
		return -EINVAL;
	}

	return 0;
}

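/* Post a GSI packet to LL2: the first BD carries the packed UD header,
 * followed by one fragment per payload SGE.
 */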
static int qedr_ll2_post_tx(struct qedr_dev *dev,
			    struct qed_roce_ll2_packet *pkt)
{
	enum qed_ll2_roce_flavor_type roce_flavor;
	struct qed_ll2_tx_pkt_info ll2_tx_pkt;
	int rc;
	int i;

	memset(&ll2_tx_pkt, 0, sizeof(ll2_tx_pkt));

	roce_flavor = (pkt->roce_mode == ROCE_V1) ?
	    QED_LL2_ROCE : QED_LL2_RROCE;

	if (pkt->roce_mode == ROCE_V2_IPV4)
		ll2_tx_pkt.enable_ip_cksum = 1;

	ll2_tx_pkt.num_of_bds = 1 /* hdr */ + pkt->n_seg;
	ll2_tx_pkt.vlan = 0;
	ll2_tx_pkt.tx_dest = pkt->tx_dest;
	ll2_tx_pkt.qed_roce_flavor = roce_flavor;
	ll2_tx_pkt.first_frag = pkt->header.baddr;
	ll2_tx_pkt.first_frag_len = pkt->header.len;
	ll2_tx_pkt.cookie = pkt;

	/* tx header */
	rc = dev->ops->ll2_prepare_tx_packet(dev->rdma_ctx,
					     dev->gsi_ll2_handle,
					     &ll2_tx_pkt, 1);
	if (rc) {
		/* TX failed while posting header - release resources */
		dma_free_coherent(&dev->pdev->dev, pkt->header.len,
				  pkt->header.vaddr, pkt->header.baddr);
		kfree(pkt);

		DP_ERR(dev, "roce ll2 tx: header failed (rc=%d)\n", rc);
		return rc;
	}

	/* tx payload */
	for (i = 0; i < pkt->n_seg; i++) {
		rc = dev->ops->ll2_set_fragment_of_tx_packet(
			dev->rdma_ctx,
			dev->gsi_ll2_handle,
			pkt->payload[i].baddr,
			pkt->payload[i].len);

		if (rc) {
			/* If this fails there is not much we can do: part of
			 * the packet has already been posted, so the memory
			 * cannot be freed here; it is released on completion.
			 */
			DP_ERR(dev, "ll2 tx: payload failed (rc=%d)\n", rc);
			return rc;
		}
	}

	return 0;
}

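/* Tear down the GSI LL2 connection: drop the MAC filter, then terminate
 * and release the connection handle.
 */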
int qedr_ll2_stop(struct qedr_dev *dev)
{
	int rc;

	if (dev->gsi_ll2_handle == QED_LL2_UNUSED_HANDLE)
		return 0;

	/* remove LL2 MAC address filter */
	rc = dev->ops->ll2_set_mac_filter(dev->cdev,
					  dev->gsi_ll2_mac_address, NULL);

	rc = dev->ops->ll2_terminate_connection(dev->rdma_ctx,
						dev->gsi_ll2_handle);
	if (rc)
		DP_ERR(dev, "Failed to terminate LL2 connection (rc=%d)\n", rc);

	dev->ops->ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle);

	dev->gsi_ll2_handle = QED_LL2_UNUSED_HANDLE;

	return rc;
}

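/* Acquire and establish a RoCE LL2 connection sized for the GSI QP's
 * work-request limits, then install an LL2 MAC filter for the netdev
 * address.
 */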
int qedr_ll2_start(struct qedr_dev *dev,
		   struct ib_qp_init_attr *attrs, struct qedr_qp *qp)
{
	struct qed_ll2_acquire_data data;
	struct qed_ll2_cbs cbs;
	int rc;

	/* configure and start LL2 */
	cbs.rx_comp_cb = qedr_ll2_complete_rx_packet;
	cbs.tx_comp_cb = qedr_ll2_complete_tx_packet;
	cbs.rx_release_cb = qedr_ll2_release_rx_packet;
	cbs.tx_release_cb = qedr_ll2_complete_tx_packet;
	cbs.cookie = dev;

	memset(&data, 0, sizeof(data));
	data.input.conn_type = QED_LL2_TYPE_ROCE;
	data.input.mtu = dev->ndev->mtu;
	data.input.rx_num_desc = attrs->cap.max_recv_wr;
	data.input.rx_drop_ttl0_flg = true;
	data.input.rx_vlan_removal_en = false;
	data.input.tx_num_desc = attrs->cap.max_send_wr;
	data.input.tx_tc = 0;
	data.input.tx_dest = QED_LL2_TX_DEST_NW;
	data.input.ai_err_packet_too_big = QED_LL2_DROP_PACKET;
	data.input.ai_err_no_buf = QED_LL2_DROP_PACKET;
	data.input.gsi_enable = 1;
	data.p_connection_handle = &dev->gsi_ll2_handle;
	data.cbs = &cbs;

	rc = dev->ops->ll2_acquire_connection(dev->rdma_ctx, &data);
	if (rc) {
		DP_ERR(dev,
		       "ll2 start: failed to acquire LL2 connection (rc=%d)\n",
		       rc);
		return rc;
	}

	rc = dev->ops->ll2_establish_connection(dev->rdma_ctx,
						dev->gsi_ll2_handle);
	if (rc) {
		DP_ERR(dev,
		       "ll2 start: failed to establish LL2 connection (rc=%d)\n",
		       rc);
		goto err1;
	}

	rc = dev->ops->ll2_set_mac_filter(dev->cdev, NULL, dev->ndev->dev_addr);
	if (rc)
		goto err2;

	return 0;

err2:
	dev->ops->ll2_terminate_connection(dev->rdma_ctx, dev->gsi_ll2_handle);
err1:
	dev->ops->ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle);

	return rc;
}

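/* Create the GSI (QP1) queue pair: validate the requested attributes,
 * start LL2, allocate software send/recv work-request rings and take
 * ownership of the GSI CQs from the FW.
 */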
struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
				 struct ib_qp_init_attr *attrs,
				 struct qedr_qp *qp)
{
	int rc;

	rc = qedr_check_gsi_qp_attrs(dev, attrs);
	if (rc)
		return ERR_PTR(rc);

	rc = qedr_ll2_start(dev, attrs, qp);
	if (rc) {
		DP_ERR(dev, "create gsi qp: failed on ll2 start. rc=%d\n", rc);
		return ERR_PTR(rc);
	}

	/* create QP */
	qp->ibqp.qp_num = 1;
	qp->rq.max_wr = attrs->cap.max_recv_wr;
	qp->sq.max_wr = attrs->cap.max_send_wr;

	qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
				GFP_KERNEL);
	if (!qp->rqe_wr_id)
		goto err;
	qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
				GFP_KERNEL);
	if (!qp->wqe_wr_id)
		goto err;

	qedr_store_gsi_qp_cq(dev, qp, attrs);
	ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);

	/* the GSI CQs are handled by the driver so remove them from the FW */
	qedr_destroy_gsi_cq(dev, attrs);
	dev->gsi_sqcq->cq_type = QEDR_CQ_TYPE_GSI;
	dev->gsi_rqcq->cq_type = QEDR_CQ_TYPE_GSI;

	DP_DEBUG(dev, QEDR_MSG_GSI, "created GSI QP %p\n", qp);

	return &qp->ibqp;

err:
	kfree(qp->rqe_wr_id);

	rc = qedr_ll2_stop(dev);
	if (rc)
		DP_ERR(dev, "create gsi qp: failed to stop ll2 during cleanup\n");

	return ERR_PTR(-ENOMEM);
}

int qedr_destroy_gsi_qp(struct qedr_dev *dev)
{
	return qedr_ll2_stop(dev);
}

#define QEDR_MAX_UD_HEADER_SIZE (100)
#define QEDR_GSI_QPN            (1)

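/* Build the UD header for a GSI send: Ethernet (plus VLAN when the SGID's
 * netdev carries one), then a GRH (RoCE v1 / RoCE v2 IPv6) or an IPv4
 * header (RoCE v2 IPv4), a UDP header for RoCE v2, and finally BTH/DETH.
 * The RoCE flavor is derived from the SGID type.
 */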
static inline int qedr_gsi_build_header(struct qedr_dev *dev,
					struct qedr_qp *qp,
					struct ib_send_wr *swr,
					struct ib_ud_header *udh,
					int *roce_mode)
{
	bool has_vlan = false, has_grh_ipv6 = true;
	struct rdma_ah_attr *ah_attr = &get_qedr_ah(ud_wr(swr)->ah)->attr;
	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
	union ib_gid sgid;
	int send_size = 0;
	u16 vlan_id = 0;
	u16 ether_type;
	struct ib_gid_attr sgid_attr;
	int rc;
	int ip_ver = 0;

	bool has_udp = false;
	int i;

	send_size = 0;
	for (i = 0; i < swr->num_sge; ++i)
		send_size += swr->sg_list[i].length;

	rc = ib_get_cached_gid(qp->ibqp.device, rdma_ah_get_port_num(ah_attr),
			       grh->sgid_index, &sgid, &sgid_attr);
	if (rc) {
		DP_ERR(dev,
		       "gsi post send: failed to get cached GID (port=%d, ix=%d)\n",
		       rdma_ah_get_port_num(ah_attr),
		       grh->sgid_index);
		return rc;
	}

	if (sgid_attr.ndev) {
		vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
		if (vlan_id < VLAN_CFI_MASK)
			has_vlan = true;

		dev_put(sgid_attr.ndev);
	}

	if (!memcmp(&sgid, &zgid, sizeof(sgid))) {
		DP_ERR(dev, "gsi post send: GID not found, GID index %d\n",
		       grh->sgid_index);
		return -ENOENT;
	}

	has_udp = (sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP);
	if (!has_udp) {
		/* RoCE v1 */
		ether_type = ETH_P_IBOE;
		*roce_mode = ROCE_V1;
	} else if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
		/* RoCE v2 IPv4 */
		ip_ver = 4;
		ether_type = ETH_P_IP;
		has_grh_ipv6 = false;
		*roce_mode = ROCE_V2_IPV4;
	} else {
		/* RoCE v2 IPv6 */
		ip_ver = 6;
		ether_type = ETH_P_IPV6;
		*roce_mode = ROCE_V2_IPV6;
	}

	rc = ib_ud_header_init(send_size, false, true, has_vlan,
			       has_grh_ipv6, ip_ver, has_udp, 0, udh);
	if (rc) {
		DP_ERR(dev, "gsi post send: failed to init header\n");
		return rc;
	}

	/* ENET + VLAN headers */
	ether_addr_copy(udh->eth.dmac_h, ah_attr->roce.dmac);
	ether_addr_copy(udh->eth.smac_h, dev->ndev->dev_addr);
	if (has_vlan) {
		udh->eth.type = htons(ETH_P_8021Q);
		udh->vlan.tag = htons(vlan_id);
		udh->vlan.type = htons(ether_type);
	} else {
		udh->eth.type = htons(ether_type);
	}

	/* BTH */
	udh->bth.solicited_event = !!(swr->send_flags & IB_SEND_SOLICITED);
	udh->bth.pkey = QEDR_ROCE_PKEY_DEFAULT;
	udh->bth.destination_qpn = htonl(ud_wr(swr)->remote_qpn);
	udh->bth.psn = htonl((qp->sq_psn++) & ((1 << 24) - 1));
	udh->bth.opcode = IB_OPCODE_UD_SEND_ONLY;

	/* DETH */
	udh->deth.qkey = htonl(0x80010000);
	udh->deth.source_qpn = htonl(QEDR_GSI_QPN);

	if (has_grh_ipv6) {
		/* GRH / IPv6 header */
		udh->grh.traffic_class = grh->traffic_class;
		udh->grh.flow_label = grh->flow_label;
		udh->grh.hop_limit = grh->hop_limit;
		udh->grh.destination_gid = grh->dgid;
		memcpy(&udh->grh.source_gid.raw, &sgid.raw,
		       sizeof(udh->grh.source_gid.raw));
	} else {
		/* IPv4 header */
		u32 ipv4_addr;

		udh->ip4.protocol = IPPROTO_UDP;
		udh->ip4.tos = htonl(grh->flow_label);
		udh->ip4.frag_off = htons(IP_DF);
		udh->ip4.ttl = grh->hop_limit;

		ipv4_addr = qedr_get_ipv4_from_gid(sgid.raw);
		udh->ip4.saddr = ipv4_addr;
		ipv4_addr = qedr_get_ipv4_from_gid(grh->dgid.raw);
		udh->ip4.daddr = ipv4_addr;
		/* note: checksum is calculated by the device */
	}

	/* UDP */
	if (has_udp) {
		udh->udp.sport = htons(QEDR_ROCE_V2_UDP_SPORT);
		udh->udp.dport = htons(ROCE_V2_UDP_DPORT);
		udh->udp.csum = 0;
		/* the UDP length field is left untouched, hence zero */
	}
	return 0;
}

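/* Pack the UD header into a DMA-coherent buffer and describe the payload
 * fragments for LL2; the packet is looped back when the source and
 * destination MACs match.
 */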
static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
					struct qedr_qp *qp,
					struct ib_send_wr *swr,
					struct qed_roce_ll2_packet **p_packet)
{
	u8 ud_header_buffer[QEDR_MAX_UD_HEADER_SIZE];
	struct qed_roce_ll2_packet *packet;
	struct pci_dev *pdev = dev->pdev;
	int roce_mode, header_size;
	struct ib_ud_header udh;
	int i, rc;

	*p_packet = NULL;

	rc = qedr_gsi_build_header(dev, qp, swr, &udh, &roce_mode);
	if (rc)
		return rc;

	header_size = ib_ud_header_pack(&udh, &ud_header_buffer);

	packet = kzalloc(sizeof(*packet), GFP_ATOMIC);
	if (!packet)
		return -ENOMEM;

	packet->header.vaddr = dma_alloc_coherent(&pdev->dev, header_size,
						  &packet->header.baddr,
						  GFP_ATOMIC);
	if (!packet->header.vaddr) {
		kfree(packet);
		return -ENOMEM;
	}

	if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h))
		packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB;
	else
		packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;

	packet->roce_mode = roce_mode;
	memcpy(packet->header.vaddr, ud_header_buffer, header_size);
	packet->header.len = header_size;
	packet->n_seg = swr->num_sge;
	for (i = 0; i < packet->n_seg; i++) {
		packet->payload[i].baddr = swr->sg_list[i].addr;
		packet->payload[i].len = swr->sg_list[i].length;
	}

	*p_packet = packet;

	return 0;
}

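/* Post a send WR on the GSI QP. Only a single IB_WR_SEND WR may be posted
 * per call; the QP must be in RTS.
 */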
int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		       struct ib_send_wr **bad_wr)
{
	struct qed_roce_ll2_packet *pkt = NULL;
	struct qedr_qp *qp = get_qedr_qp(ibqp);
	struct qedr_dev *dev = qp->dev;
	unsigned long flags;
	int rc;

	if (qp->state != QED_ROCE_QP_STATE_RTS) {
		*bad_wr = wr;
		DP_ERR(dev,
		       "gsi post send: failed to send. QP state is %d and not QED_ROCE_QP_STATE_RTS\n",
		       qp->state);
		return -EINVAL;
	}

	if (wr->num_sge > RDMA_MAX_SGE_PER_SQ_WQE) {
		DP_ERR(dev, "gsi post send: num_sge is too large (%d>%d)\n",
		       wr->num_sge, RDMA_MAX_SGE_PER_SQ_WQE);
		rc = -EINVAL;
		goto err;
	}

	if (wr->opcode != IB_WR_SEND) {
		DP_ERR(dev,
		       "gsi post send: failed due to unsupported opcode %d\n",
		       wr->opcode);
		rc = -EINVAL;
		goto err;
	}

	spin_lock_irqsave(&qp->q_lock, flags);

	rc = qedr_gsi_build_packet(dev, qp, wr, &pkt);
	if (rc) {
		spin_unlock_irqrestore(&qp->q_lock, flags);
		goto err;
	}

	rc = qedr_ll2_post_tx(dev, pkt);

	if (!rc) {
		qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
		qedr_inc_sw_prod(&qp->sq);
		DP_DEBUG(qp->dev, QEDR_MSG_GSI,
			 "gsi post send: opcode=%d, in_irq=%ld, irqs_disabled=%d, wr_id=%llx\n",
			 wr->opcode, in_irq(), irqs_disabled(), wr->wr_id);
	} else {
		DP_ERR(dev, "gsi post send: failed to transmit (rc=%d)\n", rc);
		rc = -EAGAIN;
		*bad_wr = wr;
	}

	spin_unlock_irqrestore(&qp->q_lock, flags);

	if (wr->next) {
		DP_ERR(dev,
		       "gsi post send: failed second WR. Only one WR may be passed at a time\n");
		*bad_wr = wr->next;
		rc = -EINVAL;
	}

	return rc;

err:
	*bad_wr = wr;
	return rc;
}

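/* Post receive buffers for the GSI QP directly to LL2; only a single SGE
 * per WR is supported and the QP must be in RTR or RTS.
 */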
int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		       struct ib_recv_wr **bad_wr)
{
	struct qedr_dev *dev = get_qedr_dev(ibqp->device);
	struct qedr_qp *qp = get_qedr_qp(ibqp);
	unsigned long flags;
	int rc = 0;

	if ((qp->state != QED_ROCE_QP_STATE_RTR) &&
	    (qp->state != QED_ROCE_QP_STATE_RTS)) {
		*bad_wr = wr;
		DP_ERR(dev,
		       "gsi post recv: failed to post rx buffer. state is %d and not QED_ROCE_QP_STATE_RTR/S\n",
		       qp->state);
		return -EINVAL;
	}

	spin_lock_irqsave(&qp->q_lock, flags);

	while (wr) {
		if (wr->num_sge > QEDR_GSI_MAX_RECV_SGE) {
			DP_ERR(dev,
			       "gsi post recv: failed to post rx buffer. too many sges %d>%d\n",
			       wr->num_sge, QEDR_GSI_MAX_RECV_SGE);
			goto err;
		}

		rc = dev->ops->ll2_post_rx_buffer(dev->rdma_ctx,
						  dev->gsi_ll2_handle,
						  wr->sg_list[0].addr,
						  wr->sg_list[0].length,
						  0 /* cookie */,
						  1 /* notify_fw */);
		if (rc) {
			DP_ERR(dev,
			       "gsi post recv: failed to post rx buffer (rc=%d)\n",
			       rc);
			goto err;
		}

		memset(&qp->rqe_wr_id[qp->rq.prod], 0,
		       sizeof(qp->rqe_wr_id[qp->rq.prod]));
		qp->rqe_wr_id[qp->rq.prod].sg_list[0] = wr->sg_list[0];
		qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;

		qedr_inc_sw_prod(&qp->rq);

		wr = wr->next;
	}

	spin_unlock_irqrestore(&qp->q_lock, flags);

	return rc;
err:
	spin_unlock_irqrestore(&qp->q_lock, flags);
	*bad_wr = wr;
	return -ENOMEM;
}

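/* Poll the GSI CQ: RQ completions are drained first, then SQ completions,
 * up to num_entries in total.
 */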
int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
	struct qedr_cq *cq = get_qedr_cq(ibcq);
	struct qedr_qp *qp = dev->gsi_qp;
	unsigned long flags;
	u16 vlan_id;
	int i = 0;

	spin_lock_irqsave(&cq->cq_lock, flags);

	while (i < num_entries && qp->rq.cons != qp->rq.gsi_cons) {
		memset(&wc[i], 0, sizeof(*wc));

		wc[i].qp = &qp->ibqp;
		wc[i].wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
		wc[i].opcode = IB_WC_RECV;
		wc[i].pkey_index = 0;
		wc[i].status = (qp->rqe_wr_id[qp->rq.cons].rc) ?
		    IB_WC_GENERAL_ERR : IB_WC_SUCCESS;
		/* 0 - currently only one recv SGE is supported */
		wc[i].byte_len = qp->rqe_wr_id[qp->rq.cons].sg_list[0].length;
		wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK;
		ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac);
		wc[i].wc_flags |= IB_WC_WITH_SMAC;

		vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan &
			  VLAN_VID_MASK;
		if (vlan_id) {
			wc[i].wc_flags |= IB_WC_WITH_VLAN;
			wc[i].vlan_id = vlan_id;
			wc[i].sl = (qp->rqe_wr_id[qp->rq.cons].vlan &
				    VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		}

		qedr_inc_sw_cons(&qp->rq);
		i++;
	}

	while (i < num_entries && qp->sq.cons != qp->sq.gsi_cons) {
		memset(&wc[i], 0, sizeof(*wc));

		wc[i].qp = &qp->ibqp;
		wc[i].wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
		wc[i].opcode = IB_WC_SEND;
		wc[i].status = IB_WC_SUCCESS;

		qedr_inc_sw_cons(&qp->sq);
		i++;
	}

	spin_unlock_irqrestore(&cq->cq_lock, flags);

	DP_DEBUG(dev, QEDR_MSG_GSI,
		 "gsi poll_cq: requested entries=%d, actual=%d, qp->rq.cons=%d, qp->rq.gsi_cons=%d, qp->sq.cons=%d, qp->sq.gsi_cons=%d, qp_num=%d\n",
		 num_entries, i, qp->rq.cons, qp->rq.gsi_cons, qp->sq.cons,
		 qp->sq.gsi_cons, qp->ibqp.qp_num);

	return i;
}