/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/udp.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include <linux/qed/qed_if.h>
#include <linux/qed/qed_roce_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_cm.h"
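
/* Advance the software-tracked GSI consumer index, wrapping at the
 * work-request ring size.
 */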
void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info)
{
	info->gsi_cons = (info->gsi_cons + 1) % info->max_wr;
}
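
/* Remember the GSI QP and its send/receive CQs in the device structure so
 * the LL2 callbacks and the GSI poll routine can reach them later.
 */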
void qedr_store_gsi_qp_cq(struct qedr_dev *dev, struct qedr_qp *qp,
			  struct ib_qp_init_attr *attrs)
{
	dev->gsi_qp_created = 1;
	dev->gsi_sqcq = get_qedr_cq(attrs->send_cq);
	dev->gsi_rqcq = get_qedr_cq(attrs->recv_cq);
	dev->gsi_qp = qp;
}
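
/* LL2 transmit-completion callback: free the DMA-coherent UD header buffer
 * built for this packet, advance the SQ GSI consumer index and notify the
 * send CQ's completion handler, if one is armed.
 */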
void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt)
{
	struct qedr_dev *dev = (struct qedr_dev *)_qdev;
	struct qedr_cq *cq = dev->gsi_sqcq;
	struct qedr_qp *qp = dev->gsi_qp;
	unsigned long flags;

	DP_DEBUG(dev, QEDR_MSG_GSI,
		 "LL2 TX CB: gsi_sqcq=%p, gsi_rqcq=%p, gsi_cons=%d, ibcq_comp=%s\n",
		 dev->gsi_sqcq, dev->gsi_rqcq, qp->sq.gsi_cons,
		 cq->ibcq.comp_handler ? "Yes" : "No");

	dma_free_coherent(&dev->pdev->dev, pkt->header.len, pkt->header.vaddr,
			  pkt->header.baddr);
	kfree(pkt);

	spin_lock_irqsave(&qp->q_lock, flags);
	qedr_inc_sw_gsi_cons(&qp->sq);
	spin_unlock_irqrestore(&qp->q_lock, flags);

	if (cq->ibcq.comp_handler)
		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
}
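
/* LL2 receive callback: record the completion status, VLAN, payload length
 * and source MAC for the current RQ entry, advance the RQ GSI consumer index
 * and notify the receive CQ's completion handler, if one is armed.
 */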
void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
		    struct qed_roce_ll2_rx_params *params)
{
	struct qedr_dev *dev = (struct qedr_dev *)_dev;
	struct qedr_cq *cq = dev->gsi_rqcq;
	struct qedr_qp *qp = dev->gsi_qp;
	unsigned long flags;

	spin_lock_irqsave(&qp->q_lock, flags);

	qp->rqe_wr_id[qp->rq.gsi_cons].rc = params->rc;
	qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = params->vlan_id;
	qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length = pkt->payload[0].len;
	ether_addr_copy(qp->rqe_wr_id[qp->rq.gsi_cons].smac, params->smac);

	qedr_inc_sw_gsi_cons(&qp->rq);

	spin_unlock_irqrestore(&qp->q_lock, flags);

	if (cq->ibcq.comp_handler)
		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
}
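
/* Release the firmware objects of the CQ(s) attached to the GSI QP; once the
 * QP is created, GSI completions are generated by the driver itself.
 */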
static void qedr_destroy_gsi_cq(struct qedr_dev *dev,
				struct ib_qp_init_attr *attrs)
{
	struct qed_rdma_destroy_cq_in_params iparams;
	struct qed_rdma_destroy_cq_out_params oparams;
	struct qedr_cq *cq;

	cq = get_qedr_cq(attrs->send_cq);
	iparams.icid = cq->icid;
	dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
	dev->ops->common->chain_free(dev->cdev, &cq->pbl);

	cq = get_qedr_cq(attrs->recv_cq);
	/* if a dedicated recv_cq was used, delete it too */
	if (iparams.icid != cq->icid) {
		iparams.icid = cq->icid;
		dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
		dev->ops->common->chain_free(dev->cdev, &cq->pbl);
	}
}
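
/* Validate the requested GSI QP capabilities against the driver limits. */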
static inline int qedr_check_gsi_qp_attrs(struct qedr_dev *dev,
					  struct ib_qp_init_attr *attrs)
{
	if (attrs->cap.max_recv_sge > QEDR_GSI_MAX_RECV_SGE) {
		DP_ERR(dev, " create gsi qp: failed. max_recv_sge is larger the max %d>%d\n",
		       attrs->cap.max_recv_sge, QEDR_GSI_MAX_RECV_SGE);
		return -EINVAL;
	}
	if (attrs->cap.max_recv_wr > QEDR_GSI_MAX_RECV_WR) {
		DP_ERR(dev, " create gsi qp: failed. max_recv_wr is too large %d>%d\n",
		       attrs->cap.max_recv_wr, QEDR_GSI_MAX_RECV_WR);
		return -EINVAL;
	}
	if (attrs->cap.max_send_wr > QEDR_GSI_MAX_SEND_WR) {
		DP_ERR(dev, " create gsi qp: failed. max_send_wr is too large %d>%d\n",
		       attrs->cap.max_send_wr, QEDR_GSI_MAX_SEND_WR);
		return -EINVAL;
	}
	return 0;
}
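
/* Create the GSI (QP1) queue pair: check the requested attributes, start the
 * LL2 connection that carries the raw management packets, allocate the
 * software rings that track posted work requests, and convert the associated
 * CQs into driver-handled GSI CQs.
 */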
struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
				 struct ib_qp_init_attr *attrs,
				 struct qedr_qp *qp)
{
	struct qed_roce_ll2_params ll2_params;
	int rc;

	rc = qedr_check_gsi_qp_attrs(dev, attrs);
	if (rc)
		return ERR_PTR(rc);

	/* configure and start LL2 */
	memset(&ll2_params, 0, sizeof(ll2_params));
	ll2_params.max_tx_buffers = attrs->cap.max_send_wr;
	ll2_params.max_rx_buffers = attrs->cap.max_recv_wr;
	ll2_params.cbs.tx_cb = qedr_ll2_tx_cb;
	ll2_params.cbs.rx_cb = qedr_ll2_rx_cb;
	ll2_params.cb_cookie = (void *)dev;
	ll2_params.mtu = dev->ndev->mtu;
	ether_addr_copy(ll2_params.mac_address, dev->ndev->dev_addr);
	rc = dev->ops->roce_ll2_start(dev->cdev, &ll2_params);
	if (rc) {
		DP_ERR(dev, "create gsi qp: failed on ll2 start. rc=%d\n", rc);
		return ERR_PTR(rc);
	}

	/* create QP */
	qp->ibqp.qp_num = 1;
	qp->rq.max_wr = attrs->cap.max_recv_wr;
	qp->sq.max_wr = attrs->cap.max_send_wr;

	qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
				GFP_KERNEL);
	if (!qp->rqe_wr_id)
		goto err;
	qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
				GFP_KERNEL);
	if (!qp->wqe_wr_id)
		goto err;

	qedr_store_gsi_qp_cq(dev, qp, attrs);
	ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);

	/* the GSI CQ is handled by the driver so remove it from the FW */
	qedr_destroy_gsi_cq(dev, attrs);
	dev->gsi_rqcq->cq_type = QEDR_CQ_TYPE_GSI;
	dev->gsi_sqcq->cq_type = QEDR_CQ_TYPE_GSI;
	DP_DEBUG(dev, QEDR_MSG_GSI, "created GSI QP %p\n", qp);
	return &qp->ibqp;

err:
	kfree(qp->rqe_wr_id);
	rc = dev->ops->roce_ll2_stop(dev->cdev);
	if (rc)
		DP_ERR(dev, "create gsi qp: failed destroy on create\n");
	return ERR_PTR(-ENOMEM);
}
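
/* Tear down the GSI QP by stopping the LL2 connection it runs on. */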
int qedr_destroy_gsi_qp(struct qedr_dev *dev)
{
	int rc = dev->ops->roce_ll2_stop(dev->cdev);

	if (rc)
		DP_ERR(dev, "destroy gsi qp: failed (rc=%d)\n", rc);
	else
		DP_DEBUG(dev, QEDR_MSG_GSI, "destroy gsi qp: success\n");
	return rc;
}
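
/* Upper bound on the packed UD header built in software, and the well-known
 * QP number (QP1) used as the source QPN of GSI packets.
 */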
#define QEDR_MAX_UD_HEADER_SIZE	(100)
#define QEDR_GSI_QPN		(1)
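
/* Build the UD header (L2/VLAN, optional GRH or IPv4, BTH, DETH and, for
 * RoCE v2, UDP) for a GSI send WR. The RoCE version and addressing mode are
 * derived from the source GID entry referenced by the WR's address handle.
 */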
static inline int qedr_gsi_build_header(struct qedr_dev *dev,
					struct qedr_qp *qp,
					struct ib_send_wr *swr,
					struct ib_ud_header *udh,
					int *roce_mode)
{
	bool has_vlan = false, has_grh_ipv6 = true;
	struct ib_ah_attr *ah_attr = &get_qedr_ah(ud_wr(swr)->ah)->attr;
	struct ib_global_route *grh = &ah_attr->grh;
	union ib_gid sgid;
	int send_size = 0;
	u16 vlan_id = 0;
	u16 ether_type;
	struct ib_gid_attr sgid_attr;
	int rc;
	int ip_ver = 0;
	bool has_udp = false;
	int i;

	for (i = 0; i < swr->num_sge; ++i)
		send_size += swr->sg_list[i].length;

	rc = ib_get_cached_gid(qp->ibqp.device, ah_attr->port_num,
			       grh->sgid_index, &sgid, &sgid_attr);
	if (rc) {
		DP_ERR(dev,
		       "gsi post send: failed to get cached GID (port=%d, ix=%d)\n",
		       ah_attr->port_num, grh->sgid_index);
		return rc;
	}

	if (sgid_attr.ndev) {
		vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
		if (vlan_id < VLAN_CFI_MASK)
			has_vlan = true;

		dev_put(sgid_attr.ndev);
	}

	if (!memcmp(&sgid, &zgid, sizeof(sgid))) {
		DP_ERR(dev, "gsi post send: GID not found GID index %d\n",
		       ah_attr->grh.sgid_index);
		return -ENOENT;
	}

	has_udp = (sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP);
	if (!has_udp) {
		/* RoCE v1 */
		ether_type = ETH_P_IBOE;
		*roce_mode = ROCE_V1;
	} else if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
		/* RoCE v2 IPv4 */
		ip_ver = 4;
		ether_type = ETH_P_IP;
		has_grh_ipv6 = false;
		*roce_mode = ROCE_V2_IPV4;
	} else {
		/* RoCE v2 IPv6 */
		ip_ver = 6;
		ether_type = ETH_P_IPV6;
		*roce_mode = ROCE_V2_IPV6;
	}

	rc = ib_ud_header_init(send_size, false, true, has_vlan,
			       has_grh_ipv6, ip_ver, has_udp, 0, udh);
	if (rc) {
		DP_ERR(dev, "gsi post send: failed to init header\n");
		return rc;
	}

	/* ENET + VLAN headers */
	ether_addr_copy(udh->eth.dmac_h, ah_attr->dmac);
	ether_addr_copy(udh->eth.smac_h, dev->ndev->dev_addr);
	if (has_vlan) {
		udh->eth.type = htons(ETH_P_8021Q);
		udh->vlan.tag = htons(vlan_id);
		udh->vlan.type = htons(ether_type);
	} else {
		udh->eth.type = htons(ether_type);
	}

	/* BTH */
	udh->bth.solicited_event = !!(swr->send_flags & IB_SEND_SOLICITED);
	udh->bth.pkey = QEDR_ROCE_PKEY_DEFAULT;
	udh->bth.destination_qpn = htonl(ud_wr(swr)->remote_qpn);
	udh->bth.psn = htonl((qp->sq_psn++) & ((1 << 24) - 1));
	udh->bth.opcode = IB_OPCODE_UD_SEND_ONLY;

	/* DETH */
	udh->deth.qkey = htonl(0x80010000);
	udh->deth.source_qpn = htonl(QEDR_GSI_QPN);

	if (has_grh_ipv6) {
		/* GRH / IPv6 header */
		udh->grh.traffic_class = grh->traffic_class;
		udh->grh.flow_label = grh->flow_label;
		udh->grh.hop_limit = grh->hop_limit;
		udh->grh.destination_gid = grh->dgid;
		memcpy(&udh->grh.source_gid.raw, &sgid.raw,
		       sizeof(udh->grh.source_gid.raw));
	} else {
		/* IPv4 header */
		u32 ipv4_addr;

		udh->ip4.protocol = IPPROTO_UDP;
		udh->ip4.tos = htonl(ah_attr->grh.flow_label);
		udh->ip4.frag_off = htons(IP_DF);
		udh->ip4.ttl = ah_attr->grh.hop_limit;

		ipv4_addr = qedr_get_ipv4_from_gid(sgid.raw);
		udh->ip4.saddr = ipv4_addr;
		ipv4_addr = qedr_get_ipv4_from_gid(ah_attr->grh.dgid.raw);
		udh->ip4.daddr = ipv4_addr;
		/* note: checksum is calculated by the device */
	}

	/* UDP */
	if (has_udp) {
		udh->udp.sport = htons(QEDR_ROCE_V2_UDP_SPORT);
		udh->udp.dport = htons(ROCE_V2_UDP_DPORT);
		udh->udp.csum = 0;
		/* UDP length is untouched hence is zero */
	}

	return 0;
}
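
/* Allocate an LL2 packet descriptor for a GSI send: pack the UD header into
 * a DMA-coherent buffer and reference the caller's SG entries as payload
 * fragments.
 */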
static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
					struct qedr_qp *qp,
					struct ib_send_wr *swr,
					struct qed_roce_ll2_packet **p_packet)
{
	u8 ud_header_buffer[QEDR_MAX_UD_HEADER_SIZE];
	struct qed_roce_ll2_packet *packet;
	struct pci_dev *pdev = dev->pdev;
	int roce_mode, header_size;
	struct ib_ud_header udh;
	int i, rc;

	*p_packet = NULL;

	rc = qedr_gsi_build_header(dev, qp, swr, &udh, &roce_mode);
	if (rc)
		return rc;

	header_size = ib_ud_header_pack(&udh, &ud_header_buffer);

	packet = kzalloc(sizeof(*packet), GFP_ATOMIC);
	if (!packet)
		return -ENOMEM;

	packet->header.vaddr = dma_alloc_coherent(&pdev->dev, header_size,
						  &packet->header.baddr,
						  GFP_ATOMIC);
	if (!packet->header.vaddr) {
		kfree(packet);
		return -ENOMEM;
	}

	if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h))
		packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB;
	else
		packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;

	packet->roce_mode = roce_mode;
	memcpy(packet->header.vaddr, ud_header_buffer, header_size);
	packet->header.len = header_size;
	packet->n_seg = swr->num_sge;
	for (i = 0; i < packet->n_seg; i++) {
		packet->payload[i].baddr = swr->sg_list[i].addr;
		packet->payload[i].len = swr->sg_list[i].length;
	}

	*p_packet = packet;
	return 0;
}
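
/* Post a single IB_WR_SEND work request on the GSI QP. The packet is built
 * in software and handed to the LL2 interface; only one WR is accepted per
 * call.
 */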
int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		       struct ib_send_wr **bad_wr)
{
	struct qed_roce_ll2_packet *pkt = NULL;
	struct qedr_qp *qp = get_qedr_qp(ibqp);
	struct qed_roce_ll2_tx_params params;
	struct qedr_dev *dev = qp->dev;
	unsigned long flags;
	int rc;

	if (qp->state != QED_ROCE_QP_STATE_RTS) {
		*bad_wr = wr;
		DP_ERR(dev, "gsi post send: failed to post. state is %d and not QED_ROCE_QP_STATE_RTS\n",
		       qp->state);
		return -EINVAL;
	}
	if (wr->num_sge > RDMA_MAX_SGE_PER_SQ_WQE) {
		DP_ERR(dev, "gsi post send: num_sge is too large (%d>%d)\n",
		       wr->num_sge, RDMA_MAX_SGE_PER_SQ_WQE);
		rc = -EINVAL;
		goto err;
	}
	if (wr->opcode != IB_WR_SEND) {
		DP_ERR(dev, "gsi post send: failed due to unsupported opcode %d\n",
		       wr->opcode);
		rc = -EINVAL;
		goto err;
	}

	memset(&params, 0, sizeof(params));
	spin_lock_irqsave(&qp->q_lock, flags);

	rc = qedr_gsi_build_packet(dev, qp, wr, &pkt);
	if (rc) {
		spin_unlock_irqrestore(&qp->q_lock, flags);
		goto err;
	}

	rc = dev->ops->roce_ll2_tx(dev->cdev, pkt, &params);
	if (!rc) {
		qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
		qedr_inc_sw_prod(&qp->sq);
		DP_DEBUG(qp->dev, QEDR_MSG_GSI,
			 "gsi post send: opcode=%d, in_irq=%ld, irqs_disabled=%d, wr_id=%llx\n",
			 wr->opcode, in_irq(), irqs_disabled(), wr->wr_id);
	} else {
		if (rc == QED_ROCE_TX_HEAD_FAILURE) {
			/* TX failed while posting header - release resources */
			dma_free_coherent(&dev->pdev->dev, pkt->header.len,
					  pkt->header.vaddr, pkt->header.baddr);
			kfree(pkt);
		} else if (rc == QED_ROCE_TX_FRAG_FAILURE) {
			/* NTD since TX failed while posting a fragment. We will
			 * release the resources on TX callback
			 */
		}
		DP_ERR(dev, "gsi post send: failed to transmit (rc=%d)\n", rc);
		rc = -EAGAIN;
		*bad_wr = wr;
	}

	spin_unlock_irqrestore(&qp->q_lock, flags);

	if (wr->next) {
		DP_ERR(dev, "gsi post send: failed second WR. Only one WR may be passed at a time\n");
		*bad_wr = wr->next;
		rc = -EINVAL;
	}
	return rc;

err:
	*bad_wr = wr;
	return rc;
}
int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		       struct ib_recv_wr **bad_wr)
{
	struct qedr_dev *dev = get_qedr_dev(ibqp->device);
	struct qedr_qp *qp = get_qedr_qp(ibqp);
	struct qed_roce_ll2_buffer buf;
	unsigned long flags;
	int status = 0;
	int rc;

	if ((qp->state != QED_ROCE_QP_STATE_RTR) &&
	    (qp->state != QED_ROCE_QP_STATE_RTS)) {
		*bad_wr = wr;
		DP_ERR(dev, "gsi post recv: failed to post rx buffer. state is %d and not QED_ROCE_QP_STATE_RTR/S\n",
		       qp->state);
		return -EINVAL;
	}

	memset(&buf, 0, sizeof(buf));
	spin_lock_irqsave(&qp->q_lock, flags);

	while (wr) {
		if (wr->num_sge > QEDR_GSI_MAX_RECV_SGE) {
			DP_ERR(dev, "gsi post recv: failed to post rx buffer. too many sges %d>%d\n",
			       wr->num_sge, QEDR_GSI_MAX_RECV_SGE);
			goto err;
		}

		buf.baddr = wr->sg_list[0].addr;
		buf.len = wr->sg_list[0].length;

		rc = dev->ops->roce_ll2_post_rx_buffer(dev->cdev, &buf, 0, 1);
		if (rc) {
			DP_ERR(dev, "gsi post recv: failed to post rx buffer (rc=%d)\n",
			       rc);
			goto err;
		}

		memset(&qp->rqe_wr_id[qp->rq.prod], 0,
		       sizeof(qp->rqe_wr_id[qp->rq.prod]));
		qp->rqe_wr_id[qp->rq.prod].sg_list[0] = wr->sg_list[0];
		qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;

		qedr_inc_sw_prod(&qp->rq);

		wr = wr->next;
	}

	spin_unlock_irqrestore(&qp->q_lock, flags);
	return status;

err:
	spin_unlock_irqrestore(&qp->q_lock, flags);
	*bad_wr = wr;
	return -ENOMEM;
}
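
/* Poll GSI completions generated by the driver: first the receive side (with
 * status taken from the LL2 rc, plus SMAC/VLAN metadata), then the send side.
 * Returns the number of work completions written to wc.
 */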
int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
	struct qedr_cq *cq = get_qedr_cq(ibcq);
	struct qedr_qp *qp = dev->gsi_qp;
	unsigned long flags;
	int i = 0;

	spin_lock_irqsave(&cq->cq_lock, flags);

	while (i < num_entries && qp->rq.cons != qp->rq.gsi_cons) {
		memset(&wc[i], 0, sizeof(*wc));

		wc[i].qp = &qp->ibqp;
		wc[i].wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
		wc[i].opcode = IB_WC_RECV;
		wc[i].pkey_index = 0;
		wc[i].status = (qp->rqe_wr_id[qp->rq.cons].rc) ?
		    IB_WC_GENERAL_ERR : IB_WC_SUCCESS;
		/* 0 - currently only one recv sg is supported */
		wc[i].byte_len = qp->rqe_wr_id[qp->rq.cons].sg_list[0].length;
		wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK;
		ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac);
		wc[i].wc_flags |= IB_WC_WITH_SMAC;
		if (qp->rqe_wr_id[qp->rq.cons].vlan_id) {
			wc[i].wc_flags |= IB_WC_WITH_VLAN;
			wc[i].vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan_id;
		}

		qedr_inc_sw_cons(&qp->rq);
		i++;
	}

	while (i < num_entries && qp->sq.cons != qp->sq.gsi_cons) {
		memset(&wc[i], 0, sizeof(*wc));

		wc[i].qp = &qp->ibqp;
		wc[i].wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
		wc[i].opcode = IB_WC_SEND;
		wc[i].status = IB_WC_SUCCESS;

		qedr_inc_sw_cons(&qp->sq);
		i++;
	}

	spin_unlock_irqrestore(&cq->cq_lock, flags);

	DP_DEBUG(dev, QEDR_MSG_GSI,
		 "gsi poll_cq: requested entries=%d, actual=%d, qp->rq.cons=%d, qp->rq.gsi_cons=%x, qp->sq.cons=%d, qp->sq.gsi_cons=%d, qp_num=%d\n",
		 num_entries, i, qp->rq.cons, qp->rq.gsi_cons, qp->sq.cons,
		 qp->sq.gsi_cons, qp->ibqp.qp_num);

	return i;
}