// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include <linux/qed/qed_rdma_if.h>
#include "qed_rdma.h"
#include "qed_roce.h"
#include "qed_sp.h"
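
/* Generic bitmap allocator used for the driver's RDMA id spaces (PDs,
 * DPIs, CQs, TIDs, SRQs, ...). A sketch of the typical lifecycle, assuming
 * the caller embeds the qed_bmap in its own state and serializes id
 * operations with p_rdma_info->lock:
 *
 *	struct qed_bmap map;
 *	u32 id;
 *
 *	qed_rdma_bmap_alloc(p_hwfn, &map, 64, "EXAMPLE");
 *	qed_rdma_bmap_alloc_id(p_hwfn, &map, &id);
 *	qed_bmap_release_id(p_hwfn, &map, id);
 *	qed_rdma_bmap_free(p_hwfn, &map, true);
 *
 * The final qed_rdma_bmap_free(..., true) warns about any id still set.
 */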
int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
			struct qed_bmap *bmap, u32 max_count, char *name)
{
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count);

	bmap->max_count = max_count;

	bmap->bitmap = kcalloc(BITS_TO_LONGS(max_count), sizeof(long),
			       GFP_KERNEL);
	if (!bmap->bitmap)
		return -ENOMEM;

	snprintf(bmap->name, QED_RDMA_MAX_BMAP_NAME, "%s", name);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
	return 0;
}
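
/* First-fit id allocation: find_first_zero_bit() scans from bit 0, so ids
 * are recycled in ascending order. Callers are expected to hold
 * p_rdma_info->lock around allocate/release (see e.g. qed_rdma_alloc_tid());
 * the bitmap itself is manipulated with the non-atomic __set_bit().
 */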
int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
			   struct qed_bmap *bmap, u32 *id_num)
{
	*id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count);
	if (*id_num >= bmap->max_count)
		return -EINVAL;

	__set_bit(*id_num, bmap->bitmap);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: allocated id %d\n",
		   bmap->name, *id_num);

	return 0;
}

void qed_bmap_set_id(struct qed_hwfn *p_hwfn,
		     struct qed_bmap *bmap, u32 id_num)
{
	if (id_num >= bmap->max_count)
		return;

	__set_bit(id_num, bmap->bitmap);
}

void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
			 struct qed_bmap *bmap, u32 id_num)
{
	bool b_acquired;

	if (id_num >= bmap->max_count)
		return;

	b_acquired = test_and_clear_bit(id_num, bmap->bitmap);
	if (!b_acquired) {
		DP_NOTICE(p_hwfn, "%s bitmap: id %d already released\n",
			  bmap->name, id_num);
		return;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: released id %d\n",
		   bmap->name, id_num);
}

int qed_bmap_test_id(struct qed_hwfn *p_hwfn,
		     struct qed_bmap *bmap, u32 id_num)
{
	if (id_num >= bmap->max_count)
		return -1;

	return test_bit(id_num, bmap->bitmap);
}

static bool qed_bmap_is_empty(struct qed_bmap *bmap)
{
	return bmap->max_count == find_first_bit(bmap->bitmap, bmap->max_count);
}

static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
{
	/* First sb id for RoCE is after all the l2 sb */
	return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
}

int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_info *p_rdma_info;

	p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
	if (!p_rdma_info)
		return -ENOMEM;

	spin_lock_init(&p_rdma_info->lock);

	p_hwfn->p_rdma_info = p_rdma_info;
	return 0;
}

void qed_rdma_info_free(struct qed_hwfn *p_hwfn)
{
	kfree(p_hwfn->p_rdma_info);
	p_hwfn->p_rdma_info = NULL;
}
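
/* Allocate every id bitmap the function will use. The allocation order
 * below is mirrored by the goto unwind ladder at the bottom of the
 * function: each failure label frees exactly what was successfully
 * allocated before it.
 */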
static int qed_rdma_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
	u32 num_cons, num_tasks;
	int rc = -ENOMEM;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		p_rdma_info->proto = PROTOCOLID_IWARP;
	else
		p_rdma_info->proto = PROTOCOLID_ROCE;

	num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto,
					       NULL);

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		p_rdma_info->num_qps = num_cons;
	else
		p_rdma_info->num_qps = num_cons / 2; /* 2 cids per qp */

	num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE);

	/* Each MR uses a single task */
	p_rdma_info->num_mrs = num_tasks;

	/* Queue zone lines are shared between RoCE and L2 in such a way that
	 * they can be used by each without obstructing the other.
	 */
	p_rdma_info->queue_zone_base = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
	p_rdma_info->max_queue_zones = (u16)RESC_NUM(p_hwfn, QED_L2_QUEUE);

	/* Allocate a struct with device params and fill it */
	p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
	if (!p_rdma_info->dev)
		return rc;

	/* Allocate a struct with port params and fill it */
	p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
	if (!p_rdma_info->port)
		goto free_rdma_dev;

	/* Allocate bit map for pd's */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS,
				 "PD");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate pd_map, rc = %d\n",
			   rc);
		goto free_rdma_port;
	}

	/* Allocate bit map for XRC Domains */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->xrcd_map,
				 QED_RDMA_MAX_XRCDS, "XRCD");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate xrcd_map, rc = %d\n", rc);
		goto free_pd_map;
	}

	/* Allocate DPI bitmap */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
				 p_hwfn->dpi_count, "DPI");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate DPI bitmap, rc = %d\n", rc);
		goto free_xrcd_map;
	}

	/* Allocate bitmap for cq's. The maximum number of CQs is bound to
	 * the number of connections we support. (num_qps in iWARP or
	 * num_qps/2 in RoCE).
	 */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map, num_cons, "CQ");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate cq bitmap, rc = %d\n", rc);
		goto free_dpi_map;
	}

	/* Allocate bitmap for toggle bit for cq icids
	 * We toggle the bit every time we create or resize cq for a given icid.
	 * Size needs to equal the size of the cq bmap.
	 */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
				 num_cons, "Toggle");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate toggle bits, rc = %d\n", rc);
		goto free_cq_map;
	}

	/* Allocate bitmap for itids */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map,
				 p_rdma_info->num_mrs, "MR");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate itids bitmaps, rc = %d\n", rc);
		goto free_toggle_map;
	}

	/* Allocate bitmap for cids used for qps. */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons,
				 "CID");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate cid bitmap, rc = %d\n", rc);
		goto free_tid_map;
	}

	/* Allocate bitmap for cids used for responders/requesters. */
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons,
				 "REAL_CID");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate real cid bitmap, rc = %d\n", rc);
		goto free_cid_map;
	}

	/* The first SRQ follows the last XRC SRQ. This means that the
	 * SRQ IDs start from an offset equals to max_xrc_srqs.
	 */
	p_rdma_info->srq_id_offset = p_hwfn->p_cxt_mngr->xrc_srq_count;
	rc = qed_rdma_bmap_alloc(p_hwfn,
				 &p_rdma_info->xrc_srq_map,
				 p_hwfn->p_cxt_mngr->xrc_srq_count, "XRC SRQ");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate xrc srq bitmap, rc = %d\n", rc);
		goto free_real_cid_map;
	}

	/* Allocate bitmap for srqs */
	p_rdma_info->num_srqs = p_hwfn->p_cxt_mngr->srq_count;
	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->srq_map,
				 p_rdma_info->num_srqs, "SRQ");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Failed to allocate srq bitmap, rc = %d\n", rc);
		goto free_xrc_srq_map;
	}

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		rc = qed_iwarp_alloc(p_hwfn);

	if (rc)
		goto free_srq_map;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
	return 0;

free_srq_map:
	kfree(p_rdma_info->srq_map.bitmap);
free_xrc_srq_map:
	kfree(p_rdma_info->xrc_srq_map.bitmap);
free_real_cid_map:
	kfree(p_rdma_info->real_cid_map.bitmap);
free_cid_map:
	kfree(p_rdma_info->cid_map.bitmap);
free_tid_map:
	kfree(p_rdma_info->tid_map.bitmap);
free_toggle_map:
	kfree(p_rdma_info->toggle_bits.bitmap);
free_cq_map:
	kfree(p_rdma_info->cq_map.bitmap);
free_dpi_map:
	kfree(p_rdma_info->dpi_map.bitmap);
free_xrcd_map:
	kfree(p_rdma_info->xrcd_map.bitmap);
free_pd_map:
	kfree(p_rdma_info->pd_map.bitmap);
free_rdma_port:
	kfree(p_rdma_info->port);
free_rdma_dev:
	kfree(p_rdma_info->dev);

	return rc;
}
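
/* Free a bitmap; with check set, any id still allocated is reported as a
 * leak. The dump prints 512 bits (8 u64 words) per line, matching the
 * "512 bits per line" note in the header message, with a shorter final
 * line when max_count is not a multiple of 512.
 */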
void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn,
			struct qed_bmap *bmap, bool check)
{
	int weight = bitmap_weight(bmap->bitmap, bmap->max_count);
	int last_line = bmap->max_count / (64 * 8);
	int last_item = last_line * 8 +
	    DIV_ROUND_UP(bmap->max_count % (64 * 8), 64);
	u64 *pmap = (u64 *)bmap->bitmap;
	int line, item, offset;
	u8 str_last_line[200] = { 0 };

	if (!weight || !check)
		goto end;

	DP_NOTICE(p_hwfn,
		  "%s bitmap not free - size=%d, weight=%d, 512 bits per line\n",
		  bmap->name, bmap->max_count, weight);

	/* print aligned non-zero lines, if any */
	for (item = 0, line = 0; line < last_line; line++, item += 8)
		if (bitmap_weight((unsigned long *)&pmap[item], 64 * 8))
			DP_NOTICE(p_hwfn,
				  "line 0x%04x: 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
				  line,
				  pmap[item],
				  pmap[item + 1],
				  pmap[item + 2],
				  pmap[item + 3],
				  pmap[item + 4],
				  pmap[item + 5],
				  pmap[item + 6], pmap[item + 7]);

	/* print last unaligned non-zero line, if any */
	if ((bmap->max_count % (64 * 8)) &&
	    (bitmap_weight((unsigned long *)&pmap[item],
			   bmap->max_count - item * 64))) {
		offset = sprintf(str_last_line, "line 0x%04x: ", line);
		for (; item < last_item; item++)
			offset += sprintf(str_last_line + offset,
					  "0x%016llx ", pmap[item]);
		DP_NOTICE(p_hwfn, "%s\n", str_last_line);
	}

end:
	kfree(bmap->bitmap);
	bmap->bitmap = NULL;
}

static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		qed_iwarp_resc_free(p_hwfn);

	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->pd_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->srq_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->xrc_srq_map, 1);
	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->xrcd_map, 1);

	kfree(p_rdma_info->port);
	kfree(p_rdma_info->dev);
}

static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

static void qed_rdma_free_reserved_lkey(struct qed_hwfn *p_hwfn)
{
	qed_rdma_free_tid(p_hwfn, p_hwfn->p_rdma_info->dev->reserved_lkey);
}

static void qed_rdma_free(struct qed_hwfn *p_hwfn)
{
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");

	qed_rdma_free_reserved_lkey(p_hwfn);
	qed_cxt_free_proto_ilt(p_hwfn, p_hwfn->p_rdma_info->proto);
	qed_rdma_resc_free(p_hwfn);
}
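
/* Derive a 64-bit node GUID from the 48-bit port MAC, EUI-64 style:
 * flip the universal/local bit in the first octet and insert 0xff, 0xfe
 * between the OUI and the device-specific bytes.
 */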
static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid)
{
	guid[0] = p_hwfn->hw_info.hw_mac_addr[0] ^ 2;
	guid[1] = p_hwfn->hw_info.hw_mac_addr[1];
	guid[2] = p_hwfn->hw_info.hw_mac_addr[2];
	guid[3] = 0xff;
	guid[4] = 0xfe;
	guid[5] = p_hwfn->hw_info.hw_mac_addr[3];
	guid[6] = p_hwfn->hw_info.hw_mac_addr[4];
	guid[7] = p_hwfn->hw_info.hw_mac_addr[5];
}

static void qed_rdma_init_events(struct qed_hwfn *p_hwfn,
				 struct qed_rdma_start_in_params *params)
{
	struct qed_rdma_events *events;

	events = &p_hwfn->p_rdma_info->events;

	events->unaffiliated_event = params->events->unaffiliated_event;
	events->affiliated_event = params->events->affiliated_event;
	events->context = params->events->context;
}

static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
				  struct qed_rdma_start_in_params *params)
{
	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 pci_status_control;
	u32 num_qps;

	/* Vendor specific information */
	dev->vendor_id = cdev->vendor_id;
	dev->vendor_part_id = cdev->device_id;
	dev->hw_ver = cdev->chip_rev;
	dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
		      (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);

	qed_rdma_get_guid(p_hwfn, (u8 *)&dev->sys_image_guid);
	dev->node_guid = dev->sys_image_guid;

	dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE,
			     RDMA_MAX_SGE_PER_RQ_WQE);

	if (cdev->rdma_max_sge)
		dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge);

	dev->max_srq_sge = QED_RDMA_MAX_SGE_PER_SRQ_WQE;
	if (p_hwfn->cdev->rdma_max_srq_sge) {
		dev->max_srq_sge = min_t(u32,
					 p_hwfn->cdev->rdma_max_srq_sge,
					 dev->max_srq_sge);
	}

	dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;

	dev->max_inline = (cdev->rdma_max_inline) ?
			  min_t(u32, cdev->rdma_max_inline, dev->max_inline) :
			  dev->max_inline;

	dev->max_wqe = QED_RDMA_MAX_WQE;
	dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ);

	/* The number of QPs may be higher than QED_ROCE_MAX_QPS, because
	 * it is up-aligned to 16 and then to ILT page size within qed cxt.
	 * This is OK in terms of ILT but we don't want to configure the FW
	 * above its abilities
	 */
	num_qps = ROCE_MAX_QPS;
	num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps);
	dev->max_qp = num_qps;

	/* CQs uses the same icids that QPs use hence they are limited by the
	 * number of icids. There are two icids per QP.
	 */
	dev->max_cq = num_qps * 2;

	/* The number of mrs is smaller by 1 since the first is reserved */
	dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1;
	dev->max_mr_size = QED_RDMA_MAX_MR_SIZE;

	/* The maximum CQE capacity per CQ supported.
	 * max number of cqes will be in two layer pbl,
	 * 8 is the pointer size in bytes
	 * 32 is the size of cq element in bytes
	 */
	if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS)
		dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT;
	else
		dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT;

	dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8);
	dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
	if (QED_IS_ROCE_PERSONALITY(p_hwfn))
		dev->max_pkey = QED_RDMA_MAX_P_KEY;

	dev->max_srq = p_hwfn->p_rdma_info->num_srqs;
	dev->max_srq_wr = QED_RDMA_MAX_SRQ_WQE_ELEM;
	dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
					  (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2);
	dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
					 RDMA_REQ_RD_ATOMIC_ELM_SIZE;
	dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc *
					   p_hwfn->p_rdma_info->num_qps;
	dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS;
	dev->dev_ack_delay = QED_RDMA_ACK_DELAY;
	dev->max_pd = RDMA_MAX_PDS;
	dev->max_ah = p_hwfn->p_rdma_info->num_qps;
	dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE);

	/* Set capabilities */
	dev->dev_caps = 0;
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1);
	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);

	/* Check atomic operations support in PCI configuration space. */
	pcie_capability_read_dword(cdev->pdev, PCI_EXP_DEVCTL2,
				   &pci_status_control);

	if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN)
		SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1);

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		qed_iwarp_init_devinfo(p_hwfn);
}

static void qed_rdma_init_port(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_port *port = p_hwfn->p_rdma_info->port;
	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;

	port->port_state = p_hwfn->mcp_info->link_output.link_up ?
	    QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;

	port->max_msg_size = min_t(u64,
				   (dev->max_mr_mw_fmr_size *
				    p_hwfn->cdev->rdma_max_sge),
				   BIT(31));

	port->pkey_bad_counter = 0;
}

static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	int rc = 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n");
	p_hwfn->b_rdma_enabled_in_prs = false;

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		qed_iwarp_init_hw(p_hwfn, p_ptt);
	else
		rc = qed_roce_init_hw(p_hwfn, p_ptt);

	return rc;
}
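
/* Build and post the RDMA function-init ramrod. One rdma_cnq_params entry
 * is filled per requested CNQ; the status block backing CNQ i is the first
 * SB after the L2 queues (see qed_rdma_get_sb_id()).
 */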
static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
			     struct qed_rdma_start_in_params *params,
			     struct qed_ptt *p_ptt)
{
	struct rdma_init_func_ramrod_data *p_ramrod;
	struct qed_rdma_cnq_params *p_cnq_pbl_list;
	struct rdma_init_func_hdr *p_params_header;
	struct rdma_cnq_params *p_cnq_params;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u32 cnq_id, sb_id;
	u16 igu_sb_id;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n");

	/* Save the number of cnqs for the function close ramrod */
	p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		return rc;

	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
		qed_iwarp_init_fw_ramrod(p_hwfn,
					 &p_ent->ramrod.iwarp_init_func);
		p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma;
	} else {
		p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
	}

	p_params_header = &p_ramrod->params_header;
	p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn,
							   QED_RDMA_CNQ_RAM);
	p_params_header->num_cnqs = params->desired_cnq;
	p_params_header->first_reg_srq_id =
	    cpu_to_le16(p_hwfn->p_rdma_info->srq_id_offset);
	p_params_header->reg_srq_base_addr =
	    cpu_to_le32(qed_cxt_get_ilt_page_size(p_hwfn, ILT_CLI_TSDM));
	if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS)
		p_params_header->cq_ring_mode = 1;
	else
		p_params_header->cq_ring_mode = 0;

	for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) {
		sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id);
		igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);
		p_ramrod->cnq_params[cnq_id].sb_num = cpu_to_le16(igu_sb_id);
		p_cnq_params = &p_ramrod->cnq_params[cnq_id];
		p_cnq_pbl_list = &params->cnq_pbl_list[cnq_id];

		p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi;
		p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages;

		DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr,
			       p_cnq_pbl_list->pbl_ptr);

		/* we assume here that cnq_id and qz_offset are the same */
		p_cnq_params->queue_zone_num =
		    cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base +
				cnq_id);
	}

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn,
				    &p_hwfn->p_rdma_info->tid_map, itid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
	if (rc)
		goto out;

	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
out:
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
	return rc;
}

static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
{
	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;

	/* Tid 0 will be used as the key for "reserved MR".
	 * The driver should allocate memory for it so it can be loaded but no
	 * ramrod should be passed on it.
	 */
	qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey);
	if (dev->reserved_lkey != RDMA_RESERVED_LKEY) {
		DP_NOTICE(p_hwfn,
			  "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n");
		return -EINVAL;
	}

	return 0;
}

static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  struct qed_rdma_start_in_params *params)
{
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");

	qed_rdma_init_devinfo(p_hwfn, params);
	qed_rdma_init_port(p_hwfn);
	qed_rdma_init_events(p_hwfn, params);

	rc = qed_rdma_reserve_lkey(p_hwfn);
	if (rc)
		return rc;

	rc = qed_rdma_init_hw(p_hwfn, p_ptt);
	if (rc)
		return rc;

	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
		rc = qed_iwarp_setup(p_hwfn, params);
		if (rc)
			return rc;
	} else {
		rc = qed_roce_setup(p_hwfn);
		if (rc)
			return rc;
	}

	return qed_rdma_start_fw(p_hwfn, params, p_ptt);
}
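
/* Teardown path: stop parser RoCE searches, clear bit 0 of the light-L2
 * ethertype enable register (assumed here to be the bit gating the RoCE
 * ethertype), then post the function-close ramrod and free everything.
 * Mirrors qed_rdma_start()/qed_rdma_setup().
 */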
static int qed_rdma_stop(void *rdma_cxt)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct rdma_close_func_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	struct qed_ptt *p_ptt;
	u32 ll2_ethertype_en;
	int rc = -EBUSY;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n");

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n");
		return rc;
	}

	/* Disable RoCE search */
	qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
	p_hwfn->b_rdma_enabled_in_prs = false;
	p_hwfn->p_rdma_info->active = 0;
	qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);

	ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);

	qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
	       (ll2_ethertype_en & 0xFFFE));

	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
		rc = qed_iwarp_stop(p_hwfn);
		if (rc) {
			qed_ptt_release(p_hwfn, p_ptt);
			return rc;
		}
	} else {
		qed_roce_stop(p_hwfn);
	}

	qed_ptt_release(p_hwfn, p_ptt);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	/* Stop RoCE */
	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		goto out;

	p_ramrod = &p_ent->ramrod.rdma_close_func;

	p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs;
	p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

out:
	qed_rdma_free(p_hwfn);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc);
	return rc;
}

static int qed_rdma_add_user(void *rdma_cxt,
			     struct qed_rdma_add_user_out_params *out_params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	u32 dpi_start_offset;
	u32 returned_id = 0;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n");

	/* Allocate DPI */
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map,
				    &returned_id);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	out_params->dpi = (u16)returned_id;

	/* Calculate the corresponding DPI address */
	dpi_start_offset = p_hwfn->dpi_start_offset;

	out_params->dpi_addr = p_hwfn->doorbells + dpi_start_offset +
			       out_params->dpi * p_hwfn->dpi_size;

	out_params->dpi_phys_addr = p_hwfn->db_phys_addr +
				    dpi_start_offset +
				    ((out_params->dpi) * p_hwfn->dpi_size);

	out_params->dpi_size = p_hwfn->dpi_size;
	out_params->wid_count = p_hwfn->wid_count;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc);
	return rc;
}

static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;
	struct qed_mcp_link_state *p_link_output;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n");

	/* The link state is saved only for the leading hwfn */
	p_link_output = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;

	p_port->port_state = p_link_output->link_up ? QED_RDMA_PORT_UP
	    : QED_RDMA_PORT_DOWN;

	p_port->link_speed = p_link_output->speed;

	p_port->max_msg_size = RDMA_MAX_DATA_SIZE_IN_WQE;

	return p_port;
}

static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n");

	/* Return struct with device parameters */
	return p_hwfn->p_rdma_info->dev;
}
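
/* Write a CNQ producer update into USDM RAM through the GTT window. The
 * wmb() after the 16-bit register write keeps successive producer updates
 * ordered with respect to each other.
 */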
static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
{
	struct qed_hwfn *p_hwfn;
	u16 qz_num;
	u32 addr;

	p_hwfn = (struct qed_hwfn *)rdma_cxt;

	if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) {
		DP_NOTICE(p_hwfn,
			  "queue zone offset %d is too large (max is %d)\n",
			  qz_offset, p_hwfn->p_rdma_info->max_queue_zones);
		return;
	}

	qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
	addr = GTT_BAR0_MAP_REG_USDM_RAM +
	       USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	wmb();
}

static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
				  struct qed_dev_rdma_info *info)
{
	struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);

	memset(info, 0, sizeof(*info));

	info->rdma_type = QED_IS_ROCE_PERSONALITY(p_hwfn) ?
	    QED_RDMA_TYPE_ROCE : QED_RDMA_TYPE_IWARP;

	info->user_dpm_enabled = (p_hwfn->db_bar_no_edpm == 0);

	qed_fill_dev_info(cdev, &info->common);

	return 0;
}

static int qed_rdma_get_sb_start(struct qed_dev *cdev)
{
	int feat_num;

	if (cdev->num_hwfns > 1)
		feat_num = FEAT_NUM(QED_AFFIN_HWFN(cdev), QED_PF_L2_QUE);
	else
		feat_num = FEAT_NUM(QED_AFFIN_HWFN(cdev), QED_PF_L2_QUE) *
			   cdev->num_hwfns;

	return feat_num;
}

static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev)
{
	int n_cnq = FEAT_NUM(QED_AFFIN_HWFN(cdev), QED_RDMA_CNQ);
	int n_msix = cdev->int_params.rdma_msix_cnt;

	return min_t(int, n_cnq, n_msix);
}

static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt)
{
	int limit = 0;

	/* Mark the fastpath as free/used */
	cdev->int_params.fp_initialized = cnt ? true : false;

	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) {
		DP_ERR(cdev,
		       "qed roce supports only MSI-X interrupts (detected %d).\n",
		       cdev->int_params.out.int_mode);
		return -EINVAL;
	} else if (cdev->int_params.fp_msix_cnt) {
		limit = cdev->int_params.rdma_msix_cnt;
	}

	if (!limit)
		return -ENOMEM;

	return min_t(int, cnt, limit);
}

static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
{
	memset(info, 0, sizeof(*info));

	if (!cdev->int_params.fp_initialized) {
		DP_INFO(cdev,
			"Protocol driver requested interrupt information, but its support is not yet configured\n");
		return -EINVAL;
	}

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		int msix_base = cdev->int_params.rdma_msix_base;

		info->msix_cnt = cdev->int_params.rdma_msix_cnt;
		info->msix = &cdev->int_params.msix_table[msix_base];

		DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n",
			   info->msix_cnt, msix_base);
	}

	return 0;
}

static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	u32 returned_id;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD\n");

	/* Allocates an unused protection domain */
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn,
				    &p_hwfn->p_rdma_info->pd_map, &returned_id);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	*pd = (u16)returned_id;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc);
	return rc;
}

static void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "pd = %08x\n", pd);

	/* Returns a previously allocated protection domain for reuse */
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

static int qed_rdma_alloc_xrcd(void *rdma_cxt, u16 *xrcd_id)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	u32 returned_id;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc XRCD\n");

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn,
				    &p_hwfn->p_rdma_info->xrcd_map,
				    &returned_id);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed in allocating xrcd id\n");
		return rc;
	}

	*xrcd_id = (u16)returned_id;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc XRCD - done, rc = %d\n", rc);
	return rc;
}

static void qed_rdma_free_xrcd(void *rdma_cxt, u16 xrcd_id)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "xrcd_id = %08x\n", xrcd_id);

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->xrcd_map, xrcd_id);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
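
/* CQ create/resize ramrods carry a toggle bit so the FW can tell a fresh
 * instance of an icid from a stale one: the bit flips on every create or
 * resize of the CQ that owns the icid (see qed_rdma_create_cq()).
 */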
static enum qed_rdma_toggle_bit
qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
{
	struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
	enum qed_rdma_toggle_bit toggle_bit;
	u32 bmap_id;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", icid);

	/* the function toggles the bit that is related to a given icid
	 * and returns the new toggle bit's value
	 */
	bmap_id = icid - qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto);

	spin_lock_bh(&p_info->lock);
	toggle_bit = !test_and_change_bit(bmap_id,
					  p_info->toggle_bits.bitmap);
	spin_unlock_bh(&p_info->lock);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QED_RDMA_TOGGLE_BIT_= %d\n",
		   toggle_bit);

	return toggle_bit;
}

static int qed_rdma_create_cq(void *rdma_cxt,
			      struct qed_rdma_create_cq_in_params *params,
			      u16 *icid)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
	struct rdma_create_cq_ramrod_data *p_ramrod;
	enum qed_rdma_toggle_bit toggle_bit;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u32 returned_id, start_cid;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "cq_handle = %08x%08x\n",
		   params->cq_handle_hi, params->cq_handle_lo);

	/* Allocate icid */
	spin_lock_bh(&p_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_info->cq_map, &returned_id);
	spin_unlock_bh(&p_info->lock);

	if (rc) {
		DP_NOTICE(p_hwfn, "Can't create CQ, rc = %d\n", rc);
		return rc;
	}

	start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
						p_info->proto);
	*icid = returned_id + start_cid;

	/* Check if icid requires a page allocation */
	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *icid);
	if (rc)
		goto err;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = *icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	/* Send create CQ ramrod */
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 RDMA_RAMROD_CREATE_CQ,
				 p_info->proto, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.rdma_create_cq;

	p_ramrod->cq_handle.hi = cpu_to_le32(params->cq_handle_hi);
	p_ramrod->cq_handle.lo = cpu_to_le32(params->cq_handle_lo);
	p_ramrod->dpi = cpu_to_le16(params->dpi);
	p_ramrod->is_two_level_pbl = params->pbl_two_level;
	p_ramrod->max_cqes = cpu_to_le32(params->cq_size);
	DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr);
	p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages);
	p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) +
			   params->cnq_id;
	p_ramrod->int_timeout = cpu_to_le16(params->int_timeout);

	/* toggle the bit for every resize or create cq for a given icid */
	toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);

	p_ramrod->toggle_bit = toggle_bit;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc) {
		/* restore toggle bit */
		qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
		goto err;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Created CQ, rc = %d\n", rc);
	return rc;

err:
	/* release allocated icid */
	spin_lock_bh(&p_info->lock);
	qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id);
	spin_unlock_bh(&p_info->lock);
	DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc);

	return rc;
}
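
/* Destroying a CQ is a two-way exchange: the FW returns the number of
 * outstanding CQ notifications through a DMA-coherent output buffer, so
 * that buffer must stay allocated until the ramrod completes.
 */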
static int
qed_rdma_destroy_cq(void *rdma_cxt,
		    struct qed_rdma_destroy_cq_in_params *in_params,
		    struct qed_rdma_destroy_cq_out_params *out_params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct rdma_destroy_cq_output_params *p_ramrod_res;
	struct rdma_destroy_cq_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	dma_addr_t ramrod_res_phys;
	enum protocol_type proto;
	int rc = -ENOMEM;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);

	p_ramrod_res =
	    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
			       sizeof(struct rdma_destroy_cq_output_params),
			       &ramrod_res_phys, GFP_KERNEL);
	if (!p_ramrod_res) {
		DP_NOTICE(p_hwfn,
			  "qed destroy cq failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = in_params->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
	proto = p_hwfn->p_rdma_info->proto;
	/* Send destroy CQ ramrod */
	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 RDMA_RAMROD_DESTROY_CQ,
				 proto, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.rdma_destroy_cq;
	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	out_params->num_cq_notif = le16_to_cpu(p_ramrod_res->cnq_num);

	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(struct rdma_destroy_cq_output_params),
			  p_ramrod_res, ramrod_res_phys);

	/* Free icid */
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);

	qed_bmap_release_id(p_hwfn,
			    &p_hwfn->p_rdma_info->cq_map,
			    (in_params->icid -
			     qed_cxt_get_proto_cid_start(p_hwfn, proto)));

	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc);
	return rc;

err:	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(struct rdma_destroy_cq_output_params),
			  p_ramrod_res, ramrod_res_phys);

	return rc;
}
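
/* Pack a 6-byte MAC into the three 16-bit words the FW expects: each word
 * holds two consecutive MAC bytes in network order and is then converted
 * to little endian.
 */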
void qed_rdma_set_fw_mac(__le16 *p_fw_mac, const u8 *p_qed_mac)
{
	p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]);
	p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]);
	p_fw_mac[2] = cpu_to_le16((p_qed_mac[4] << 8) + p_qed_mac[5]);
}

static int qed_rdma_query_qp(void *rdma_cxt,
			     struct qed_rdma_qp *qp,
			     struct qed_rdma_query_qp_out_params *out_params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	int rc = 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	/* The following fields are filled in from qp and not FW as they can't
	 * be modified by FW
	 */
	out_params->mtu = qp->mtu;
	out_params->dest_qp = qp->dest_qp;
	out_params->incoming_atomic_en = qp->incoming_atomic_en;
	out_params->e2e_flow_control_en = qp->e2e_flow_control_en;
	out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en;
	out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en;
	out_params->dgid = qp->dgid;
	out_params->flow_label = qp->flow_label;
	out_params->hop_limit_ttl = qp->hop_limit_ttl;
	out_params->traffic_class_tos = qp->traffic_class_tos;
	out_params->timeout = qp->ack_timeout;
	out_params->rnr_retry = qp->rnr_retry_cnt;
	out_params->retry_cnt = qp->retry_cnt;
	out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer;
	out_params->pkey_index = 0;
	out_params->max_rd_atomic = qp->max_rd_atomic_req;
	out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp;
	out_params->sqd_async = qp->sqd_async;

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		qed_iwarp_query_qp(qp, out_params);
	else
		rc = qed_roce_query_qp(p_hwfn, qp, out_params);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc);
	return rc;
}

static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	int rc = 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		rc = qed_iwarp_destroy_qp(p_hwfn, qp);
	else
		rc = qed_roce_destroy_qp(p_hwfn, qp);

	/* free qp params struct */
	kfree(qp);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP destroyed\n");
	return rc;
}
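
/* Allocate and initialize a qed_rdma_qp from the caller's parameters. For
 * RoCE the returned qp_id embeds a 0xFF marker above the 16-bit icid,
 * while for iWARP the qp_id is the icid itself.
 */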
static struct qed_rdma_qp *
qed_rdma_create_qp(void *rdma_cxt,
		   struct qed_rdma_create_qp_in_params *in_params,
		   struct qed_rdma_create_qp_out_params *out_params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct qed_rdma_qp *qp;
	u8 max_stats_queues;
	int rc;

	if (!rdma_cxt || !in_params || !out_params ||
	    !p_hwfn->p_rdma_info->active) {
		pr_err("qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
		       rdma_cxt, in_params, out_params);
		return NULL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "qed rdma create qp called with qp_handle = %08x%08x\n",
		   in_params->qp_handle_hi, in_params->qp_handle_lo);

	/* Some sanity checks... */
	max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues;
	if (in_params->stats_queue >= max_stats_queues) {
		DP_ERR(p_hwfn->cdev,
		       "qed rdma create qp failed due to invalid statistics queue %d. maximum is %d\n",
		       in_params->stats_queue, max_stats_queues);
		return NULL;
	}

	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
		if (in_params->sq_num_pages * sizeof(struct regpair) >
		    IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE) {
			DP_NOTICE(p_hwfn->cdev,
				  "Sq num pages: %d exceeds maximum\n",
				  in_params->sq_num_pages);
			return NULL;
		}
		if (in_params->rq_num_pages * sizeof(struct regpair) >
		    IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE) {
			DP_NOTICE(p_hwfn->cdev,
				  "Rq num pages: %d exceeds maximum\n",
				  in_params->rq_num_pages);
			return NULL;
		}
	}

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return NULL;

	qp->cur_state = QED_ROCE_QP_STATE_RESET;
	qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi);
	qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo);
	qp->qp_handle_async.hi = cpu_to_le32(in_params->qp_handle_async_hi);
	qp->qp_handle_async.lo = cpu_to_le32(in_params->qp_handle_async_lo);
	qp->use_srq = in_params->use_srq;
	qp->signal_all = in_params->signal_all;
	qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey;
	qp->pd = in_params->pd;
	qp->dpi = in_params->dpi;
	qp->sq_cq_id = in_params->sq_cq_id;
	qp->sq_num_pages = in_params->sq_num_pages;
	qp->sq_pbl_ptr = in_params->sq_pbl_ptr;
	qp->rq_cq_id = in_params->rq_cq_id;
	qp->rq_num_pages = in_params->rq_num_pages;
	qp->rq_pbl_ptr = in_params->rq_pbl_ptr;
	qp->srq_id = in_params->srq_id;
	qp->req_offloaded = false;
	qp->resp_offloaded = false;
	qp->e2e_flow_control_en = qp->use_srq ? false : true;
	qp->stats_queue = in_params->stats_queue;
	qp->qp_type = in_params->qp_type;
	qp->xrcd_id = in_params->xrcd_id;

	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
		rc = qed_iwarp_create_qp(p_hwfn, qp, out_params);
		qp->qpid = qp->icid;
	} else {
		qp->edpm_mode = GET_FIELD(in_params->flags, QED_ROCE_EDPM_MODE);
		rc = qed_roce_alloc_cid(p_hwfn, &qp->icid);
		qp->qpid = ((0xFF << 16) | qp->icid);
	}

	if (rc) {
		kfree(qp);
		return NULL;
	}

	out_params->icid = qp->icid;
	out_params->qp_id = qp->qpid;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Create QP, rc = %d\n", rc);
	return qp;
}

static int qed_rdma_modify_qp(void *rdma_cxt,
			      struct qed_rdma_qp *qp,
			      struct qed_rdma_modify_qp_in_params *params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	enum qed_roce_qp_state prev_state;
	int rc = 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x params->new_state=%d\n",
		   qp->icid, params->new_state);

	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}

	if (GET_FIELD(params->modify_flags,
		      QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) {
		qp->incoming_rdma_read_en = params->incoming_rdma_read_en;
		qp->incoming_rdma_write_en = params->incoming_rdma_write_en;
		qp->incoming_atomic_en = params->incoming_atomic_en;
	}

	/* Update QP structure with the updated values */
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE))
		qp->roce_mode = params->roce_mode;
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY))
		qp->pkey = params->pkey;
	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN))
		qp->e2e_flow_control_en = params->e2e_flow_control_en;
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP))
		qp->dest_qp = params->dest_qp;
	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) {
		/* Indicates that the following parameters have changed:
		 * Traffic class, flow label, hop limit, source GID,
		 * destination GID, loopback indicator
		 */
		qp->traffic_class_tos = params->traffic_class_tos;
		qp->flow_label = params->flow_label;
		qp->hop_limit_ttl = params->hop_limit_ttl;

		qp->sgid = params->sgid;
		qp->dgid = params->dgid;
		qp->udp_src_port = 0;
		qp->vlan_id = params->vlan_id;
		qp->mtu = params->mtu;
		qp->lb_indication = params->lb_indication;
		memcpy((u8 *)&qp->remote_mac_addr[0],
		       (u8 *)&params->remote_mac_addr[0], ETH_ALEN);
		if (params->use_local_mac) {
			memcpy((u8 *)&qp->local_mac_addr[0],
			       (u8 *)&params->local_mac_addr[0], ETH_ALEN);
		} else {
			memcpy((u8 *)&qp->local_mac_addr[0],
			       (u8 *)&p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
		}
	}
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN))
		qp->rq_psn = params->rq_psn;
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN))
		qp->sq_psn = params->sq_psn;
	if (GET_FIELD(params->modify_flags,
		      QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ))
		qp->max_rd_atomic_req = params->max_rd_atomic_req;
	if (GET_FIELD(params->modify_flags,
		      QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP))
		qp->max_rd_atomic_resp = params->max_rd_atomic_resp;
	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT))
		qp->ack_timeout = params->ack_timeout;
	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT))
		qp->retry_cnt = params->retry_cnt;
	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT))
		qp->rnr_retry_cnt = params->rnr_retry_cnt;
	if (GET_FIELD(params->modify_flags,
		      QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER))
		qp->min_rnr_nak_timer = params->min_rnr_nak_timer;

	qp->sqd_async = params->sqd_async;

	prev_state = qp->cur_state;
	if (GET_FIELD(params->modify_flags,
		      QED_RDMA_MODIFY_QP_VALID_NEW_STATE)) {
		qp->cur_state = params->new_state;
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qp->cur_state=%d\n",
			   qp->cur_state);
	}

	switch (qp->qp_type) {
	case QED_RDMA_QP_TYPE_XRC_INI:
		qp->has_req = true;
		break;
	case QED_RDMA_QP_TYPE_XRC_TGT:
		qp->has_resp = true;
		break;
	default:
		qp->has_req = true;
		qp->has_resp = true;
	}

	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
		enum qed_iwarp_qp_state new_state =
		    qed_roce2iwarp_state(qp->cur_state);

		rc = qed_iwarp_modify_qp(p_hwfn, qp, new_state, 0);
	} else {
		rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc);
	return rc;
}

static int
qed_rdma_register_tid(void *rdma_cxt,
		      struct qed_rdma_register_tid_in_params *params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct rdma_register_tid_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	enum rdma_tid_type tid_type;
	u8 fw_return_code;
	u16 flags = 0;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}

	if (p_hwfn->p_rdma_info->last_tid < params->itid)
		p_hwfn->p_rdma_info->last_tid = params->itid;

	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL,
		  params->pbl_two_level);

	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED,
		  false);

	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr);

	/* Don't initialize D/C field, as it may override other bits. */
	if (!(params->tid_type == QED_RDMA_TID_FMR) && !(params->dma_mr))
		SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG,
			  params->page_size_log - 12);

	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ,
		  params->remote_read);

	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE,
		  params->remote_write);

	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC,
		  params->remote_atomic);

	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE,
		  params->local_write);

	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ,
		  params->local_read);

	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND,
		  params->mw_bind);

	p_ramrod = &p_ent->ramrod.rdma_register_tid;
	p_ramrod->flags = cpu_to_le16(flags);

	SET_FIELD(p_ramrod->flags1,
		  RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG,
		  params->pbl_page_size_log - 12);

	SET_FIELD(p_ramrod->flags2, RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR,
		  params->dma_mr);

	switch (params->tid_type) {
	case QED_RDMA_TID_REGISTERED_MR:
		tid_type = RDMA_TID_REGISTERED_MR;
		break;
	case QED_RDMA_TID_FMR:
		tid_type = RDMA_TID_FMR;
		break;
	case QED_RDMA_TID_MW:
		tid_type = RDMA_TID_MW;
		break;
	default:
		rc = -EINVAL;
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		qed_sp_destroy_request(p_hwfn, p_ent);
		return rc;
	}

	SET_FIELD(p_ramrod->flags1, RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE,
		  tid_type);

	p_ramrod->itid = cpu_to_le32(params->itid);
	p_ramrod->key = params->key;
	p_ramrod->pd = cpu_to_le16(params->pd);
	p_ramrod->length_hi = (u8)(params->length >> 32);
	p_ramrod->length_lo = DMA_LO_LE(params->length);
	DMA_REGPAIR_LE(p_ramrod->va, params->vaddr);
	DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr);

	/* DIF */
	if (params->dif_enabled) {
		SET_FIELD(p_ramrod->flags2,
			  RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1);
		DMA_REGPAIR_LE(p_ramrod->dif_error_addr,
			       params->dif_error_addr);
	}

	rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
	if (rc)
		return rc;

	if (fw_return_code != RDMA_RETURN_OK) {
		DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Register TID, rc = %d\n", rc);
	return rc;
}

static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct rdma_deregister_tid_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	struct qed_ptt *p_ptt;
	u8 fw_return_code;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DEREGISTER_MR,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.rdma_deregister_tid;
	p_ramrod->itid = cpu_to_le32(itid);

	rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
		return rc;
	}

	if (fw_return_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) {
		DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
		return -EINVAL;
	} else if (fw_return_code == RDMA_RETURN_NIG_DRAIN_REQ) {
		/* Bit indicating that the TID is in use and a nig drain is
		 * required before sending the ramrod again
		 */
		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt) {
			rc = -EBUSY;
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
				   "Failed to acquire PTT\n");
			return rc;
		}

		rc = qed_mcp_drain(p_hwfn, p_ptt);
		if (rc) {
			qed_ptt_release(p_hwfn, p_ptt);
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
				   "Drain failed\n");
			return rc;
		}

		qed_ptt_release(p_hwfn, p_ptt);

		/* Resend the ramrod */
		rc = qed_sp_init_request(p_hwfn, &p_ent,
					 RDMA_RAMROD_DEREGISTER_MR,
					 p_hwfn->p_rdma_info->proto,
					 &init_data);
		if (rc) {
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
				   "Failed to init sp-element\n");
			return rc;
		}

		rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
		if (rc) {
			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
				   "Ramrod failed\n");
			return rc;
		}

		if (fw_return_code != RDMA_RETURN_OK) {
			DP_NOTICE(p_hwfn, "fw_return_code = %d\n",
				  fw_return_code);
			return rc;
		}
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "De-registered TID, rc = %d\n", rc);
	return rc;
}

static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
{
	return QED_AFFIN_HWFN(cdev);
}

static struct qed_bmap *qed_rdma_get_srq_bmap(struct qed_hwfn *p_hwfn,
					      bool is_xrc)
{
	if (is_xrc)
		return &p_hwfn->p_rdma_info->xrc_srq_map;

	return &p_hwfn->p_rdma_info->srq_map;
}

static int qed_rdma_modify_srq(void *rdma_cxt,
			       struct qed_rdma_modify_srq_in_params *in_params)
{
	struct rdma_srq_modify_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data = {};
	struct qed_hwfn *p_hwfn = rdma_cxt;
	struct qed_spq_entry *p_ent;
	u16 opaque_fid;
	int rc;

	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 RDMA_RAMROD_MODIFY_SRQ,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rdma_modify_srq;
	p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id);
	opaque_fid = p_hwfn->hw_info.opaque_fid;
	p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);
	p_ramrod->wqe_limit = cpu_to_le32(in_params->wqe_limit);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		return rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "modified SRQ id = %x, is_xrc=%u\n",
		   in_params->srq_id, in_params->is_xrc);

	return rc;
}

static int
qed_rdma_destroy_srq(void *rdma_cxt,
		     struct qed_rdma_destroy_srq_in_params *in_params)
{
	struct rdma_srq_destroy_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data = {};
	struct qed_hwfn *p_hwfn = rdma_cxt;
	struct qed_spq_entry *p_ent;
	struct qed_bmap *bmap;
	u16 opaque_fid;
	u16 offset;
	int rc;

	opaque_fid = p_hwfn->hw_info.opaque_fid;

	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 RDMA_RAMROD_DESTROY_SRQ,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rdma_destroy_srq;
	p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id);
	p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		return rc;

	bmap = qed_rdma_get_srq_bmap(p_hwfn, in_params->is_xrc);
	offset = (in_params->is_xrc) ? 0 : p_hwfn->p_rdma_info->srq_id_offset;

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, bmap, in_params->srq_id - offset);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "XRC/SRQ destroyed Id = %x, is_xrc=%u\n",
		   in_params->srq_id, in_params->is_xrc);

	return rc;
}
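
/* SRQ and XRC-SRQ ids share one firmware id space: XRC SRQs occupy
 * [0, xrc_srq_count) and regular SRQs start at srq_id_offset, which is why
 * create and destroy add or subtract the offset in the non-XRC case.
 */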
static int
qed_rdma_create_srq(void *rdma_cxt,
		    struct qed_rdma_create_srq_in_params *in_params,
		    struct qed_rdma_create_srq_out_params *out_params)
{
	struct rdma_srq_create_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data = {};
	struct qed_hwfn *p_hwfn = rdma_cxt;
	enum qed_cxt_elem_type elem_type;
	struct qed_spq_entry *p_ent;
	u16 opaque_fid, srq_id;
	struct qed_bmap *bmap;
	u32 returned_id;
	u16 offset;
	int rc;

	bmap = qed_rdma_get_srq_bmap(p_hwfn, in_params->is_xrc);
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn, bmap, &returned_id);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
	if (rc) {
		DP_NOTICE(p_hwfn,
			  "failed to allocate xrc/srq id (is_xrc=%u)\n",
			  in_params->is_xrc);
		return rc;
	}

	elem_type = (in_params->is_xrc) ? (QED_ELEM_XRC_SRQ) : (QED_ELEM_SRQ);
	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, elem_type, returned_id);
	if (rc)
		goto err;

	opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 RDMA_RAMROD_CREATE_SRQ,
				 p_hwfn->p_rdma_info->proto, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.rdma_create_srq;
	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, in_params->pbl_base_addr);
	p_ramrod->pages_in_srq_pbl = cpu_to_le16(in_params->num_pages);
	p_ramrod->pd_id = cpu_to_le16(in_params->pd_id);
	p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);
	p_ramrod->page_size = cpu_to_le16(in_params->page_size);
	DMA_REGPAIR_LE(p_ramrod->producers_addr, in_params->prod_pair_addr);
	offset = (in_params->is_xrc) ? 0 : p_hwfn->p_rdma_info->srq_id_offset;
	srq_id = (u16)returned_id + offset;
	p_ramrod->srq_id.srq_idx = cpu_to_le16(srq_id);

	if (in_params->is_xrc) {
		SET_FIELD(p_ramrod->flags,
			  RDMA_SRQ_CREATE_RAMROD_DATA_XRC_FLAG, 1);
		SET_FIELD(p_ramrod->flags,
			  RDMA_SRQ_CREATE_RAMROD_DATA_RESERVED_KEY_EN,
			  in_params->reserved_key_en);
		p_ramrod->xrc_srq_cq_cid =
		    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
				in_params->cq_cid);
		p_ramrod->xrc_domain = cpu_to_le16(in_params->xrcd_id);
	}

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	out_params->srq_id = srq_id;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "XRC/SRQ created Id = %x (is_xrc=%u)\n",
		   out_params->srq_id, in_params->is_xrc);
	return rc;

err:
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, bmap, returned_id);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	return rc;
}

bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn)
{
	bool result;

	/* if rdma wasn't activated yet, naturally there are no qps */
	if (!p_hwfn->p_rdma_info->active)
		return false;

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	if (!p_hwfn->p_rdma_info->cid_map.bitmap)
		result = false;
	else
		result = !qed_bmap_is_empty(&p_hwfn->p_rdma_info->cid_map);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	return result;
}
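
/* EDPM (enhanced doorbell push mode) is enabled only when neither DCBX nor
 * an undersized doorbell BAR has vetoed it; both veto flags funnel through
 * this helper so the DORQ enable bit always reflects their current state.
 */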
void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 val;

	val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1;

	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val);
	DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA),
		   "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n",
		   val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm);
}

void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	p_hwfn->db_bar_no_edpm = true;

	qed_rdma_dpm_conf(p_hwfn, p_ptt);
}

static int qed_rdma_start(void *rdma_cxt,
			  struct qed_rdma_start_in_params *params)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
	struct qed_ptt *p_ptt;
	int rc = -EBUSY;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "desired_cnq = %08x\n", params->desired_cnq);

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		goto err;

	rc = qed_rdma_alloc(p_hwfn);
	if (rc)
		goto err1;

	rc = qed_rdma_setup(p_hwfn, p_ptt, params);
	if (rc)
		goto err2;

	qed_ptt_release(p_hwfn, p_ptt);
	p_hwfn->p_rdma_info->active = 1;

	return rc;

err2:
	qed_rdma_free(p_hwfn);
err1:
	qed_ptt_release(p_hwfn, p_ptt);
err:
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc);
	return rc;
}

static int qed_rdma_init(struct qed_dev *cdev,
			 struct qed_rdma_start_in_params *params)
{
	return qed_rdma_start(QED_AFFIN_HWFN(cdev), params);
}

static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
{
	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi);

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
				       u8 *old_mac_address,
				       u8 *new_mac_address)
{
	int rc = 0;

	if (old_mac_address)
		qed_llh_remove_mac_filter(cdev, 0, old_mac_address);
	if (new_mac_address)
		rc = qed_llh_add_mac_filter(cdev, 0, new_mac_address);

	if (rc)
		DP_ERR(cdev,
		       "qed roce ll2 mac filter set: failed to add MAC filter\n");

	return rc;
}

static int qed_iwarp_set_engine_affin(struct qed_dev *cdev, bool b_reset)
{
	enum qed_eng eng;
	u8 ppfid = 0;
	int rc;

	/* Make sure iwarp cmt mode is enabled before setting affinity */
	if (!cdev->iwarp_cmt)
		return -EINVAL;

	if (b_reset)
		eng = QED_BOTH_ENG;
	else
		eng = cdev->l2_affin_hint ? QED_ENG1 : QED_ENG0;

	rc = qed_llh_set_ppfid_affinity(cdev, ppfid, eng);
	if (rc) {
		DP_NOTICE(cdev,
			  "Failed to set the engine affinity of ppfid %d\n",
			  ppfid);
		return rc;
	}

	DP_VERBOSE(cdev, (QED_MSG_RDMA | QED_MSG_SP),
		   "LLH: Set the engine affinity of non-RoCE packets as %d\n",
		   eng);

	return 0;
}

static const struct qed_rdma_ops qed_rdma_ops_pass = {
	.common = &qed_common_ops_pass,
	.fill_dev_info = &qed_fill_rdma_dev_info,
	.rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx,
	.rdma_init = &qed_rdma_init,
	.rdma_add_user = &qed_rdma_add_user,
	.rdma_remove_user = &qed_rdma_remove_user,
	.rdma_stop = &qed_rdma_stop,
	.rdma_query_port = &qed_rdma_query_port,
	.rdma_query_device = &qed_rdma_query_device,
	.rdma_get_start_sb = &qed_rdma_get_sb_start,
	.rdma_get_rdma_int = &qed_rdma_get_int,
	.rdma_set_rdma_int = &qed_rdma_set_int,
	.rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix,
	.rdma_cnq_prod_update = &qed_rdma_cnq_prod_update,
	.rdma_alloc_pd = &qed_rdma_alloc_pd,
	.rdma_dealloc_pd = &qed_rdma_free_pd,
	.rdma_alloc_xrcd = &qed_rdma_alloc_xrcd,
	.rdma_dealloc_xrcd = &qed_rdma_free_xrcd,
	.rdma_create_cq = &qed_rdma_create_cq,
	.rdma_destroy_cq = &qed_rdma_destroy_cq,
	.rdma_create_qp = &qed_rdma_create_qp,
	.rdma_modify_qp = &qed_rdma_modify_qp,
	.rdma_query_qp = &qed_rdma_query_qp,
	.rdma_destroy_qp = &qed_rdma_destroy_qp,
	.rdma_alloc_tid = &qed_rdma_alloc_tid,
	.rdma_free_tid = &qed_rdma_free_tid,
	.rdma_register_tid = &qed_rdma_register_tid,
	.rdma_deregister_tid = &qed_rdma_deregister_tid,
	.rdma_create_srq = &qed_rdma_create_srq,
	.rdma_modify_srq = &qed_rdma_modify_srq,
	.rdma_destroy_srq = &qed_rdma_destroy_srq,
	.ll2_acquire_connection = &qed_ll2_acquire_connection,
	.ll2_establish_connection = &qed_ll2_establish_connection,
	.ll2_terminate_connection = &qed_ll2_terminate_connection,
	.ll2_release_connection = &qed_ll2_release_connection,
	.ll2_post_rx_buffer = &qed_ll2_post_rx_buffer,
	.ll2_prepare_tx_packet = &qed_ll2_prepare_tx_packet,
	.ll2_set_fragment_of_tx_packet = &qed_ll2_set_fragment_of_tx_packet,
	.ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter,
	.ll2_get_stats = &qed_ll2_get_stats,
	.iwarp_set_engine_affin = &qed_iwarp_set_engine_affin,
	.iwarp_connect = &qed_iwarp_connect,
	.iwarp_create_listen = &qed_iwarp_create_listen,
	.iwarp_destroy_listen = &qed_iwarp_destroy_listen,
	.iwarp_accept = &qed_iwarp_accept,
	.iwarp_reject = &qed_iwarp_reject,
	.iwarp_send_rtr = &qed_iwarp_send_rtr,
};

const struct qed_rdma_ops *qed_get_rdma_ops(void)
{
	return &qed_rdma_ops_pass;
}
EXPORT_SYMBOL(qed_get_rdma_ops);