/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: QPLib resource manager
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/inetdevice.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_rcfw.h"

static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats);
static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats);

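/* PBL (Page Buffer List)
 *
 * Each bnxt_qplib_pbl tracks one level of a hardware queue's page
 * tables: pg_arr[] holds the kernel virtual address of every page
 * (left NULL for user-memory backed pages, which the CPU never
 * dereferences here), and pg_map_arr[] holds the matching DMA
 * addresses that get programmed into the chip.
 */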
static void __free_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
		       bool is_umem)
{
	int i;

	if (!is_umem) {
		for (i = 0; i < pbl->pg_count; i++) {
			if (pbl->pg_arr[i])
				dma_free_coherent(&pdev->dev, pbl->pg_size,
						  (void *)((unsigned long)
						   pbl->pg_arr[i] &
						  PAGE_MASK),
						  pbl->pg_map_arr[i]);
			else
				dev_warn(&pdev->dev,
					 "PBL free pg_arr[%d] empty?!\n", i);
			pbl->pg_arr[i] = NULL;
		}
	}
	kfree(pbl->pg_arr);
	pbl->pg_arr = NULL;
	kfree(pbl->pg_map_arr);
	pbl->pg_map_arr = NULL;
	pbl->pg_count = 0;
	pbl->pg_size = 0;
}

static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
		       struct scatterlist *sghead, u32 pages,
		       u32 nmaps, u32 pg_size)
{
	struct sg_dma_page_iter sg_iter;
	bool is_umem = false;
	int i;

	/* page ptr arrays */
	pbl->pg_arr = kcalloc(pages, sizeof(void *), GFP_KERNEL);
	if (!pbl->pg_arr)
		return -ENOMEM;

	pbl->pg_map_arr = kcalloc(pages, sizeof(dma_addr_t), GFP_KERNEL);
	if (!pbl->pg_map_arr) {
		kfree(pbl->pg_arr);
		pbl->pg_arr = NULL;
		return -ENOMEM;
	}
	pbl->pg_count = 0;
	pbl->pg_size = pg_size;

	if (!sghead) {
		for (i = 0; i < pages; i++) {
			pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
							    pbl->pg_size,
							    &pbl->pg_map_arr[i],
							    GFP_KERNEL);
			if (!pbl->pg_arr[i])
				goto fail;
			pbl->pg_count++;
		}
	} else {
		i = 0;
		is_umem = true;
		for_each_sg_dma_page(sghead, &sg_iter, nmaps, 0) {
			pbl->pg_map_arr[i] = sg_page_iter_dma_address(&sg_iter);
			pbl->pg_arr[i] = NULL;
			pbl->pg_count++;
			i++;
			if (i == pages)
				break;
		}
	}

	return 0;

fail:
	__free_pbl(pdev, pbl, is_umem);
	return -ENOMEM;
}

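/* Illustrative example (hypothetical sizes, not from this driver's docs):
 * a kernel-owned PBL backing a 16 KB queue with 4 KB pages is set up with
 *
 *	rc = __alloc_pbl(pdev, pbl, NULL, 4, 0, 4096);
 *
 * which leaves pg_count == 4 and one DMA-coherent page behind each
 * pg_arr[i]. Passing a non-NULL sghead instead records the caller's
 * already-mapped user pages without allocating anything new.
 */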
/* HWQ */
void bnxt_qplib_free_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq)
{
	int i;

	if (!hwq->max_elements)
		return;
	if (hwq->level >= PBL_LVL_MAX)
		return;

	for (i = 0; i < hwq->level + 1; i++) {
		if (i == hwq->level)
			__free_pbl(pdev, &hwq->pbl[i], hwq->is_user);
		else
			__free_pbl(pdev, &hwq->pbl[i], false);
	}

	hwq->level = PBL_LVL_MAX;
	hwq->max_elements = 0;
	hwq->element_size = 0;
	hwq->prod = 0;
	hwq->cons = 0;
	hwq->cp_bit = 0;
}

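/* The allocator below picks the indirection depth purely from the page
 * count: up to MAX_PBL_LVL_0_PGS pages are referenced directly from the
 * level-0 block, up to MAX_PBL_LVL_1_PGS pages take one level of
 * indirection (level 0 points at level 1), and anything larger takes
 * two (level 0 to level 1 to level 2, with the data pages at level 2).
 */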
/* All HWQs are power of 2 in size */
int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
			      struct bnxt_qplib_sg_info *sg_info,
			      u32 *elements, u32 element_size, u32 aux,
			      u32 pg_size, enum bnxt_qplib_hwq_type hwq_type)
{
	u32 pages, maps, slots, size, aux_pages = 0, aux_size = 0;
	dma_addr_t *src_phys_ptr, **dst_virt_ptr;
	struct scatterlist *sghead = NULL;
	int i, rc;

	hwq->level = PBL_LVL_MAX;

	slots = roundup_pow_of_two(*elements);
	if (aux) {
		aux_size = roundup_pow_of_two(aux);
		aux_pages = (slots * aux_size) / pg_size;
		if ((slots * aux_size) % pg_size)
			aux_pages++;
	}
	size = roundup_pow_of_two(element_size);

	if (sg_info)
		sghead = sg_info->sglist;

	if (!sghead) {
		hwq->is_user = false;
		pages = (slots * size) / pg_size + aux_pages;
		if ((slots * size) % pg_size)
			pages++;
		if (!pages)
			return -EINVAL;
		maps = 0;
	} else {
		hwq->is_user = true;
		pages = sg_info->npages;
		maps = sg_info->nmap;
	}

	/* Alloc the 1st memory block; can be a PDL/PTL/PBL */
	if (sghead && (pages == MAX_PBL_LVL_0_PGS))
		rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], sghead,
				 pages, maps, pg_size);
	else
		rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], NULL,
				 1, 0, pg_size);
	if (rc)
		goto fail;

	hwq->level = PBL_LVL_0;

	if (pages > MAX_PBL_LVL_0_PGS) {
		if (pages > MAX_PBL_LVL_1_PGS) {
			/* 2 levels of indirection */
			rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], NULL,
					 MAX_PBL_LVL_1_PGS_FOR_LVL_2,
					 0, pg_size);
			if (rc)
				goto fail;
			/* Fill in lvl0 PBL */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
			for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
				dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
					src_phys_ptr[i] | PTU_PDE_VALID;
			hwq->level = PBL_LVL_1;

			rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_2], sghead,
					 pages, maps, pg_size);
			if (rc)
				goto fail;

			/* Fill in lvl1 PBL */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_1].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_2].pg_map_arr;
			for (i = 0; i < hwq->pbl[PBL_LVL_2].pg_count; i++) {
				dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
					src_phys_ptr[i] | PTU_PTE_VALID;
			}
			if (hwq_type == HWQ_TYPE_QUEUE) {
				/* Find the last pg of the size */
				i = hwq->pbl[PBL_LVL_2].pg_count;
				dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
								  PTU_PTE_LAST;
				if (i > 1)
					dst_virt_ptr[PTR_PG(i - 2)]
						    [PTR_IDX(i - 2)] |=
						    PTU_PTE_NEXT_TO_LAST;
			}
			hwq->level = PBL_LVL_2;
		} else {
			u32 flag = hwq_type == HWQ_TYPE_L2_CMPL ? 0 :
						PTU_PTE_VALID;

			/* 1 level of indirection */
			rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], sghead,
					 pages, maps, pg_size);
			if (rc)
				goto fail;
			/* Fill in lvl0 PBL */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
			for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++) {
				dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
					src_phys_ptr[i] | flag;
			}
			if (hwq_type == HWQ_TYPE_QUEUE) {
				/* Find the last pg of the size */
				i = hwq->pbl[PBL_LVL_1].pg_count;
				dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
								  PTU_PTE_LAST;
				if (i > 1)
					dst_virt_ptr[PTR_PG(i - 2)]
						    [PTR_IDX(i - 2)] |=
						    PTU_PTE_NEXT_TO_LAST;
			}
			hwq->level = PBL_LVL_1;
		}
	}
	hwq->pdev = pdev;
	spin_lock_init(&hwq->lock);
	hwq->prod = 0;
	hwq->cons = 0;
	*elements = hwq->max_elements = slots;
	hwq->element_size = size;

	/* For direct access to the elements */
	hwq->pbl_ptr = hwq->pbl[hwq->level].pg_arr;
	hwq->pbl_dma_ptr = hwq->pbl[hwq->level].pg_map_arr;

	return 0;

fail:
	bnxt_qplib_free_hwq(pdev, hwq);
	return -ENOMEM;
}

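/* Minimal caller sketch (hypothetical element count and size, shown only
 * to illustrate that *elements is rounded up to a power of 2 in place):
 *
 *	struct bnxt_qplib_hwq hwq = {};
 *	u32 depth = 1000;
 *	int rc;
 *
 *	rc = bnxt_qplib_alloc_init_hwq(pdev, &hwq, NULL, &depth, 16, 0,
 *				       PAGE_SIZE, HWQ_TYPE_QUEUE);
 *	// on success depth == 1024 and hwq.element_size == 16
 */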
/* Context Tables */
void bnxt_qplib_free_ctx(struct pci_dev *pdev,
			 struct bnxt_qplib_ctx *ctx)
{
	int i;

	bnxt_qplib_free_hwq(pdev, &ctx->qpc_tbl);
	bnxt_qplib_free_hwq(pdev, &ctx->mrw_tbl);
	bnxt_qplib_free_hwq(pdev, &ctx->srqc_tbl);
	bnxt_qplib_free_hwq(pdev, &ctx->cq_tbl);
	bnxt_qplib_free_hwq(pdev, &ctx->tim_tbl);
	for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
		bnxt_qplib_free_hwq(pdev, &ctx->tqm_tbl[i]);
	bnxt_qplib_free_hwq(pdev, &ctx->tqm_pde);
	bnxt_qplib_free_stats_ctx(pdev, &ctx->stats);
}

/*
 * Routine: bnxt_qplib_alloc_ctx
 * Description:
 *     Context tables are memories which are used by the chip fw.
 *     The 6 tables defined are:
 *             QPC ctx - holds QP states
 *             MRW ctx - holds memory region and window
 *             SRQ ctx - holds shared RQ states
 *             CQ ctx - holds completion queue states
 *             TQM ctx - holds Tx Queue Manager context
 *             TIM ctx - holds timer context
 *     Depending on the size of the tbl requested, either a 1 Page Buffer List
 *     or a 1-to-2-stage indirection Page Directory List + 1 PBL is used
 *     instead.
 *     The table is employed as follows:
 *             For 0      < ctx size <= 1 PAGE, 0 levels of indirection are used
 *             For 1 PAGE < ctx size <= 512 entries, 1 level of indirection is used
 *             For 512    < ctx size <= MAX, 2 levels of indirection are used
 * Returns:
 *     0 if success, else -ERRORS
 */
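/* Worked example with illustrative numbers: 64K QPs at 256 bytes per QPC
 * entry need 16 MB, i.e. 4096 pages of 4 KB. A single 4 KB PBL page holds
 * only 512 64-bit pointers, so 4096 data pages exceed what one level of
 * indirection can reach and the table is built with two.
 */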
int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
			 struct bnxt_qplib_ctx *ctx,
			 bool virt_fn, bool is_p5)
{
	int i, j, k, rc = 0;
	int fnz_idx = -1;
	__le64 **pbl_ptr;

	if (virt_fn || is_p5)
		goto stats_alloc;

	/* QPC Tables */
	ctx->qpc_tbl.max_elements = ctx->qpc_count;
	rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->qpc_tbl, NULL,
				       &ctx->qpc_tbl.max_elements,
				       BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE, 0,
				       PAGE_SIZE, HWQ_TYPE_CTX);
	if (rc)
		goto fail;

	/* MRW Tables */
	ctx->mrw_tbl.max_elements = ctx->mrw_count;
	rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->mrw_tbl, NULL,
				       &ctx->mrw_tbl.max_elements,
				       BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE, 0,
				       PAGE_SIZE, HWQ_TYPE_CTX);
	if (rc)
		goto fail;

	/* SRQ Tables */
	ctx->srqc_tbl.max_elements = ctx->srqc_count;
	rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->srqc_tbl, NULL,
				       &ctx->srqc_tbl.max_elements,
				       BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE, 0,
				       PAGE_SIZE, HWQ_TYPE_CTX);
	if (rc)
		goto fail;

	/* CQ Tables */
	ctx->cq_tbl.max_elements = ctx->cq_count;
	rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->cq_tbl, NULL,
				       &ctx->cq_tbl.max_elements,
				       BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE, 0,
				       PAGE_SIZE, HWQ_TYPE_CTX);
	if (rc)
		goto fail;

	/* TQM Buffer */
	ctx->tqm_pde.max_elements = 512;
	rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_pde, NULL,
				       &ctx->tqm_pde.max_elements, sizeof(u64),
				       0, PAGE_SIZE, HWQ_TYPE_CTX);
	if (rc)
		goto fail;

	for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) {
		if (!ctx->tqm_count[i])
			continue;
		ctx->tqm_tbl[i].max_elements = ctx->qpc_count *
					       ctx->tqm_count[i];
		rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_tbl[i], NULL,
					       &ctx->tqm_tbl[i].max_elements, 1,
					       0, PAGE_SIZE, HWQ_TYPE_CTX);
		if (rc)
			goto fail;
	}
	pbl_ptr = (__le64 **)ctx->tqm_pde.pbl_ptr;
	for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ;
	     i++, j += MAX_TQM_ALLOC_BLK_SIZE) {
		if (!ctx->tqm_tbl[i].max_elements)
			continue;
		if (fnz_idx == -1)
			fnz_idx = i;
		switch (ctx->tqm_tbl[i].level) {
		case PBL_LVL_2:
			for (k = 0; k < ctx->tqm_tbl[i].pbl[PBL_LVL_1].pg_count;
			     k++)
				pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)] =
				  cpu_to_le64(
				    ctx->tqm_tbl[i].pbl[PBL_LVL_1].pg_map_arr[k]
				    | PTU_PTE_VALID);
			break;
		case PBL_LVL_1:
		case PBL_LVL_0:
		default:
			pbl_ptr[PTR_PG(j)][PTR_IDX(j)] = cpu_to_le64(
				ctx->tqm_tbl[i].pbl[PBL_LVL_0].pg_map_arr[0] |
				PTU_PTE_VALID);
			break;
		}
	}
	if (fnz_idx == -1)
		fnz_idx = 0;
	ctx->tqm_pde_level = ctx->tqm_tbl[fnz_idx].level == PBL_LVL_2 ?
			     PBL_LVL_2 : ctx->tqm_tbl[fnz_idx].level + 1;

	/* TIM Buffer */
	ctx->tim_tbl.max_elements = ctx->qpc_count * 16;
	rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tim_tbl, NULL,
				       &ctx->tim_tbl.max_elements, 1,
				       0, PAGE_SIZE, HWQ_TYPE_CTX);
	if (rc)
		goto fail;

stats_alloc:
	/* Stats */
	rc = bnxt_qplib_alloc_stats_ctx(pdev, &ctx->stats);
	if (rc)
		goto fail;

	return 0;

fail:
	bnxt_qplib_free_ctx(pdev, ctx);
	return rc;
}

/* GUID routines */
void bnxt_qplib_get_guid(u8 *dev_addr, u8 *guid)
{
	u8 mac[ETH_ALEN];

	/* MAC-48 to EUI-64 mapping */
	memcpy(mac, dev_addr, ETH_ALEN);
	guid[0] = mac[0] ^ 2;
	guid[1] = mac[1];
	guid[2] = mac[2];
	guid[3] = 0xff;
	guid[4] = 0xfe;
	guid[5] = mac[3];
	guid[6] = mac[4];
	guid[7] = mac[5];
}

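/* Example: MAC 00:10:18:aa:bb:cc becomes GUID 02:10:18:ff:fe:aa:bb:cc.
 * The universal/local bit of the first octet is flipped (mac[0] ^ 2) and
 * ff:fe is inserted between the OUI and the NIC-specific bytes, which is
 * the standard MAC-48 to EUI-64 expansion.
 */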
/* SGIDs */
static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
	kfree(sgid_tbl->tbl);
	kfree(sgid_tbl->hw_id);
	kfree(sgid_tbl->ctx);
	kfree(sgid_tbl->vlan);
	sgid_tbl->tbl = NULL;
	sgid_tbl->hw_id = NULL;
	sgid_tbl->ctx = NULL;
	sgid_tbl->vlan = NULL;
	sgid_tbl->max = 0;
	sgid_tbl->active = 0;
}

static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_sgid_tbl *sgid_tbl,
				     u16 max)
{
	sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL);
	if (!sgid_tbl->tbl)
		return -ENOMEM;

	sgid_tbl->hw_id = kcalloc(max, sizeof(u16), GFP_KERNEL);
	if (!sgid_tbl->hw_id)
		goto out_free1;

	sgid_tbl->ctx = kcalloc(max, sizeof(void *), GFP_KERNEL);
	if (!sgid_tbl->ctx)
		goto out_free2;

	sgid_tbl->vlan = kcalloc(max, sizeof(u8), GFP_KERNEL);
	if (!sgid_tbl->vlan)
		goto out_free3;

	sgid_tbl->max = max;
	return 0;
out_free3:
	kfree(sgid_tbl->ctx);
	sgid_tbl->ctx = NULL;
out_free2:
	kfree(sgid_tbl->hw_id);
	sgid_tbl->hw_id = NULL;
out_free1:
	kfree(sgid_tbl->tbl);
	sgid_tbl->tbl = NULL;
	return -ENOMEM;
}

static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
					struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
	int i;

	for (i = 0; i < sgid_tbl->max; i++) {
		if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
			   sizeof(bnxt_qplib_gid_zero)))
			bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i].gid,
					    sgid_tbl->tbl[i].vlan_id, true);
	}
	memset(sgid_tbl->tbl, 0, sizeof(*sgid_tbl->tbl) * sgid_tbl->max);
	memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
	memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
	sgid_tbl->active = 0;
}

static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
				     struct net_device *netdev)
{
	u32 i;

	for (i = 0; i < sgid_tbl->max; i++)
		sgid_tbl->tbl[i].vlan_id = 0xffff;

	memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
}

static void bnxt_qplib_free_pkey_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
	if (!pkey_tbl->max)
		dev_dbg(&res->pdev->dev, "PKEY tbl not present\n");
	else
		kfree(pkey_tbl->tbl);

	pkey_tbl->tbl = NULL;
	pkey_tbl->max = 0;
	pkey_tbl->active = 0;
}

static int bnxt_qplib_alloc_pkey_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_pkey_tbl *pkey_tbl,
				     u16 max)
{
	pkey_tbl->tbl = kcalloc(max, sizeof(u16), GFP_KERNEL);
	if (!pkey_tbl->tbl)
		return -ENOMEM;

	pkey_tbl->max = max;
	return 0;
}

/* PDs */
int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pdt, struct bnxt_qplib_pd *pd)
{
	u32 bit_num;

	bit_num = find_first_bit(pdt->tbl, pdt->max);
	if (bit_num == pdt->max)
		return -ENOMEM;

	/* Found unused PD */
	clear_bit(bit_num, pdt->tbl);
	pd->id = bit_num;
	return 0;
}

int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_pd_tbl *pdt,
			  struct bnxt_qplib_pd *pd)
{
	if (test_and_set_bit(pd->id, pdt->tbl)) {
		dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d\n",
			 pd->id);
		return -EINVAL;
	}
	pd->id = 0;
	return 0;
}

static void bnxt_qplib_free_pd_tbl(struct bnxt_qplib_pd_tbl *pdt)
{
	kfree(pdt->tbl);
	pdt->tbl = NULL;
	pdt->max = 0;
}

static int bnxt_qplib_alloc_pd_tbl(struct bnxt_qplib_res *res,
				   struct bnxt_qplib_pd_tbl *pdt,
				   u32 max)
{
	u32 bytes;

	bytes = max >> 3;
	if (!bytes)
		bytes = 1;
	pdt->tbl = kmalloc(bytes, GFP_KERNEL);
	if (!pdt->tbl)
		return -ENOMEM;

	pdt->max = max;
	memset((u8 *)pdt->tbl, 0xFF, bytes);

	return 0;
}

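/* Note the inverted bitmap convention shared by the PD and DPI tables:
 * every bit is set at allocation time (the 0xFF memset above), a set bit
 * means "free", and an ID is claimed with find_first_bit() followed by
 * clear_bit(). test_and_set_bit() on release therefore doubles as a
 * double-free check.
 */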
/* DPIs */
int bnxt_qplib_alloc_dpi(struct bnxt_qplib_dpi_tbl *dpit,
			 struct bnxt_qplib_dpi *dpi,
			 void *app)
{
	u32 bit_num;

	bit_num = find_first_bit(dpit->tbl, dpit->max);
	if (bit_num == dpit->max)
		return -ENOMEM;

	/* Found unused DPI */
	clear_bit(bit_num, dpit->tbl);
	dpit->app_tbl[bit_num] = app;

	dpi->dpi = bit_num;
	dpi->dbr = dpit->dbr_bar_reg_iomem + (bit_num * PAGE_SIZE);
	dpi->umdbr = dpit->unmapped_dbr + (bit_num * PAGE_SIZE);

	return 0;
}

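/* Each DPI owns one PAGE_SIZE slice of the doorbell BAR: dbr is the
 * kernel's ioremapped pointer, while umdbr is the raw bus address that
 * can be handed to user space for mmap. dpit->max is therefore simply
 * the mapped doorbell length divided by PAGE_SIZE.
 */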
int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
			   struct bnxt_qplib_dpi_tbl *dpit,
			   struct bnxt_qplib_dpi *dpi)
{
	if (dpi->dpi >= dpit->max) {
		dev_warn(&res->pdev->dev, "Invalid DPI? dpi = %d\n", dpi->dpi);
		return -EINVAL;
	}
	if (test_and_set_bit(dpi->dpi, dpit->tbl)) {
		dev_warn(&res->pdev->dev, "Freeing an unused DPI? dpi = %d\n",
			 dpi->dpi);
		return -EINVAL;
	}
	if (dpit->app_tbl)
		dpit->app_tbl[dpi->dpi] = NULL;
	memset(dpi, 0, sizeof(*dpi));

	return 0;
}

static void bnxt_qplib_free_dpi_tbl(struct bnxt_qplib_res *res,
				    struct bnxt_qplib_dpi_tbl *dpit)
{
	kfree(dpit->tbl);
	kfree(dpit->app_tbl);
	if (dpit->dbr_bar_reg_iomem)
		pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
	memset(dpit, 0, sizeof(*dpit));
}

static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
				    struct bnxt_qplib_dpi_tbl *dpit,
				    u32 dbr_offset)
{
	u32 dbr_bar_reg = RCFW_DBR_PCI_BAR_REGION;
	resource_size_t bar_reg_base;
	u32 dbr_len, bytes;

	if (dpit->dbr_bar_reg_iomem) {
		dev_err(&res->pdev->dev, "DBR BAR region %d already mapped\n",
			dbr_bar_reg);
		return -EALREADY;
	}

	bar_reg_base = pci_resource_start(res->pdev, dbr_bar_reg);
	if (!bar_reg_base) {
		dev_err(&res->pdev->dev, "BAR region %d resc start failed\n",
			dbr_bar_reg);
		return -ENOMEM;
	}

	dbr_len = pci_resource_len(res->pdev, dbr_bar_reg) - dbr_offset;
	if (!dbr_len || ((dbr_len & (PAGE_SIZE - 1)) != 0)) {
		dev_err(&res->pdev->dev, "Invalid DBR length %d\n", dbr_len);
		return -ENOMEM;
	}

	dpit->dbr_bar_reg_iomem = ioremap_nocache(bar_reg_base + dbr_offset,
						  dbr_len);
	if (!dpit->dbr_bar_reg_iomem) {
		dev_err(&res->pdev->dev,
			"FP: DBR BAR region %d mapping failed\n", dbr_bar_reg);
		return -ENOMEM;
	}

	dpit->unmapped_dbr = bar_reg_base + dbr_offset;
	dpit->max = dbr_len / PAGE_SIZE;

	dpit->app_tbl = kcalloc(dpit->max, sizeof(void *), GFP_KERNEL);
	if (!dpit->app_tbl)
		goto unmap_io;

	bytes = dpit->max >> 3;
	if (!bytes)
		bytes = 1;

	dpit->tbl = kmalloc(bytes, GFP_KERNEL);
	if (!dpit->tbl) {
		kfree(dpit->app_tbl);
		dpit->app_tbl = NULL;
		goto unmap_io;
	}

	memset((u8 *)dpit->tbl, 0xFF, bytes);

	return 0;

unmap_io:
	pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
	return -ENOMEM;
}

/* PKEYs */
static void bnxt_qplib_cleanup_pkey_tbl(struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
	memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);
	pkey_tbl->active = 0;
}

static void bnxt_qplib_init_pkey_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
	u16 pkey = 0xFFFF;

	memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);

	/* pkey default = 0xFFFF */
	bnxt_qplib_add_pkey(res, pkey_tbl, &pkey, false);
}

/* Stats */
static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats)
{
	if (stats->dma) {
		dma_free_coherent(&pdev->dev, stats->size,
				  stats->dma, stats->dma_map);
	}
	memset(stats, 0, sizeof(*stats));
	stats->fw_id = -1;
}

static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats)
{
	memset(stats, 0, sizeof(*stats));
	stats->fw_id = -1;
	/* 128-byte aligned context memory is required only for 57500 chips;
	 * making the alignment unconditional does no harm on earlier chips.
	 */
	stats->size = ALIGN(sizeof(struct ctx_hw_stats), 128);
	stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
					&stats->dma_map, GFP_KERNEL);
	if (!stats->dma) {
		dev_err(&pdev->dev, "Stats DMA allocation failed\n");
		return -ENOMEM;
	}
	return 0;
}

void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_cleanup_pkey_tbl(&res->pkey_tbl);
	bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl);
}

int bnxt_qplib_init_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev);
	bnxt_qplib_init_pkey_tbl(res, &res->pkey_tbl);

	return 0;
}

void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_free_pkey_tbl(res, &res->pkey_tbl);
	bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl);
	bnxt_qplib_free_pd_tbl(&res->pd_tbl);
	bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);
}

int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
			 struct net_device *netdev,
			 struct bnxt_qplib_dev_attr *dev_attr)
{
	int rc = 0;

	res->pdev = pdev;
	res->netdev = netdev;

	rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_pkey_tbl(res, &res->pkey_tbl, dev_attr->max_pkey);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_pd_tbl(res, &res->pd_tbl, dev_attr->max_pd);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_dpi_tbl(res, &res->dpi_tbl, dev_attr->l2_db_size);
	if (rc)
		goto fail;

	return 0;
fail:
	bnxt_qplib_free_res(res);
	return rc;
}
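
/* Typical lifecycle, sketched from the functions above: the caller pairs
 * bnxt_qplib_alloc_res()/bnxt_qplib_free_res() around
 * bnxt_qplib_init_res()/bnxt_qplib_cleanup_res(); init/cleanup only
 * (re)initialize the SGID and PKEY tables and never reallocate them.
 */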