drivers/infiniband/hw/bnxt_re/qplib_res.c
/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: QPLib resource manager
 */

#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/inetdevice.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_rcfw.h"

static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
                                      struct bnxt_qplib_stats *stats);
static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
                                      struct bnxt_qplib_stats *stats);

/* PBL */
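/*
 * A bnxt_qplib_pbl tracks one level of a Page Buffer List: pg_arr[] holds
 * the CPU virtual address of each page, pg_map_arr[] the matching DMA
 * address, and pg_count/pg_size describe the array.
 */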
static void __free_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
                       bool is_umem)
{
        int i;

        if (!is_umem) {
                for (i = 0; i < pbl->pg_count; i++) {
                        if (pbl->pg_arr[i])
                                dma_free_coherent(&pdev->dev, pbl->pg_size,
                                                  (void *)((unsigned long)
                                                   pbl->pg_arr[i] &
                                                  PAGE_MASK),
                                                  pbl->pg_map_arr[i]);
                        else
                                dev_warn(&pdev->dev,
                                         "QPLIB: PBL free pg_arr[%d] empty?!",
                                         i);
                        pbl->pg_arr[i] = NULL;
                }
        }
        kfree(pbl->pg_arr);
        pbl->pg_arr = NULL;
        kfree(pbl->pg_map_arr);
        pbl->pg_map_arr = NULL;
        pbl->pg_count = 0;
        pbl->pg_size = 0;
}

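/*
 * Build one PBL level.  When no scatterlist is supplied the pages are
 * allocated as coherent DMA memory; otherwise the PBL is populated from the
 * caller-provided (user-memory) scatterlist.  On failure the partially built
 * level is released via __free_pbl().
 */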
static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
                       struct scatterlist *sghead, u32 pages, u32 pg_size)
{
        struct scatterlist *sg;
        bool is_umem = false;
        int i;

        /* page ptr arrays */
        pbl->pg_arr = kcalloc(pages, sizeof(void *), GFP_KERNEL);
        if (!pbl->pg_arr)
                return -ENOMEM;

        pbl->pg_map_arr = kcalloc(pages, sizeof(dma_addr_t), GFP_KERNEL);
        if (!pbl->pg_map_arr) {
                kfree(pbl->pg_arr);
                pbl->pg_arr = NULL;
                return -ENOMEM;
        }
        pbl->pg_count = 0;
        pbl->pg_size = pg_size;

        if (!sghead) {
                for (i = 0; i < pages; i++) {
                        pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
                                                            pbl->pg_size,
                                                            &pbl->pg_map_arr[i],
                                                            GFP_KERNEL);
                        if (!pbl->pg_arr[i])
                                goto fail;
                        memset(pbl->pg_arr[i], 0, pbl->pg_size);
                        pbl->pg_count++;
                }
        } else {
                i = 0;
                is_umem = true;
                for_each_sg(sghead, sg, pages, i) {
                        pbl->pg_map_arr[i] = sg_dma_address(sg);
                        pbl->pg_arr[i] = sg_virt(sg);
                        if (!pbl->pg_arr[i])
                                goto fail;

                        pbl->pg_count++;
                }
        }

        return 0;

fail:
        __free_pbl(pdev, pbl, is_umem);
        return -ENOMEM;
}

/* HWQ */
void bnxt_qplib_free_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq)
{
        int i;

        if (!hwq->max_elements)
                return;
        if (hwq->level >= PBL_LVL_MAX)
                return;

        for (i = 0; i < hwq->level + 1; i++) {
                if (i == hwq->level)
                        __free_pbl(pdev, &hwq->pbl[i], hwq->is_user);
                else
                        __free_pbl(pdev, &hwq->pbl[i], false);
        }

        hwq->level = PBL_LVL_MAX;
        hwq->max_elements = 0;
        hwq->element_size = 0;
        hwq->prod = 0;
        hwq->cons = 0;
        hwq->cp_bit = 0;
}

/* All HWQs are power of 2 in size */
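/*
 * A HWQ is backed by up to three PBL levels: PBL_LVL_0 alone when the queue
 * fits within MAX_PBL_LVL_0_PGS pages, PBL_LVL_0 pointing at a PBL_LVL_1 page
 * list for larger queues, and a third PBL_LVL_2 level when the page count
 * exceeds MAX_PBL_LVL_1_PGS.  For user-mapped queues the data pages come from
 * the supplied scatterlist.
 */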
int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
                              struct scatterlist *sghead, int nmap,
                              u32 *elements, u32 element_size, u32 aux,
                              u32 pg_size, enum bnxt_qplib_hwq_type hwq_type)
{
        u32 pages, slots, size, aux_pages = 0, aux_size = 0;
        dma_addr_t *src_phys_ptr, **dst_virt_ptr;
        int i, rc;

        hwq->level = PBL_LVL_MAX;

        slots = roundup_pow_of_two(*elements);
        if (aux) {
                aux_size = roundup_pow_of_two(aux);
                aux_pages = (slots * aux_size) / pg_size;
                if ((slots * aux_size) % pg_size)
                        aux_pages++;
        }
        size = roundup_pow_of_two(element_size);

        if (!sghead) {
                hwq->is_user = false;
                pages = (slots * size) / pg_size + aux_pages;
                if ((slots * size) % pg_size)
                        pages++;
                if (!pages)
                        return -EINVAL;
        } else {
                hwq->is_user = true;
                pages = nmap;
        }

        /* Alloc the 1st memory block; can be a PDL/PTL/PBL */
        if (sghead && (pages == MAX_PBL_LVL_0_PGS))
                rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], sghead,
                                 pages, pg_size);
        else
                rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], NULL, 1, pg_size);
        if (rc)
                goto fail;

        hwq->level = PBL_LVL_0;

        if (pages > MAX_PBL_LVL_0_PGS) {
                if (pages > MAX_PBL_LVL_1_PGS) {
                        /* 2 levels of indirection */
                        rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], NULL,
                                         MAX_PBL_LVL_1_PGS_FOR_LVL_2, pg_size);
                        if (rc)
                                goto fail;
                        /* Fill in lvl0 PBL */
                        dst_virt_ptr =
                                (dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
                        src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
                        for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
                                dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
                                        src_phys_ptr[i] | PTU_PDE_VALID;
                        hwq->level = PBL_LVL_1;

                        rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_2], sghead,
                                         pages, pg_size);
                        if (rc)
                                goto fail;

                        /* Fill in lvl1 PBL */
                        dst_virt_ptr =
                                (dma_addr_t **)hwq->pbl[PBL_LVL_1].pg_arr;
                        src_phys_ptr = hwq->pbl[PBL_LVL_2].pg_map_arr;
                        for (i = 0; i < hwq->pbl[PBL_LVL_2].pg_count; i++) {
                                dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
                                        src_phys_ptr[i] | PTU_PTE_VALID;
                        }
                        if (hwq_type == HWQ_TYPE_QUEUE) {
                                /* Find the last pg of the size */
                                i = hwq->pbl[PBL_LVL_2].pg_count;
                                dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
                                                                  PTU_PTE_LAST;
                                if (i > 1)
                                        dst_virt_ptr[PTR_PG(i - 2)]
                                                    [PTR_IDX(i - 2)] |=
                                                    PTU_PTE_NEXT_TO_LAST;
                        }
                        hwq->level = PBL_LVL_2;
                } else {
                        u32 flag = hwq_type == HWQ_TYPE_L2_CMPL ? 0 :
                                                PTU_PTE_VALID;

                        /* 1 level of indirection */
                        rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], sghead,
                                         pages, pg_size);
                        if (rc)
                                goto fail;
                        /* Fill in lvl0 PBL */
                        dst_virt_ptr =
                                (dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
                        src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
                        for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++) {
                                dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
                                        src_phys_ptr[i] | flag;
                        }
                        if (hwq_type == HWQ_TYPE_QUEUE) {
                                /* Find the last pg of the size */
                                i = hwq->pbl[PBL_LVL_1].pg_count;
                                dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
                                                                  PTU_PTE_LAST;
                                if (i > 1)
                                        dst_virt_ptr[PTR_PG(i - 2)]
                                                    [PTR_IDX(i - 2)] |=
                                                    PTU_PTE_NEXT_TO_LAST;
                        }
                        hwq->level = PBL_LVL_1;
                }
        }
        hwq->pdev = pdev;
        spin_lock_init(&hwq->lock);
        hwq->prod = 0;
        hwq->cons = 0;
        *elements = hwq->max_elements = slots;
        hwq->element_size = size;

        /* For direct access to the elements */
        hwq->pbl_ptr = hwq->pbl[hwq->level].pg_arr;
        hwq->pbl_dma_ptr = hwq->pbl[hwq->level].pg_map_arr;

        return 0;

fail:
        bnxt_qplib_free_hwq(pdev, hwq);
        return -ENOMEM;
}
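
/*
 * Usage sketch (illustrative only; the queue depth and the 64-byte element
 * size below are assumptions, not values taken from a real caller):
 *
 *      struct bnxt_qplib_hwq hwq = {};
 *      u32 depth = 1024;
 *      int rc;
 *
 *      rc = bnxt_qplib_alloc_init_hwq(pdev, &hwq, NULL, 0, &depth,
 *                                     64, 0, PAGE_SIZE, HWQ_TYPE_QUEUE);
 *
 * On success, depth holds the power-of-two size actually used, and
 * hwq.pbl_ptr/hwq.pbl_dma_ptr point at the deepest PBL level for direct
 * access to the queue elements.
 */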

/* Context Tables */
void bnxt_qplib_free_ctx(struct pci_dev *pdev,
                         struct bnxt_qplib_ctx *ctx)
{
        int i;

        bnxt_qplib_free_hwq(pdev, &ctx->qpc_tbl);
        bnxt_qplib_free_hwq(pdev, &ctx->mrw_tbl);
        bnxt_qplib_free_hwq(pdev, &ctx->srqc_tbl);
        bnxt_qplib_free_hwq(pdev, &ctx->cq_tbl);
        bnxt_qplib_free_hwq(pdev, &ctx->tim_tbl);
        for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
                bnxt_qplib_free_hwq(pdev, &ctx->tqm_tbl[i]);
        bnxt_qplib_free_hwq(pdev, &ctx->tqm_pde);
        bnxt_qplib_free_stats_ctx(pdev, &ctx->stats);
}

/*
 * Routine: bnxt_qplib_alloc_ctx
 * Description:
 *     Context tables are memory regions used by the chip firmware.
 *     The six tables defined are:
 *             QPC ctx - holds QP states
 *             MRW ctx - holds memory region and window contexts
 *             SRQ ctx - holds shared RQ states
 *             CQ ctx - holds completion queue states
 *             TQM ctx - holds Tx Queue Manager context
 *             TIM ctx - holds timer context
 *     Depending on the size of the table requested, either a one-page
 *     Buffer List or a one-to-two-level indirection Page Directory List
 *     plus one PBL is used.
 *     Indirection levels are chosen as follows:
 *             For 0      < ctx size <= 1 PAGE, 0 levels of indirection are used
 *             For 1 PAGE < ctx size <= 512 entries, 1 level of indirection is used
 *             For 512    < ctx size <= MAX, 2 levels of indirection are used
 * Returns:
 *     0 on success, else -ERRORS
 */
int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
                         struct bnxt_qplib_ctx *ctx,
                         bool virt_fn)
{
        int i, j, k, rc = 0;
        int fnz_idx = -1;
        __le64 **pbl_ptr;

        if (virt_fn)
                goto stats_alloc;

        /* QPC Tables */
        ctx->qpc_tbl.max_elements = ctx->qpc_count;
        rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->qpc_tbl, NULL, 0,
                                       &ctx->qpc_tbl.max_elements,
                                       BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE, 0,
                                       PAGE_SIZE, HWQ_TYPE_CTX);
        if (rc)
                goto fail;

        /* MRW Tables */
        ctx->mrw_tbl.max_elements = ctx->mrw_count;
        rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->mrw_tbl, NULL, 0,
                                       &ctx->mrw_tbl.max_elements,
                                       BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE, 0,
                                       PAGE_SIZE, HWQ_TYPE_CTX);
        if (rc)
                goto fail;

        /* SRQ Tables */
        ctx->srqc_tbl.max_elements = ctx->srqc_count;
        rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->srqc_tbl, NULL, 0,
                                       &ctx->srqc_tbl.max_elements,
                                       BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE, 0,
                                       PAGE_SIZE, HWQ_TYPE_CTX);
        if (rc)
                goto fail;

        /* CQ Tables */
        ctx->cq_tbl.max_elements = ctx->cq_count;
        rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->cq_tbl, NULL, 0,
                                       &ctx->cq_tbl.max_elements,
                                       BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE, 0,
                                       PAGE_SIZE, HWQ_TYPE_CTX);
        if (rc)
                goto fail;

        /* TQM Buffer */
        ctx->tqm_pde.max_elements = 512;
        rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_pde, NULL, 0,
                                       &ctx->tqm_pde.max_elements, sizeof(u64),
                                       0, PAGE_SIZE, HWQ_TYPE_CTX);
        if (rc)
                goto fail;

        for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) {
                if (!ctx->tqm_count[i])
                        continue;
                ctx->tqm_tbl[i].max_elements = ctx->qpc_count *
                                               ctx->tqm_count[i];
                rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_tbl[i], NULL, 0,
                                               &ctx->tqm_tbl[i].max_elements, 1,
                                               0, PAGE_SIZE, HWQ_TYPE_CTX);
                if (rc)
                        goto fail;
        }
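        /*
         * Program the TQM page directory: each TQM ring gets a block of
         * MAX_TQM_ALLOC_BLK_SIZE PDE slots, filled with the DMA addresses of
         * its level-1 pages (for two-level rings) or of its single level-0
         * page otherwise.
         */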
        pbl_ptr = (__le64 **)ctx->tqm_pde.pbl_ptr;
        for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ;
             i++, j += MAX_TQM_ALLOC_BLK_SIZE) {
                if (!ctx->tqm_tbl[i].max_elements)
                        continue;
                if (fnz_idx == -1)
                        fnz_idx = i;
                switch (ctx->tqm_tbl[i].level) {
                case PBL_LVL_2:
                        for (k = 0; k < ctx->tqm_tbl[i].pbl[PBL_LVL_1].pg_count;
                             k++)
                                pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)] =
                                  cpu_to_le64(
                                    ctx->tqm_tbl[i].pbl[PBL_LVL_1].pg_map_arr[k]
                                    | PTU_PTE_VALID);
                        break;
                case PBL_LVL_1:
                case PBL_LVL_0:
                default:
                        pbl_ptr[PTR_PG(j)][PTR_IDX(j)] = cpu_to_le64(
                                ctx->tqm_tbl[i].pbl[PBL_LVL_0].pg_map_arr[0] |
                                PTU_PTE_VALID);
                        break;
                }
        }
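        /*
         * fnz_idx is the first TQM ring that was actually allocated; the PDE
         * depth follows that ring's PBL level (a LVL_2 ring keeps the PDE at
         * LVL_2, otherwise the PDE level is one deeper than the ring's own).
         */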
        if (fnz_idx == -1)
                fnz_idx = 0;
        ctx->tqm_pde_level = ctx->tqm_tbl[fnz_idx].level == PBL_LVL_2 ?
                             PBL_LVL_2 : ctx->tqm_tbl[fnz_idx].level + 1;

        /* TIM Buffer */
        ctx->tim_tbl.max_elements = ctx->qpc_count * 16;
        rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tim_tbl, NULL, 0,
                                       &ctx->tim_tbl.max_elements, 1,
                                       0, PAGE_SIZE, HWQ_TYPE_CTX);
        if (rc)
                goto fail;

stats_alloc:
        /* Stats */
        rc = bnxt_qplib_alloc_stats_ctx(pdev, &ctx->stats);
        if (rc)
                goto fail;

        return 0;

fail:
        bnxt_qplib_free_ctx(pdev, ctx);
        return rc;
}

/* GUID */
void bnxt_qplib_get_guid(u8 *dev_addr, u8 *guid)
{
        u8 mac[ETH_ALEN];

        /* MAC-48 to EUI-64 mapping */
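        /* e.g. MAC 00:11:22:33:44:55 becomes GUID 02:11:22:ff:fe:33:44:55 */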
        memcpy(mac, dev_addr, ETH_ALEN);
        guid[0] = mac[0] ^ 2;
        guid[1] = mac[1];
        guid[2] = mac[2];
        guid[3] = 0xff;
        guid[4] = 0xfe;
        guid[5] = mac[3];
        guid[6] = mac[4];
        guid[7] = mac[5];
}

static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res,
                                     struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
        kfree(sgid_tbl->tbl);
        kfree(sgid_tbl->hw_id);
        kfree(sgid_tbl->ctx);
        sgid_tbl->tbl = NULL;
        sgid_tbl->hw_id = NULL;
        sgid_tbl->ctx = NULL;
        sgid_tbl->max = 0;
        sgid_tbl->active = 0;
}

static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
                                     struct bnxt_qplib_sgid_tbl *sgid_tbl,
                                     u16 max)
{
        sgid_tbl->tbl = kcalloc(max, sizeof(struct bnxt_qplib_gid), GFP_KERNEL);
        if (!sgid_tbl->tbl)
                return -ENOMEM;

        sgid_tbl->hw_id = kcalloc(max, sizeof(u16), GFP_KERNEL);
        if (!sgid_tbl->hw_id)
                goto out_free1;

        sgid_tbl->ctx = kcalloc(max, sizeof(void *), GFP_KERNEL);
        if (!sgid_tbl->ctx)
                goto out_free2;

        sgid_tbl->max = max;
        return 0;
out_free2:
        kfree(sgid_tbl->hw_id);
        sgid_tbl->hw_id = NULL;
out_free1:
        kfree(sgid_tbl->tbl);
        sgid_tbl->tbl = NULL;
        return -ENOMEM;
}

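/*
 * Tear down any SGID entries that are still populated (i.e. differ from
 * bnxt_qplib_gid_zero) via bnxt_qplib_del_sgid(), then clear the shadow
 * table and reset every hw_id slot to all ones.
 */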
static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
                                        struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
        int i;

        for (i = 0; i < sgid_tbl->max; i++) {
                if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
                           sizeof(bnxt_qplib_gid_zero)))
                        bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i], true);
        }
        memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
        memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
        sgid_tbl->active = 0;
}

static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
                                     struct net_device *netdev)
{
        memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
        memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
}

static void bnxt_qplib_free_pkey_tbl(struct bnxt_qplib_res *res,
                                     struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
        if (!pkey_tbl->tbl)
                dev_dbg(&res->pdev->dev, "QPLIB: PKEY tbl not present");
        else
                kfree(pkey_tbl->tbl);

        pkey_tbl->tbl = NULL;
        pkey_tbl->max = 0;
        pkey_tbl->active = 0;
}

static int bnxt_qplib_alloc_pkey_tbl(struct bnxt_qplib_res *res,
                                     struct bnxt_qplib_pkey_tbl *pkey_tbl,
                                     u16 max)
{
        pkey_tbl->tbl = kcalloc(max, sizeof(u16), GFP_KERNEL);
        if (!pkey_tbl->tbl)
                return -ENOMEM;

        pkey_tbl->max = max;
        return 0;
}

/* PDs */
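/*
 * The PD table is a bitmap in which a set bit means the PD id is free:
 * bnxt_qplib_alloc_pd_tbl() initialises it to all ones, allocation clears
 * the chosen bit and deallocation sets it again.
 */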
int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pdt, struct bnxt_qplib_pd *pd)
{
        u32 bit_num;

        bit_num = find_first_bit(pdt->tbl, pdt->max);
        if (bit_num == pdt->max)
                return -ENOMEM;

        /* Found unused PD */
        clear_bit(bit_num, pdt->tbl);
        pd->id = bit_num;
        return 0;
}

int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
                          struct bnxt_qplib_pd_tbl *pdt,
                          struct bnxt_qplib_pd *pd)
{
        if (test_and_set_bit(pd->id, pdt->tbl)) {
                dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d",
                         pd->id);
                return -EINVAL;
        }
        pd->id = 0;
        return 0;
}

static void bnxt_qplib_free_pd_tbl(struct bnxt_qplib_pd_tbl *pdt)
{
        kfree(pdt->tbl);
        pdt->tbl = NULL;
        pdt->max = 0;
}

static int bnxt_qplib_alloc_pd_tbl(struct bnxt_qplib_res *res,
                                   struct bnxt_qplib_pd_tbl *pdt,
                                   u32 max)
{
        u32 bytes;

        bytes = max >> 3;
        if (!bytes)
                bytes = 1;
        pdt->tbl = kmalloc(bytes, GFP_KERNEL);
        if (!pdt->tbl)
                return -ENOMEM;

        pdt->max = max;
        memset((u8 *)pdt->tbl, 0xFF, bytes);

        return 0;
}

/* DPIs */
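/*
 * DPIs use the same inverted bitmap convention as PDs.  Each DPI describes
 * one doorbell page: dbr is the ioremapped kernel address and umdbr the
 * corresponding unmapped BAR address, both at bit_num * PAGE_SIZE into the
 * doorbell region.
 */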
int bnxt_qplib_alloc_dpi(struct bnxt_qplib_dpi_tbl *dpit,
                         struct bnxt_qplib_dpi     *dpi,
                         void                      *app)
{
        u32 bit_num;

        bit_num = find_first_bit(dpit->tbl, dpit->max);
        if (bit_num == dpit->max)
                return -ENOMEM;

        /* Found unused DPI */
        clear_bit(bit_num, dpit->tbl);
        dpit->app_tbl[bit_num] = app;

        dpi->dpi = bit_num;
        dpi->dbr = dpit->dbr_bar_reg_iomem + (bit_num * PAGE_SIZE);
        dpi->umdbr = dpit->unmapped_dbr + (bit_num * PAGE_SIZE);

        return 0;
}

int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
                           struct bnxt_qplib_dpi_tbl *dpit,
                           struct bnxt_qplib_dpi     *dpi)
{
        if (dpi->dpi >= dpit->max) {
                dev_warn(&res->pdev->dev, "Invalid DPI? dpi = %d", dpi->dpi);
                return -EINVAL;
        }
        if (test_and_set_bit(dpi->dpi, dpit->tbl)) {
                dev_warn(&res->pdev->dev, "Freeing an unused DPI? dpi = %d",
                         dpi->dpi);
                return -EINVAL;
        }
        if (dpit->app_tbl)
                dpit->app_tbl[dpi->dpi] = NULL;
        memset(dpi, 0, sizeof(*dpi));

        return 0;
}

static void bnxt_qplib_free_dpi_tbl(struct bnxt_qplib_res     *res,
                                    struct bnxt_qplib_dpi_tbl *dpit)
{
        kfree(dpit->tbl);
        kfree(dpit->app_tbl);
        if (dpit->dbr_bar_reg_iomem)
                pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
        memset(dpit, 0, sizeof(*dpit));
}

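/*
 * Map the doorbell BAR beyond dbr_offset and size the DPI table at one
 * entry per page of the mapped region.
 */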
static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res     *res,
                                    struct bnxt_qplib_dpi_tbl *dpit,
                                    u32                       dbr_offset)
{
        u32 dbr_bar_reg = RCFW_DBR_PCI_BAR_REGION;
        resource_size_t bar_reg_base;
        u32 dbr_len, bytes;

        if (dpit->dbr_bar_reg_iomem) {
                dev_err(&res->pdev->dev,
                        "QPLIB: DBR BAR region %d already mapped", dbr_bar_reg);
                return -EALREADY;
        }

        bar_reg_base = pci_resource_start(res->pdev, dbr_bar_reg);
        if (!bar_reg_base) {
                dev_err(&res->pdev->dev,
                        "QPLIB: BAR region %d resc start failed", dbr_bar_reg);
                return -ENOMEM;
        }

        dbr_len = pci_resource_len(res->pdev, dbr_bar_reg) - dbr_offset;
        if (!dbr_len || ((dbr_len & (PAGE_SIZE - 1)) != 0)) {
                dev_err(&res->pdev->dev, "QPLIB: Invalid DBR length %d",
                        dbr_len);
                return -ENOMEM;
        }

        dpit->dbr_bar_reg_iomem = ioremap_nocache(bar_reg_base + dbr_offset,
                                                  dbr_len);
        if (!dpit->dbr_bar_reg_iomem) {
                dev_err(&res->pdev->dev,
                        "QPLIB: FP: DBR BAR region %d mapping failed",
                        dbr_bar_reg);
                return -ENOMEM;
        }

        dpit->unmapped_dbr = bar_reg_base + dbr_offset;
        dpit->max = dbr_len / PAGE_SIZE;

        dpit->app_tbl = kcalloc(dpit->max, sizeof(void *), GFP_KERNEL);
        if (!dpit->app_tbl) {
                pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
                dev_err(&res->pdev->dev,
                        "QPLIB: DPI app tbl allocation failed");
                return -ENOMEM;
        }

        bytes = dpit->max >> 3;
        if (!bytes)
                bytes = 1;

        dpit->tbl = kmalloc(bytes, GFP_KERNEL);
        if (!dpit->tbl) {
                pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
                kfree(dpit->app_tbl);
                dpit->app_tbl = NULL;
                dev_err(&res->pdev->dev,
                        "QPLIB: DPI tbl allocation failed for size = %d",
                        bytes);
                return -ENOMEM;
        }

        memset((u8 *)dpit->tbl, 0xFF, bytes);

        return 0;
}

/* PKEYs */
static void bnxt_qplib_cleanup_pkey_tbl(struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
        memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);
        pkey_tbl->active = 0;
}

static void bnxt_qplib_init_pkey_tbl(struct bnxt_qplib_res *res,
                                     struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
        u16 pkey = 0xFFFF;

        memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);

        /* pkey default = 0xFFFF */
        bnxt_qplib_add_pkey(res, pkey_tbl, &pkey, false);
}

/* Stats */
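/*
 * The stats context is a single coherent DMA buffer of
 * sizeof(struct ctx_hw_stats); fw_id is reset to -1, presumably meaning no
 * firmware-assigned counter id has been obtained yet.
 */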
static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
                                      struct bnxt_qplib_stats *stats)
{
        if (stats->dma) {
                dma_free_coherent(&pdev->dev, stats->size,
                                  stats->dma, stats->dma_map);
        }
        memset(stats, 0, sizeof(*stats));
        stats->fw_id = -1;
}

static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
                                      struct bnxt_qplib_stats *stats)
{
        memset(stats, 0, sizeof(*stats));
        stats->fw_id = -1;
        stats->size = sizeof(struct ctx_hw_stats);
        stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
                                        &stats->dma_map, GFP_KERNEL);
        if (!stats->dma) {
                dev_err(&pdev->dev, "QPLIB: Stats DMA allocation failed");
                return -ENOMEM;
        }
        return 0;
}

void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res)
{
        bnxt_qplib_cleanup_pkey_tbl(&res->pkey_tbl);
        bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl);
}

int bnxt_qplib_init_res(struct bnxt_qplib_res *res)
{
        bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev);
        bnxt_qplib_init_pkey_tbl(res, &res->pkey_tbl);

        return 0;
}

void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
{
        bnxt_qplib_free_pkey_tbl(res, &res->pkey_tbl);
        bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl);
        bnxt_qplib_free_pd_tbl(&res->pd_tbl);
        bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);

        res->netdev = NULL;
        res->pdev = NULL;
}

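/*
 * Allocate the per-device resource tables (SGID, PKEY, PD and DPI) in that
 * order; any failure unwinds everything already allocated via
 * bnxt_qplib_free_res().
 */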
int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
                         struct net_device *netdev,
                         struct bnxt_qplib_dev_attr *dev_attr)
{
        int rc = 0;

        res->pdev = pdev;
        res->netdev = netdev;

        rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid);
        if (rc)
                goto fail;

        rc = bnxt_qplib_alloc_pkey_tbl(res, &res->pkey_tbl, dev_attr->max_pkey);
        if (rc)
                goto fail;

        rc = bnxt_qplib_alloc_pd_tbl(res, &res->pd_tbl, dev_attr->max_pd);
        if (rc)
                goto fail;

        rc = bnxt_qplib_alloc_dpi_tbl(res, &res->dpi_tbl, dev_attr->l2_db_size);
        if (rc)
                goto fail;

        return 0;
fail:
        bnxt_qplib_free_res(res);
        return rc;
}