/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
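/*
 * An MPT key is derived from its table index: the low 24 bits of the index
 * are shifted up by 8 bits and the top byte wraps around into the low byte
 * of the key, so key_to_hw_index() below is the exact inverse of
 * hw_index_to_key().
 */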
static u32 hw_index_to_key(int ind)
{
        return ((u32)ind >> 24) | ((u32)ind << 8);
}

unsigned long key_to_hw_index(u32 key)
{
        return (key << 24) | (key >> 8);
}
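/*
 * Mailbox command helpers: CREATE_MPT makes the MPT entry prepared in
 * @mailbox live in hardware, DESTROY_MPT invalidates the entry at
 * @mpt_index (the mailbox argument is optional for destroy).
 */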
static int hns_roce_hw_create_mpt(struct hns_roce_dev *hr_dev,
                                  struct hns_roce_cmd_mailbox *mailbox,
                                  unsigned long mpt_index)
{
        return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, mpt_index, 0,
                                 HNS_ROCE_CMD_CREATE_MPT,
                                 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

int hns_roce_hw_destroy_mpt(struct hns_roce_dev *hr_dev,
                            struct hns_roce_cmd_mailbox *mailbox,
                            unsigned long mpt_index)
{
        return hns_roce_cmd_mbox(hr_dev, 0, mailbox ? mailbox->dma : 0,
                                 mpt_index, !mailbox, HNS_ROCE_CMD_DESTROY_MPT,
                                 HNS_ROCE_CMD_TIMEOUT_MSECS);
}
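/*
 * alloc_mr_key()/free_mr_key() manage the MPT index behind an MR: an index
 * is taken from the mtpt IDA, converted to the wire-visible key, and the
 * matching MTPT HEM table entry is grabbed; free_mr_key() undoes both steps.
 */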
static int alloc_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
{
        struct hns_roce_ida *mtpt_ida = &hr_dev->mr_table.mtpt_ida;
        struct ib_device *ibdev = &hr_dev->ib_dev;
        int err;
        int id;

        /* Allocate a key for mr from mr_table */
        id = ida_alloc_range(&mtpt_ida->ida, mtpt_ida->min, mtpt_ida->max,
                             GFP_KERNEL);
        if (id < 0) {
                ibdev_err(ibdev, "failed to alloc id for MR key, id(%d)\n", id);
                return -ENOMEM;
        }

        mr->key = hw_index_to_key(id); /* MR key */

        err = hns_roce_table_get(hr_dev, &hr_dev->mr_table.mtpt_table,
                                 (unsigned long)id);
        if (err) {
                ibdev_err(ibdev, "failed to alloc mtpt, ret = %d.\n", err);
                goto err_free_bitmap;
        }

        return 0;
err_free_bitmap:
        ida_free(&mtpt_ida->ida, id);
        return err;
}
static void free_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
{
        unsigned long obj = key_to_hw_index(mr->key);

        hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table, obj);
        ida_free(&hr_dev->mr_table.mtpt_ida.ida, (int)obj);
}
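/*
 * The PBL (physical buffer list) of an MR is backed by an MTR. For a fast
 * register MR (MR_TYPE_FRMR) only the MTT is allocated here (mtt_only); the
 * page addresses are filled in later by hns_roce_map_mr_sg().
 */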
static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
                        struct ib_udata *udata, u64 start)
{
        struct ib_device *ibdev = &hr_dev->ib_dev;
        bool is_fast = mr->type == MR_TYPE_FRMR;
        struct hns_roce_buf_attr buf_attr = {};
        int err;

        mr->pbl_hop_num = is_fast ? 1 : hr_dev->caps.pbl_hop_num;
        buf_attr.page_shift = is_fast ? PAGE_SHIFT :
                              hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT;
        buf_attr.region[0].size = mr->size;
        buf_attr.region[0].hopnum = mr->pbl_hop_num;
        buf_attr.region_count = 1;
        buf_attr.user_access = mr->access;
        /* a fast MR's buffer is allocated before mapping, not at creation */
        buf_attr.mtt_only = is_fast;

        err = hns_roce_mtr_create(hr_dev, &mr->pbl_mtr, &buf_attr,
                                  hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT,
                                  udata, start);
        if (err)
                ibdev_err(ibdev, "failed to alloc pbl mtr, ret = %d.\n", err);
        else
                mr->npages = mr->pbl_mtr.hem_cfg.buf_pg_count;

        return err;
}
static void free_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
{
        hns_roce_mtr_destroy(hr_dev, &mr->pbl_mtr);
}
static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
                             struct hns_roce_mr *mr)
{
        struct ib_device *ibdev = &hr_dev->ib_dev;
        int ret;

        if (mr->enabled) {
                ret = hns_roce_hw_destroy_mpt(hr_dev, NULL,
                                              key_to_hw_index(mr->key) &
                                              (hr_dev->caps.num_mtpts - 1));
                if (ret)
                        ibdev_warn(ibdev, "failed to destroy mpt, ret = %d.\n",
                                   ret);
        }

        free_mr_pbl(hr_dev, mr);
        free_mr_key(hr_dev, mr);
}
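/*
 * Write the MPT context of @mr into a command mailbox and issue CREATE_MPT
 * so the hardware starts accepting the key. FRMRs use the dedicated
 * frmr_write_mtpt() callback because their PBL is not populated yet.
 */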
static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
                              struct hns_roce_mr *mr)
{
        unsigned long mtpt_idx = key_to_hw_index(mr->key);
        struct hns_roce_cmd_mailbox *mailbox;
        struct device *dev = hr_dev->dev;
        int ret;

        /* Allocate mailbox memory */
        mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
        if (IS_ERR(mailbox)) {
                ret = PTR_ERR(mailbox);
                return ret;
        }

        if (mr->type != MR_TYPE_FRMR)
                ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr,
                                             mtpt_idx);
        else
                ret = hr_dev->hw->frmr_write_mtpt(hr_dev, mailbox->buf, mr);
        if (ret) {
                dev_err(dev, "failed to write mtpt, ret = %d.\n", ret);
                goto err_page;
        }

        ret = hns_roce_hw_create_mpt(hr_dev, mailbox,
                                     mtpt_idx & (hr_dev->caps.num_mtpts - 1));
        if (ret) {
                dev_err(dev, "failed to create mpt, ret = %d.\n", ret);
                goto err_page;
        }

        mr->enabled = 1;

err_page:
        hns_roce_free_cmd_mailbox(hr_dev, mailbox);

        return ret;
}
void hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_ida *mtpt_ida = &hr_dev->mr_table.mtpt_ida;

        ida_init(&mtpt_ida->ida);
        mtpt_ida->max = hr_dev->caps.num_mtpts - 1;
        mtpt_ida->min = hr_dev->caps.reserved_mrws;
}
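/*
 * A DMA MR covers the whole address space, so it only needs a key and an MPT
 * entry; no PBL is allocated. The hem_list is still initialized so that the
 * common free path can run safely on it.
 */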
struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
        struct hns_roce_mr *mr;
        int ret;

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        mr->type = MR_TYPE_DMA;
        mr->pd = to_hr_pd(pd)->pdn;
        mr->access = acc;

        /* Allocate memory region key */
        hns_roce_hem_list_init(&mr->pbl_mtr.hem_list);
        ret = alloc_mr_key(hr_dev, mr);
        if (ret)
                goto err_free;

        ret = hns_roce_mr_enable(hr_dev, mr);
        if (ret)
                goto err_mr;

        mr->ibmr.rkey = mr->ibmr.lkey = mr->key;

        return &mr->ibmr;

err_mr:
        free_mr_key(hr_dev, mr);
err_free:
        kfree(mr);
        return ERR_PTR(ret);
}
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                   u64 virt_addr, int access_flags,
                                   struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
        struct hns_roce_mr *mr;
        int ret;

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        mr->iova = virt_addr;
        mr->size = length;
        mr->pd = to_hr_pd(pd)->pdn;
        mr->access = access_flags;
        mr->type = MR_TYPE_MR;

        ret = alloc_mr_key(hr_dev, mr);
        if (ret)
                goto err_alloc_mr;

        ret = alloc_mr_pbl(hr_dev, mr, udata, start);
        if (ret)
                goto err_alloc_key;

        ret = hns_roce_mr_enable(hr_dev, mr);
        if (ret)
                goto err_alloc_pbl;

        mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
        mr->ibmr.length = length;

        return &mr->ibmr;

err_alloc_pbl:
        free_mr_pbl(hr_dev, mr);
err_alloc_key:
        free_mr_key(hr_dev, mr);
err_alloc_mr:
        kfree(mr);
        return ERR_PTR(ret);
}
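/*
 * Re-registration keeps the same key: the current MPT entry is queried into a
 * mailbox, destroyed in hardware, updated in the mailbox according to @flags
 * (PD, access rights and/or translation), and then re-created.
 */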
struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start,
                                     u64 length, u64 virt_addr,
                                     int mr_access_flags, struct ib_pd *pd,
                                     struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
        struct ib_device *ib_dev = &hr_dev->ib_dev;
        struct hns_roce_mr *mr = to_hr_mr(ibmr);
        struct hns_roce_cmd_mailbox *mailbox;
        unsigned long mtpt_idx;
        int ret;

        if (!mr->enabled)
                return ERR_PTR(-EINVAL);

        mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
        if (IS_ERR(mailbox))
                return ERR_CAST(mailbox);

        mtpt_idx = key_to_hw_index(mr->key) & (hr_dev->caps.num_mtpts - 1);
        ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, mtpt_idx, 0,
                                HNS_ROCE_CMD_QUERY_MPT,
                                HNS_ROCE_CMD_TIMEOUT_MSECS);
        if (ret)
                goto free_cmd_mbox;

        ret = hns_roce_hw_destroy_mpt(hr_dev, NULL, mtpt_idx);
        if (ret)
                ibdev_warn(ib_dev, "failed to destroy MPT, ret = %d.\n", ret);

        mr->enabled = 0;
        mr->iova = virt_addr;
        mr->size = length;

        if (flags & IB_MR_REREG_PD)
                mr->pd = to_hr_pd(pd)->pdn;

        if (flags & IB_MR_REREG_ACCESS)
                mr->access = mr_access_flags;

        if (flags & IB_MR_REREG_TRANS) {
                free_mr_pbl(hr_dev, mr);
                ret = alloc_mr_pbl(hr_dev, mr, udata, start);
                if (ret) {
                        ibdev_err(ib_dev, "failed to alloc mr PBL, ret = %d.\n",
                                  ret);
                        goto free_cmd_mbox;
                }
        }

        ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, mailbox->buf);
        if (ret) {
                ibdev_err(ib_dev, "failed to write mtpt, ret = %d.\n", ret);
                goto free_cmd_mbox;
        }

        ret = hns_roce_hw_create_mpt(hr_dev, mailbox, mtpt_idx);
        if (ret) {
                ibdev_err(ib_dev, "failed to create MPT, ret = %d.\n", ret);
                goto free_cmd_mbox;
        }

        mr->enabled = 1;

free_cmd_mbox:
        hns_roce_free_cmd_mailbox(hr_dev, mailbox);

        return ERR_PTR(ret);
}
int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
        struct hns_roce_mr *mr = to_hr_mr(ibmr);
        int ret = 0;

        if (hr_dev->hw->dereg_mr) {
                ret = hr_dev->hw->dereg_mr(hr_dev, mr, udata);
        } else {
                hns_roce_mr_free(hr_dev, mr);
                kfree(mr);
        }

        return ret;
}
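/*
 * Allocate a fast-register MR: a key plus an empty PBL sized for at most
 * @max_num_sg pages. The page addresses are attached later through
 * hns_roce_map_mr_sg() before the MR is posted with a REG_MR work request.
 */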
struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
                                u32 max_num_sg)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
        struct device *dev = hr_dev->dev;
        struct hns_roce_mr *mr;
        int ret;

        if (mr_type != IB_MR_TYPE_MEM_REG)
                return ERR_PTR(-EINVAL);

        if (max_num_sg > HNS_ROCE_FRMR_MAX_PA) {
                dev_err(dev, "max_num_sg larger than %d\n",
                        HNS_ROCE_FRMR_MAX_PA);
                return ERR_PTR(-EINVAL);
        }

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        mr->type = MR_TYPE_FRMR;
        mr->pd = to_hr_pd(pd)->pdn;
        mr->size = max_num_sg * (1 << PAGE_SHIFT);

        /* Allocate memory region key */
        ret = alloc_mr_key(hr_dev, mr);
        if (ret)
                goto err_free;

        ret = alloc_mr_pbl(hr_dev, mr, NULL, 0);
        if (ret)
                goto err_key;

        ret = hns_roce_mr_enable(hr_dev, mr);
        if (ret)
                goto err_pbl;

        mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
        mr->ibmr.length = mr->size;

        return &mr->ibmr;

err_pbl:
        free_mr_pbl(hr_dev, mr);
err_key:
        free_mr_key(hr_dev, mr);
err_free:
        kfree(mr);
        return ERR_PTR(ret);
}
static int hns_roce_set_page(struct ib_mr *ibmr, u64 addr)
{
        struct hns_roce_mr *mr = to_hr_mr(ibmr);

        if (likely(mr->npages < mr->pbl_mtr.hem_cfg.buf_pg_count)) {
                mr->page_list[mr->npages++] = addr;
                return 0;
        }

        return -ENOBUFS;
}
int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
                       unsigned int *sg_offset)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
        struct ib_device *ibdev = &hr_dev->ib_dev;
        struct hns_roce_mr *mr = to_hr_mr(ibmr);
        struct hns_roce_mtr *mtr = &mr->pbl_mtr;
        int ret = 0;

        mr->npages = 0;
        mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count,
                                 sizeof(dma_addr_t), GFP_KERNEL);
        if (!mr->page_list)
                return ret;

        ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
        if (ret < 1) {
                ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n",
                          mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, ret);
                goto err_page_list;
        }

        mtr->hem_cfg.region[0].offset = 0;
        mtr->hem_cfg.region[0].count = mr->npages;
        mtr->hem_cfg.region[0].hopnum = mr->pbl_hop_num;
        mtr->hem_cfg.region_count = 1;
        ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages);
        if (ret) {
                ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret);
                ret = 0;
        } else {
                mr->pbl_mtr.hem_cfg.buf_pg_shift = (u32)ilog2(ibmr->page_size);
                ret = mr->npages;
        }

err_page_list:
        kvfree(mr->page_list);
        mr->page_list = NULL;

        return ret;
}
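/*
 * Memory windows reuse the MPT machinery: an MW also consumes an index from
 * the mtpt IDA and a HEM table entry, and is enabled/disabled with the same
 * CREATE_MPT/DESTROY_MPT commands as an MR.
 */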
static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
                             struct hns_roce_mw *mw)
{
        struct device *dev = hr_dev->dev;
        int ret;

        if (mw->enabled) {
                ret = hns_roce_hw_destroy_mpt(hr_dev, NULL,
                                              key_to_hw_index(mw->rkey) &
                                              (hr_dev->caps.num_mtpts - 1));
                if (ret)
                        dev_warn(dev, "MW DESTROY_MPT failed (%d)\n", ret);

                hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
                                   key_to_hw_index(mw->rkey));
        }

        ida_free(&hr_dev->mr_table.mtpt_ida.ida,
                 (int)key_to_hw_index(mw->rkey));
}
static int hns_roce_mw_enable(struct hns_roce_dev *hr_dev,
                              struct hns_roce_mw *mw)
{
        struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
        struct hns_roce_cmd_mailbox *mailbox;
        struct device *dev = hr_dev->dev;
        unsigned long mtpt_idx = key_to_hw_index(mw->rkey);
        int ret;

        /* prepare HEM entry memory */
        ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
        if (ret)
                return ret;

        mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
        if (IS_ERR(mailbox)) {
                ret = PTR_ERR(mailbox);
                goto err_table;
        }

        ret = hr_dev->hw->mw_write_mtpt(mailbox->buf, mw);
        if (ret) {
                dev_err(dev, "MW write mtpt fail!\n");
                goto err_page;
        }

        ret = hns_roce_hw_create_mpt(hr_dev, mailbox,
                                     mtpt_idx & (hr_dev->caps.num_mtpts - 1));
        if (ret) {
                dev_err(dev, "MW CREATE_MPT failed (%d)\n", ret);
                goto err_page;
        }

        mw->enabled = 1;

        hns_roce_free_cmd_mailbox(hr_dev, mailbox);

        return 0;

err_page:
        hns_roce_free_cmd_mailbox(hr_dev, mailbox);

err_table:
        hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);

        return ret;
}
int hns_roce_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
        struct hns_roce_ida *mtpt_ida = &hr_dev->mr_table.mtpt_ida;
        struct ib_device *ibdev = &hr_dev->ib_dev;
        struct hns_roce_mw *mw = to_hr_mw(ibmw);
        int ret;
        int id;

        /* Allocate a key for mw from mr_table */
        id = ida_alloc_range(&mtpt_ida->ida, mtpt_ida->min, mtpt_ida->max,
                             GFP_KERNEL);
        if (id < 0) {
                ibdev_err(ibdev, "failed to alloc id for MW key, id(%d)\n", id);
                return -ENOMEM;
        }

        mw->rkey = hw_index_to_key(id);

        ibmw->rkey = mw->rkey;
        mw->pdn = to_hr_pd(ibmw->pd)->pdn;
        mw->pbl_hop_num = hr_dev->caps.pbl_hop_num;
        mw->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
        mw->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;

        ret = hns_roce_mw_enable(hr_dev, mw);
        if (ret)
                goto err_mw;

        return 0;

err_mw:
        hns_roce_mw_free(hr_dev, mw);
        return ret;
}
int hns_roce_dealloc_mw(struct ib_mw *ibmw)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
        struct hns_roce_mw *mw = to_hr_mw(ibmw);

        hns_roce_mw_free(hr_dev, mw);

        return 0;
}
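/*
 * Everything below implements the MTR (memory translate region): a generic
 * translation of one or more buffer regions into MTT entries backed by the
 * HEM list, which the driver uses for MR PBLs and other hardware-accessed
 * buffers.
 */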
static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
                          struct hns_roce_buf_region *region, dma_addr_t *pages,
                          int max_count)
{
        int count, npage;
        int offset, end;
        __le64 *mtts;
        u64 addr;
        int i;

        offset = region->offset;
        end = offset + region->count;
        npage = 0;
        while (offset < end && npage < max_count) {
                count = 0;
                mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
                                                  offset, &count, NULL);
                if (!mtts)
                        return -ENOBUFS;

                for (i = 0; i < count && npage < max_count; i++) {
                        if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
                                addr = to_hr_hw_page_addr(pages[npage]);
                        else
                                addr = pages[npage];

                        mtts[i] = cpu_to_le64(addr);
                        npage++;
                }
                offset += count;
        }

        return npage;
}
static inline bool mtr_has_mtt(struct hns_roce_buf_attr *attr)
{
        unsigned int i;

        for (i = 0; i < attr->region_count; i++)
                if (attr->region[i].hopnum != HNS_ROCE_HOP_NUM_0 &&
                    attr->region[i].hopnum > 0)
                        return true;

        /* Because an mtr has only one root base address, a hopnum of 0 means
         * the root base address equals the first buffer address, so all the
         * allocated memory must lie in a contiguous space accessed in direct
         * mode.
         */
        return false;
}

static inline size_t mtr_bufs_size(struct hns_roce_buf_attr *attr)
{
        size_t size = 0;
        int i;

        for (i = 0; i < attr->region_count; i++)
                size += attr->region[i].size;

        return size;
}

/*
 * Check whether the given pages occupy a contiguous address space.
 * Returns 0 on success, or the index of the first page that breaks the run.
 */
static inline int mtr_check_direct_pages(dma_addr_t *pages, int page_count,
                                         unsigned int page_shift)
{
        size_t page_size = 1 << page_shift;
        int i;

        for (i = 1; i < page_count; i++)
                if (pages[i] - pages[i - 1] != page_size)
                        return i;

        return 0;
}

static void mtr_free_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
{
        /* release user buffers */
        if (mtr->umem) {
                ib_umem_release(mtr->umem);
                mtr->umem = NULL;
        }

        /* release kernel buffers */
        if (mtr->kmem) {
                hns_roce_buf_free(hr_dev, mtr->kmem);
                mtr->kmem = NULL;
        }
}
static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
                          struct hns_roce_buf_attr *buf_attr,
                          struct ib_udata *udata, unsigned long user_addr)
{
        struct ib_device *ibdev = &hr_dev->ib_dev;
        size_t total_size;

        total_size = mtr_bufs_size(buf_attr);

        if (udata) {
                mtr->kmem = NULL;
                mtr->umem = ib_umem_get(ibdev, user_addr, total_size,
                                        buf_attr->user_access);
                if (IS_ERR_OR_NULL(mtr->umem)) {
                        ibdev_err(ibdev, "failed to get umem, ret = %ld.\n",
                                  PTR_ERR(mtr->umem));
                        return -ENOMEM;
                }
        } else {
                mtr->umem = NULL;
                mtr->kmem = hns_roce_buf_alloc(hr_dev, total_size,
                                               buf_attr->page_shift,
                                               mtr->hem_cfg.is_direct ?
                                               HNS_ROCE_BUF_DIRECT : 0);
                if (IS_ERR(mtr->kmem)) {
                        ibdev_err(ibdev, "failed to alloc kmem, ret = %ld.\n",
                                  PTR_ERR(mtr->kmem));
                        return PTR_ERR(mtr->kmem);
                }
        }

        return 0;
}
static int mtr_map_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
                        int page_count, unsigned int page_shift)
{
        struct ib_device *ibdev = &hr_dev->ib_dev;
        dma_addr_t *pages;
        int npage;
        int ret;

        /* alloc a tmp array to store buffer's dma address */
        pages = kvcalloc(page_count, sizeof(dma_addr_t), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        if (mtr->umem)
                npage = hns_roce_get_umem_bufs(hr_dev, pages, page_count,
                                               mtr->umem, page_shift);
        else
                npage = hns_roce_get_kmem_bufs(hr_dev, pages, page_count,
                                               mtr->kmem, page_shift);

        if (npage != page_count) {
                ibdev_err(ibdev, "failed to get mtr page %d != %d.\n", npage,
                          page_count);
                ret = -ENOBUFS;
                goto err_alloc_list;
        }

        if (mtr->hem_cfg.is_direct && npage > 1) {
                ret = mtr_check_direct_pages(pages, npage, page_shift);
                if (ret) {
                        ibdev_err(ibdev, "failed to check %s page: %d / %d.\n",
                                  mtr->umem ? "umtr" : "kmtr", ret, npage);
                        ret = -ENOBUFS;
                        goto err_alloc_list;
                }
        }

        ret = hns_roce_mtr_map(hr_dev, mtr, pages, page_count);
        if (ret)
                ibdev_err(ibdev, "failed to map mtr pages, ret = %d.\n", ret);

err_alloc_list:
        kvfree(pages);

        return ret;
}
int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
                     dma_addr_t *pages, unsigned int page_cnt)
{
        struct ib_device *ibdev = &hr_dev->ib_dev;
        struct hns_roce_buf_region *r;
        unsigned int i, mapped_cnt;
        int ret = 0;

        /*
         * Only use the first page address as root ba when hopnum is 0, this
         * is because the addresses of all pages are consecutive in this case.
         */
        if (mtr->hem_cfg.is_direct) {
                mtr->hem_cfg.root_ba = pages[0];
                return 0;
        }

        for (i = 0, mapped_cnt = 0; i < mtr->hem_cfg.region_count &&
             mapped_cnt < page_cnt; i++) {
                r = &mtr->hem_cfg.region[i];
                /* if hopnum is 0, no need to map pages in this region */
                if (!r->hopnum) {
                        mapped_cnt += r->count;
                        continue;
                }

                if (r->offset + r->count > page_cnt) {
                        ret = -EINVAL;
                        ibdev_err(ibdev,
                                  "failed to check mtr%u count %u + %u > %u.\n",
                                  i, r->offset, r->count, page_cnt);
                        return ret;
                }

                ret = mtr_map_region(hr_dev, mtr, r, &pages[r->offset],
                                     page_cnt - mapped_cnt);
                if (ret < 0) {
                        ibdev_err(ibdev,
                                  "failed to map mtr%u offset %u, ret = %d.\n",
                                  i, r->offset, ret);
                        return ret;
                }
                mapped_cnt += ret;
                ret = 0;
        }

        if (mapped_cnt < page_cnt) {
                ret = -ENOBUFS;
                ibdev_err(ibdev, "failed to map mtr pages count: %u < %u.\n",
                          mapped_cnt, page_cnt);
        }

        return ret;
}
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
                      int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr)
{
        struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
        int mtt_count, left;
        int start_index;
        int total = 0;
        __le64 *mtts;
        u32 npage;
        u64 addr;

        if (!mtt_buf || mtt_max < 1)
                goto done;

        /* no mtt memory in direct mode, so just return the buffer address */
        if (cfg->is_direct) {
                start_index = offset >> HNS_HW_PAGE_SHIFT;
                for (mtt_count = 0; mtt_count < cfg->region_count &&
                     total < mtt_max; mtt_count++) {
                        npage = cfg->region[mtt_count].offset;
                        if (npage < start_index)
                                continue;

                        addr = cfg->root_ba + (npage << HNS_HW_PAGE_SHIFT);
                        if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
                                mtt_buf[total] = to_hr_hw_page_addr(addr);
                        else
                                mtt_buf[total] = addr;

                        total++;
                }

                goto done;
        }

        start_index = offset >> cfg->buf_pg_shift;
        left = mtt_max;
        while (left > 0) {
                mtt_count = 0;
                mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
                                                  start_index + total,
                                                  &mtt_count, NULL);
                if (!mtts || !mtt_count)
                        goto done;

                npage = min(mtt_count, left);
                left -= npage;
                for (mtt_count = 0; mtt_count < npage; mtt_count++)
                        mtt_buf[total++] = le64_to_cpu(mtts[mtt_count]);
        }

done:
        if (base_addr)
                *base_addr = cfg->root_ba;

        return total;
}
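/*
 * Build the HEM config for an MTR from the caller's buffer attributes: in
 * direct (0-hop) mode the whole buffer is described by a single root BA and
 * an enlarged page size, otherwise every region is converted into a page
 * offset/count pair that is later loaded into the MTT.
 */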
static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev,
                            struct hns_roce_buf_attr *attr,
                            struct hns_roce_hem_cfg *cfg,
                            unsigned int *buf_page_shift, int unaligned_size)
{
        struct hns_roce_buf_region *r;
        int first_region_padding;
        int page_cnt, region_cnt;
        unsigned int page_shift;
        size_t buf_size;

        /* If mtt is disabled, all pages must be within a contiguous range */
        cfg->is_direct = !mtr_has_mtt(attr);
        buf_size = mtr_bufs_size(attr);
        if (cfg->is_direct) {
                /* When the HEM buffer uses 0-level addressing, the page size
                 * is equal to the whole buffer size; the buffer is split into
                 * small pages only to check whether adjacent units lie in a
                 * contiguous space, and their size is fixed to 4K as required
                 * by the hns ROCEE.
                 */
                page_shift = HNS_HW_PAGE_SHIFT;

                /* The ROCEE requires the page size to be 4K * 2 ^ N. */
                cfg->buf_pg_count = 1;
                cfg->buf_pg_shift = HNS_HW_PAGE_SHIFT +
                        order_base_2(DIV_ROUND_UP(buf_size, HNS_HW_PAGE_SIZE));
                first_region_padding = 0;
        } else {
                page_shift = attr->page_shift;
                cfg->buf_pg_count = DIV_ROUND_UP(buf_size + unaligned_size,
                                                 1 << page_shift);
                cfg->buf_pg_shift = page_shift;
                first_region_padding = unaligned_size;
        }

        /* Convert the buffer size into a page index and page count for each
         * region, and append the buffer's offset to the first region.
         */
        for (page_cnt = 0, region_cnt = 0; region_cnt < attr->region_count &&
             region_cnt < ARRAY_SIZE(cfg->region); region_cnt++) {
                r = &cfg->region[region_cnt];
                r->offset = page_cnt;
                buf_size = hr_hw_page_align(attr->region[region_cnt].size +
                                            first_region_padding);
                r->count = DIV_ROUND_UP(buf_size, 1 << page_shift);
                first_region_padding = 0;
                page_cnt += r->count;
                r->hopnum = to_hr_hem_hopnum(attr->region[region_cnt].hopnum,
                                             r->count);
        }

        cfg->region_count = region_cnt;
        *buf_page_shift = page_shift;

        return page_cnt;
}
static int mtr_alloc_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
                         unsigned int ba_page_shift)
{
        struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
        int ret;

        hns_roce_hem_list_init(&mtr->hem_list);
        if (!cfg->is_direct) {
                ret = hns_roce_hem_list_request(hr_dev, &mtr->hem_list,
                                                cfg->region, cfg->region_count,
                                                ba_page_shift);
                if (ret)
                        return ret;
                cfg->root_ba = mtr->hem_list.root_ba;
                cfg->ba_pg_shift = ba_page_shift;
        } else {
                cfg->ba_pg_shift = cfg->buf_pg_shift;
        }

        return 0;
}

static void mtr_free_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
{
        hns_roce_hem_list_release(hr_dev, &mtr->hem_list);
}
/**
 * hns_roce_mtr_create - Create hns memory translate region.
 *
 * @hr_dev: RoCE device struct pointer
 * @mtr: memory translate region
 * @buf_attr: buffer attribute for creating mtr
 * @ba_page_shift: page shift for multi-hop base address table
 * @udata: user space context, if it's NULL, means kernel space
 * @user_addr: userspace virtual address to start at
 */
int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
                        struct hns_roce_buf_attr *buf_attr,
                        unsigned int ba_page_shift, struct ib_udata *udata,
                        unsigned long user_addr)
{
        struct ib_device *ibdev = &hr_dev->ib_dev;
        unsigned int buf_page_shift = 0;
        int buf_page_cnt;
        int ret;

        buf_page_cnt = mtr_init_buf_cfg(hr_dev, buf_attr, &mtr->hem_cfg,
                                        &buf_page_shift,
                                        udata ? user_addr & ~PAGE_MASK : 0);
        if (buf_page_cnt < 1 || buf_page_shift < HNS_HW_PAGE_SHIFT) {
                ibdev_err(ibdev, "failed to init mtr cfg, count %d shift %u.\n",
                          buf_page_cnt, buf_page_shift);
                return -EINVAL;
        }

        ret = mtr_alloc_mtt(hr_dev, mtr, ba_page_shift);
        if (ret) {
                ibdev_err(ibdev, "failed to alloc mtr mtt, ret = %d.\n", ret);
                return ret;
        }

        /* The caller has its own buffer list and invokes hns_roce_mtr_map()
         * to finish the MTT configuration.
         */
        if (buf_attr->mtt_only) {
                mtr->umem = NULL;
                mtr->kmem = NULL;
                return 0;
        }

        ret = mtr_alloc_bufs(hr_dev, mtr, buf_attr, udata, user_addr);
        if (ret) {
                ibdev_err(ibdev, "failed to alloc mtr bufs, ret = %d.\n", ret);
                goto err_alloc_mtt;
        }

        /* Write the buffer's dma address to the MTT */
        ret = mtr_map_bufs(hr_dev, mtr, buf_page_cnt, buf_page_shift);
        if (ret)
                ibdev_err(ibdev, "failed to map mtr bufs, ret = %d.\n", ret);
        else
                return 0;

        mtr_free_bufs(hr_dev, mtr);
err_alloc_mtt:
        mtr_free_mtt(hr_dev, mtr);
        return ret;
}
void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
{
        /* release multi-hop addressing resource */
        hns_roce_hem_list_release(hr_dev, &mtr->hem_list);

        /* free buffers */
        mtr_free_bufs(hr_dev, mtr);
}