/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <rdma/ib_umem.h>
34 #include <rdma/ib_umem_odp.h>
35 #include <linux/kernel.h>
40 #include <linux/mlx5/eq.h>
/* Contains the details of a pagefault. */
struct mlx5_pagefault {
	u32			bytes_committed;
	u32			token;
	u8			event_subtype;
	u8			type;
	union {
		/* Initiator or send message responder pagefault details. */
		struct {
			/* Received packet size, only valid for responders. */
			u32	packet_size;
			/* Number of resource holding WQE, depends on type. */
			u32	wq_num;
			/* WQE index. Refers to either the send queue or
			 * receive queue, according to event_subtype.
			 */
			u16	wqe_index;
		} wqe;
		/* RDMA responder pagefault details */
		struct {
			u32	r_key;
			/* Received packet size, minimal size page fault
			 * resolution required for forward progress.
			 */
			u32	packet_size;
			u32	rdma_op_len;
			u64	rdma_va;
		} rdma;
	};

	struct mlx5_ib_pf_eq	*eq;
	struct work_struct	work;
};
80 #define MAX_PREFETCH_LEN (4*1024*1024U)
/* Timeout in ms to wait for an active mmu notifier to complete when handling
 * a pagefault.
 */
#define MMU_NOTIFIER_TIMEOUT 1000
86 #define MLX5_IMR_MTT_BITS (30 - PAGE_SHIFT)
87 #define MLX5_IMR_MTT_SHIFT (MLX5_IMR_MTT_BITS + PAGE_SHIFT)
88 #define MLX5_IMR_MTT_ENTRIES BIT_ULL(MLX5_IMR_MTT_BITS)
89 #define MLX5_IMR_MTT_SIZE BIT_ULL(MLX5_IMR_MTT_SHIFT)
90 #define MLX5_IMR_MTT_MASK (~(MLX5_IMR_MTT_SIZE - 1))
92 #define MLX5_KSM_PAGE_SHIFT MLX5_IMR_MTT_SHIFT
94 static u64 mlx5_imr_ksm_entries;
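
/*
 * Return true if the ODP umem leaf still belongs to the given implicit
 * MR parent and has not been marked as dying.
 */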
96 static int check_parent(struct ib_umem_odp *odp,
97 struct mlx5_ib_mr *parent)
99 struct mlx5_ib_mr *mr = odp->private;
101 return mr && mr->parent == parent && !odp->dying;
104 static struct ib_ucontext_per_mm *mr_to_per_mm(struct mlx5_ib_mr *mr)
106 if (WARN_ON(!mr || !is_odp_mr(mr)))
109 return to_ib_umem_odp(mr->umem)->per_mm;
112 static struct ib_umem_odp *odp_next(struct ib_umem_odp *odp)
114 struct mlx5_ib_mr *mr = odp->private, *parent = mr->parent;
115 struct ib_ucontext_per_mm *per_mm = odp->per_mm;
118 down_read(&per_mm->umem_rwsem);
120 rb = rb_next(&odp->interval_tree.rb);
123 odp = rb_entry(rb, struct ib_umem_odp, interval_tree.rb);
124 if (check_parent(odp, parent))
130 up_read(&per_mm->umem_rwsem);
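
/*
 * Find the first ODP umem of the given implicit MR parent that overlaps
 * [start, start + length), by searching the per-mm interval tree.
 */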
134 static struct ib_umem_odp *odp_lookup(u64 start, u64 length,
135 struct mlx5_ib_mr *parent)
137 struct ib_ucontext_per_mm *per_mm = mr_to_per_mm(parent);
138 struct ib_umem_odp *odp;
141 down_read(&per_mm->umem_rwsem);
142 odp = rbt_ib_umem_lookup(&per_mm->umem_tree, start, length);
147 if (check_parent(odp, parent))
149 rb = rb_next(&odp->interval_tree.rb);
152 odp = rb_entry(rb, struct ib_umem_odp, interval_tree.rb);
153 if (ib_umem_start(odp) > start + length)
159 up_read(&per_mm->umem_rwsem);
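
/*
 * Populate the KLM entries of an implicit MR. Entries backed by a child
 * MTT MR take that child's lkey; unpopulated entries (and every entry
 * when MLX5_IB_UPD_XLT_ZAP is set) point at the device's null mkey.
 */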
163 void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
164 size_t nentries, struct mlx5_ib_mr *mr, int flags)
166 struct ib_pd *pd = mr->ibmr.pd;
167 struct mlx5_ib_dev *dev = to_mdev(pd->device);
168 struct ib_umem_odp *odp;
172 if (flags & MLX5_IB_UPD_XLT_ZAP) {
173 for (i = 0; i < nentries; i++, pklm++) {
174 pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
175 pklm->key = cpu_to_be32(dev->null_mkey);
181 odp = odp_lookup(offset * MLX5_IMR_MTT_SIZE,
182 nentries * MLX5_IMR_MTT_SIZE, mr);
184 for (i = 0; i < nentries; i++, pklm++) {
185 pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
186 va = (offset + i) * MLX5_IMR_MTT_SIZE;
187 if (odp && odp->umem.address == va) {
188 struct mlx5_ib_mr *mtt = odp->private;
190 pklm->key = cpu_to_be32(mtt->ibmr.lkey);
193 pklm->key = cpu_to_be32(dev->null_mkey);
195 mlx5_ib_dbg(dev, "[%d] va %lx key %x\n",
196 i, va, be32_to_cpu(pklm->key));
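
/*
 * Deferred teardown of an implicit MR leaf: wait for in-flight page
 * faults to drain (mr_srcu), release the umem, zap the parent's KLM
 * entry and return the child MR to the MR cache.
 */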
200 static void mr_leaf_free_action(struct work_struct *work)
202 struct ib_umem_odp *odp = container_of(work, struct ib_umem_odp, work);
203 int idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
204 struct mlx5_ib_mr *mr = odp->private, *imr = mr->parent;
207 synchronize_srcu(&mr->dev->mr_srcu);
209 ib_umem_release(&odp->umem);
211 mlx5_ib_update_xlt(imr, idx, 1, 0,
212 MLX5_IB_UPD_XLT_INDIRECT |
213 MLX5_IB_UPD_XLT_ATOMIC);
214 mlx5_mr_cache_free(mr->dev, mr);
216 if (atomic_dec_and_test(&imr->num_leaf_free))
217 wake_up(&imr->q_leaf_free);
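
/*
 * MMU notifier callback: invalidate the HW translations covering
 * [start, end) of an ODP umem. The MTTs are zapped in chunks, the pages
 * are unmapped, and an emptied implicit-MR leaf is queued for teardown.
 */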
220 void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
223 struct mlx5_ib_mr *mr;
224 const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT /
225 sizeof(struct mlx5_mtt)) - 1;
226 u64 idx = 0, blk_start_idx = 0;
231 pr_err("invalidation called on NULL umem or non-ODP umem\n");
235 mr = umem_odp->private;
237 if (!mr || !mr->ibmr.pd)
240 start = max_t(u64, ib_umem_start(umem_odp), start);
241 end = min_t(u64, ib_umem_end(umem_odp), end);
244 * Iteration one - zap the HW's MTTs. The notifiers_count ensures that
245 * while we are doing the invalidation, no page fault will attempt to
246 * overwrite the same MTTs. Concurent invalidations might race us,
247 * but they will write 0s as well, so no difference in the end result.
249 mutex_lock(&umem_odp->umem_mutex);
250 for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) {
251 idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
		/*
		 * Strive to write the MTTs in chunks, but avoid overwriting
		 * non-existing MTTs. The heuristic here can be improved to
		 * estimate the cost of another UMR vs. the cost of bigger
		 * chunk.
		 */
258 if (umem_odp->dma_list[idx] &
259 (ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT)) {
265 u64 umr_offset = idx & umr_block_mask;
267 if (in_block && umr_offset == 0) {
268 mlx5_ib_update_xlt(mr, blk_start_idx,
269 idx - blk_start_idx, 0,
270 MLX5_IB_UPD_XLT_ZAP |
271 MLX5_IB_UPD_XLT_ATOMIC);
277 mlx5_ib_update_xlt(mr, blk_start_idx,
278 idx - blk_start_idx + 1, 0,
279 MLX5_IB_UPD_XLT_ZAP |
280 MLX5_IB_UPD_XLT_ATOMIC);
281 mutex_unlock(&umem_odp->umem_mutex);
	/*
	 * We are now sure that the device will not access the
	 * memory. We can safely unmap it, and mark it as dirty if
	 * needed.
	 */
	ib_umem_odp_unmap_dma_pages(umem_odp, start, end);
	if (unlikely(!umem_odp->npages && mr->parent &&
		     !umem_odp->dying)) {
		WRITE_ONCE(umem_odp->dying, 1);
293 atomic_inc(&mr->parent->num_leaf_free);
294 schedule_work(&umem_odp->work);
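
/* Derive the ODP capabilities reported to userspace from the HCA caps. */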
298 void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
300 struct ib_odp_caps *caps = &dev->odp_caps;
302 memset(caps, 0, sizeof(*caps));
304 if (!MLX5_CAP_GEN(dev->mdev, pg))
307 caps->general_caps = IB_ODP_SUPPORT;
309 if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
310 dev->odp_max_size = U64_MAX;
312 dev->odp_max_size = BIT_ULL(MLX5_MAX_UMR_SHIFT + PAGE_SHIFT);
314 if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send))
315 caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;
317 if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.srq_receive))
318 caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
320 if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.send))
321 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND;
323 if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.receive))
324 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV;
326 if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.write))
327 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;
329 if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.read))
330 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;
332 if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.atomic))
333 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
335 if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.srq_receive))
336 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
338 if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.send))
339 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_SEND;
341 if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.receive))
342 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_RECV;
344 if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.write))
345 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_WRITE;
347 if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.read))
348 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_READ;
350 if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.atomic))
351 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
353 if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.srq_receive))
354 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
356 if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) &&
357 MLX5_CAP_GEN(dev->mdev, null_mkey) &&
358 MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
359 caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT;
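
/*
 * Issue PAGE_FAULT_RESUME so the HW resumes the faulting QP/WQ,
 * optionally completing the fault with an error.
 */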
364 static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
365 struct mlx5_pagefault *pfault,
368 int wq_num = pfault->event_subtype == MLX5_PFAULT_SUBTYPE_WQE ?
369 pfault->wqe.wq_num : pfault->token;
370 u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = { };
371 u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = { };
374 MLX5_SET(page_fault_resume_in, in, opcode, MLX5_CMD_OP_PAGE_FAULT_RESUME);
375 MLX5_SET(page_fault_resume_in, in, page_fault_type, pfault->type);
376 MLX5_SET(page_fault_resume_in, in, token, pfault->token);
377 MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
378 MLX5_SET(page_fault_resume_in, in, error, !!error);
380 err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
382 mlx5_ib_err(dev, "Failed to resolve the page fault on WQ 0x%x err %d\n",
386 static struct mlx5_ib_mr *implicit_mr_alloc(struct ib_pd *pd,
387 struct ib_umem *umem,
388 bool ksm, int access_flags)
390 struct mlx5_ib_dev *dev = to_mdev(pd->device);
391 struct mlx5_ib_mr *mr;
394 mr = mlx5_mr_cache_alloc(dev, ksm ? MLX5_IMR_KSM_CACHE_ENTRY :
395 MLX5_IMR_MTT_CACHE_ENTRY);
403 mr->access_flags = access_flags;
408 err = mlx5_ib_update_xlt(mr, 0,
409 mlx5_imr_ksm_entries,
411 MLX5_IB_UPD_XLT_INDIRECT |
412 MLX5_IB_UPD_XLT_ZAP |
413 MLX5_IB_UPD_XLT_ENABLE);
416 err = mlx5_ib_update_xlt(mr, 0,
417 MLX5_IMR_MTT_ENTRIES,
419 MLX5_IB_UPD_XLT_ZAP |
420 MLX5_IB_UPD_XLT_ENABLE |
421 MLX5_IB_UPD_XLT_ATOMIC);
427 mr->ibmr.lkey = mr->mmkey.key;
428 mr->ibmr.rkey = mr->mmkey.key;
432 mlx5_ib_dbg(dev, "key %x dev %p mr %p\n",
433 mr->mmkey.key, dev->mdev, mr);
438 mlx5_ib_err(dev, "Failed to register MKEY %d\n", err);
439 mlx5_mr_cache_free(dev, mr);
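
/*
 * Look up, or create on demand, the MTT leaf MRs of an implicit MR that
 * cover [io_virt, io_virt + bcnt), wiring any new leaves into the
 * parent's KLM table. Returns the first leaf of the range.
 */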
444 static struct ib_umem_odp *implicit_mr_get_data(struct mlx5_ib_mr *mr,
445 u64 io_virt, size_t bcnt)
447 struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.pd->device);
448 struct ib_umem_odp *odp, *result = NULL;
449 struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
450 u64 addr = io_virt & MLX5_IMR_MTT_MASK;
451 int nentries = 0, start_idx = 0, ret;
452 struct mlx5_ib_mr *mtt;
454 mutex_lock(&odp_mr->umem_mutex);
455 odp = odp_lookup(addr, 1, mr);
457 mlx5_ib_dbg(dev, "io_virt:%llx bcnt:%zx addr:%llx odp:%p\n",
458 io_virt, bcnt, addr, odp);
465 odp = ib_alloc_odp_umem(odp_mr, addr,
468 mutex_unlock(&odp_mr->umem_mutex);
469 return ERR_CAST(odp);
472 mtt = implicit_mr_alloc(mr->ibmr.pd, &odp->umem, 0,
475 mutex_unlock(&odp_mr->umem_mutex);
476 ib_umem_release(&odp->umem);
477 return ERR_CAST(mtt);
481 mtt->umem = &odp->umem;
482 mtt->mmkey.iova = addr;
484 INIT_WORK(&odp->work, mr_leaf_free_action);
487 start_idx = addr >> MLX5_IMR_MTT_SHIFT;
491 /* Return first odp if region not covered by single one */
495 addr += MLX5_IMR_MTT_SIZE;
496 if (unlikely(addr < io_virt + bcnt)) {
498 if (odp && odp->umem.address != addr)
503 if (unlikely(nentries)) {
504 ret = mlx5_ib_update_xlt(mr, start_idx, nentries, 0,
505 MLX5_IB_UPD_XLT_INDIRECT |
506 MLX5_IB_UPD_XLT_ATOMIC);
508 mlx5_ib_err(dev, "Failed to update PAS\n");
509 result = ERR_PTR(ret);
513 mutex_unlock(&odp_mr->umem_mutex);
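
/*
 * Create an implicit ODP MR: an indirect KSM mkey whose leaves are
 * created lazily by the page fault handler.
 */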
517 struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
518 struct ib_udata *udata,
521 struct mlx5_ib_mr *imr;
522 struct ib_umem *umem;
524 umem = ib_umem_get(udata, 0, 0, access_flags, 0);
526 return ERR_CAST(umem);
528 imr = implicit_mr_alloc(&pd->ibpd, umem, 1, access_flags);
530 ib_umem_release(umem);
531 return ERR_CAST(imr);
535 init_waitqueue_head(&imr->q_leaf_free);
536 atomic_set(&imr->num_leaf_free, 0);
537 atomic_set(&imr->num_pending_prefetch, 0);
542 static int mr_leaf_free(struct ib_umem_odp *umem_odp, u64 start, u64 end,
545 struct mlx5_ib_mr *mr = umem_odp->private, *imr = cookie;
547 if (mr->parent != imr)
550 ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
551 ib_umem_end(umem_odp));
556 WRITE_ONCE(umem_odp->dying, 1);
557 atomic_inc(&imr->num_leaf_free);
558 schedule_work(&umem_odp->work);
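
/*
 * Destroy an implicit MR: mark every leaf as dying, unmap its pages and
 * wait until the deferred teardown work has freed all leaves.
 */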
563 void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
565 struct ib_ucontext_per_mm *per_mm = mr_to_per_mm(imr);
567 down_read(&per_mm->umem_rwsem);
568 rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, 0, ULLONG_MAX,
569 mr_leaf_free, true, imr);
570 up_read(&per_mm->umem_rwsem);
572 wait_event(imr->q_leaf_free, !atomic_read(&imr->num_leaf_free));
575 #define MLX5_PF_FLAGS_PREFETCH BIT(0)
576 #define MLX5_PF_FLAGS_DOWNGRADE BIT(1)
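
/*
 * Resolve a page fault on a single MR: pin the faulting user pages via
 * ib_umem_odp_map_dma_pages() and push the new translations to the
 * mkey, iterating over implicit-MR leaves as needed.
 */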
577 static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
578 u64 io_virt, size_t bcnt, u32 *bytes_mapped,
581 int npages = 0, current_seq, page_shift, ret, np;
582 struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
583 bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
584 bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
586 u64 start_idx, page_mask;
587 struct ib_umem_odp *odp;
590 if (!odp_mr->page_list) {
591 odp = implicit_mr_get_data(mr, io_virt, bcnt);
601 size = min_t(size_t, bcnt, ib_umem_end(odp) - io_virt);
603 page_shift = odp->page_shift;
604 page_mask = ~(BIT(page_shift) - 1);
605 start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift;
606 access_mask = ODP_READ_ALLOWED_BIT;
608 if (prefetch && !downgrade && !mr->umem->writable) {
		/* prefetch with write-access must
		 * be supported by the MR
		 */
616 if (mr->umem->writable && !downgrade)
617 access_mask |= ODP_WRITE_ALLOWED_BIT;
619 current_seq = READ_ONCE(odp->notifiers_seq);
	/*
	 * Ensure the sequence number is valid for some time before we call
	 * gup.
	 */
626 ret = ib_umem_odp_map_dma_pages(to_ib_umem_odp(mr->umem), io_virt, size,
627 access_mask, current_seq);
634 mutex_lock(&odp->umem_mutex);
635 if (!ib_umem_mmu_notifier_retry(to_ib_umem_odp(mr->umem),
		/*
		 * No need to check whether the MTTs really belong to
		 * this MR, since ib_umem_odp_map_dma_pages already
		 * checks this.
		 */
642 ret = mlx5_ib_update_xlt(mr, start_idx, np,
643 page_shift, MLX5_IB_UPD_XLT_ATOMIC);
647 mutex_unlock(&odp->umem_mutex);
651 mlx5_ib_err(dev, "Failed to update mkey page tables\n");
656 u32 new_mappings = (np << page_shift) -
657 (io_virt - round_down(io_virt, 1 << page_shift));
658 *bytes_mapped += min_t(u32, new_mappings, size);
661 npages += np << (page_shift - PAGE_SHIFT);
664 if (unlikely(bcnt)) {
665 struct ib_umem_odp *next;
668 next = odp_next(odp);
669 if (unlikely(!next || next->umem.address != io_virt)) {
670 mlx5_ib_dbg(dev, "next implicit leaf removed at 0x%llx. got %p\n",
682 if (ret == -EAGAIN) {
683 unsigned long timeout = msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);
685 if (!wait_for_completion_timeout(&odp->notifier_completion,
689 "timeout waiting for mmu notifier. seq %d against %d. notifiers_count=%d\n",
690 current_seq, odp->notifiers_seq,
691 odp->notifiers_count);
699 struct pf_frame *next;
706 static bool mkey_is_eq(struct mlx5_core_mkey *mmkey, u32 key)
710 if (mmkey->type == MLX5_MKEY_MW)
711 return mlx5_base_mkey(mmkey->key) == mlx5_base_mkey(key);
712 return mmkey->key == key;
715 static int get_indirect_num_descs(struct mlx5_core_mkey *mmkey)
717 struct mlx5_ib_mw *mw;
718 struct mlx5_ib_devx_mr *devx_mr;
720 if (mmkey->type == MLX5_MKEY_MW) {
721 mw = container_of(mmkey, struct mlx5_ib_mw, mmkey);
725 devx_mr = container_of(mmkey, struct mlx5_ib_devx_mr,
727 return devx_mr->ndescs;
/*
 * Handle a single data segment in a page-fault WQE or RDMA region.
 *
 * Returns number of OS pages retrieved on success. The caller may continue to
 * the next data segment.
 * Can return the following error codes:
 * -EAGAIN to designate a temporary error. The caller will abort handling the
 *  page fault and resolve it.
 * -EFAULT when there's an error mapping the requested pages. The caller will
 *  abort the page fault handling.
 */
741 static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
742 struct ib_pd *pd, u32 key,
743 u64 io_virt, size_t bcnt,
744 u32 *bytes_committed,
745 u32 *bytes_mapped, u32 flags)
747 int npages = 0, srcu_key, ret, i, outlen, cur_outlen = 0, depth = 0;
748 bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
749 struct pf_frame *head = NULL, *frame;
750 struct mlx5_core_mkey *mmkey;
751 struct mlx5_ib_mr *mr;
752 struct mlx5_klm *pklm;
757 srcu_key = srcu_read_lock(&dev->mr_srcu);
759 io_virt += *bytes_committed;
760 bcnt -= *bytes_committed;
763 mmkey = xa_load(&dev->mdev->priv.mkey_table, mlx5_base_mkey(key));
764 if (!mkey_is_eq(mmkey, key)) {
765 mlx5_ib_dbg(dev, "failed to find mkey %x\n", key);
770 if (prefetch && mmkey->type != MLX5_MKEY_MR) {
771 mlx5_ib_dbg(dev, "prefetch is allowed only for MR\n");
776 switch (mmkey->type) {
778 mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
779 if (!mr->live || !mr->ibmr.pd) {
780 mlx5_ib_dbg(dev, "got dead MR\n");
786 if (!is_odp_mr(mr) ||
788 mlx5_ib_dbg(dev, "Invalid prefetch request: %s\n",
789 is_odp_mr(mr) ? "MR is not ODP" :
790 "PD is not of the MR");
796 if (!is_odp_mr(mr)) {
797 mlx5_ib_dbg(dev, "skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
800 *bytes_mapped += bcnt;
805 ret = pagefault_mr(dev, mr, io_virt, bcnt, bytes_mapped, flags);
814 case MLX5_MKEY_INDIRECT_DEVX:
815 ndescs = get_indirect_num_descs(mmkey);
817 if (depth >= MLX5_CAP_GEN(dev->mdev, max_indirection)) {
818 mlx5_ib_dbg(dev, "indirection level exceeded\n");
823 outlen = MLX5_ST_SZ_BYTES(query_mkey_out) +
824 sizeof(*pklm) * (ndescs - 2);
826 if (outlen > cur_outlen) {
828 out = kzalloc(outlen, GFP_KERNEL);
836 pklm = (struct mlx5_klm *)MLX5_ADDR_OF(query_mkey_out, out,
837 bsf0_klm0_pas_mtt0_1);
839 ret = mlx5_core_query_mkey(dev->mdev, mmkey, out, outlen);
843 offset = io_virt - MLX5_GET64(query_mkey_out, out,
844 memory_key_mkey_entry.start_addr);
846 for (i = 0; bcnt && i < ndescs; i++, pklm++) {
847 if (offset >= be32_to_cpu(pklm->bcount)) {
848 offset -= be32_to_cpu(pklm->bcount);
852 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
858 frame->key = be32_to_cpu(pklm->key);
859 frame->io_virt = be64_to_cpu(pklm->va) + offset;
860 frame->bcnt = min_t(size_t, bcnt,
861 be32_to_cpu(pklm->bcount) - offset);
862 frame->depth = depth + 1;
872 mlx5_ib_dbg(dev, "wrong mkey type %d\n", mmkey->type);
882 io_virt = frame->io_virt;
884 depth = frame->depth;
898 srcu_read_unlock(&dev->mr_srcu, srcu_key);
899 *bytes_committed = 0;
900 return ret ? ret : npages;
/*
 * Parse a series of data segments for page fault handling.
 *
 * @pfault contains page fault information.
 * @wqe points at the first data segment in the WQE.
 * @wqe_end points after the end of the WQE.
 * @bytes_mapped receives the number of bytes that the function was able to
 *               map. This allows the caller to decide intelligently whether
 *               enough memory was mapped to resolve the page fault
 *               successfully (e.g. enough for the next MTU, or the entire
 *               WQE).
 * @total_wqe_bytes receives the total data size of this WQE in bytes (minus
 *                  the committed bytes).
 *
 * Returns the number of pages loaded if positive, zero for an empty WQE, or a
 * negative error code.
 */
920 static int pagefault_data_segments(struct mlx5_ib_dev *dev,
921 struct mlx5_pagefault *pfault,
923 void *wqe_end, u32 *bytes_mapped,
924 u32 *total_wqe_bytes, bool receive_queue)
926 int ret = 0, npages = 0;
936 *total_wqe_bytes = 0;
938 while (wqe < wqe_end) {
939 struct mlx5_wqe_data_seg *dseg = wqe;
941 io_virt = be64_to_cpu(dseg->addr);
942 key = be32_to_cpu(dseg->lkey);
943 byte_count = be32_to_cpu(dseg->byte_count);
944 inline_segment = !!(byte_count & MLX5_INLINE_SEG);
945 bcnt = byte_count & ~MLX5_INLINE_SEG;
947 if (inline_segment) {
948 bcnt = bcnt & MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK;
949 wqe += ALIGN(sizeof(struct mlx5_wqe_inline_seg) + bcnt,
952 wqe += sizeof(*dseg);
955 /* receive WQE end of sg list. */
956 if (receive_queue && bcnt == 0 && key == MLX5_INVALID_LKEY &&
960 if (!inline_segment && total_wqe_bytes) {
961 *total_wqe_bytes += bcnt - min_t(size_t, bcnt,
962 pfault->bytes_committed);
965 /* A zero length data segment designates a length of 2GB. */
969 if (inline_segment || bcnt <= pfault->bytes_committed) {
970 pfault->bytes_committed -=
972 pfault->bytes_committed);
976 ret = pagefault_single_data_segment(dev, NULL, key,
978 &pfault->bytes_committed,
985 return ret < 0 ? ret : npages;
988 static const u32 mlx5_ib_odp_opcode_cap[] = {
989 [MLX5_OPCODE_SEND] = IB_ODP_SUPPORT_SEND,
990 [MLX5_OPCODE_SEND_IMM] = IB_ODP_SUPPORT_SEND,
991 [MLX5_OPCODE_SEND_INVAL] = IB_ODP_SUPPORT_SEND,
992 [MLX5_OPCODE_RDMA_WRITE] = IB_ODP_SUPPORT_WRITE,
993 [MLX5_OPCODE_RDMA_WRITE_IMM] = IB_ODP_SUPPORT_WRITE,
994 [MLX5_OPCODE_RDMA_READ] = IB_ODP_SUPPORT_READ,
995 [MLX5_OPCODE_ATOMIC_CS] = IB_ODP_SUPPORT_ATOMIC,
996 [MLX5_OPCODE_ATOMIC_FA] = IB_ODP_SUPPORT_ATOMIC,
/*
 * Parse initiator WQE. Advances the wqe pointer to point at the
 * scatter-gather list, and sets wqe_end to the end of the WQE.
 */
1003 static int mlx5_ib_mr_initiator_pfault_handler(
1004 struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault,
1005 struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length)
1007 struct mlx5_wqe_ctrl_seg *ctrl = *wqe;
1008 u16 wqe_index = pfault->wqe.wqe_index;
1010 struct mlx5_base_av *av;
1011 unsigned ds, opcode;
1013 u32 ctrl_wqe_index, ctrl_qpn;
1015 u32 qpn = qp->trans_qp.base.mqp.qpn;
1017 ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
1018 if (ds * MLX5_WQE_DS_UNITS > wqe_length) {
1019 mlx5_ib_err(dev, "Unable to read the complete WQE. ds = 0x%x, ret = 0x%x\n",
1025 mlx5_ib_err(dev, "Got WQE with zero DS. wqe_index=%x, qpn=%x\n",
1031 ctrl_wqe_index = (be32_to_cpu(ctrl->opmod_idx_opcode) &
1032 MLX5_WQE_CTRL_WQE_INDEX_MASK) >>
1033 MLX5_WQE_CTRL_WQE_INDEX_SHIFT;
1034 if (wqe_index != ctrl_wqe_index) {
1035 mlx5_ib_err(dev, "Got WQE with invalid wqe_index. wqe_index=0x%x, qpn=0x%x ctrl->wqe_index=0x%x\n",
1041 ctrl_qpn = (be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_QPN_MASK) >>
1042 MLX5_WQE_CTRL_QPN_SHIFT;
1043 if (qpn != ctrl_qpn) {
1044 mlx5_ib_err(dev, "Got WQE with incorrect QP number. wqe_index=0x%x, qpn=0x%x ctrl->qpn=0x%x\n",
1051 *wqe_end = *wqe + ds * MLX5_WQE_DS_UNITS;
1052 *wqe += sizeof(*ctrl);
1054 opcode = be32_to_cpu(ctrl->opmod_idx_opcode) &
1055 MLX5_WQE_CTRL_OPCODE_MASK;
1057 switch (qp->ibqp.qp_type) {
1058 case IB_QPT_XRC_INI:
1059 *wqe += sizeof(struct mlx5_wqe_xrc_seg);
1060 transport_caps = dev->odp_caps.per_transport_caps.xrc_odp_caps;
1063 transport_caps = dev->odp_caps.per_transport_caps.rc_odp_caps;
1066 transport_caps = dev->odp_caps.per_transport_caps.ud_odp_caps;
1069 mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport 0x%x\n",
1074 if (unlikely(opcode >= ARRAY_SIZE(mlx5_ib_odp_opcode_cap) ||
1075 !(transport_caps & mlx5_ib_odp_opcode_cap[opcode]))) {
1076 mlx5_ib_err(dev, "ODP fault on QP of an unsupported opcode 0x%x\n",
1081 if (qp->ibqp.qp_type == IB_QPT_UD) {
1083 if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV))
1084 *wqe += sizeof(struct mlx5_av);
1086 *wqe += sizeof(struct mlx5_base_av);
1090 case MLX5_OPCODE_RDMA_WRITE:
1091 case MLX5_OPCODE_RDMA_WRITE_IMM:
1092 case MLX5_OPCODE_RDMA_READ:
1093 *wqe += sizeof(struct mlx5_wqe_raddr_seg);
1095 case MLX5_OPCODE_ATOMIC_CS:
1096 case MLX5_OPCODE_ATOMIC_FA:
1097 *wqe += sizeof(struct mlx5_wqe_raddr_seg);
1098 *wqe += sizeof(struct mlx5_wqe_atomic_seg);
/*
 * Parse responder WQE and set wqe_end to the end of the WQE.
 */
1108 static int mlx5_ib_mr_responder_pfault_handler_srq(struct mlx5_ib_dev *dev,
1109 struct mlx5_ib_srq *srq,
1110 void **wqe, void **wqe_end,
1113 int wqe_size = 1 << srq->msrq.wqe_shift;
1115 if (wqe_size > wqe_length) {
1116 mlx5_ib_err(dev, "Couldn't read all of the receive WQE's content\n");
1120 *wqe_end = *wqe + wqe_size;
1121 *wqe += sizeof(struct mlx5_wqe_srq_next_seg);
1126 static int mlx5_ib_mr_responder_pfault_handler_rq(struct mlx5_ib_dev *dev,
1127 struct mlx5_ib_qp *qp,
1128 void *wqe, void **wqe_end,
1131 struct mlx5_ib_wq *wq = &qp->rq;
1132 int wqe_size = 1 << wq->wqe_shift;
1135 mlx5_ib_err(dev, "ODP fault with WQE signatures is not supported\n");
1139 if (wqe_size > wqe_length) {
1140 mlx5_ib_err(dev, "Couldn't read all of the receive WQE's content\n");
1144 switch (qp->ibqp.qp_type) {
1146 if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
1147 IB_ODP_SUPPORT_RECV))
1148 goto invalid_transport_or_opcode;
1151 invalid_transport_or_opcode:
1152 mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport. transport: 0x%x\n",
1157 *wqe_end = wqe + wqe_size;
1162 static inline struct mlx5_core_rsc_common *odp_get_rsc(struct mlx5_ib_dev *dev,
1163 u32 wq_num, int pf_type)
1165 struct mlx5_core_rsc_common *common = NULL;
1166 struct mlx5_core_srq *srq;
1169 case MLX5_WQE_PF_TYPE_RMP:
1170 srq = mlx5_cmd_get_srq(dev, wq_num);
1172 common = &srq->common;
1174 case MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE:
1175 case MLX5_WQE_PF_TYPE_RESP:
1176 case MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC:
1177 common = mlx5_core_res_hold(dev->mdev, wq_num, MLX5_RES_QP);
1186 static inline struct mlx5_ib_qp *res_to_qp(struct mlx5_core_rsc_common *res)
1188 struct mlx5_core_qp *mqp = (struct mlx5_core_qp *)res;
1190 return to_mibqp(mqp);
1193 static inline struct mlx5_ib_srq *res_to_srq(struct mlx5_core_rsc_common *res)
1195 struct mlx5_core_srq *msrq =
1196 container_of(res, struct mlx5_core_srq, common);
1198 return to_mibsrq(msrq);
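
/*
 * Handle a WQE page fault: read the faulting WQE from user memory,
 * parse its data segments and fault in the referenced pages, then
 * resume the QP/SRQ (with an error if resolution failed).
 */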
1201 static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
1202 struct mlx5_pagefault *pfault)
1204 bool sq = pfault->type & MLX5_PFAULT_REQUESTOR;
1205 u16 wqe_index = pfault->wqe.wqe_index;
1206 void *wqe = NULL, *wqe_end = NULL;
1207 u32 bytes_mapped, total_wqe_bytes;
1208 struct mlx5_core_rsc_common *res;
1209 int resume_with_error = 1;
1210 struct mlx5_ib_qp *qp;
1211 size_t bytes_copied;
1214 res = odp_get_rsc(dev, pfault->wqe.wq_num, pfault->type);
1216 mlx5_ib_dbg(dev, "wqe page fault for missing resource %d\n", pfault->wqe.wq_num);
1220 if (res->res != MLX5_RES_QP && res->res != MLX5_RES_SRQ &&
1221 res->res != MLX5_RES_XSRQ) {
1222 mlx5_ib_err(dev, "wqe page fault for unsupported type %d\n",
1224 goto resolve_page_fault;
1227 wqe = (void *)__get_free_page(GFP_KERNEL);
1229 mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n");
1230 goto resolve_page_fault;
1233 qp = (res->res == MLX5_RES_QP) ? res_to_qp(res) : NULL;
1235 ret = mlx5_ib_read_user_wqe_sq(qp, wqe_index, wqe, PAGE_SIZE,
1239 ret = mlx5_ib_mr_initiator_pfault_handler(
1240 dev, pfault, qp, &wqe, &wqe_end, bytes_copied);
1241 } else if (qp && !sq) {
1242 ret = mlx5_ib_read_user_wqe_rq(qp, wqe_index, wqe, PAGE_SIZE,
1246 ret = mlx5_ib_mr_responder_pfault_handler_rq(
1247 dev, qp, wqe, &wqe_end, bytes_copied);
1249 struct mlx5_ib_srq *srq = res_to_srq(res);
1251 ret = mlx5_ib_read_user_wqe_srq(srq, wqe_index, wqe, PAGE_SIZE,
1255 ret = mlx5_ib_mr_responder_pfault_handler_srq(
1256 dev, srq, &wqe, &wqe_end, bytes_copied);
1259 if (ret < 0 || wqe >= wqe_end)
1260 goto resolve_page_fault;
1262 ret = pagefault_data_segments(dev, pfault, wqe, wqe_end, &bytes_mapped,
1263 &total_wqe_bytes, !sq);
1267 if (ret < 0 || total_wqe_bytes > bytes_mapped)
1268 goto resolve_page_fault;
1272 resume_with_error = 0;
1278 "Failed reading a WQE following page fault, error %d, wqe_index %x, qpn %x\n",
1279 ret, wqe_index, pfault->token);
1282 mlx5_ib_page_fault_resume(dev, pfault, resume_with_error);
1283 mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, type: 0x%x\n",
1284 pfault->wqe.wq_num, resume_with_error,
1286 mlx5_core_res_put(res);
1287 free_page((unsigned long)wqe);
1290 static int pages_in_range(u64 address, u32 length)
1292 return (ALIGN(address + length, PAGE_SIZE) -
1293 (address & PAGE_MASK)) >> PAGE_SHIFT;
1296 static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
1297 struct mlx5_pagefault *pfault)
1301 u32 prefetch_len = pfault->bytes_committed;
1302 int prefetch_activated = 0;
1303 u32 rkey = pfault->rdma.r_key;
	/* The RDMA responder handler handles the page fault in two parts.
	 * First it brings the necessary pages for the current packet
	 * (and uses the pfault context), and then (after resuming the QP)
	 * prefetches more pages. The second operation cannot use the pfault
	 * context and therefore uses the dummy_pfault context allocated on
	 * the stack.
	 */
1312 pfault->rdma.rdma_va += pfault->bytes_committed;
1313 pfault->rdma.rdma_op_len -= min(pfault->bytes_committed,
1314 pfault->rdma.rdma_op_len);
1315 pfault->bytes_committed = 0;
1317 address = pfault->rdma.rdma_va;
1318 length = pfault->rdma.rdma_op_len;
	/* For some operations, the hardware cannot tell the exact message
	 * length, and in those cases it reports zero. Use prefetch
	 * logic.
	 */
1324 prefetch_activated = 1;
1325 length = pfault->rdma.packet_size;
1326 prefetch_len = min(MAX_PREFETCH_LEN, prefetch_len);
1329 ret = pagefault_single_data_segment(dev, NULL, rkey, address, length,
1330 &pfault->bytes_committed, NULL,
1332 if (ret == -EAGAIN) {
1333 /* We're racing with an invalidation, don't prefetch */
1334 prefetch_activated = 0;
1335 } else if (ret < 0 || pages_in_range(address, length) > ret) {
1336 mlx5_ib_page_fault_resume(dev, pfault, 1);
1338 mlx5_ib_dbg(dev, "PAGE FAULT error %d. QP 0x%x, type: 0x%x\n",
1339 ret, pfault->token, pfault->type);
1343 mlx5_ib_page_fault_resume(dev, pfault, 0);
1344 mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x, type: 0x%x, prefetch_activated: %d\n",
1345 pfault->token, pfault->type,
1346 prefetch_activated);
1348 /* At this point, there might be a new pagefault already arriving in
1349 * the eq, switch to the dummy pagefault for the rest of the
1350 * processing. We're still OK with the objects being alive as the
1351 * work-queue is being fenced. */
1353 if (prefetch_activated) {
1354 u32 bytes_committed = 0;
1356 ret = pagefault_single_data_segment(dev, NULL, rkey, address,
1358 &bytes_committed, NULL,
1360 if (ret < 0 && ret != -EAGAIN) {
1361 mlx5_ib_dbg(dev, "Prefetch failed. ret: %d, QP 0x%x, address: 0x%.16llx, length = 0x%.16x\n",
1362 ret, pfault->token, address, prefetch_len);
1367 static void mlx5_ib_pfault(struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault)
1369 u8 event_subtype = pfault->event_subtype;
1371 switch (event_subtype) {
1372 case MLX5_PFAULT_SUBTYPE_WQE:
1373 mlx5_ib_mr_wqe_pfault_handler(dev, pfault);
1375 case MLX5_PFAULT_SUBTYPE_RDMA:
1376 mlx5_ib_mr_rdma_pfault_handler(dev, pfault);
1379 mlx5_ib_err(dev, "Invalid page fault event subtype: 0x%x\n",
1381 mlx5_ib_page_fault_resume(dev, pfault, 1);
1385 static void mlx5_ib_eqe_pf_action(struct work_struct *work)
1387 struct mlx5_pagefault *pfault = container_of(work,
1388 struct mlx5_pagefault,
1390 struct mlx5_ib_pf_eq *eq = pfault->eq;
1392 mlx5_ib_pfault(eq->dev, pfault);
1393 mempool_free(pfault, eq->pool);
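
/*
 * Drain the page-fault EQ: decode each EQE into a struct mlx5_pagefault
 * and queue it for handling on the page-fault workqueue.
 */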
1396 static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq)
1398 struct mlx5_eqe_page_fault *pf_eqe;
1399 struct mlx5_pagefault *pfault;
1400 struct mlx5_eqe *eqe;
1403 while ((eqe = mlx5_eq_get_eqe(eq->core, cc))) {
1404 pfault = mempool_alloc(eq->pool, GFP_ATOMIC);
1406 schedule_work(&eq->work);
1410 pf_eqe = &eqe->data.page_fault;
1411 pfault->event_subtype = eqe->sub_type;
1412 pfault->bytes_committed = be32_to_cpu(pf_eqe->bytes_committed);
1414 mlx5_ib_dbg(eq->dev,
1415 "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x\n",
1416 eqe->sub_type, pfault->bytes_committed);
1418 switch (eqe->sub_type) {
1419 case MLX5_PFAULT_SUBTYPE_RDMA:
1420 /* RDMA based event */
1422 be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24;
1424 be32_to_cpu(pf_eqe->rdma.pftype_token) &
1426 pfault->rdma.r_key =
1427 be32_to_cpu(pf_eqe->rdma.r_key);
1428 pfault->rdma.packet_size =
1429 be16_to_cpu(pf_eqe->rdma.packet_length);
1430 pfault->rdma.rdma_op_len =
1431 be32_to_cpu(pf_eqe->rdma.rdma_op_len);
1432 pfault->rdma.rdma_va =
1433 be64_to_cpu(pf_eqe->rdma.rdma_va);
1434 mlx5_ib_dbg(eq->dev,
1435 "PAGE_FAULT: type:0x%x, token: 0x%06x, r_key: 0x%08x\n",
1436 pfault->type, pfault->token,
1437 pfault->rdma.r_key);
1438 mlx5_ib_dbg(eq->dev,
1439 "PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n",
1440 pfault->rdma.rdma_op_len,
1441 pfault->rdma.rdma_va);
1444 case MLX5_PFAULT_SUBTYPE_WQE:
1445 /* WQE based event */
1447 (be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24) & 0x7;
1449 be32_to_cpu(pf_eqe->wqe.token);
1450 pfault->wqe.wq_num =
1451 be32_to_cpu(pf_eqe->wqe.pftype_wq) &
1453 pfault->wqe.wqe_index =
1454 be16_to_cpu(pf_eqe->wqe.wqe_index);
1455 pfault->wqe.packet_size =
1456 be16_to_cpu(pf_eqe->wqe.packet_length);
1457 mlx5_ib_dbg(eq->dev,
1458 "PAGE_FAULT: type:0x%x, token: 0x%06x, wq_num: 0x%06x, wqe_index: 0x%04x\n",
1459 pfault->type, pfault->token,
1461 pfault->wqe.wqe_index);
1465 mlx5_ib_warn(eq->dev,
1466 "Unsupported page fault event sub-type: 0x%02hhx\n",
			/* Unsupported page faults should still be
			 * resolved by the page fault handler
			 */
1474 INIT_WORK(&pfault->work, mlx5_ib_eqe_pf_action);
1475 queue_work(eq->wq, &pfault->work);
1477 cc = mlx5_eq_update_cc(eq->core, ++cc);
1480 mlx5_eq_update_ci(eq->core, cc, 1);
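
/*
 * Page-fault EQ interrupt handler; defers to the work item when the EQ
 * lock is contended.
 */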
1483 static int mlx5_ib_eq_pf_int(struct notifier_block *nb, unsigned long type,
1486 struct mlx5_ib_pf_eq *eq =
1487 container_of(nb, struct mlx5_ib_pf_eq, irq_nb);
1488 unsigned long flags;
1490 if (spin_trylock_irqsave(&eq->lock, flags)) {
1491 mlx5_ib_eq_pf_process(eq);
1492 spin_unlock_irqrestore(&eq->lock, flags);
1494 schedule_work(&eq->work);
/* mempool_refill() was proposed but unfortunately wasn't accepted
 * http://lkml.iu.edu/hypermail/linux/kernel/1512.1/05073.html
 */
1504 static void mempool_refill(mempool_t *pool)
1506 while (pool->curr_nr < pool->min_nr)
1507 mempool_free(mempool_alloc(pool, GFP_KERNEL), pool);
1510 static void mlx5_ib_eq_pf_action(struct work_struct *work)
1512 struct mlx5_ib_pf_eq *eq =
1513 container_of(work, struct mlx5_ib_pf_eq, work);
1515 mempool_refill(eq->pool);
1517 spin_lock_irq(&eq->lock);
1518 mlx5_ib_eq_pf_process(eq);
1519 spin_unlock_irq(&eq->lock);
1523 MLX5_IB_NUM_PF_EQE = 0x1000,
1524 MLX5_IB_NUM_PF_DRAIN = 64,
1528 mlx5_ib_create_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
1530 struct mlx5_eq_param param = {};
1533 INIT_WORK(&eq->work, mlx5_ib_eq_pf_action);
1534 spin_lock_init(&eq->lock);
1537 eq->pool = mempool_create_kmalloc_pool(MLX5_IB_NUM_PF_DRAIN,
1538 sizeof(struct mlx5_pagefault));
1542 eq->wq = alloc_workqueue("mlx5_ib_page_fault",
1543 WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM,
1550 eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
1551 param = (struct mlx5_eq_param) {
1553 .nent = MLX5_IB_NUM_PF_EQE,
1555 param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT;
1556 eq->core = mlx5_eq_create_generic(dev->mdev, ¶m);
1557 if (IS_ERR(eq->core)) {
1558 err = PTR_ERR(eq->core);
1561 err = mlx5_eq_enable(dev->mdev, eq->core, &eq->irq_nb);
1563 mlx5_ib_err(dev, "failed to enable odp EQ %d\n", err);
1569 mlx5_eq_destroy_generic(dev->mdev, eq->core);
1571 destroy_workqueue(eq->wq);
1573 mempool_destroy(eq->pool);
1578 mlx5_ib_destroy_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
1582 mlx5_eq_disable(dev->mdev, eq->core, &eq->irq_nb);
1583 err = mlx5_eq_destroy_generic(dev->mdev, eq->core);
1584 cancel_work_sync(&eq->work);
1585 destroy_workqueue(eq->wq);
1586 mempool_destroy(eq->pool);
1591 void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent)
1593 if (!(ent->dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
1596 switch (ent->order - 2) {
1597 case MLX5_IMR_MTT_CACHE_ENTRY:
1598 ent->page = PAGE_SHIFT;
1599 ent->xlt = MLX5_IMR_MTT_ENTRIES *
1600 sizeof(struct mlx5_mtt) /
1601 MLX5_IB_UMR_OCTOWORD;
1602 ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
1606 case MLX5_IMR_KSM_CACHE_ENTRY:
1607 ent->page = MLX5_KSM_PAGE_SHIFT;
1608 ent->xlt = mlx5_imr_ksm_entries *
1609 sizeof(struct mlx5_klm) /
1610 MLX5_IB_UMR_OCTOWORD;
1611 ent->access_mode = MLX5_MKC_ACCESS_MODE_KSM;
1617 static const struct ib_device_ops mlx5_ib_dev_odp_ops = {
1618 .advise_mr = mlx5_ib_advise_mr,
1621 int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
1625 if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
1626 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops);
1628 if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) {
1629 ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey);
1631 mlx5_ib_err(dev, "Error getting null_mkey %d\n", ret);
1636 if (!MLX5_CAP_GEN(dev->mdev, pg))
1639 ret = mlx5_ib_create_pf_eq(dev, &dev->odp_pf_eq);
1644 void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev)
1646 if (!MLX5_CAP_GEN(dev->mdev, pg))
1649 mlx5_ib_destroy_pf_eq(dev, &dev->odp_pf_eq);
1652 int mlx5_ib_odp_init(void)
	mlx5_imr_ksm_entries = BIT_ULL(get_order(TASK_SIZE) -
				       MLX5_IMR_MTT_BITS);
1660 struct prefetch_mr_work {
1661 struct work_struct work;
1665 struct ib_sge sg_list[0];
1668 static void num_pending_prefetch_dec(struct mlx5_ib_dev *dev,
1669 struct ib_sge *sg_list, u32 num_sge,
1675 srcu_key = srcu_read_lock(&dev->mr_srcu);
1677 for (i = from; i < num_sge; ++i) {
1678 struct mlx5_core_mkey *mmkey;
1679 struct mlx5_ib_mr *mr;
1681 mmkey = xa_load(&dev->mdev->priv.mkey_table,
1682 mlx5_base_mkey(sg_list[i].lkey));
1683 mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
1684 atomic_dec(&mr->num_pending_prefetch);
1687 srcu_read_unlock(&dev->mr_srcu, srcu_key);
1690 static bool num_pending_prefetch_inc(struct ib_pd *pd,
1691 struct ib_sge *sg_list, u32 num_sge)
1693 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1697 for (i = 0; i < num_sge; ++i) {
1698 struct mlx5_core_mkey *mmkey;
1699 struct mlx5_ib_mr *mr;
1701 mmkey = xa_load(&dev->mdev->priv.mkey_table,
1702 mlx5_base_mkey(sg_list[i].lkey));
1703 if (!mmkey || mmkey->key != sg_list[i].lkey) {
1708 if (mmkey->type != MLX5_MKEY_MR) {
1713 mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
1715 if (mr->ibmr.pd != pd) {
1725 atomic_inc(&mr->num_pending_prefetch);
1729 num_pending_prefetch_dec(dev, sg_list, i, 0);
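
/* Synchronously fault in the pages behind each SGE of a prefetch request. */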
1734 static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd, u32 pf_flags,
1735 struct ib_sge *sg_list, u32 num_sge)
1739 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1741 for (i = 0; i < num_sge; ++i) {
1742 struct ib_sge *sg = &sg_list[i];
1743 int bytes_committed = 0;
1745 ret = pagefault_single_data_segment(dev, pd, sg->lkey, sg->addr,
1747 &bytes_committed, NULL,
1753 return ret < 0 ? ret : 0;
1756 static void mlx5_ib_prefetch_mr_work(struct work_struct *work)
1758 struct prefetch_mr_work *w =
1759 container_of(work, struct prefetch_mr_work, work);
1761 if (ib_device_try_get(w->pd->device)) {
1762 mlx5_ib_prefetch_sg_list(w->pd, w->pf_flags, w->sg_list,
1764 ib_device_put(w->pd->device);
1767 num_pending_prefetch_dec(to_mdev(w->pd->device), w->sg_list,
1772 int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
1773 enum ib_uverbs_advise_mr_advice advice,
1774 u32 flags, struct ib_sge *sg_list, u32 num_sge)
1776 struct mlx5_ib_dev *dev = to_mdev(pd->device);
1777 u32 pf_flags = MLX5_PF_FLAGS_PREFETCH;
1778 struct prefetch_mr_work *work;
1782 if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH)
1783 pf_flags |= MLX5_PF_FLAGS_DOWNGRADE;
1785 if (flags & IB_UVERBS_ADVISE_MR_FLAG_FLUSH)
1786 return mlx5_ib_prefetch_sg_list(pd, pf_flags, sg_list,
1789 work = kvzalloc(struct_size(work, sg_list, num_sge), GFP_KERNEL);
1793 memcpy(work->sg_list, sg_list, num_sge * sizeof(struct ib_sge));
	/* It is guaranteed that the pd when work is executed is the pd when
	 * work was queued since pd can't be destroyed while it holds MRs and
	 * destroying a MR leads to flushing the workqueue
	 */
1800 work->pf_flags = pf_flags;
1801 work->num_sge = num_sge;
1803 INIT_WORK(&work->work, mlx5_ib_prefetch_mr_work);
1805 srcu_key = srcu_read_lock(&dev->mr_srcu);
1807 valid_req = num_pending_prefetch_inc(pd, sg_list, num_sge);
1809 queue_work(system_unbound_wq, &work->work);
1813 srcu_read_unlock(&dev->mr_srcu, srcu_key);
1815 return valid_req ? 0 : -EINVAL;