vfio/type1: track locked_vm per dma
Author: Steve Sistare <steven.sistare@oracle.com>
Tue, 31 Jan 2023 16:58:05 +0000 (08:58 -0800)
Committer: Alex Williamson <alex.williamson@redhat.com>
Thu, 9 Feb 2023 18:39:14 +0000 (11:39 -0700)
Track locked_vm per dma struct, and create a new subroutine, both for use
in a subsequent patch.  No functional change.

Fixes: c3cbab24db38 ("vfio/type1: implement interfaces to update vaddr")
Cc: stable@vger.kernel.org
Signed-off-by: Steve Sistare <steven.sistare@oracle.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/1675184289-267876-4-git-send-email-steven.sistare@oracle.com
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
drivers/vfio/vfio_iommu_type1.c

index 6b757d035457a34533a58719fe881f9468d85f35..372d91b6a2f603c9d0260b959a50dec804aff213 100644 (file)
@@ -101,6 +101,7 @@ struct vfio_dma {
        struct rb_root          pfn_list;       /* Ex-user pinned pfn list */
        unsigned long           *bitmap;
        struct mm_struct        *mm;
+       size_t                  locked_vm;
 };
 
 struct vfio_batch {
@@ -413,6 +414,19 @@ static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, struct vfio_pfn *vpfn)
        return ret;
 }
 
+static int mm_lock_acct(struct task_struct *task, struct mm_struct *mm,
+                       bool lock_cap, long npage)
+{
+       int ret = mmap_write_lock_killable(mm);
+
+       if (ret)
+               return ret;
+
+       ret = __account_locked_vm(mm, abs(npage), npage > 0, task, lock_cap);
+       mmap_write_unlock(mm);
+       return ret;
+}
+
 static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async)
 {
        struct mm_struct *mm;
@@ -425,12 +439,9 @@ static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async)
        if (async && !mmget_not_zero(mm))
                return -ESRCH; /* process exited */
 
-       ret = mmap_write_lock_killable(mm);
-       if (!ret) {
-               ret = __account_locked_vm(mm, abs(npage), npage > 0, dma->task,
-                                         dma->lock_cap);
-               mmap_write_unlock(mm);
-       }
+       ret = mm_lock_acct(dma->task, mm, dma->lock_cap, npage);
+       if (!ret)
+               dma->locked_vm += npage;
 
        if (async)
                mmput(mm);