x86: PAT: add follow_pfnmap_pte routine to help tracking pfnmap pages - v3
author venkatesh.pallipadi@intel.com <venkatesh.pallipadi@intel.com>
Thu, 18 Dec 2008 19:41:28 +0000 (11:41 -0800)
committer H. Peter Anvin <hpa@zytor.com>
Thu, 18 Dec 2008 21:30:15 +0000 (13:30 -0800)
Impact: New, currently unused interface.

Add a generic interface to follow a pfn in a pfnmap vma range. This is used by
one of the subsequent x86 PAT-related patches to keep track of memory types
for vma regions across vma copy and free; a usage sketch of the new interface
follows the file list below.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
include/linux/mm.h
mm/memory.c
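
The following is an editorial usage sketch, not part of the patch. It assumes a
caller that already holds the mm's mmap_sem for read, and it uses pte_pgprot(),
which exists on x86 (the target of this series) but not on every architecture;
the helper name pfnmap_first_pfn() is made up for illustration.

#include <linux/mm.h>

/* Sketch: recover the pfn and protection of the first page of a pfnmap vma. */
static int pfnmap_first_pfn(struct vm_area_struct *vma,
			    unsigned long *pfn, pgprot_t *prot)
{
	pte_t pte;

	/* follow_pfnmap_pte() returns -EINVAL for non-VM_PFNMAP vmas or holes. */
	if (follow_pfnmap_pte(vma, vma->vm_start, &pte))
		return -EINVAL;

	/*
	 * The pte is a snapshot copied out under the page table lock;
	 * follow_pfnmap_pte() drops the lock before returning.
	 */
	*pfn = pte_pfn(pte);
	*prot = pte_pgprot(pte);
	return 0;
}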

index 2be8d9b5e46fa174d11abab45dce459d38a26991..a25024ff9c11966010e3ff647a555a09f1f2e7d6 100644 (file)
@@ -1223,6 +1223,9 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address,
 #define FOLL_GET       0x04    /* do get_page on page */
 #define FOLL_ANON      0x08    /* give ZERO_PAGE if no pgtable */
 
+int follow_pfnmap_pte(struct vm_area_struct *vma,
+                               unsigned long address, pte_t *ret_ptep);
+
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
                        void *data);
 extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
index cef95c8c77fad028b33d2fd2c10d461acafce9ab..8ca6bbf34ad6ac2be0596441e1e57ccebb9c555f 100644 (file)
@@ -1111,6 +1111,49 @@ no_page_table:
        return page;
 }
 
+int follow_pfnmap_pte(struct vm_area_struct *vma, unsigned long address,
+                       pte_t *ret_ptep)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *ptep, pte;
+       spinlock_t *ptl;
+       struct page *page;
+       struct mm_struct *mm = vma->vm_mm;
+
+       if (!is_pfn_mapping(vma))
+               goto err;
+
+       page = NULL;
+       pgd = pgd_offset(mm, address);
+       if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+               goto err;
+
+       pud = pud_offset(pgd, address);
+       if (pud_none(*pud) || unlikely(pud_bad(*pud)))
+               goto err;
+
+       pmd = pmd_offset(pud, address);
+       if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+               goto err;
+
+       ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
+
+       pte = *ptep;
+       if (!pte_present(pte))
+               goto err_unlock;
+
+       *ret_ptep = pte;
+       pte_unmap_unlock(ptep, ptl);
+       return 0;
+
+err_unlock:
+       pte_unmap_unlock(ptep, ptl);
+err:
+       return -EINVAL;
+}
+
 /* Can we do the FOLL_ANON optimization? */
 static inline int use_zero_page(struct vm_area_struct *vma)
 {