mm: more likely reclaim MADV_SEQUENTIAL mappings
diff --git a/mm/memory.c b/mm/memory.c
index fc031d68327e5fad33130b15d50a020b7b9b31a8..99e8d5c7b3126a455d664942d0f90621ce6c8825 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -669,6 +669,16 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        if (is_vm_hugetlb_page(vma))
                return copy_hugetlb_page_range(dst_mm, src_mm, vma);
 
+       if (unlikely(is_pfn_mapping(vma))) {
+               /*
+                * We do not free on error cases below as remove_vma
+                * gets called on error from higher level routine
+                */
+               ret = track_pfn_vma_copy(vma);
+               if (ret)
+                       return ret;
+       }
+
        /*
         * We need to invalidate the secondary MMU mappings only when
         * there could be a permission downgrade on the ptes of the
@@ -757,8 +767,9 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
                        else {
                                if (pte_dirty(ptent))
                                        set_page_dirty(page);
-                               if (pte_young(ptent))
-                                       SetPageReferenced(page);
+                               if (pte_young(ptent) &&
+                                   likely(!VM_SequentialReadHint(vma)))
+                                       mark_page_accessed(page);
                                file_rss--;
                        }
                        page_remove_rmap(page, vma);
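
For context, the VM_SequentialReadHint() test above is what ties this hunk to the subject line: pages unmapped from a VMA carrying the MADV_SEQUENTIAL hint no longer have their accessed state propagated, so they become reclaim candidates sooner. A minimal userspace sketch of how such a mapping is set up; the file path and error handling are illustrative only:

    #include <fcntl.h>
    #include <sys/mman.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/tmp/bigfile", O_RDONLY);        /* illustrative path */
            struct stat st;
            char *p;

            if (fd < 0 || fstat(fd, &st) < 0)
                    return 1;

            p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
            if (p == MAP_FAILED)
                    return 1;

            /* Tell the kernel this mapping will be read once, front to back. */
            madvise(p, st.st_size, MADV_SEQUENTIAL);

            /* ... stream through the file ... */

            munmap(p, st.st_size);
            close(fd);
            return 0;
    }
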
@@ -915,6 +926,9 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
                if (vma->vm_flags & VM_ACCOUNT)
                        *nr_accounted += (end - start) >> PAGE_SHIFT;
 
+               if (unlikely(is_pfn_mapping(vma)))
+                       untrack_pfn_vma(vma, 0, 0);
+
                while (start != end) {
                        if (!tlb_start_valid) {
                                tlb_start = start;
@@ -1430,6 +1444,7 @@ out:
 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                        unsigned long pfn)
 {
+       int ret;
        /*
         * Technically, architectures with pte_special can avoid all these
         * restrictions (same for remap_pfn_range).  However we would like
@@ -1444,7 +1459,15 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 
        if (addr < vma->vm_start || addr >= vma->vm_end)
                return -EFAULT;
-       return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+       if (track_pfn_vma_new(vma, vma->vm_page_prot, pfn, PAGE_SIZE))
+               return -EINVAL;
+
+       ret = insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+
+       if (ret)
+               untrack_pfn_vma(vma, pfn, PAGE_SIZE);
+
+       return ret;
 }
 EXPORT_SYMBOL(vm_insert_pfn);
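
The user-visible change here is that vm_insert_pfn() can now fail when the memtype reservation done by track_pfn_vma_new() is refused. A hedged sketch of a driver fault handler using the interface of this era; the mydev structure and pfn arithmetic are invented for illustration:

    #include <linux/mm.h>

    /* Hypothetical driver state; names are illustrative only. */
    struct mydev {
            unsigned long base_pfn;         /* first pfn of the device aperture */
    };

    static int mydev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
    {
            struct mydev *dev = vma->vm_private_data;
            unsigned long pfn = dev->base_pfn + vmf->pgoff;

            /* With this change, a rejected memtype reservation shows up
             * here as a non-zero return from vm_insert_pfn(). */
            if (vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn))
                    return VM_FAULT_SIGBUS;
            return VM_FAULT_NOPAGE;
    }
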
 
@@ -1575,14 +1598,17 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
         * behaviour that some programs depend on. We mark the "original"
         * un-COW'ed pages by matching them up with "vma->vm_pgoff".
         */
-       if (is_cow_mapping(vma->vm_flags)) {
-               if (addr != vma->vm_start || end != vma->vm_end)
-                       return -EINVAL;
+       if (addr == vma->vm_start && end == vma->vm_end)
                vma->vm_pgoff = pfn;
-       }
+       else if (is_cow_mapping(vma->vm_flags))
+               return -EINVAL;
 
        vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
 
+       err = track_pfn_vma_new(vma, prot, pfn, PAGE_ALIGN(size));
+       if (err)
+               return -EINVAL;
+
        BUG_ON(addr >= end);
        pfn -= addr >> PAGE_SHIFT;
        pgd = pgd_offset(mm, addr);
@@ -1594,6 +1620,10 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);
+
+       if (err)
+               untrack_pfn_vma(vma, pfn, PAGE_ALIGN(size));
+
        return err;
 }
 EXPORT_SYMBOL(remap_pfn_range);
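
remap_pfn_range() picks up the same tracking: the range's memtype is reserved before the page tables are populated and released again if population fails. For comparison, a minimal mmap handler sketch; the MMIO base address and names are made up:

    #include <linux/fs.h>
    #include <linux/mm.h>

    #define MYDEV_BAR_PHYS  0xfd000000UL    /* illustrative MMIO base */

    static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
    {
            unsigned long size = vma->vm_end - vma->vm_start;

            vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

            /* Returns -EINVAL if track_pfn_vma_new() rejects the
             * requested memtype for this physical range. */
            return remap_pfn_range(vma, vma->vm_start,
                                   MYDEV_BAR_PHYS >> PAGE_SHIFT,
                                   size, vma->vm_page_prot);
    }
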
@@ -2237,7 +2267,7 @@ int vmtruncate(struct inode * inode, loff_t offset)
                unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
        }
 
-       if (inode->i_op && inode->i_op->truncate)
+       if (inode->i_op->truncate)
                inode->i_op->truncate(inode);
        return 0;
 
@@ -2257,7 +2287,7 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
         * a way to truncate a range of blocks (punch a hole) -
         * we should return failure right now.
         */
-       if (!inode->i_op || !inode->i_op->truncate_range)
+       if (!inode->i_op->truncate_range)
                return -ENOSYS;
 
        mutex_lock(&inode->i_mutex);
@@ -2865,9 +2895,9 @@ int in_gate_area_no_task(unsigned long addr)
 #endif /* __HAVE_ARCH_GATE_AREA */
 
 #ifdef CONFIG_HAVE_IOREMAP_PROT
-static resource_size_t follow_phys(struct vm_area_struct *vma,
-                       unsigned long address, unsigned int flags,
-                       unsigned long *prot)
+int follow_phys(struct vm_area_struct *vma,
+               unsigned long address, unsigned int flags,
+               unsigned long *prot, resource_size_t *phys)
 {
        pgd_t *pgd;
        pud_t *pud;
@@ -2876,24 +2906,26 @@ static resource_size_t follow_phys(struct vm_area_struct *vma,
        spinlock_t *ptl;
        resource_size_t phys_addr = 0;
        struct mm_struct *mm = vma->vm_mm;
+       int ret = -EINVAL;
 
-       VM_BUG_ON(!(vma->vm_flags & (VM_IO | VM_PFNMAP)));
+       if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+               goto out;
 
        pgd = pgd_offset(mm, address);
        if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-               goto no_page_table;
+               goto out;
 
        pud = pud_offset(pgd, address);
        if (pud_none(*pud) || unlikely(pud_bad(*pud)))
-               goto no_page_table;
+               goto out;
 
        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
-               goto no_page_table;
+               goto out;
 
        /* We cannot handle huge page PFN maps. Luckily they don't exist. */
        if (pmd_huge(*pmd))
-               goto no_page_table;
+               goto out;
 
        ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
        if (!ptep)
@@ -2908,13 +2940,13 @@ static resource_size_t follow_phys(struct vm_area_struct *vma,
        phys_addr <<= PAGE_SHIFT; /* Shift here to avoid overflow on PAE */
 
        *prot = pgprot_val(pte_pgprot(pte));
+       *phys = phys_addr;
+       ret = 0;
 
 unlock:
        pte_unmap_unlock(ptep, ptl);
 out:
-       return phys_addr;
-no_page_table:
-       return 0;
+       return ret;
 }
 
 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
@@ -2925,12 +2957,7 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
        void *maddr;
        int offset = addr & (PAGE_SIZE-1);
 
-       if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
-               return -EINVAL;
-
-       phys_addr = follow_phys(vma, addr, write, &prot);
-
-       if (!phys_addr)
+       if (follow_phys(vma, addr, write, &prot, &phys_addr))
                return -EINVAL;
 
        maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
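
Note the interface change in the last two hunks: follow_phys() is no longer static, returns 0 or -EINVAL, and hands the physical address back through an out-parameter, so a physical address of zero no longer doubles as the failure value. Any caller would follow the same pattern as generic_access_phys() above; a small sketch with an illustrative wrapper name:

    #include <linux/mm.h>

    /* Illustrative caller; "write" is passed through as the flags
     * argument, just as generic_access_phys() does. */
    static int mydrv_lookup_phys(struct vm_area_struct *vma, unsigned long addr,
                                 int write, resource_size_t *out)
    {
            unsigned long prot;

            /* New convention: 0 on success with *prot and *out filled in,
             * -EINVAL when the VMA is not VM_IO/VM_PFNMAP or no pte maps addr. */
            if (follow_phys(vma, addr, write, &prot, out))
                    return -EINVAL;
            return 0;
    }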