diff --git a/mm/memory.c b/mm/memory.c
index c32318dc11d4e57eab21f631555325529b7b3d82..c8e357627318619a9aa7614354f60758b2b9e303 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2177,11 +2177,11 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
                        unsigned long addr, unsigned long end,
                        unsigned long pfn, pgprot_t prot)
 {
-       pte_t *pte;
+       pte_t *pte, *mapped_pte;
        spinlock_t *ptl;
        int err = 0;
 
-       pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
+       mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
        if (!pte)
                return -ENOMEM;
        arch_enter_lazy_mmu_mode();
@@ -2195,7 +2195,7 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
        arch_leave_lazy_mmu_mode();
-       pte_unmap_unlock(pte - 1, ptl);
+       pte_unmap_unlock(mapped_pte, ptl);
        return err;
 }
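
Note on the hunk above: the do/while advances "pte" only in its
while() condition, so a "break" out of the body (when
pfn_modify_allowed() rejects a pfn) can leave "pte" still pointing at
the entry returned by pte_alloc_map_lock(); "pte - 1" then addresses
the slot before the mapped PTE, and with CONFIG_HIGHPTE the final
pte_unmap_unlock() kunmaps the wrong page. A minimal sketch of the
pre-fix failure mode (loop body abbreviated from the surrounding
context):

	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
	do {
		BUG_ON(!pte_none(*pte));
		if (!pfn_modify_allowed(pfn, prot)) {
			err = -EACCES;
			break;		/* first pass: "pte" never advanced */
		}
		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);	/* may point before the mapped PTE */

Saving the original pointer in "mapped_pte" makes the unmap/unlock
independent of how far the loop got.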
 
@@ -2394,18 +2394,18 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
                                     pte_fn_t fn, void *data, bool create,
                                     pgtbl_mod_mask *mask)
 {
-       pte_t *pte;
+       pte_t *pte, *mapped_pte;
        int err = 0;
        spinlock_t *ptl;
 
        if (create) {
-               pte = (mm == &init_mm) ?
+               mapped_pte = pte = (mm == &init_mm) ?
                        pte_alloc_kernel_track(pmd, addr, mask) :
                        pte_alloc_map_lock(mm, pmd, addr, &ptl);
                if (!pte)
                        return -ENOMEM;
        } else {
-               pte = (mm == &init_mm) ?
+               mapped_pte = pte = (mm == &init_mm) ?
                        pte_offset_kernel(pmd, addr) :
                        pte_offset_map_lock(mm, pmd, addr, &ptl);
        }
@@ -2428,7 +2428,7 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
        arch_leave_lazy_mmu_mode();
 
        if (mm != &init_mm)
-               pte_unmap_unlock(pte-1, ptl);
+               pte_unmap_unlock(mapped_pte, ptl);
        return err;
 }
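
The same one-line save fixes apply_to_pte_range(): an error from the
pte_fn_t callback breaks the walk early, and only the saved
"mapped_pte" reliably identifies what pte_offset_map_lock() /
pte_alloc_map_lock() mapped (the lock is only taken for user page
tables, hence the "mm != &init_mm" guard; init_mm entries come from
pte_offset_kernel() and are never mapped or locked here). For
context, a hypothetical caller through the public wrapper
apply_to_page_range(); "wrprotect_pte" is illustrative, not part of
this patch:

	/* pte_fn_t callback, invoked for each PTE in the walked range;
	 * a nonzero return aborts the walk early - exactly the case in
	 * which the stale "pte - 1" used to be handed to
	 * pte_unmap_unlock(). TLB maintenance elided for brevity. */
	static int wrprotect_pte(pte_t *pte, unsigned long addr, void *data)
	{
		struct mm_struct *mm = data;

		set_pte_at(mm, addr, pte, pte_wrprotect(*pte));
		return 0;
	}

	err = apply_to_page_range(mm, start, size, wrprotect_pte, mm);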
 
@@ -2902,7 +2902,6 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
                }
                flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
                entry = mk_pte(new_page, vma->vm_page_prot);
-               entry = pte_sw_mkyoung(entry);
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 
                /*
@@ -3560,7 +3559,6 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
        __SetPageUptodate(page);
 
        entry = mk_pte(page, vma->vm_page_prot);
-       entry = pte_sw_mkyoung(entry);
        if (vma->vm_flags & VM_WRITE)
                entry = pte_mkwrite(pte_mkdirty(entry));
 
@@ -3745,8 +3743,6 @@ void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
 
        if (prefault && arch_wants_old_prefaulted_pte())
                entry = pte_mkold(entry);
-       else
-               entry = pte_sw_mkyoung(entry);
 
        if (write)
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
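
The three removals above all drop pte_sw_mkyoung() from freshly built
fault entries. For context, the generic fallback in
include/linux/pgtable.h is an identity function, and only
architectures that manage the accessed bit in software (MIPS)
override it; roughly:

	#ifndef pte_sw_mkyoung
	/*
	 * On most architectures the hardware, or the fault path itself,
	 * maintains the accessed bit, so the generic helper does nothing;
	 * software-managed-young architectures provide their own.
	 */
	static inline pte_t pte_sw_mkyoung(pte_t pte)
	{
		return pte;
	}
	#define pte_sw_mkyoung	pte_sw_mkyoung
	#endif

So the change is a no-op for most configurations; on software-young
architectures the new entry may instead take one extra minor fault to
set the bit.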
@@ -4798,28 +4794,68 @@ out:
        return ret;
 }
 
+/**
+ * generic_access_phys - generic implementation for iomem mmap access
+ * @vma: the vma to access
+ * @addr: userspace address, not relative offset within @vma
+ * @buf: buffer to read/write
+ * @len: length of transfer
+ * @write: set to FOLL_WRITE when writing, otherwise reading
+ *
+ * This is a generic implementation for &vm_operations_struct.access for an
+ * iomem mapping. This callback is used by access_process_vm() when the @vma is
+ * not page based.
+ */
 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
                        void *buf, int len, int write)
 {
        resource_size_t phys_addr;
        unsigned long prot = 0;
        void __iomem *maddr;
-       int offset = addr & (PAGE_SIZE-1);
+       pte_t *ptep, pte;
+       spinlock_t *ptl;
+       int offset = offset_in_page(addr);
+       int ret = -EINVAL;
+
+       if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+               return -EINVAL;
 
-       if (follow_phys(vma, addr, write, &prot, &phys_addr))
+retry:
+       if (follow_pte(vma->vm_mm, addr, &ptep, &ptl))
+               return -EINVAL;
+       pte = *ptep;
+       pte_unmap_unlock(ptep, ptl);
+
+       prot = pgprot_val(pte_pgprot(pte));
+       phys_addr = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
+
+       if ((write & FOLL_WRITE) && !pte_write(pte))
                return -EINVAL;
 
        maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
        if (!maddr)
                return -ENOMEM;
 
+       if (follow_pte(vma->vm_mm, addr, &ptep, &ptl))
+               goto out_unmap;
+
+       if (!pte_same(pte, *ptep)) {
+               pte_unmap_unlock(ptep, ptl);
+               iounmap(maddr);
+
+               goto retry;
+       }
+
        if (write)
                memcpy_toio(maddr + offset, buf, len);
        else
                memcpy_fromio(buf, maddr + offset, len);
+       ret = len;
+       pte_unmap_unlock(ptep, ptl);
+out_unmap:
        iounmap(maddr);
 
-       return len;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(generic_access_phys);
 #endif
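
Two things worth noting about the rewritten generic_access_phys().
First, instead of a one-shot follow_phys() snapshot it re-walks the
page table with follow_pte() after ioremap_prot() and rechecks
pte_same(); if the VMA was concurrently zapped or remapped it drops
the mapping and retries, so the memcpy can never target a stale
physical address. Second, it is meant to be plugged straight into a
driver's vm_operations_struct. A hypothetical sketch (the "foo_*"
names are illustrative, not from this patch):

	/* Wire up generic_access_phys() so access_process_vm() - and
	 * with it ptrace() and /proc/<pid>/mem - works on a region the
	 * driver mapped with remap_pfn_range(). */
	static const struct vm_operations_struct foo_phys_vm_ops = {
	#ifdef CONFIG_HAVE_IOREMAP_PROT
		.access = generic_access_phys,
	#endif
	};

	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
	{
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		vma->vm_ops = &foo_phys_vm_ops;
		return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot);
	}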
@@ -5137,17 +5173,19 @@ long copy_huge_page_from_user(struct page *dst_page,
        void *page_kaddr;
        unsigned long i, rc = 0;
        unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
+       struct page *subpage = dst_page;
 
-       for (i = 0; i < pages_per_huge_page; i++) {
+       for (i = 0; i < pages_per_huge_page;
+            i++, subpage = mem_map_next(subpage, dst_page, i)) {
                if (allow_pagefault)
-                       page_kaddr = kmap(dst_page + i);
+                       page_kaddr = kmap(subpage);
                else
-                       page_kaddr = kmap_atomic(dst_page + i);
+                       page_kaddr = kmap_atomic(subpage);
                rc = copy_from_user(page_kaddr,
                                (const void __user *)(src + i * PAGE_SIZE),
                                PAGE_SIZE);
                if (allow_pagefault)
-                       kunmap(dst_page + i);
+                       kunmap(subpage);
                else
                        kunmap_atomic(page_kaddr);
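
The loop change above is about pointer arithmetic on struct page:
"dst_page + i" is only safe when the mem_map backing the huge page is
virtually contiguous, which SPARSEMEM without VMEMMAP does not
guarantee across section boundaries. mem_map_next() (mm/internal.h)
re-derives the subpage from the pfn at each MAX_ORDER-aligned block.
A sketch of the idea ("next_subpage" is an illustrative stand-in, not
the exact upstream body):

	/* Inside a gigantic page, struct pages are only guaranteed
	 * contiguous within a MAX_ORDER-aligned block, so hop across
	 * block boundaries via the pfn rather than "iter + 1". */
	static struct page *next_subpage(struct page *iter,
					 struct page *base, int i)
	{
		if (unlikely((i & (MAX_ORDER_NR_PAGES - 1)) == 0))
			return pfn_to_page(page_to_pfn(base) + i);
		return iter + 1;
	}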