diff --git a/mm/memory.c b/mm/memory.c
index ef4e10c60b5f3bb54a88678b1bbb2c19bed825b6..c8e357627318619a9aa7614354f60758b2b9e303 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2902,7 +2902,6 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
                }
                flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
                entry = mk_pte(new_page, vma->vm_page_prot);
-               entry = pte_sw_mkyoung(entry);
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 
                /*
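
In this hunk (and again in the do_anonymous_page() hunk that follows), the fault path builds the new PTE in two steps: mk_pte() combines the page with the VMA's protection bits, and the entry is then marked dirty and, where the VMA permits it, writable; with the removed pte_sw_mkyoung() call gone, the accessed ("young") bit is no longer forced in software at this point. The snippet below is a minimal userspace model of that composition, not kernel code: the FAKE_* bit positions and fake_* helpers are invented stand-ins for pte_t, mk_pte(), pte_mkdirty(), pte_mkyoung() and maybe_mkwrite().

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Invented bit layout; real pte_t bits are architecture specific. */
#define FAKE_PTE_PRESENT  (1ULL << 0)
#define FAKE_PTE_WRITE    (1ULL << 1)
#define FAKE_PTE_DIRTY    (1ULL << 2)
#define FAKE_PTE_ACCESSED (1ULL << 3)

typedef uint64_t fake_pte_t;

/* mk_pte() analogue: page frame number plus the VMA's base protection bits. */
static fake_pte_t fake_mk_pte(uint64_t pfn, uint64_t prot)
{
	return (pfn << 12) | prot | FAKE_PTE_PRESENT;
}

static fake_pte_t fake_pte_mkdirty(fake_pte_t pte) { return pte | FAKE_PTE_DIRTY; }
static fake_pte_t fake_pte_mkyoung(fake_pte_t pte) { return pte | FAKE_PTE_ACCESSED; }

/* maybe_mkwrite() analogue: set the write bit only if the VMA permits writes. */
static fake_pte_t fake_maybe_mkwrite(fake_pte_t pte, bool vma_writable)
{
	return vma_writable ? pte | FAKE_PTE_WRITE : pte;
}

int main(void)
{
	/* Same ordering as the hunk: mk_pte, then dirty, then maybe writable. */
	fake_pte_t entry = fake_mk_pte(0x1234, 0);

	entry = fake_maybe_mkwrite(fake_pte_mkdirty(entry), true);
	printf("without explicit young: %#" PRIx64 "\n", entry);

	/* The removed line corresponds to an extra "young" step in between. */
	entry = fake_mk_pte(0x1234, 0);
	entry = fake_pte_mkyoung(entry);
	entry = fake_maybe_mkwrite(fake_pte_mkdirty(entry), true);
	printf("with explicit young:    %#" PRIx64 "\n", entry);

	return 0;
}
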
@@ -3560,7 +3559,6 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
        __SetPageUptodate(page);
 
        entry = mk_pte(page, vma->vm_page_prot);
-       entry = pte_sw_mkyoung(entry);
        if (vma->vm_flags & VM_WRITE)
                entry = pte_mkwrite(pte_mkdirty(entry));
 
@@ -3745,8 +3743,6 @@ void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
 
        if (prefault && arch_wants_old_prefaulted_pte())
                entry = pte_mkold(entry);
-       else
-               entry = pte_sw_mkyoung(entry);
 
        if (write)
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
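
The do_set_pte() hunk leaves the prefault path as the only special case for the accessed bit: when the architecture prefers prefaulted entries to start out "old" (arch_wants_old_prefaulted_pte()), pte_mkold() clears the accessed bit, and with the else branch removed everything else simply keeps whatever accessed state mk_pte() produced rather than being explicitly marked young. Below is a small sketch of that decision in plain C; arch_prefers_old_prefault() and the fake_* helpers are made-up stand-ins, not kernel interfaces.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_PTE_WRITE    (1ULL << 1)
#define FAKE_PTE_DIRTY    (1ULL << 2)
#define FAKE_PTE_ACCESSED (1ULL << 3)

typedef uint64_t fake_pte_t;

static fake_pte_t fake_pte_mkold(fake_pte_t pte)   { return pte & ~FAKE_PTE_ACCESSED; }
static fake_pte_t fake_pte_mkdirty(fake_pte_t pte) { return pte | FAKE_PTE_DIRTY; }
static fake_pte_t fake_pte_mkwrite(fake_pte_t pte) { return pte | FAKE_PTE_WRITE; }

/* Stand-in for arch_wants_old_prefaulted_pte(): a per-arch policy knob. */
static bool arch_prefers_old_prefault(void)
{
	return true;
}

/* Mirrors the branch structure left in the hunk above. */
static fake_pte_t fake_set_pte(fake_pte_t entry, bool prefault, bool write)
{
	if (prefault && arch_prefers_old_prefault())
		entry = fake_pte_mkold(entry);   /* prefaulted: start out "old" */

	if (write)
		entry = fake_pte_mkwrite(fake_pte_mkdirty(entry));

	return entry;
}

int main(void)
{
	fake_pte_t base = FAKE_PTE_ACCESSED;  /* pretend mk_pte() left it young */

	printf("faulting page:   %#" PRIx64 "\n", fake_set_pte(base, false, true));
	printf("prefaulted page: %#" PRIx64 "\n", fake_set_pte(base, true, false));
	return 0;
}
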
@@ -5177,17 +5173,19 @@ long copy_huge_page_from_user(struct page *dst_page,
        void *page_kaddr;
        unsigned long i, rc = 0;
        unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
+       struct page *subpage = dst_page;
 
-       for (i = 0; i < pages_per_huge_page; i++) {
+       for (i = 0; i < pages_per_huge_page;
+            i++, subpage = mem_map_next(subpage, dst_page, i)) {
                if (allow_pagefault)
-                       page_kaddr = kmap(dst_page + i);
+                       page_kaddr = kmap(subpage);
                else
-                       page_kaddr = kmap_atomic(dst_page + i);
+                       page_kaddr = kmap_atomic(subpage);
                rc = copy_from_user(page_kaddr,
                                (const void __user *)(src + i * PAGE_SIZE),
                                PAGE_SIZE);
                if (allow_pagefault)
-                       kunmap(dst_page + i);
+                       kunmap(subpage);
                else
                        kunmap_atomic(page_kaddr);
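
The final hunk stops computing each subpage as dst_page + i and instead carries a subpage cursor advanced with mem_map_next(). The point of mem_map_next() is that the struct page array backing a huge page is not guaranteed to be virtually contiguous on every memory model, so when the offset crosses a boundary the pointer is re-derived from the pfn rather than advanced by plain pointer arithmetic. The userspace sketch below models only that iteration pattern; struct fake_page, SECTION_PAGES, next_subpage() and fake_pfn_to_page() are invented for illustration and are not the kernel's types or helpers.

#include <stdio.h>

/*
 * Toy model of walking the subpages of a huge page when the "page array"
 * is split into separately allocated sections, i.e. when dst_page + i is
 * not a valid pointer past a section boundary.
 */
#define SECTION_PAGES 4
#define HUGE_PAGES    8   /* subpages per huge page in this model */

struct fake_page {
	unsigned long pfn;
	char data[16];
};

/* Two discontiguous "sections" backing one 8-subpage huge page. */
static struct fake_page section_a[SECTION_PAGES];
static struct fake_page section_b[SECTION_PAGES];

/* pfn_to_page() analogue for this toy layout. */
static struct fake_page *fake_pfn_to_page(unsigned long pfn)
{
	if (pfn < SECTION_PAGES)
		return &section_a[pfn];
	if (pfn < 2 * SECTION_PAGES)
		return &section_b[pfn - SECTION_PAGES];
	return NULL;  /* past the end of this toy "mem_map" */
}

/*
 * mem_map_next() analogue: plain iter + 1 inside a section, but re-derive
 * the pointer from the pfn whenever the offset crosses a section boundary.
 */
static struct fake_page *next_subpage(struct fake_page *iter,
				      struct fake_page *base, int offset)
{
	if ((offset % SECTION_PAGES) == 0)
		return fake_pfn_to_page(base->pfn + offset);
	return iter + 1;
}

int main(void)
{
	struct fake_page *dst_page = &section_a[0];
	struct fake_page *subpage = dst_page;
	int i;

	for (i = 0; i < SECTION_PAGES; i++) {
		section_a[i].pfn = i;
		section_b[i].pfn = SECTION_PAGES + i;
	}

	/* Same loop shape as the patched copy_huge_page_from_user(). */
	for (i = 0; i < HUGE_PAGES;
	     i++, subpage = next_subpage(subpage, dst_page, i)) {
		snprintf(subpage->data, sizeof(subpage->data), "chunk %d", i);
		printf("subpage pfn %lu: %s\n", subpage->pfn, subpage->data);
	}

	return 0;
}
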