Move FAULT_FLAG_xyz into handle_mm_fault() callers
author: Linus Torvalds <torvalds@linux-foundation.org>
Fri, 10 Apr 2009 16:01:23 +0000 (09:01 -0700)
committer: Linus Torvalds <torvalds@linux-foundation.org>
Sun, 21 Jun 2009 20:08:22 +0000 (13:08 -0700)
This allows the callers to now pass down the full set of FAULT_FLAG_xyz
flags to handle_mm_fault().  All callers have been (mechanically)
converted to the new calling convention, there's almost certainly room
for architectures to clean up their code and then add FAULT_FLAG_RETRY
when that support is added.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
25 files changed:
arch/alpha/mm/fault.c
arch/arm/mm/fault.c
arch/avr32/mm/fault.c
arch/cris/mm/fault.c
arch/frv/mm/fault.c
arch/ia64/mm/fault.c
arch/m32r/mm/fault.c
arch/m68k/mm/fault.c
arch/microblaze/mm/fault.c
arch/mips/mm/fault.c
arch/mn10300/mm/fault.c
arch/parisc/mm/fault.c
arch/powerpc/mm/fault.c
arch/powerpc/platforms/cell/spu_fault.c
arch/s390/lib/uaccess_pt.c
arch/s390/mm/fault.c
arch/sh/mm/fault_32.c
arch/sh/mm/tlbflush_64.c
arch/sparc/mm/fault_32.c
arch/sparc/mm/fault_64.c
arch/um/kernel/trap.c
arch/x86/mm/fault.c
arch/xtensa/mm/fault.c
include/linux/mm.h
mm/memory.c

index 4829f96585b15a1fd59f0409750df4ed59d99d35..00a31deaa96e0cc1d54e3fc2a34e4944cfb4609b 100644 (file)
@@ -146,7 +146,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
        /* If for any reason at all we couldn't handle the fault,
           make sure we exit gracefully rather than endlessly redo
           the fault.  */
-       fault = handle_mm_fault(mm, vma, address, cause > 0);
+       fault = handle_mm_fault(mm, vma, address, cause > 0 ? FAULT_FLAG_WRITE : 0);
        up_read(&mm->mmap_sem);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
index 0455557a289957e13bab0ef74a37ff0ebe5052d8..6fdcbb709827f695ee8a7047708ffaa89a504c0e 100644 (file)
@@ -208,7 +208,7 @@ good_area:
         * than endlessly redo the fault.
         */
 survive:
-       fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, fsr & (1 << 11));
+       fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & (1 << 11)) ? FAULT_FLAG_WRITE : 0);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
index 62d4abbaa65431185993ee249722dcb2d3cfdc44..b61d86d3debfff5fb8e284bb5c40c17b5531ad72 100644 (file)
@@ -133,7 +133,7 @@ good_area:
         * fault.
         */
 survive:
-       fault = handle_mm_fault(mm, vma, address, writeaccess);
+       fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
index c4c76db90f9cc9238e0da443fad56a0a8298f5b8..f925115e3250e516e9118cc331cdd838c6b075df 100644 (file)
@@ -163,7 +163,7 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
         * the fault.
         */
 
-       fault = handle_mm_fault(mm, vma, address, writeaccess & 1);
+       fault = handle_mm_fault(mm, vma, address, (writeaccess & 1) ? FAULT_FLAG_WRITE : 0);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
index 05093d41d98ea77c9213f50d8920cfc69d4c3874..30f5d100a81c15038f79f88672db560cf4f950a5 100644 (file)
@@ -163,7 +163,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
-       fault = handle_mm_fault(mm, vma, ear0, write);
+       fault = handle_mm_fault(mm, vma, ear0, write ? FAULT_FLAG_WRITE : 0);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
index 23088bed111ec229c4df3b1315da0e0fce7411a3..19261a99e6234cee94a8bb23a26b0b5241783938 100644 (file)
@@ -154,7 +154,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
         * sure we exit gracefully rather than endlessly redo the
         * fault.
         */
-       fault = handle_mm_fault(mm, vma, address, (mask & VM_WRITE) != 0);
+       fault = handle_mm_fault(mm, vma, address, (mask & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                /*
                 * We ran out of memory, or some other thing happened
index 4a71df4c1b3022b1c8f324b50d78dd2b79f8e51a..7274b47f4c229b9d27aae0c6e787468fc07b8fb3 100644 (file)
@@ -196,7 +196,7 @@ survive:
         */
        addr = (address & PAGE_MASK);
        set_thread_fault_code(error_code);
-       fault = handle_mm_fault(mm, vma, addr, write);
+       fault = handle_mm_fault(mm, vma, addr, write ? FAULT_FLAG_WRITE : 0);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
index f493f03231d5ad3594d0813bd11a8855e9efcfbb..d0e35cf99fc69a531cb0239277c3351525c13773 100644 (file)
@@ -155,7 +155,7 @@ good_area:
         */
 
  survive:
-       fault = handle_mm_fault(mm, vma, address, write);
+       fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
 #ifdef DEBUG
        printk("handle_mm_fault returns %d\n",fault);
 #endif
index 5e67cd1fab40f2794de649b9d058429b2f07a393..956607a63f4c21327b1b1d749031cbbd7510ee58 100644 (file)
@@ -232,7 +232,7 @@ good_area:
         * the fault.
         */
 survive:
-       fault = handle_mm_fault(mm, vma, address, is_write);
+       fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
index 55767ad9f00ee4a5aec5be3e984d5a971b808eb9..6751ce9ede9ed6ff6935ec058e86d5f47b992730 100644 (file)
@@ -102,7 +102,7 @@ good_area:
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
-       fault = handle_mm_fault(mm, vma, address, write);
+       fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
index 33cf25025dac22f86dad42a8ebf8cef489db8779..a62e1e138bc101c112872eae8b4eb9e0ce61d8cf 100644 (file)
@@ -258,7 +258,7 @@ good_area:
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
-       fault = handle_mm_fault(mm, vma, address, write);
+       fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
index 92c7fa4ecc3f4a27ac6ac79f31f1acda964f967e..bfb6dd6ab380c3a2b762575d28e36008900658f4 100644 (file)
@@ -202,7 +202,7 @@ good_area:
         * fault.
         */
 
-       fault = handle_mm_fault(mm, vma, address, (acc_type & VM_WRITE) != 0);
+       fault = handle_mm_fault(mm, vma, address, (acc_type & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                /*
                 * We hit a shared mapping outside of the file, or some
index 5beffc8f481e7c6d725b63c82f3aebeed2bde9c2..830bef0a11311d856890d3554bbc64a67be83600 100644 (file)
@@ -302,7 +302,7 @@ good_area:
         * the fault.
         */
  survive:
-       ret = handle_mm_fault(mm, vma, address, is_write);
+       ret = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
        if (unlikely(ret & VM_FAULT_ERROR)) {
                if (ret & VM_FAULT_OOM)
                        goto out_of_memory;
index 95d8dadf2d87f5daa01f1cdf9121fa4e06c6fdce..d06ba87f1a19e8b6d40906d330446395ffeec0d3 100644 (file)
@@ -70,7 +70,7 @@ int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
        }
 
        ret = 0;
-       *flt = handle_mm_fault(mm, vma, ea, is_write);
+       *flt = handle_mm_fault(mm, vma, ea, is_write ? FAULT_FLAG_WRITE : 0);
        if (unlikely(*flt & VM_FAULT_ERROR)) {
                if (*flt & VM_FAULT_OOM) {
                        ret = -ENOMEM;
index b0b84c35b0ade7f23c464c6ee848183b63827469..cb5d59eab0eea006657e8645e05785771b740cb0 100644 (file)
@@ -66,7 +66,7 @@ static int __handle_fault(struct mm_struct *mm, unsigned long address,
        }
 
 survive:
-       fault = handle_mm_fault(mm, vma, address, write_access);
+       fault = handle_mm_fault(mm, vma, address, write_access ? FAULT_FLAG_WRITE : 0);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
index 220a152c836ccf990c109bed1089d297e4ba76d6..74eb26bf1970ee4f92bf3d6f34e3036471a6302d 100644 (file)
@@ -352,7 +352,7 @@ good_area:
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
-       fault = handle_mm_fault(mm, vma, address, write);
+       fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM) {
                        up_read(&mm->mmap_sem);
index 2c50f80fc33235574f1e895d163b6eb54e92d46c..cc8ddbdf3d7a75005c2c9590fe386d20bb79050c 100644 (file)
@@ -133,7 +133,7 @@ good_area:
         * the fault.
         */
 survive:
-       fault = handle_mm_fault(mm, vma, address, writeaccess);
+       fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
index 7876997ba19a829f50941757ba6807bd93d3b3d4..fcbb6e135cef319ee30c5d27a12fb75b234608e1 100644 (file)
@@ -187,7 +187,7 @@ good_area:
         * the fault.
         */
 survive:
-       fault = handle_mm_fault(mm, vma, address, writeaccess);
+       fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
index 12e447fc8542c0c43f72220b779a74e7b3c08835..a5e30c642ee3bf33f136cababbb99bc20a9aba07 100644 (file)
@@ -241,7 +241,7 @@ good_area:
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
-       fault = handle_mm_fault(mm, vma, address, write);
+       fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
@@ -484,7 +484,7 @@ good_area:
                if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }
-       switch (handle_mm_fault(mm, vma, address, write)) {
+       switch (handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0)) {
        case VM_FAULT_SIGBUS:
        case VM_FAULT_OOM:
                goto do_sigbus;
index 4ab8993b0863cda9f432464038daa14b483ae463..e5620b27c8bf6a9877367c28ca9088889f259d99 100644 (file)
@@ -398,7 +398,7 @@ good_area:
                        goto bad_area;
        }
 
-       fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE));
+       fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
index 7384d8accfe7d005d896eced4de7ab745452efef..637c6505dc00de1ce482335023bff8227aedaaa1 100644 (file)
@@ -65,7 +65,7 @@ good_area:
        do {
                int fault;
 
-               fault = handle_mm_fault(mm, vma, address, is_write);
+               fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
                if (unlikely(fault & VM_FAULT_ERROR)) {
                        if (fault & VM_FAULT_OOM) {
                                goto out_of_memory;
index c403526d5d15418b19f5fedbbc3ffc20f5bd9df0..78a5fff857bea0906cb6748baf6ef450b259b538 100644 (file)
@@ -1113,7 +1113,7 @@ good_area:
         * make sure we exit gracefully rather than endlessly redo
         * the fault:
         */
-       fault = handle_mm_fault(mm, vma, address, write);
+       fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
 
        if (unlikely(fault & VM_FAULT_ERROR)) {
                mm_fault_error(regs, error_code, address, fault);
index bdd860d93f72a99442bc324bce7319dba62ceca8..bc0733359a8852a12288f083c2420f4c9734948e 100644 (file)
@@ -106,7 +106,7 @@ good_area:
         * the fault.
         */
 survive:
-       fault = handle_mm_fault(mm, vma, address, is_write);
+       fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
index cf260d848eb925edf884e4d82a8477a1f890f6f4..d006e93d5c93c3323da45ebd14d0f7920722120b 100644 (file)
@@ -810,11 +810,11 @@ extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);
 
 #ifdef CONFIG_MMU
 extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-                       unsigned long address, int write_access);
+                       unsigned long address, unsigned int flags);
 #else
 static inline int handle_mm_fault(struct mm_struct *mm,
                        struct vm_area_struct *vma, unsigned long address,
-                       int write_access)
+                       unsigned int flags)
 {
        /* should never happen if there's no MMU */
        BUG();
index e6a9700359df1fcb6867687a8a3a38f85dbb76a0..98bcb90d5957010ada3218c06117cc797aa81813 100644 (file)
@@ -1310,8 +1310,9 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                        cond_resched();
                        while (!(page = follow_page(vma, start, foll_flags))) {
                                int ret;
-                               ret = handle_mm_fault(mm, vma, start,
-                                               foll_flags & FOLL_WRITE);
+
+                               /* FOLL_WRITE matches FAULT_FLAG_WRITE! */
+                               ret = handle_mm_fault(mm, vma, start, foll_flags & FOLL_WRITE);
                                if (ret & VM_FAULT_ERROR) {
                                        if (ret & VM_FAULT_OOM)
                                                return i ? i : -ENOMEM;
@@ -2958,13 +2959,12 @@ unlock:
  * By the time we get here, we already hold the mm semaphore
  */
 int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-               unsigned long address, int write_access)
+               unsigned long address, unsigned int flags)
 {
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
-       unsigned int flags = write_access ? FAULT_FLAG_WRITE : 0;
 
        __set_current_state(TASK_RUNNING);