mm: make vm_area_alloc() initialize core fields
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 21 Jul 2018 22:24:03 +0000 (15:24 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 21 Jul 2018 22:24:03 +0000 (15:24 -0700)
Like vm_area_dup(), it initializes the anon_vma_chain head and the basic
mm pointer.

The rest of the fields end up being different for different users,
although the plan is to also initialize the 'vm_ops' field to a dummy
entry.
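
As a rough sketch of what that 'vm_ops' plan could look like (the
dummy_vm_ops name and its placement here are illustrative assumptions,
not part of this commit), vm_area_alloc() would simply point every new
vma at an empty, const operations table so vm_ops is never NULL:

	/* Illustrative sketch only: an empty operations table that a
	 * later change could install by default. */
	static const struct vm_operations_struct dummy_vm_ops = {};

	struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
	{
		struct vm_area_struct *vma;

		vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
		if (vma) {
			vma->vm_mm = mm;
			vma->vm_ops = &dummy_vm_ops;	/* hypothetical follow-up step */
			INIT_LIST_HEAD(&vma->anon_vma_chain);
		}
		return vma;
	}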

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/ia64/kernel/perfmon.c
arch/ia64/mm/init.c
fs/exec.c
include/linux/mm.h
kernel/fork.c
mm/mmap.c
mm/nommu.c

index e859246badcacb32a643df81f6078c34ad0d622c..46bff16618362308ef016a2125cbdc6419c7124f 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2278,17 +2278,15 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
        DPRINT(("smpl_buf @%p\n", smpl_buf));
 
        /* allocate vma */
-       vma = vm_area_alloc();
+       vma = vm_area_alloc(mm);
        if (!vma) {
                DPRINT(("Cannot allocate vma\n"));
                goto error_kmem;
        }
-       INIT_LIST_HEAD(&vma->anon_vma_chain);
 
        /*
         * partially initialize the vma for the sampling buffer
         */
-       vma->vm_mm           = mm;
        vma->vm_file         = get_file(filp);
        vma->vm_flags        = VM_READ|VM_MAYREAD|VM_DONTEXPAND|VM_DONTDUMP;
        vma->vm_page_prot    = PAGE_READONLY; /* XXX may need to change */
index 3f2321bffb72a2406afe468c9ad378f22f5f4da5..bdb14a3691370c339a538e844847f2b252ae683e 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -114,10 +114,8 @@ ia64_init_addr_space (void)
         * the problem.  When the process attempts to write to the register backing store
         * for the first time, it will get a SEGFAULT in this case.
         */
-       vma = vm_area_alloc();
+       vma = vm_area_alloc(current->mm);
        if (vma) {
-               INIT_LIST_HEAD(&vma->anon_vma_chain);
-               vma->vm_mm = current->mm;
                vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
                vma->vm_end = vma->vm_start + PAGE_SIZE;
                vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
@@ -133,10 +131,8 @@ ia64_init_addr_space (void)
 
        /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
        if (!(current->personality & MMAP_PAGE_ZERO)) {
-               vma = vm_area_alloc();
+               vma = vm_area_alloc(current->mm);
                if (vma) {
-                       INIT_LIST_HEAD(&vma->anon_vma_chain);
-                       vma->vm_mm = current->mm;
                        vma->vm_end = PAGE_SIZE;
                        vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
                        vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
index 9bd83989ea259bc19250c83d905870236c7df169..72e961a62adb92cacf1ddd954986944d20498817 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -290,7 +290,7 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
        struct vm_area_struct *vma = NULL;
        struct mm_struct *mm = bprm->mm;
 
-       bprm->vma = vma = vm_area_alloc();
+       bprm->vma = vma = vm_area_alloc(mm);
        if (!vma)
                return -ENOMEM;
 
@@ -298,7 +298,6 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
                err = -EINTR;
                goto err_free;
        }
-       vma->vm_mm = mm;
 
        /*
         * Place the stack at the largest stack address the architecture
@@ -311,7 +310,6 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
        vma->vm_start = vma->vm_end - PAGE_SIZE;
        vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-       INIT_LIST_HEAD(&vma->anon_vma_chain);
 
        err = insert_vm_struct(mm, vma);
        if (err)
index de2fd86c61549f5eacbea530b61a6d7e63db2d5d..d3a3842316b87c3f6b5d0c02b6731154b65484fc 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -155,7 +155,7 @@ extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
  * mmap() functions).
  */
 
-struct vm_area_struct *vm_area_alloc(void);
+struct vm_area_struct *vm_area_alloc(struct mm_struct *);
 struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
 void vm_area_free(struct vm_area_struct *);
 
index 67253e41bfb0fe56b8eb656923eaf052a2e0a4b9..a191c05e757d8ff348a60d25e550a0bcb85765b9 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -308,9 +308,15 @@ static struct kmem_cache *vm_area_cachep;
 /* SLAB cache for mm_struct structures (tsk->mm) */
 static struct kmem_cache *mm_cachep;
 
-struct vm_area_struct *vm_area_alloc(void)
+struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
 {
-       return kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+       struct vm_area_struct *vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+
+       if (vma) {
+               vma->vm_mm = mm;
+               INIT_LIST_HEAD(&vma->anon_vma_chain);
+       }
+       return vma;
 }
 
 struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
index b0ed8ce1b67e4c674e13be6345b6c5924149eda6..ff1944d8d458bf82297c33c2949a23919e98d862 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1729,19 +1729,17 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
         * specific mapper. the address has already been validated, but
         * not unmapped, but the maps are removed from the list.
         */
-       vma = vm_area_alloc();
+       vma = vm_area_alloc(mm);
        if (!vma) {
                error = -ENOMEM;
                goto unacct_error;
        }
 
-       vma->vm_mm = mm;
        vma->vm_start = addr;
        vma->vm_end = addr + len;
        vma->vm_flags = vm_flags;
        vma->vm_page_prot = vm_get_page_prot(vm_flags);
        vma->vm_pgoff = pgoff;
-       INIT_LIST_HEAD(&vma->anon_vma_chain);
 
        if (file) {
                if (vm_flags & VM_DENYWRITE) {
@@ -2979,14 +2977,12 @@ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long fla
        /*
         * create a vma struct for an anonymous mapping
         */
-       vma = vm_area_alloc();
+       vma = vm_area_alloc(mm);
        if (!vma) {
                vm_unacct_memory(len >> PAGE_SHIFT);
                return -ENOMEM;
        }
 
-       INIT_LIST_HEAD(&vma->anon_vma_chain);
-       vma->vm_mm = mm;
        vma->vm_start = addr;
        vma->vm_end = addr + len;
        vma->vm_pgoff = pgoff;
@@ -3343,12 +3339,10 @@ static struct vm_area_struct *__install_special_mapping(
        int ret;
        struct vm_area_struct *vma;
 
-       vma = vm_area_alloc();
+       vma = vm_area_alloc(mm);
        if (unlikely(vma == NULL))
                return ERR_PTR(-ENOMEM);
 
-       INIT_LIST_HEAD(&vma->anon_vma_chain);
-       vma->vm_mm = mm;
        vma->vm_start = addr;
        vma->vm_end = addr + len;
 
index c2560e9cc803a0b9d291331736454163e92c9f14..1d22fdbf7d7c9778d4a67c89bd972b3225679fb9 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1204,7 +1204,7 @@ unsigned long do_mmap(struct file *file,
        if (!region)
                goto error_getting_region;
 
-       vma = vm_area_alloc();
+       vma = vm_area_alloc(current->mm);
        if (!vma)
                goto error_getting_vma;
 
@@ -1212,7 +1212,6 @@ unsigned long do_mmap(struct file *file,
        region->vm_flags = vm_flags;
        region->vm_pgoff = pgoff;
 
-       INIT_LIST_HEAD(&vma->anon_vma_chain);
        vma->vm_flags = vm_flags;
        vma->vm_pgoff = pgoff;
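
Taken together, the hunks above converge on one caller pattern; a
condensed sketch (field values and error handling abbreviated, not any
single call site verbatim) looks like:

	struct vm_area_struct *vma;

	vma = vm_area_alloc(mm);	/* vm_mm and anon_vma_chain set here now */
	if (!vma)
		return -ENOMEM;

	/* callers still fill in their own range, flags and protections */
	vma->vm_start = addr;
	vma->vm_end = addr + len;
	vma->vm_flags = vm_flags;
	vma->vm_page_prot = vm_get_page_prot(vm_flags);
	vma->vm_pgoff = pgoff;

	if (insert_vm_struct(mm, vma)) {
		vm_area_free(vma);	/* undo the allocation on failure */
		return -ENOMEM;
	}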