/*
 *  linux/fs/exec.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * #!-checking implemented by tytso.
 */
/*
 * Demand-loading implemented 01.12.91 - no need to read anything but
 * the header into memory. The inode of the executable is put into
 * "current->executable", and page faults do the actual loading. Clean.
 *
 * Once more I can proudly say that linux stood up to being changed: it
 * was less than 2 hours work to get demand-loading completely implemented.
 *
 * Demand loading changed July 1993 by Eric Youngdale.  Use mmap instead,
 * current->executable is only used by the procfs.  This allows a dispatch
 * table to check for several different types of binary formats.  We keep
 * trying until we recognize the file or we run out of supported binary
 * formats.
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/compat.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

#include <trace/events/task.h>
#include "internal.h"

#include <trace/events/sched.h>

int suid_dumpable = 0;

static LIST_HEAD(formats);
static DEFINE_RWLOCK(binfmt_lock);

void __register_binfmt(struct linux_binfmt *fmt, int insert)
{
        BUG_ON(!fmt);
        if (WARN_ON(!fmt->load_binary))
                return;
        write_lock(&binfmt_lock);
        if (insert)
                list_add(&fmt->lh, &formats);
        else
                list_add_tail(&fmt->lh, &formats);
        write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(__register_binfmt);

void unregister_binfmt(struct linux_binfmt *fmt)
{
        write_lock(&binfmt_lock);
        list_del(&fmt->lh);
        write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(unregister_binfmt);

static inline void put_binfmt(struct linux_binfmt *fmt)
{
        module_put(fmt->module);
}

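/*
 * Example (not part of this file): a minimal sketch of module-side
 * registration using the helpers above, via this era's register_binfmt()
 * wrapper from <linux/binfmts.h>; the handler and module names are
 * illustrative only.
 */
#if 0	/* example only */
#include <linux/module.h>
#include <linux/binfmts.h>

static int load_example_binary(struct linux_binprm *bprm)
{
	/* Decline every image; a real handler would parse bprm->buf here. */
	return -ENOEXEC;
}

static struct linux_binfmt example_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_example_binary,
};

static int __init example_init(void)
{
	register_binfmt(&example_format);	/* list_add_tail on &formats */
	return 0;
}

static void __exit example_exit(void)
{
	unregister_binfmt(&example_format);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
#endif
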
#ifdef CONFIG_USELIB
/*
 * Note that a shared library must be both readable and executable for
 * security reasons.
 *
 * Also note that we take the load address from the file itself.
 */
SYSCALL_DEFINE1(uselib, const char __user *, library)
{
        struct linux_binfmt *fmt;
        struct file *file;
        struct filename *tmp = getname(library);
        int error = PTR_ERR(tmp);
        static const struct open_flags uselib_flags = {
                .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
                .acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
                .intent = LOOKUP_OPEN,
                .lookup_flags = LOOKUP_FOLLOW,
        };

        if (IS_ERR(tmp))
                goto out;

        file = do_filp_open(AT_FDCWD, tmp, &uselib_flags);
        putname(tmp);
        error = PTR_ERR(file);
        if (IS_ERR(file))
                goto out;

        error = -EINVAL;
        if (!S_ISREG(file_inode(file)->i_mode))
                goto exit;

        error = -EACCES;
        if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
                goto exit;

        fsnotify_open(file);

        error = -ENOEXEC;

        read_lock(&binfmt_lock);
        list_for_each_entry(fmt, &formats, lh) {
                if (!fmt->load_shlib)
                        continue;
                if (!try_module_get(fmt->module))
                        continue;
                read_unlock(&binfmt_lock);
                error = fmt->load_shlib(file);
                read_lock(&binfmt_lock);
                put_binfmt(fmt);
                if (error != -ENOEXEC)
                        break;
        }
        read_unlock(&binfmt_lock);
exit:
        fput(file);
out:
        return error;
}
#endif /* #ifdef CONFIG_USELIB */
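
/*
 * Example (not part of this file): invoking the legacy uselib(2) syscall
 * from userspace. Modern C libraries no longer wrap it, so the raw
 * syscall(2) interface is used; the library path is hypothetical and the
 * call only succeeds on kernels built with CONFIG_USELIB=y.
 */
#if 0	/* example only */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/* Must name a regular, readable, executable file on an exec mount. */
	if (syscall(SYS_uselib, "/lib/libexample.so") < 0) {
		perror("uselib");	/* e.g. ENOEXEC if no binfmt accepts it */
		return 1;
	}
	return 0;
}
#endif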

#ifdef CONFIG_MMU
/*
 * The nascent bprm->mm is not visible until exec_mmap(), but it can
 * use a lot of memory, so account these pages in current->mm temporarily
 * for oom_badness()->get_mm_rss(). Once exec succeeds or fails, we
 * change the counter back via acct_arg_size(0).
 */
static void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
{
        struct mm_struct *mm = current->mm;
        long diff = (long)(pages - bprm->vma_pages);

        if (!mm || !diff)
                return;

        bprm->vma_pages = pages;
        add_mm_counter(mm, MM_ANONPAGES, diff);
}

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
                int write)
{
        struct page *page;
        int ret;

#ifdef CONFIG_STACK_GROWSUP
        if (write) {
                ret = expand_downwards(bprm->vma, pos);
                if (ret < 0)
                        return NULL;
        }
#endif
        ret = get_user_pages(current, bprm->mm, pos,
                        1, write, 1, &page, NULL);
        if (ret <= 0)
                return NULL;

        if (write) {
                unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
                struct rlimit *rlim;

                acct_arg_size(bprm, size / PAGE_SIZE);

                /*
                 * We've historically supported up to 32 pages (ARG_MAX)
                 * of argument strings even with small stacks
                 */
                if (size <= ARG_MAX)
                        return page;

                /*
                 * Limit to 1/4-th the stack size for the argv+env strings.
                 * This ensures that:
                 *  - the remaining binfmt code will not run out of stack space,
                 *  - the program will have a reasonable amount of stack left
                 *    to work from.
                 */
                rlim = current->signal->rlim;
                if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) {
                        put_page(page);
                        return NULL;
                }
        }

        return page;
}

static void put_arg_page(struct page *page)
{
        put_page(page);
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
}

static void free_arg_pages(struct linux_binprm *bprm)
{
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
                struct page *page)
{
        flush_cache_page(bprm->vma, pos, page_to_pfn(page));
}

static int __bprm_mm_init(struct linux_binprm *bprm)
{
        int err;
        struct vm_area_struct *vma = NULL;
        struct mm_struct *mm = bprm->mm;

        bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (!vma)
                return -ENOMEM;

        down_write(&mm->mmap_sem);
        vma->vm_mm = mm;

        /*
         * Place the stack at the largest stack address the architecture
         * supports. Later, we'll move this to an appropriate place. We don't
         * use STACK_TOP because that can depend on attributes which aren't
         * configured yet.
         */
        BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
        vma->vm_end = STACK_TOP_MAX;
        vma->vm_start = vma->vm_end - PAGE_SIZE;
        vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        INIT_LIST_HEAD(&vma->anon_vma_chain);

        err = insert_vm_struct(mm, vma);
        if (err)
                goto err;

        mm->stack_vm = mm->total_vm = 1;
        up_write(&mm->mmap_sem);
        bprm->p = vma->vm_end - sizeof(void *);
        return 0;
err:
        up_write(&mm->mmap_sem);
        bprm->vma = NULL;
        kmem_cache_free(vm_area_cachep, vma);
        return err;
}

static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
        return len <= MAX_ARG_STRLEN;
}

#else

static inline void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
{
}

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
                int write)
{
        struct page *page;

        page = bprm->page[pos / PAGE_SIZE];
        if (!page && write) {
                page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
                if (!page)
                        return NULL;
                bprm->page[pos / PAGE_SIZE] = page;
        }

        return page;
}

static void put_arg_page(struct page *page)
{
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
        if (bprm->page[i]) {
                __free_page(bprm->page[i]);
                bprm->page[i] = NULL;
        }
}

static void free_arg_pages(struct linux_binprm *bprm)
{
        int i;

        for (i = 0; i < MAX_ARG_PAGES; i++)
                free_arg_page(bprm, i);
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
                struct page *page)
{
}

static int __bprm_mm_init(struct linux_binprm *bprm)
{
        bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
        return 0;
}

static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
        return len <= bprm->p;
}

#endif /* CONFIG_MMU */
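
/*
 * Example (not part of this file): a userspace probe of the MMU-case
 * valid_arg_len() limit above. A single string whose length reaches
 * MAX_ARG_STRLEN (32 pages, i.e. 128 KiB with 4 KiB pages) makes
 * execve(2) fail with E2BIG before any binfmt handler runs, because
 * strnlen_user() counts the terminating NUL as well.
 */
#if 0	/* example only */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	size_t len = 32 * 4096 + 1;	/* MAX_ARG_STRLEN chars + NUL */
	char *big = malloc(len);

	if (!big)
		return 1;
	memset(big, 'x', len - 1);
	big[len - 1] = '\0';

	char *argv[] = { "/bin/true", big, NULL };
	char *envp[] = { NULL };
	execve("/bin/true", argv, envp);	/* returns only on failure */
	printf("execve: %s\n", strerror(errno));	/* expect E2BIG */
	return 1;
}
#endif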

/*
 * Create a new mm_struct and populate it with a temporary stack
 * vm_area_struct.  We don't have enough context at this point to set the stack
 * flags, permissions, and offset, so we use temporary values.  We'll update
 * them later in setup_arg_pages().
 */
static int bprm_mm_init(struct linux_binprm *bprm)
{
        int err;
        struct mm_struct *mm = NULL;

        bprm->mm = mm = mm_alloc();
        err = -ENOMEM;
        if (!mm)
                goto err;

        err = init_new_context(current, mm);
        if (err)
                goto err;

        err = __bprm_mm_init(bprm);
        if (err)
                goto err;

        return 0;

err:
        if (mm) {
                bprm->mm = NULL;
                mmdrop(mm);
        }

        return err;
}

struct user_arg_ptr {
#ifdef CONFIG_COMPAT
        bool is_compat;
#endif
        union {
                const char __user *const __user *native;
#ifdef CONFIG_COMPAT
                const compat_uptr_t __user *compat;
#endif
        } ptr;
};

static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
{
        const char __user *native;

#ifdef CONFIG_COMPAT
        if (unlikely(argv.is_compat)) {
                compat_uptr_t compat;

                if (get_user(compat, argv.ptr.compat + nr))
                        return ERR_PTR(-EFAULT);

                return compat_ptr(compat);
        }
#endif

        if (get_user(native, argv.ptr.native + nr))
                return ERR_PTR(-EFAULT);

        return native;
}

/*
 * count() counts the number of strings in array ARGV.
 */
static int count(struct user_arg_ptr argv, int max)
{
        int i = 0;

        if (argv.ptr.native != NULL) {
                for (;;) {
                        const char __user *p = get_user_arg_ptr(argv, i);

                        if (!p)
                                break;

                        if (IS_ERR(p))
                                return -EFAULT;

                        if (i >= max)
                                return -E2BIG;
                        ++i;

                        if (fatal_signal_pending(current))
                                return -ERESTARTNOHAND;
                        cond_resched();
                }
        }
        return i;
}

/*
 * 'copy_strings()' copies argument/environment strings from the old
 * process's memory to the new process's stack.  The call to get_user_pages()
 * ensures the destination page is created and not swapped out.
 */
static int copy_strings(int argc, struct user_arg_ptr argv,
                        struct linux_binprm *bprm)
{
        struct page *kmapped_page = NULL;
        char *kaddr = NULL;
        unsigned long kpos = 0;
        int ret;

        while (argc-- > 0) {
                const char __user *str;
                int len;
                unsigned long pos;

                ret = -EFAULT;
                str = get_user_arg_ptr(argv, argc);
                if (IS_ERR(str))
                        goto out;

                len = strnlen_user(str, MAX_ARG_STRLEN);
                if (!len)
                        goto out;

                ret = -E2BIG;
                if (!valid_arg_len(bprm, len))
                        goto out;

                /* We're going to work our way backwards. */
                pos = bprm->p;
                str += len;
                bprm->p -= len;

                while (len > 0) {
                        int offset, bytes_to_copy;

                        if (fatal_signal_pending(current)) {
                                ret = -ERESTARTNOHAND;
                                goto out;
                        }
                        cond_resched();

                        offset = pos % PAGE_SIZE;
                        if (offset == 0)
                                offset = PAGE_SIZE;

                        bytes_to_copy = offset;
                        if (bytes_to_copy > len)
                                bytes_to_copy = len;

                        offset -= bytes_to_copy;
                        pos -= bytes_to_copy;
                        str -= bytes_to_copy;
                        len -= bytes_to_copy;

                        if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
                                struct page *page;

                                page = get_arg_page(bprm, pos, 1);
                                if (!page) {
                                        ret = -E2BIG;
                                        goto out;
                                }

                                if (kmapped_page) {
                                        flush_kernel_dcache_page(kmapped_page);
                                        kunmap(kmapped_page);
                                        put_arg_page(kmapped_page);
                                }
                                kmapped_page = page;
                                kaddr = kmap(kmapped_page);
                                kpos = pos & PAGE_MASK;
                                flush_arg_page(bprm, kpos, kmapped_page);
                        }
                        if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
                                ret = -EFAULT;
                                goto out;
                        }
                }
        }
        ret = 0;
out:
        if (kmapped_page) {
                flush_kernel_dcache_page(kmapped_page);
                kunmap(kmapped_page);
                put_arg_page(kmapped_page);
        }
        return ret;
}

/*
 * Like copy_strings, but get argv and its values from kernel memory.
 */
int copy_strings_kernel(int argc, const char *const *__argv,
                        struct linux_binprm *bprm)
{
        int r;
        mm_segment_t oldfs = get_fs();
        struct user_arg_ptr argv = {
                .ptr.native = (const char __user *const __user *)__argv,
        };

        set_fs(KERNEL_DS);
        r = copy_strings(argc, argv, bprm);
        set_fs(oldfs);

        return r;
}
EXPORT_SYMBOL(copy_strings_kernel);
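
/*
 * Example (not part of this file): the usual caller-side pattern for
 * copy_strings_kernel(), condensed from what a script handler such as
 * binfmt_script does when it rewrites argv for the interpreter. The
 * helper name push_interp_args is hypothetical.
 */
#if 0	/* example only */
static int push_interp_args(struct linux_binprm *bprm,
			    const char *i_name, const char *i_arg)
{
	int retval;

	retval = remove_arg_zero(bprm);		/* drop the script's argv[0] */
	if (retval)
		return retval;

	if (i_arg) {				/* optional "#!/interp arg" word */
		retval = copy_strings_kernel(1, &i_arg, bprm);
		if (retval < 0)
			return retval;
		bprm->argc++;
	}

	retval = copy_strings_kernel(1, &i_name, bprm);	/* becomes argv[0] */
	if (retval < 0)
		return retval;
	bprm->argc++;
	return 0;
}
#endif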

#ifdef CONFIG_MMU

/*
 * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX.  Once
 * the binfmt code determines where the new stack should reside, we shift it to
 * its final location.  The process proceeds as follows:
 *
 * 1) Use shift to calculate the new vma endpoints.
 * 2) Extend vma to cover both the old and new ranges.  This ensures the
 *    arguments passed to subsequent functions are consistent.
 * 3) Move vma's page tables to the new range.
 * 4) Free up any cleared pgd range.
 * 5) Shrink the vma to cover only the new range.
 */
static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long old_start = vma->vm_start;
        unsigned long old_end = vma->vm_end;
        unsigned long length = old_end - old_start;
        unsigned long new_start = old_start - shift;
        unsigned long new_end = old_end - shift;
        struct mmu_gather tlb;

        BUG_ON(new_start > new_end);

        /*
         * ensure there are no vmas between where we want to go
         * and where we are
         */
        if (vma != find_vma(mm, new_start))
                return -EFAULT;

        /*
         * cover the whole range: [new_start, old_end)
         */
        if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL))
                return -ENOMEM;

        /*
         * move the page tables downwards, on failure we rely on
         * process cleanup to remove whatever mess we made.
         */
        if (length != move_page_tables(vma, old_start,
                                       vma, new_start, length, false))
                return -ENOMEM;

        lru_add_drain();
        tlb_gather_mmu(&tlb, mm, old_start, old_end);
        if (new_end > old_start) {
                /*
                 * when the old and new regions overlap, clear from new_end.
                 */
                free_pgd_range(&tlb, new_end, old_end, new_end,
                        vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
        } else {
                /*
                 * otherwise, clean from old_start; this is done to not touch
                 * the address space in [new_end, old_start).  Some architectures
                 * have constraints on va-space that make this illegal (IA64);
                 * for the others it's just a little faster.
                 */
                free_pgd_range(&tlb, old_start, old_end, new_end,
                        vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
        }
        tlb_finish_mmu(&tlb, old_start, old_end);

        /*
         * Shrink the vma to just the new range.  Always succeeds.
         */
        vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);

        return 0;
}

/*
 * Finalizes the stack vm_area_struct. The flags and permissions are updated,
 * the stack is optionally relocated, and some extra space is added.
 */
int setup_arg_pages(struct linux_binprm *bprm,
                    unsigned long stack_top,
                    int executable_stack)
{
        unsigned long ret;
        unsigned long stack_shift;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma = bprm->vma;
        struct vm_area_struct *prev = NULL;
        unsigned long vm_flags;
        unsigned long stack_base;
        unsigned long stack_size;
        unsigned long stack_expand;
        unsigned long rlim_stack;

#ifdef CONFIG_STACK_GROWSUP
        /* Limit stack size to 1GB */
        stack_base = rlimit_max(RLIMIT_STACK);
        if (stack_base > (1 << 30))
                stack_base = 1 << 30;

        /* Make sure we didn't let the argument array grow too large. */
        if (vma->vm_end - vma->vm_start > stack_base)
                return -ENOMEM;

        stack_base = PAGE_ALIGN(stack_top - stack_base);

        stack_shift = vma->vm_start - stack_base;
        mm->arg_start = bprm->p - stack_shift;
        bprm->p = vma->vm_end - stack_shift;
#else
        stack_top = arch_align_stack(stack_top);
        stack_top = PAGE_ALIGN(stack_top);

        if (unlikely(stack_top < mmap_min_addr) ||
            unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
                return -ENOMEM;

        stack_shift = vma->vm_end - stack_top;

        bprm->p -= stack_shift;
        mm->arg_start = bprm->p;
#endif

        if (bprm->loader)
                bprm->loader -= stack_shift;
        bprm->exec -= stack_shift;

        down_write(&mm->mmap_sem);
        vm_flags = VM_STACK_FLAGS;

        /*
         * Adjust stack execute permissions; explicitly enable for
         * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
         * (arch default) otherwise.
         */
        if (unlikely(executable_stack == EXSTACK_ENABLE_X))
                vm_flags |= VM_EXEC;
        else if (executable_stack == EXSTACK_DISABLE_X)
                vm_flags &= ~VM_EXEC;
        vm_flags |= mm->def_flags;
        vm_flags |= VM_STACK_INCOMPLETE_SETUP;

        ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
                        vm_flags);
        if (ret)
                goto out_unlock;
        BUG_ON(prev != vma);

        /* Move stack pages down in memory. */
        if (stack_shift) {
                ret = shift_arg_pages(vma, stack_shift);
                if (ret)
                        goto out_unlock;
        }

        /* mprotect_fixup is overkill to remove the temporary stack flags */
        vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;

        stack_expand = 131072UL; /* arbitrarily 32*4k (or 2*64k) pages */
        stack_size = vma->vm_end - vma->vm_start;
        /*
         * Align this down to a page boundary as expand_stack
         * will align it up.
         */
        rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK;
#ifdef CONFIG_STACK_GROWSUP
        if (stack_size + stack_expand > rlim_stack)
                stack_base = vma->vm_start + rlim_stack;
        else
                stack_base = vma->vm_end + stack_expand;
#else
        if (stack_size + stack_expand > rlim_stack)
                stack_base = vma->vm_end - rlim_stack;
        else
                stack_base = vma->vm_start - stack_expand;
#endif
        current->mm->start_stack = bprm->p;
        ret = expand_stack(vma, stack_base);
        if (ret)
                ret = -EFAULT;

out_unlock:
        up_write(&mm->mmap_sem);
        return ret;
}
EXPORT_SYMBOL(setup_arg_pages);
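
/*
 * Example (not part of this file): the call-site shape a binfmt loader
 * uses, condensed from load_elf_binary() in fs/binfmt_elf.c, where
 * randomize_stack_top() and executable_stack come from the binary being
 * loaded.
 */
#if 0	/* example only */
	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
				 executable_stack);
	if (retval < 0)
		goto out_free_dentry;
#endif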

#endif /* CONFIG_MMU */

static struct file *do_open_exec(struct filename *name)
{
        struct file *file;
        int err;
        static const struct open_flags open_exec_flags = {
                .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
                .acc_mode = MAY_EXEC | MAY_OPEN,
                .intent = LOOKUP_OPEN,
                .lookup_flags = LOOKUP_FOLLOW,
        };

        file = do_filp_open(AT_FDCWD, name, &open_exec_flags);
        if (IS_ERR(file))
                goto out;

        err = -EACCES;
        if (!S_ISREG(file_inode(file)->i_mode))
                goto exit;

        if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
                goto exit;

        fsnotify_open(file);

        err = deny_write_access(file);
        if (err)
                goto exit;

out:
        return file;

exit:
        fput(file);
        return ERR_PTR(err);
}

struct file *open_exec(const char *name)
{
        struct filename tmp = { .name = name };
        return do_open_exec(&tmp);
}
EXPORT_SYMBOL(open_exec);

int kernel_read(struct file *file, loff_t offset,
                char *addr, unsigned long count)
{
        mm_segment_t old_fs;
        loff_t pos = offset;
        int result;

        old_fs = get_fs();
        set_fs(get_ds());
        /* The cast to a user pointer is valid due to the set_fs() */
        result = vfs_read(file, (void __user *)addr, count, &pos);
        set_fs(old_fs);
        return result;
}

EXPORT_SYMBOL(kernel_read);
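
/*
 * Example (not part of this file): a hypothetical binfmt-side helper that
 * uses kernel_read() to validate a magic number, the same pattern
 * prepare_binprm() uses below to fill bprm->buf.
 */
#if 0	/* example only */
static int check_elf_magic(struct file *file)
{
	char magic[4];
	int ret;

	ret = kernel_read(file, 0, magic, sizeof(magic));
	if (ret != sizeof(magic))
		return ret < 0 ? ret : -ENOEXEC;
	if (memcmp(magic, "\177ELF", 4) != 0)
		return -ENOEXEC;	/* let the next handler try */
	return 0;
}
#endif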

ssize_t read_code(struct file *file, unsigned long addr, loff_t pos, size_t len)
{
        ssize_t res = file->f_op->read(file, (void __user *)addr, len, &pos);
        if (res > 0)
                flush_icache_range(addr, addr + len);
        return res;
}
EXPORT_SYMBOL(read_code);

static int exec_mmap(struct mm_struct *mm)
{
        struct task_struct *tsk;
        struct mm_struct *old_mm, *active_mm;

        /* Notify parent that we're no longer interested in the old VM */
        tsk = current;
        old_mm = current->mm;
        mm_release(tsk, old_mm);

        if (old_mm) {
                sync_mm_rss(old_mm);
                /*
                 * Make sure that if there is a core dump in progress
                 * for the old mm, we get out and die instead of going
                 * through with the exec.  We must hold mmap_sem around
                 * checking core_state and changing tsk->mm.
                 */
                down_read(&old_mm->mmap_sem);
                if (unlikely(old_mm->core_state)) {
                        up_read(&old_mm->mmap_sem);
                        return -EINTR;
                }
        }
        task_lock(tsk);
        active_mm = tsk->active_mm;
        tsk->mm = mm;
        tsk->active_mm = mm;
        activate_mm(active_mm, mm);
        task_unlock(tsk);
        if (old_mm) {
                up_read(&old_mm->mmap_sem);
                BUG_ON(active_mm != old_mm);
                setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm);
                mm_update_next_owner(old_mm);
                mmput(old_mm);
                return 0;
        }
        mmdrop(active_mm);
        return 0;
}

/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes.  (Other processes might share the signal
 * table via the CLONE_SIGHAND option to clone().)
 */
static int de_thread(struct task_struct *tsk)
{
        struct signal_struct *sig = tsk->signal;
        struct sighand_struct *oldsighand = tsk->sighand;
        spinlock_t *lock = &oldsighand->siglock;

        if (thread_group_empty(tsk))
                goto no_thread_group;

        /*
         * Kill all other threads in the thread group.
         */
        spin_lock_irq(lock);
        if (signal_group_exit(sig)) {
                /*
                 * Another group action in progress, just
                 * return so that the signal is processed.
                 */
                spin_unlock_irq(lock);
                return -EAGAIN;
        }

        sig->group_exit_task = tsk;
        sig->notify_count = zap_other_threads(tsk);
        if (!thread_group_leader(tsk))
                sig->notify_count--;

        while (sig->notify_count) {
                __set_current_state(TASK_KILLABLE);
                spin_unlock_irq(lock);
                schedule();
                if (unlikely(__fatal_signal_pending(tsk)))
                        goto killed;
                spin_lock_irq(lock);
        }
        spin_unlock_irq(lock);

        /*
         * At this point all other threads have exited, all we have to
         * do is to wait for the thread group leader to become inactive,
         * and to assume its PID:
         */
        if (!thread_group_leader(tsk)) {
                struct task_struct *leader = tsk->group_leader;

                sig->notify_count = -1; /* for exit_notify() */
                for (;;) {
                        threadgroup_change_begin(tsk);
                        write_lock_irq(&tasklist_lock);
                        if (likely(leader->exit_state))
                                break;
                        __set_current_state(TASK_KILLABLE);
                        write_unlock_irq(&tasklist_lock);
                        threadgroup_change_end(tsk);
                        schedule();
                        if (unlikely(__fatal_signal_pending(tsk)))
                                goto killed;
                }

                /*
                 * The only record we have of the real-time age of a
                 * process, regardless of execs it has done, is start_time.
                 * All the past CPU time is accumulated in signal_struct
                 * from sister threads now dead.  But in this non-leader
                 * exec, nothing survives from the original leader thread,
                 * whose birth marks the true age of this process now.
                 * When we take on its identity by switching to its PID, we
                 * also take its birthdate (always earlier than our own).
                 */
                tsk->start_time = leader->start_time;
                tsk->real_start_time = leader->real_start_time;

                BUG_ON(!same_thread_group(leader, tsk));
                BUG_ON(has_group_leader_pid(tsk));
                /*
                 * An exec() starts a new thread group with the
                 * TGID of the previous thread group. Rehash the
                 * two threads with a switched PID, and release
                 * the former thread group leader:
                 */

                /* Become a process group leader with the old leader's pid.
                 * The old leader becomes a thread of this thread group.
                 * Note: The old leader also uses this pid until release_task
                 *       is called.  Odd but simple and correct.
                 */
                tsk->pid = leader->pid;
                change_pid(tsk, PIDTYPE_PID, task_pid(leader));
                transfer_pid(leader, tsk, PIDTYPE_PGID);
                transfer_pid(leader, tsk, PIDTYPE_SID);

                list_replace_rcu(&leader->tasks, &tsk->tasks);
                list_replace_init(&leader->sibling, &tsk->sibling);

                tsk->group_leader = tsk;
                leader->group_leader = tsk;

                tsk->exit_signal = SIGCHLD;
                leader->exit_signal = -1;

                BUG_ON(leader->exit_state != EXIT_ZOMBIE);
                leader->exit_state = EXIT_DEAD;

                /*
                 * We are going to release_task()->ptrace_unlink() silently;
                 * the tracer can sleep in do_wait(). EXIT_DEAD guarantees
                 * the tracer won't block again waiting for this thread.
                 */
                if (unlikely(leader->ptrace))
                        __wake_up_parent(leader, leader->parent);
                write_unlock_irq(&tasklist_lock);
                threadgroup_change_end(tsk);

                release_task(leader);
        }

        sig->group_exit_task = NULL;
        sig->notify_count = 0;

no_thread_group:
        /* we have changed execution domain */
        tsk->exit_signal = SIGCHLD;

        exit_itimers(sig);
        flush_itimer_signals();

        if (atomic_read(&oldsighand->count) != 1) {
                struct sighand_struct *newsighand;
                /*
                 * This ->sighand is shared with the CLONE_SIGHAND
                 * but not CLONE_THREAD task; switch to the new one.
                 */
                newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
                if (!newsighand)
                        return -ENOMEM;

                atomic_set(&newsighand->count, 1);
                memcpy(newsighand->action, oldsighand->action,
                       sizeof(newsighand->action));

                write_lock_irq(&tasklist_lock);
                spin_lock(&oldsighand->siglock);
                rcu_assign_pointer(tsk->sighand, newsighand);
                spin_unlock(&oldsighand->siglock);
                write_unlock_irq(&tasklist_lock);

                __cleanup_sighand(oldsighand);
        }

        BUG_ON(!thread_group_leader(tsk));
        return 0;

killed:
        /* protects against exit_notify() and __exit_signal() */
        read_lock(&tasklist_lock);
        sig->group_exit_task = NULL;
        sig->notify_count = 0;
        read_unlock(&tasklist_lock);
        return -EAGAIN;
}
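
/*
 * Example (not part of this file): observing de_thread() from userspace.
 * When a non-leader thread calls execve(2), every other thread, including
 * the group leader, is killed, and the exec'ing thread takes over the
 * leader's PID; so on success pthread_join() below never returns. Compile
 * with -pthread.
 */
#if 0	/* example only */
#include <pthread.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static void *thread_fn(void *arg)
{
	printf("non-leader tid=%ld execs; tgid %d survives the exec\n",
	       (long)syscall(SYS_gettid), getpid());
	execl("/bin/true", "true", (char *)NULL);
	perror("execl");
	return NULL;
}

int main(void)
{
	pthread_t t;

	printf("leader tid=%ld tgid=%d\n",
	       (long)syscall(SYS_gettid), getpid());
	pthread_create(&t, NULL, thread_fn, NULL);
	pthread_join(t, NULL);	/* unreachable if the exec succeeds */
	return 0;
}
#endif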

char *get_task_comm(char *buf, struct task_struct *tsk)
{
        /* buf must be at least sizeof(tsk->comm) in size */
        task_lock(tsk);
        strncpy(buf, tsk->comm, sizeof(tsk->comm));
        task_unlock(tsk);
        return buf;
}
EXPORT_SYMBOL_GPL(get_task_comm);

/*
 * These functions flush out all traces of the currently running executable
 * so that a new one can be started.
 */

void set_task_comm(struct task_struct *tsk, char *buf)
{
        task_lock(tsk);
        trace_task_rename(tsk, buf);
        strlcpy(tsk->comm, buf, sizeof(tsk->comm));
        task_unlock(tsk);
        perf_event_comm(tsk);
}

static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len)
{
        int i, ch;

        /* Copies the binary name from after the last slash */
        for (i = 0; (ch = *(fn++)) != '\0';) {
                if (ch == '/')
                        i = 0; /* overwrite what we wrote */
                else
                        if (i < len - 1)
                                tcomm[i++] = ch;
        }
        tcomm[i] = '\0';
}
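
/*
 * Example (not part of this file): task->comm as seen from userspace.
 * prctl(PR_SET_NAME) writes the same 16-byte comm field that
 * set_task_comm() fills in during exec via filename_to_taskname().
 */
#if 0	/* example only */
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	char name[16];

	prctl(PR_SET_NAME, "demo-comm");
	prctl(PR_GET_NAME, name);
	printf("comm = %s\n", name);	/* truncated to 15 chars + NUL */
	return 0;
}
#endif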

int flush_old_exec(struct linux_binprm *bprm)
{
        int retval;

        /*
         * Make sure we have a private signal table and that
         * we are unassociated from the previous thread group.
         */
        retval = de_thread(current);
        if (retval)
                goto out;

        set_mm_exe_file(bprm->mm, bprm->file);

        filename_to_taskname(bprm->tcomm, bprm->filename, sizeof(bprm->tcomm));
        /*
         * Release all of the old mmap stuff
         */
        acct_arg_size(bprm, 0);
        retval = exec_mmap(bprm->mm);
        if (retval)
                goto out;

        bprm->mm = NULL;                /* We're using it now */

        set_fs(USER_DS);
        current->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD |
                                        PF_NOFREEZE | PF_NO_SETAFFINITY);
        flush_thread();
        current->personality &= ~bprm->per_clear;

        return 0;

out:
        return retval;
}
EXPORT_SYMBOL(flush_old_exec);

void would_dump(struct linux_binprm *bprm, struct file *file)
{
        if (inode_permission(file_inode(file), MAY_READ) < 0)
                bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
}
EXPORT_SYMBOL(would_dump);

void setup_new_exec(struct linux_binprm *bprm)
{
        arch_pick_mmap_layout(current->mm);

        /* This is the point of no return */
        current->sas_ss_sp = current->sas_ss_size = 0;

        if (uid_eq(current_euid(), current_uid()) && gid_eq(current_egid(), current_gid()))
                set_dumpable(current->mm, SUID_DUMP_USER);
        else
                set_dumpable(current->mm, suid_dumpable);

        set_task_comm(current, bprm->tcomm);

        /* Set the new mm task size. We have to do that late because it may
         * depend on TIF_32BIT which is only updated in flush_thread() on
         * some architectures like powerpc
         */
        current->mm->task_size = TASK_SIZE;

        /* install the new credentials */
        if (!uid_eq(bprm->cred->uid, current_euid()) ||
            !gid_eq(bprm->cred->gid, current_egid())) {
                current->pdeath_signal = 0;
        } else {
                would_dump(bprm, bprm->file);
                if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)
                        set_dumpable(current->mm, suid_dumpable);
        }

        /* An exec changes our domain. We are no longer part of the thread
           group */
        current->self_exec_id++;
        flush_signal_handlers(current, 0);
        do_close_on_exec(current->files);
}
EXPORT_SYMBOL(setup_new_exec);

/*
 * Prepare credentials and lock ->cred_guard_mutex.
 * install_exec_creds() commits the new creds and drops the lock.
 * Or, if exec fails before, free_bprm() should release ->cred
 * and unlock.
 */
int prepare_bprm_creds(struct linux_binprm *bprm)
{
        if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
                return -ERESTARTNOINTR;

        bprm->cred = prepare_exec_creds();
        if (likely(bprm->cred))
                return 0;

        mutex_unlock(&current->signal->cred_guard_mutex);
        return -ENOMEM;
}

static void free_bprm(struct linux_binprm *bprm)
{
        free_arg_pages(bprm);
        if (bprm->cred) {
                mutex_unlock(&current->signal->cred_guard_mutex);
                abort_creds(bprm->cred);
        }
        if (bprm->file) {
                allow_write_access(bprm->file);
                fput(bprm->file);
        }
        /* If a binfmt changed the interp, free it. */
        if (bprm->interp != bprm->filename)
                kfree(bprm->interp);
        kfree(bprm);
}

int bprm_change_interp(char *interp, struct linux_binprm *bprm)
{
        /* If a binfmt changed the interp, free it first. */
        if (bprm->interp != bprm->filename)
                kfree(bprm->interp);
        bprm->interp = kstrdup(interp, GFP_KERNEL);
        if (!bprm->interp)
                return -ENOMEM;
        return 0;
}
EXPORT_SYMBOL(bprm_change_interp);

/*
 * install the new credentials for this executable
 */
void install_exec_creds(struct linux_binprm *bprm)
{
        security_bprm_committing_creds(bprm);

        commit_creds(bprm->cred);
        bprm->cred = NULL;

        /*
         * Disable monitoring for regular users
         * when executing setuid binaries. Must
         * wait until new credentials are committed
         * by commit_creds() above.
         */
        if (get_dumpable(current->mm) != SUID_DUMP_USER)
                perf_event_exit_task(current);
        /*
         * cred_guard_mutex must be held at least to this point to prevent
         * ptrace_attach() from altering our determination of the task's
         * credentials; any time after this it may be unlocked.
         */
        security_bprm_committed_creds(bprm);
        mutex_unlock(&current->signal->cred_guard_mutex);
}
EXPORT_SYMBOL(install_exec_creds);

/*
 * determine how safe it is to execute the proposed program
 * - the caller must hold ->cred_guard_mutex to protect against
 *   PTRACE_ATTACH
 */
static void check_unsafe_exec(struct linux_binprm *bprm)
{
        struct task_struct *p = current, *t;
        unsigned n_fs;

        if (p->ptrace) {
                if (p->ptrace & PT_PTRACE_CAP)
                        bprm->unsafe |= LSM_UNSAFE_PTRACE_CAP;
                else
                        bprm->unsafe |= LSM_UNSAFE_PTRACE;
        }

        /*
         * This isn't strictly necessary, but it makes it harder for LSMs to
         * mess up.
         */
        if (current->no_new_privs)
                bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS;

        t = p;
        n_fs = 1;
        spin_lock(&p->fs->lock);
        rcu_read_lock();
        while_each_thread(p, t) {
                if (t->fs == p->fs)
                        n_fs++;
        }
        rcu_read_unlock();

        if (p->fs->users > n_fs)
                bprm->unsafe |= LSM_UNSAFE_SHARE;
        else
                p->fs->in_exec = 1;
        spin_unlock(&p->fs->lock);
}

/*
 * Fill the binprm structure from the inode.
 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes.
 *
 * This may be called multiple times for binary chains (scripts for example).
 */
int prepare_binprm(struct linux_binprm *bprm)
{
        struct inode *inode = file_inode(bprm->file);
        umode_t mode = inode->i_mode;
        int retval;

        /* clear any set[ug]id data from a previous binary */
        bprm->cred->euid = current_euid();
        bprm->cred->egid = current_egid();

        if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) &&
            !current->no_new_privs &&
            kuid_has_mapping(bprm->cred->user_ns, inode->i_uid) &&
            kgid_has_mapping(bprm->cred->user_ns, inode->i_gid)) {
                /* Set-uid? */
                if (mode & S_ISUID) {
                        bprm->per_clear |= PER_CLEAR_ON_SETID;
                        bprm->cred->euid = inode->i_uid;
                }

                /* Set-gid? */
                /*
                 * If setgid is set but no group execute bit then this
                 * is a candidate for mandatory locking, not a setgid
                 * executable.
                 */
                if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
                        bprm->per_clear |= PER_CLEAR_ON_SETID;
                        bprm->cred->egid = inode->i_gid;
                }
        }

        /* fill in binprm security blob */
        retval = security_bprm_set_creds(bprm);
        if (retval)
                return retval;
        bprm->cred_prepared = 1;

        memset(bprm->buf, 0, BINPRM_BUF_SIZE);
        return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
}

EXPORT_SYMBOL(prepare_binprm);
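
/*
 * Example (not part of this file): the set-uid branch above in action.
 * Install the binary with `chown root a.out && chmod u+s a.out`; when a
 * regular user runs it, prepare_binprm() leaves the real uid alone but
 * takes the effective uid from the inode.
 */
#if 0	/* example only */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* For a root-owned setuid binary: uid = caller, euid = 0. */
	printf("uid=%d euid=%d\n", (int)getuid(), (int)geteuid());
	return 0;
}
#endif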

/*
 * Arguments are '\0' separated strings found at the location bprm->p
 * points to; chop off the first by relocating bprm->p to right after
 * the first '\0' encountered.
 */
int remove_arg_zero(struct linux_binprm *bprm)
{
        int ret = 0;
        unsigned long offset;
        char *kaddr;
        struct page *page;

        if (!bprm->argc)
                return 0;

        do {
                offset = bprm->p & ~PAGE_MASK;
                page = get_arg_page(bprm, bprm->p, 0);
                if (!page) {
                        ret = -EFAULT;
                        goto out;
                }
                kaddr = kmap_atomic(page);

                for (; offset < PAGE_SIZE && kaddr[offset];
                                offset++, bprm->p++)
                        ;

                kunmap_atomic(kaddr);
                put_arg_page(page);

                if (offset == PAGE_SIZE)
                        free_arg_page(bprm, (bprm->p >> PAGE_SHIFT) - 1);
        } while (offset == PAGE_SIZE);

        bprm->p++;
        bprm->argc--;
        ret = 0;

out:
        return ret;
}
EXPORT_SYMBOL(remove_arg_zero);

#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
/*
 * cycle through the list of binary format handlers until one
 * recognizes the image
 */
int search_binary_handler(struct linux_binprm *bprm)
{
        bool need_retry = IS_ENABLED(CONFIG_MODULES);
        struct linux_binfmt *fmt;
        int retval;

        /* This allows 4 levels of binfmt rewrites before failing hard. */
        if (bprm->recursion_depth > 5)
                return -ELOOP;

        retval = security_bprm_check(bprm);
        if (retval)
                return retval;

        retval = -ENOENT;
 retry:
        read_lock(&binfmt_lock);
        list_for_each_entry(fmt, &formats, lh) {
                if (!try_module_get(fmt->module))
                        continue;
                read_unlock(&binfmt_lock);
                bprm->recursion_depth++;
                retval = fmt->load_binary(bprm);
                bprm->recursion_depth--;
                if (retval >= 0 || retval != -ENOEXEC ||
                    bprm->mm == NULL || bprm->file == NULL) {
                        put_binfmt(fmt);
                        return retval;
                }
                read_lock(&binfmt_lock);
                put_binfmt(fmt);
        }
        read_unlock(&binfmt_lock);

        if (need_retry && retval == -ENOEXEC) {
                if (printable(bprm->buf[0]) && printable(bprm->buf[1]) &&
                    printable(bprm->buf[2]) && printable(bprm->buf[3]))
                        return retval;
                if (request_module("binfmt-%04x", *(ushort *)(bprm->buf + 2)) < 0)
                        return retval;
                need_retry = false;
                goto retry;
        }

        return retval;
}
EXPORT_SYMBOL(search_binary_handler);
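
/*
 * Example (not part of this file): the retry path above asks modprobe for
 * "binfmt-%04x", built from bytes 2-3 of the image header. A binfmt module
 * can make itself loadable that way by declaring a matching alias; the
 * magic value here is hypothetical.
 */
#if 0	/* example only */
MODULE_ALIAS("binfmt-1234");	/* matched by request_module("binfmt-1234") */
#endif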

static int exec_binprm(struct linux_binprm *bprm)
{
        pid_t old_pid, old_vpid;
        int ret;

        /* Need to fetch pid before load_binary changes it */
        old_pid = current->pid;
        rcu_read_lock();
        old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
        rcu_read_unlock();

        ret = search_binary_handler(bprm);
        if (ret >= 0) {
                audit_bprm(bprm);
                trace_sched_process_exec(current, old_pid, bprm);
                ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
                proc_exec_connector(current);
        }

        return ret;
}

/*
 * sys_execve() executes a new program.
 */
static int do_execve_common(struct filename *filename,
                                struct user_arg_ptr argv,
                                struct user_arg_ptr envp)
{
        struct linux_binprm *bprm;
        struct file *file;
        struct files_struct *displaced;
        int retval;

        if (IS_ERR(filename))
                return PTR_ERR(filename);

        /*
         * We move the actual failure in case of RLIMIT_NPROC excess from
         * set*uid() to execve() because too many poorly written programs
         * don't check setuid() return code.  Here we additionally recheck
         * whether NPROC limit is still exceeded.
         */
        if ((current->flags & PF_NPROC_EXCEEDED) &&
            atomic_read(&current_user()->processes) > rlimit(RLIMIT_NPROC)) {
                retval = -EAGAIN;
                goto out_ret;
        }

        /* We're below the limit (still or again), so we don't want to make
         * further execve() calls fail. */
        current->flags &= ~PF_NPROC_EXCEEDED;

        retval = unshare_files(&displaced);
        if (retval)
                goto out_ret;

        retval = -ENOMEM;
        bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
        if (!bprm)
                goto out_files;

        retval = prepare_bprm_creds(bprm);
        if (retval)
                goto out_free;

        check_unsafe_exec(bprm);
        current->in_execve = 1;

        file = do_open_exec(filename);
        retval = PTR_ERR(file);
        if (IS_ERR(file))
                goto out_unmark;

        sched_exec();

        bprm->file = file;
        bprm->filename = bprm->interp = filename->name;

        retval = bprm_mm_init(bprm);
        if (retval)
                goto out_unmark;

        bprm->argc = count(argv, MAX_ARG_STRINGS);
        if ((retval = bprm->argc) < 0)
                goto out;

        bprm->envc = count(envp, MAX_ARG_STRINGS);
        if ((retval = bprm->envc) < 0)
                goto out;

        retval = prepare_binprm(bprm);
        if (retval < 0)
                goto out;

        retval = copy_strings_kernel(1, &bprm->filename, bprm);
        if (retval < 0)
                goto out;

        bprm->exec = bprm->p;
        retval = copy_strings(bprm->envc, envp, bprm);
        if (retval < 0)
                goto out;

        retval = copy_strings(bprm->argc, argv, bprm);
        if (retval < 0)
                goto out;

        retval = exec_binprm(bprm);
        if (retval < 0)
                goto out;

        /* execve succeeded */
        current->fs->in_exec = 0;
        current->in_execve = 0;
        acct_update_integrals(current);
        task_numa_free(current);
        free_bprm(bprm);
        putname(filename);
        if (displaced)
                put_files_struct(displaced);
        return retval;

out:
        if (bprm->mm) {
                acct_arg_size(bprm, 0);
                mmput(bprm->mm);
        }

out_unmark:
        current->fs->in_exec = 0;
        current->in_execve = 0;

out_free:
        free_bprm(bprm);

out_files:
        if (displaced)
                reset_files_struct(displaced);
out_ret:
        putname(filename);
        return retval;
}

int do_execve(struct filename *filename,
        const char __user *const __user *__argv,
        const char __user *const __user *__envp)
{
        struct user_arg_ptr argv = { .ptr.native = __argv };
        struct user_arg_ptr envp = { .ptr.native = __envp };
        return do_execve_common(filename, argv, envp);
}

#ifdef CONFIG_COMPAT
static int compat_do_execve(struct filename *filename,
        const compat_uptr_t __user *__argv,
        const compat_uptr_t __user *__envp)
{
        struct user_arg_ptr argv = {
                .is_compat = true,
                .ptr.compat = __argv,
        };
        struct user_arg_ptr envp = {
                .is_compat = true,
                .ptr.compat = __envp,
        };
        return do_execve_common(filename, argv, envp);
}
#endif

void set_binfmt(struct linux_binfmt *new)
{
        struct mm_struct *mm = current->mm;

        if (mm->binfmt)
                module_put(mm->binfmt->module);

        mm->binfmt = new;
        if (new)
                __module_get(new->module);
}
EXPORT_SYMBOL(set_binfmt);

/*
 * set_dumpable() stores the three-value SUID_DUMP_* setting into mm->flags.
 */
void set_dumpable(struct mm_struct *mm, int value)
{
        unsigned long old, new;

        if (WARN_ON((unsigned)value > SUID_DUMP_ROOT))
                return;

        do {
                old = ACCESS_ONCE(mm->flags);
                new = (old & ~MMF_DUMPABLE_MASK) | value;
        } while (cmpxchg(&mm->flags, old, new) != old);
}
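
/*
 * Example (not part of this file): the same per-mm setting from userspace.
 * prctl(PR_SET_DUMPABLE) feeds set_dumpable() with SUID_DUMP_DISABLE (0)
 * or SUID_DUMP_USER (1); exec of a setuid binary resets it as shown in
 * setup_new_exec() above.
 */
#if 0	/* example only */
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	printf("dumpable=%d\n", prctl(PR_GET_DUMPABLE));
	prctl(PR_SET_DUMPABLE, 0);	/* SUID_DUMP_DISABLE */
	printf("dumpable=%d\n", prctl(PR_GET_DUMPABLE));
	return 0;
}
#endif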
1615
1616 SYSCALL_DEFINE3(execve,
1617                 const char __user *, filename,
1618                 const char __user *const __user *, argv,
1619                 const char __user *const __user *, envp)
1620 {
1621         return do_execve(getname(filename), argv, envp);
1622 }
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(execve, const char __user *, filename,
        const compat_uptr_t __user *, argv,
        const compat_uptr_t __user *, envp)
{
        return compat_do_execve(getname(filename), argv, envp);
}
#endif