// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/livepatch.h>
#include <linux/cgroup.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;
static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel generated signals to this kthread */
	if (unlikely((t->flags & PF_KTHREAD) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}
static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even ignored signals unless it
	 * is SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
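/*
 * Worked example (hypothetical values, assuming a 64-bit arch where
 * _NSIG_WORDS == 1): a task with SIGTERM and SIGUSR1 pending but SIGUSR1
 * blocked computes
 *
 *	ready = (sigmask(SIGTERM) | sigmask(SIGUSR1)) &~ sigmask(SIGUSR1);
 *
 * which leaves only sigmask(SIGTERM), so has_pending_signals() returns
 * true and TIF_SIGPENDING needs to be set.
 */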
static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return false;
}
/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current) &&
	    !klp_patch_pending(current))
		clear_thread_flag(TIF_SIGPENDING);
}
EXPORT_SYMBOL(recalc_sigpending);
void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}
/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
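/*
 * Worked example (hypothetical pending set): with SIGUSR1 and SIGSEGV both
 * pending and unblocked, the first word contains a SYNCHRONOUS_MASK bit,
 * so next_signal() narrows the candidates before picking the lowest bit:
 *
 *	x = sigmask(SIGUSR1) | sigmask(SIGSEGV);
 *	x &= SYNCHRONOUS_MASK;		// only sigmask(SIGSEGV) survives
 *	sig = ffz(~x) + 1;		// == SIGSEGV
 *
 * i.e. the synchronous fault report is dequeued before the asynchronous
 * SIGUSR1, even though SIGUSR1 has the lower signal number.
 */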
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set the bits in @mask on @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}
/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}
/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}
void task_join_group_stop(struct task_struct *task)
{
	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
	struct signal_struct *sig = current->signal;

	if (sig->group_stop_count) {
		sig->group_stop_count++;
		mask |= JOBCTL_STOP_CONSUME;
	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
		return;

	/* Have the new thread join an on-going signal group stop */
	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
		 int override_rlimit, const unsigned int sigqueue_flags)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;
	int sigpending;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 *
	 * NOTE! A pending signal will hold on to the user refcount,
	 * and we get/put the refcount only when the sigpending count
	 * changes from/to zero.
	 */
	rcu_read_lock();
	user = __task_cred(t)->user;
	sigpending = atomic_inc_return(&user->sigpending);
	if (sigpending == 1)
		get_uid(user);
	rcu_read_unlock();

	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
		/*
		 * Preallocation does not hold sighand::siglock so it can't
		 * use the cache. The lockless caching requires that only
		 * one consumer and only one producer run at a time.
		 */
		q = READ_ONCE(t->sigqueue_cache);
		if (!q || sigqueue_flags)
			q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
		else
			WRITE_ONCE(t->sigqueue_cache, NULL);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		if (atomic_dec_and_test(&user->sigpending))
			free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = sigqueue_flags;
		q->user = user;
	}

	return q;
}
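/*
 * Worked example of the RLIMIT_SIGPENDING accounting above (hypothetical
 * numbers): with a limit of 3 and three signals already queued for the
 * user, a fourth call with override_rlimit == 0 sees sigpending == 4,
 * skips the allocation, logs via print_dropped_signal() and returns NULL,
 * leaving the caller to __send_signal()'s overflow handling (-EAGAIN for
 * user-queued rt signals, silent loss of the siginfo otherwise).
 */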
void exit_task_sigqueue_cache(struct task_struct *tsk)
{
	/* Race free because @tsk is mopped up */
	struct sigqueue *q = tsk->sigqueue_cache;

	if (q) {
		tsk->sigqueue_cache = NULL;
		/*
		 * Hand it back to the cache as the task might
		 * be self reaping which would leak the object.
		 */
		kmem_cache_free(sigqueue_cachep, q);
	}
}
static void sigqueue_cache_or_free(struct sigqueue *q)
{
	/*
	 * Cache one sigqueue per task. This pairs with the consumer side
	 * in __sigqueue_alloc() and needs READ/WRITE_ONCE() to prevent the
	 * compiler from store tearing and to tell KCSAN that the data race
	 * is intentional when run without holding current->sighand->siglock,
	 * which is fine as current obviously cannot run __sigqueue_free()
	 * concurrently.
	 */
	if (!READ_ONCE(current->sigqueue_cache))
		WRITE_ONCE(current->sigqueue_cache, q);
	else
		kmem_cache_free(sigqueue_cachep, q);
}
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	if (atomic_dec_and_test(&q->user->sigpending))
		free_uid(q->user);
	sigqueue_cache_or_free(q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);
#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}
void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}
static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    kernel_siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);
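/*
 * Minimal sketch of a caller (modelled loosely on get_signal(), simplified
 * and hypothetical): dequeue_signal() must run under the siglock and the
 * dequeued siginfo then belongs to the caller:
 *
 *	kernel_siginfo_t info;
 *	int signr;
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 *	if (signr)
 *		;	// act on info, e.g. set up the handler frame
 */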
static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}
static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}
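/*
 * Example (hypothetical uids): a sender with euid 1000 passes this check
 * for any target whose uid or suid is 1000 (one of the four uid_eq()
 * tests above); signalling an unrelated uid 1001 target instead requires
 * CAP_KILL in the target's user namespace.
 */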
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			fallthrough;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !task_sigpending(p);
}
static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type, bool force)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, force))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			rcu_read_lock();
			q->info.si_uid =
				from_kuid_munged(task_cred_xxx(t, user_ns),
						 current_uid());
			rcu_read_unlock();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info) &&
		   sig >= SIGRTMIN && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort.  We may abort if the
		 * signal was rt and sent by user using something
		 * other than kill().
		 */
		result = TRACE_SIGNAL_OVERFLOW_FAIL;
		ret = -EAGAIN;
		goto ret;
	} else {
		/*
		 * This is a silent loss of information.  We still
		 * send the signal, but the *info bits are lost.
		 */
		result = TRACE_SIGNAL_LOSE_INFO;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;
		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;
			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}
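/*
 * Example of the queueing rules above (hypothetical sender/target): if the
 * target blocks both signals and the sender raises SIGUSR1 twice and
 * SIGRTMIN twice, legacy_queue() collapses the second SIGUSR1 into the
 * already-pending one (a single delivery after unblocking), while each
 * SIGRTMIN gets its own sigqueue entry and is delivered twice.
 */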
static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;
	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}
static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type)
{
	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
	bool force = false;

	if (info == SEND_SIG_NOINFO) {
		/* Force if sent from an ancestor pid namespace */
		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
	} else if (info == SEND_SIG_PRIV) {
		/* Don't ignore kernel generated signals */
		force = true;
	} else if (has_si_pid_and_uid(info)) {
		/* SIGKILL and SIGSTOP is special or has ids */
		struct user_namespace *t_user_ns;

		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		/* A kernel generated signal? */
		force = (info->si_code == SI_KERNEL);

		/* From an ancestor pid namespace? */
		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
			info->si_pid = 0;
			force = true;
		}
	}
	return __send_signal(sig, info, t, type, force);
}
static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
int
__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, PIDTYPE_TGID);
}

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;
	int sig = info->si_signo;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal(sig, info, t, PIDTYPE_PID);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
int force_sig_info(struct kernel_siginfo *info)
{
	return force_sig_info_to_task(info, current);
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal().  In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}
/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}
/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}
static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}
/*
 * The usb asyncio usage of siginfo is wrong.  The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user	*si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace.  As the 32bit address will be encoded in the
 * low 32bits of the pointer, those low 32bits will be stored at a
 * higher address than a 32bit pointer expects, so userspace will not
 * see the address it was expecting for its completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
 * pointer in sival_int, instead of sival_ptr of the sigval_t addr
 * union member.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	if (!valid_signal(sig))
		goto ret;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
ret:
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
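/*
 * Example for the layout problem described above (hypothetical caller on
 * a 32-bit userspace / 64-bit big endian kernel): the completion address
 * must be passed in sival_int rather than sival_ptr so that the value
 * survives the 64-bit to 32-bit siginfo conversion; user_iocb here is a
 * hypothetical 32-bit userspace pointer value:
 *
 *	sigval_t addr;
 *
 *	addr.sival_int = (int)(unsigned long)user_iocb;
 *	kill_pid_usb_asyncio(sig, errno, addr, pid, cred);
 */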
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0)
		return kill_proc_info(sig, info, pid);

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
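/*
 * Summary of the kill(2)-style pid encoding handled above (hypothetical
 * call sites):
 *
 *	kill_something_info(sig, info, 1234);	// the single process 1234
 *	kill_something_info(sig, info, 0);	// caller's own process group
 *	kill_something_info(sig, info, -1234);	// process group 1234
 *	kill_something_info(sig, info, -1);	// every process with pid > 1
 *						// outside the caller's group
 */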
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);
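/*
 * Example (hypothetical kernel-internal caller): priv selects between a
 * user-style and a kernel-style siginfo:
 *
 *	send_sig(SIGTERM, tsk, 0);	// as if from userspace, SI_USER
 *	send_sig(SIGKILL, tsk, 1);	// kernel-generated, SI_KERNEL
 */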
void force_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info(&info);
}
EXPORT_SYMBOL(force_sig);

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
void force_sigsegv(int sig)
{
	struct task_struct *p = current;

	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV);
}
int force_sig_fault_to_task(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info_to_task(&info, t);
}
int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
{
	return force_sig_fault_to_task(sig, code, addr
				       ___ARCH_SI_TRAPNO(trapno)
				       ___ARCH_SI_IA64(imm, flags, isr), current);
}
int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}
int force_sig_mceerr(int code, void __user *addr, short lsb)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(&info);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);
int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_BNDERR;
	info.si_addr = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(&info);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_PKUERR;
	info.si_addr = addr;
	info.si_pkey = pkey;
	return force_sig_info(&info);
}
#endif
/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = addr;
	return force_sig_info(&info);
}
int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
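/*
 * Example (modelled on the tty layer, hypothetical caller): delivering ^C
 * to the foreground process group of a terminal:
 *
 *	struct pid *pgrp = tty_get_pgrp(tty);
 *
 *	if (pgrp) {
 *		kill_pgrp(pgrp, SIGINT, 1);
 *		put_pid(pgrp);
 *	}
 */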
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the
 * application with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
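/*
 * Sketch of the preallocated sigqueue life cycle (as used by POSIX timers,
 * simplified):
 *
 *	q = sigqueue_alloc();		// timer_create(): may fail with NULL
 *	...
 *	send_sigqueue(q, pid, type);	// each expiry reuses the same entry,
 *					// bumping si_overrun if still queued
 *	...
 *	sigqueue_free(q);		// timer_delete(): drops SIGQUEUE_PREALLOC
 */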
int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();
	t = pid_task(pid, type);
	if (!t || !likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}
static void do_notify_pidfd(struct task_struct *task)
{
	struct pid *pid;

	WARN_ON(task->exit_state == 0);
	pid = task_pid(task);
	wake_up_all(&pid->wait_pidfd);
}
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	/* Wake up all pidfd waiters */
	do_notify_pidfd(tsk);

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	/*
	 * Send with __send_signal as si_pid and si_uid are in the
	 * parent's namespaces.
	 */
	if (valid_signal(sig) && sig)
		__send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	clear_siginfo(&info);
	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
static inline bool may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return false;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return false;

	return true;
}
/*
 * Return true if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static bool sigkill_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL) ||
	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}
2148 * This must be called with current->sighand->siglock held.
2150 * This should be the path for all ptrace stops.
2151 * We always set current->last_siginfo while stopped here.
2152 * That makes it a way to test a stopped process for
2153 * being ptrace-stopped vs being job-control-stopped.
2155 * If we actually decide not to stop at all because the tracer
2156 * is gone, we keep current->exit_code unless clear_code.
2158 static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
2159 __releases(¤t->sighand->siglock)
2160 __acquires(¤t->sighand->siglock)
2162 bool gstop_done = false;
2164 if (arch_ptrace_stop_needed(exit_code, info)) {
2166 * The arch code has something special to do before a
2167 * ptrace stop. This is allowed to block, e.g. for faults
2168 * on user stack pages. We can't keep the siglock while
2169 * calling arch_ptrace_stop, so we must release it now.
2170 * To preserve proper semantics, we must do this before
2171 * any signal bookkeeping like checking group_stop_count.
2172 * Meanwhile, a SIGKILL could come in before we retake the
2173 * siglock. That must prevent us from sleeping in TASK_TRACED.
2174 * So after regaining the lock, we must check for SIGKILL.
2176 spin_unlock_irq(¤t->sighand->siglock);
2177 arch_ptrace_stop(exit_code, info);
2178 spin_lock_irq(¤t->sighand->siglock);
2179 if (sigkill_pending(current))
2183 set_special_state(TASK_TRACED);
2186 * We're committing to trapping. TRACED should be visible before
2187 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2188 * Also, transition to TRACED and updates to ->jobctl should be
2189 * atomic with respect to siglock and should be done after the arch
2190 * hook as siglock is released and regrabbed across it.
2195 * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED)
2197 * set_current_state() smp_wmb();
2199 * wait_task_stopped()
2200 * task_stopped_code()
2201 * [L] task_is_traced() [S] task_clear_jobctl_trapping();
2205 current->last_siginfo = info;
2206 current->exit_code = exit_code;
2209 * If @why is CLD_STOPPED, we're trapping to participate in a group
2210 * stop. Do the bookkeeping. Note that if SIGCONT was delievered
2211 * across siglock relocks since INTERRUPT was scheduled, PENDING
2212 * could be clear now. We act as if SIGCONT is received after
2213 * TASK_TRACED is entered - ignore it.
2215 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2216 gstop_done = task_participate_group_stop(current);
2218 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2219 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2220 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2221 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2223 /* entering a trap, clear TRAPPING */
2224 task_clear_jobctl_trapping(current);
2226 spin_unlock_irq(¤t->sighand->siglock);
2227 read_lock(&tasklist_lock);
2228 if (may_ptrace_stop()) {
2230 * Notify parents of the stop.
2232 * While ptraced, there are two parents - the ptracer and
2233 * the real_parent of the group_leader. The ptracer should
2234 * know about every stop while the real parent is only
2235 * interested in the completion of group stop. The states
2236 * for the two don't interact with each other. Notify
2237 * separately unless they're gonna be duplicates.
2239 do_notify_parent_cldstop(current, true, why);
2240 if (gstop_done && ptrace_reparented(current))
2241 do_notify_parent_cldstop(current, false, why);
2244 * Don't want to allow preemption here, because
2245 * sys_ptrace() needs this task to be inactive.
2247 * XXX: implement read_unlock_no_resched().
2250 read_unlock(&tasklist_lock);
2251 cgroup_enter_frozen();
2252 preempt_enable_no_resched();
2253 freezable_schedule();
2254 cgroup_leave_frozen(true);
2257 * By the time we got the lock, our tracer went away.
2258 * Don't drop the lock yet, another tracer may come.
2260 * If @gstop_done, the ptracer went away between group stop
2261 * completion and here. During detach, it would have set
2262 * JOBCTL_STOP_PENDING on us and we'll re-enter
2263 * TASK_STOPPED in do_signal_stop() on return, so notifying
2264 * the real parent of the group stop completion is enough.
2267 do_notify_parent_cldstop(current, false, why);
2269 /* tasklist protects us from ptrace_freeze_traced() */
2270 __set_current_state(TASK_RUNNING);
2272 current->exit_code = 0;
2273 read_unlock(&tasklist_lock);
2277 * We are back. Now reacquire the siglock before touching
2278 * last_siginfo, so that we are sure to have synchronized with
2279 * any signal-sending on another CPU that wants to examine it.
2281 spin_lock_irq(&current->sighand->siglock);
2282 current->last_siginfo = NULL;
2284 /* LISTENING can be set only during STOP traps, clear it */
2285 current->jobctl &= ~JOBCTL_LISTENING;
2288 * Queued signals ignored us while we were stopped for tracing.
2289 * So check for any that we should take before resuming user mode.
2290 * This sets TIF_SIGPENDING, but never clears it.
2292 recalc_sigpending_tsk(current);
2295 static void ptrace_do_notify(int signr, int exit_code, int why)
2297 kernel_siginfo_t info;
2299 clear_siginfo(&info);
2300 info.si_signo = signr;
2301 info.si_code = exit_code;
2302 info.si_pid = task_pid_vnr(current);
2303 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2305 /* Let the debugger run. */
2306 ptrace_stop(exit_code, why, 1, &info);
2309 void ptrace_notify(int exit_code)
2311 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2312 if (unlikely(current->task_works))
2315 spin_lock_irq(&current->sighand->siglock);
2316 ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
2317 spin_unlock_irq(&current->sighand->siglock);
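/*
 * Illustrative sketch (userspace tracer, not kernel code): an event
 * reported via ptrace_notify() is seen by the tracer as a SIGTRAP stop
 * whose event number sits in bits 8-15 of the wait status. The option
 * and event constants are standard ptrace UAPI; pid and the surrounding
 * logic are assumptions of this example.
 *
 *	int status;
 *	ptrace(PTRACE_SETOPTIONS, pid, 0, PTRACE_O_TRACEEXEC);
 *	waitpid(pid, &status, 0);
 *	if (WIFSTOPPED(status) &&
 *	    (status >> 8) == (SIGTRAP | (PTRACE_EVENT_EXEC << 8)))
 *		;	/* tracee stopped in ptrace_notify() for an exec event */
 */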
2321 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2322 * @signr: signr causing group stop if initiating
2324 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2325 * and participate in it. If already set, participate in the existing
2326 * group stop. If participated in a group stop (and thus slept), %true is
2327 * returned with siglock released.
2329 * If ptraced, this function doesn't handle stop itself. Instead,
2330 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2331 * untouched. The caller must ensure that INTERRUPT trap handling takes
2332 * place afterwards.
2335 * Must be called with @current->sighand->siglock held, which is released on %true return.
2339 * %false if group stop is already cancelled or ptrace trap is scheduled.
2340 * %true if participated in group stop.
2342 static bool do_signal_stop(int signr)
2343 __releases(&current->sighand->siglock)
2345 struct signal_struct *sig = current->signal;
2347 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2348 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2349 struct task_struct *t;
2351 /* signr will be recorded in task->jobctl for retries */
2352 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2354 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2355 unlikely(signal_group_exit(sig)))
2358 * There is no group stop already in progress. We must initiate one now.
2361 * While ptraced, a task may be resumed while group stop is
2362 * still in effect and then receive a stop signal and
2363 * initiate another group stop. This deviates from the
2364 * usual behavior as two consecutive stop signals can't
2365 * cause two group stops when !ptraced. That is why we
2366 * also check !task_is_stopped(t) below.
2368 * The condition can be distinguished by testing whether
2369 * SIGNAL_STOP_STOPPED is already set. Don't generate
2370 * group_exit_code in such case.
2372 * This is not necessary for SIGNAL_STOP_CONTINUED because
2373 * an intervening stop signal is required to cause two
2374 * continued events regardless of ptrace.
2376 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2377 sig->group_exit_code = signr;
2379 sig->group_stop_count = 0;
2381 if (task_set_jobctl_pending(current, signr | gstop))
2382 sig->group_stop_count++;
2385 while_each_thread(current, t) {
2387 * Setting state to TASK_STOPPED for a group
2388 * stop is always done with the siglock held,
2389 * so this check has no races.
2391 if (!task_is_stopped(t) &&
2392 task_set_jobctl_pending(t, signr | gstop)) {
2393 sig->group_stop_count++;
2394 if (likely(!(t->ptrace & PT_SEIZED)))
2395 signal_wake_up(t, 0);
2397 ptrace_trap_notify(t);
2402 if (likely(!current->ptrace)) {
2406 * If there are no other threads in the group, or if there
2407 * is a group stop in progress and we are the last to stop,
2408 * report to the parent.
2410 if (task_participate_group_stop(current))
2411 notify = CLD_STOPPED;
2413 set_special_state(TASK_STOPPED);
2414 spin_unlock_irq(&current->sighand->siglock);
2417 * Notify the parent of the group stop completion. Because
2418 * we're not holding either the siglock or tasklist_lock
2419 * here, the ptracer may attach in between; however, this is for
2420 * group stop and should always be delivered to the real
2421 * parent of the group leader. The new ptracer will get
2422 * its notification when this task transitions into TASK_TRACED.
2426 read_lock(&tasklist_lock);
2427 do_notify_parent_cldstop(current, false, notify);
2428 read_unlock(&tasklist_lock);
2431 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2432 cgroup_enter_frozen();
2433 freezable_schedule();
2437 * While ptraced, group stop is handled by STOP trap.
2438 * Schedule it and let the caller deal with it.
2440 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
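/*
 * Illustrative sketch (userspace, not kernel code): a stop signal sent
 * to a process group drives each member through do_signal_stop(), and
 * the parent observes completion with WUNTRACED. pgid, child and status
 * are assumptions of this example.
 *
 *	kill(-pgid, SIGSTOP);			/* stop the whole group */
 *	waitpid(child, &status, WUNTRACED);	/* reap the CLD_STOPPED report */
 *	if (WIFSTOPPED(status))
 *		kill(-pgid, SIGCONT);		/* resume it again */
 */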
2446 * do_jobctl_trap - take care of ptrace jobctl traps
2448 * When PT_SEIZED, it's used for both group stop and explicit
2449 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2450 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2451 * the stop signal; otherwise, %SIGTRAP.
2453 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2454 * number as exit_code and no siginfo.
2457 * Must be called with @current->sighand->siglock held, which may be
2458 * released and re-acquired before returning with intervening sleep.
2460 static void do_jobctl_trap(void)
2462 struct signal_struct *signal = current->signal;
2463 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2465 if (current->ptrace & PT_SEIZED) {
2466 if (!signal->group_stop_count &&
2467 !(signal->flags & SIGNAL_STOP_STOPPED))
2469 WARN_ON_ONCE(!signr);
2470 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2473 WARN_ON_ONCE(!signr);
2474 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2475 current->exit_code = 0;
2480 * do_freezer_trap - handle the freezer jobctl trap
2482 * Puts the task into the frozen state, unless the task is about to quit,
2483 * in which case it drops JOBCTL_TRAP_FREEZE instead.
2486 * Must be called with @current->sighand->siglock held,
2487 * which is always released before returning.
2489 static void do_freezer_trap(void)
2490 __releases(&current->sighand->siglock)
2493 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2494 * let's make another loop to give it a chance to be handled.
2495 * In any case, we'll come back here.
2497 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2498 JOBCTL_TRAP_FREEZE) {
2499 spin_unlock_irq(&current->sighand->siglock);
2504 * Now we're sure that there is no pending fatal signal and no
2505 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2506 * immediately (if there is a non-fatal signal pending), and
2507 * put the task into sleep.
2509 __set_current_state(TASK_INTERRUPTIBLE);
2510 clear_thread_flag(TIF_SIGPENDING);
2511 spin_unlock_irq(&current->sighand->siglock);
2512 cgroup_enter_frozen();
2513 freezable_schedule();
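/*
 * Illustrative sketch (userspace, not kernel code): writing "1" to a
 * cgroup v2 cgroup.freeze file sets JOBCTL_TRAP_FREEZE on the member
 * tasks, which then park here in do_freezer_trap(). The mount point and
 * group name are assumptions of this example.
 *
 *	int fd = open("/sys/fs/cgroup/mygrp/cgroup.freeze", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "1", 1);	/* freeze the group */
 *		write(fd, "0", 1);	/* ...and thaw it again */
 *		close(fd);
 *	}
 */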
2516 static int ptrace_signal(int signr, kernel_siginfo_t *info)
2519 * We do not check sig_kernel_stop(signr) but set this marker
2520 * unconditionally because we do not know whether debugger will
2521 * change signr. This flag has no meaning unless we are going
2522 * to stop after return from ptrace_stop(). In this case it will
2523 * be checked in do_signal_stop(), we should only stop if it was
2524 * not cleared by SIGCONT while we were sleeping. See also the
2525 * comment in dequeue_signal().
2527 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2528 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2530 /* We're back. Did the debugger cancel the sig? */
2531 signr = current->exit_code;
2535 current->exit_code = 0;
2538 * Update the siginfo structure if the signal has
2539 * changed. If the debugger wanted something
2540 * specific in the siginfo structure then it should
2541 * have updated *info via PTRACE_SETSIGINFO.
2543 if (signr != info->si_signo) {
2544 clear_siginfo(info);
2545 info->si_signo = signr;
2547 info->si_code = SI_USER;
2549 info->si_pid = task_pid_vnr(current->parent);
2550 info->si_uid = from_kuid_munged(current_user_ns(),
2551 task_uid(current->parent));
2555 /* If the (new) signal is now blocked, requeue it. */
2556 if (sigismember(&current->blocked, signr)) {
2557 send_signal(signr, info, current, PIDTYPE_PID);
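/*
 * Illustrative sketch (userspace debugger, not kernel code): the "data"
 * argument of PTRACE_CONT becomes the exit_code read back above, so a
 * tracer can suppress or replace the dequeued signal. pid and status
 * are assumptions of this example.
 *
 *	waitpid(pid, &status, 0);	/* tracee stopped in ptrace_signal() */
 *	if (WSTOPSIG(status) == SIGUSR1)
 *		ptrace(PTRACE_CONT, pid, 0, 0);			/* cancel it */
 *	else
 *		ptrace(PTRACE_CONT, pid, 0, WSTOPSIG(status));	/* redeliver */
 */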
2564 static void hide_si_addr_tag_bits(struct ksignal *ksig)
2566 switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2568 case SIL_FAULT_MCEERR:
2569 case SIL_FAULT_BNDERR:
2570 case SIL_FAULT_PKUERR:
2571 ksig->info.si_addr = arch_untagged_si_addr(
2572 ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2584 bool get_signal(struct ksignal *ksig)
2586 struct sighand_struct *sighand = current->sighand;
2587 struct signal_struct *signal = current->signal;
2590 if (unlikely(current->task_works))
2594 * For non-generic architectures, check for TIF_NOTIFY_SIGNAL so
2595 * that the arch handlers don't all have to do it. If we get here
2596 * without TIF_SIGPENDING, just exit after running signal work.
2598 if (!IS_ENABLED(CONFIG_GENERIC_ENTRY)) {
2599 if (test_thread_flag(TIF_NOTIFY_SIGNAL))
2600 tracehook_notify_signal();
2601 if (!task_sigpending(current))
2605 if (unlikely(uprobe_deny_signal()))
2609 * Do this once, we can't return to user-mode if freezing() == T.
2610 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2611 * thus do not need another check after return.
2616 spin_lock_irq(&sighand->siglock);
2619 * Every stopped thread goes here after wakeup. Check to see if
2620 * we should notify the parent, prepare_signal(SIGCONT) encodes
2621 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2623 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2626 if (signal->flags & SIGNAL_CLD_CONTINUED)
2627 why = CLD_CONTINUED;
2631 signal->flags &= ~SIGNAL_CLD_MASK;
2633 spin_unlock_irq(&sighand->siglock);
2636 * Notify the parent that we're continuing. This event is
2637 * always per-process and doesn't make a whole lot of sense
2638 * for ptracers, who shouldn't consume the state via
2639 * wait(2) either, but, for backward compatibility, notify
2640 * the ptracer of the group leader too unless it's gonna be
2643 read_lock(&tasklist_lock);
2644 do_notify_parent_cldstop(current, false, why);
2646 if (ptrace_reparented(current->group_leader))
2647 do_notify_parent_cldstop(current->group_leader,
2649 read_unlock(&tasklist_lock);
2654 /* Has this task already been marked for death? */
2655 if (signal_group_exit(signal)) {
2656 ksig->info.si_signo = signr = SIGKILL;
2657 sigdelset(&current->pending.signal, SIGKILL);
2658 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2659 &sighand->action[SIGKILL - 1]);
2660 recalc_sigpending();
2665 struct k_sigaction *ka;
2667 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2671 if (unlikely(current->jobctl &
2672 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2673 if (current->jobctl & JOBCTL_TRAP_MASK) {
2675 spin_unlock_irq(&sighand->siglock);
2676 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2683 * If the task is leaving the frozen state, let's update
2684 * cgroup counters and reset the frozen bit.
2686 if (unlikely(cgroup_task_frozen(current))) {
2687 spin_unlock_irq(&sighand->siglock);
2688 cgroup_leave_frozen(false);
2693 * Signals generated by the execution of an instruction
2694 * need to be delivered before any other pending signals
2695 * so that the instruction pointer in the signal stack
2696 * frame points to the faulting instruction.
2698 signr = dequeue_synchronous_signal(&ksig->info);
2700 signr = dequeue_signal(current, &current->blocked, &ksig->info);
2703 break; /* will return 0 */
2705 if (unlikely(current->ptrace) && signr != SIGKILL) {
2706 signr = ptrace_signal(signr, &ksig->info);
2711 ka = &sighand->action[signr-1];
2713 /* Trace actually delivered signals. */
2714 trace_signal_deliver(signr, &ksig->info, ka);
2716 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2718 if (ka->sa.sa_handler != SIG_DFL) {
2719 /* Run the handler. */
2722 if (ka->sa.sa_flags & SA_ONESHOT)
2723 ka->sa.sa_handler = SIG_DFL;
2725 break; /* will return non-zero "signr" value */
2729 * Now we are doing the default action for this signal.
2731 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2735 * Global init gets no signals it doesn't want.
2736 * Container-init gets no signals it doesn't want from the same container.
2739 * Note that if global/container-init sees a sig_kernel_only()
2740 * signal here, the signal must have been generated internally
2741 * or must have come from an ancestor namespace. In either
2742 * case, the signal cannot be dropped.
2744 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2745 !sig_kernel_only(signr))
2748 if (sig_kernel_stop(signr)) {
2750 * The default action is to stop all threads in
2751 * the thread group. The job control signals
2752 * do nothing in an orphaned pgrp, but SIGSTOP
2753 * always works. Note that siglock needs to be
2754 * dropped during the call to is_orphaned_pgrp()
2755 * because of lock ordering with tasklist_lock.
2756 * This allows an intervening SIGCONT to be posted.
2757 * We need to check for that and bail out if necessary.
2759 if (signr != SIGSTOP) {
2760 spin_unlock_irq(&sighand->siglock);
2762 /* signals can be posted during this window */
2764 if (is_current_pgrp_orphaned())
2767 spin_lock_irq(&sighand->siglock);
2770 if (likely(do_signal_stop(ksig->info.si_signo))) {
2771 /* It released the siglock. */
2776 * We didn't actually stop, due to a race
2777 * with SIGCONT or something like that.
2783 spin_unlock_irq(&sighand->siglock);
2784 if (unlikely(cgroup_task_frozen(current)))
2785 cgroup_leave_frozen(true);
2788 * Anything else is fatal, maybe with a core dump.
2790 current->flags |= PF_SIGNALED;
2792 if (sig_kernel_coredump(signr)) {
2793 if (print_fatal_signals)
2794 print_fatal_signal(ksig->info.si_signo);
2795 proc_coredump_connector(current);
2797 * If it was able to dump core, this kills all
2798 * other threads in the group and synchronizes with
2799 * their demise. If we lost the race with another
2800 * thread getting here, it set group_exit_code
2801 * first and our do_group_exit call below will use
2802 * that value and ignore the one we pass it.
2804 do_coredump(&ksig->info);
2808 * PF_IO_WORKER threads will catch and exit on fatal signals
2809 * themselves. They have cleanup that must be performed, so
2810 * we cannot call do_exit() on their behalf.
2812 if (current->flags & PF_IO_WORKER)
2816 * Death signals, no core dump.
2818 do_group_exit(ksig->info.si_signo);
2821 spin_unlock_irq(&sighand->siglock);
2825 if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2826 hide_si_addr_tag_bits(ksig);
2828 return ksig->sig > 0;
2832 * signal_delivered - called after a signal has been successfully delivered
2833 * @ksig: kernel signal struct
2834 * @stepping: nonzero if debugger single-step or block-step in use
2836 * This function should be called when a signal has successfully been
2837 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2838 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2839 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2841 static void signal_delivered(struct ksignal *ksig, int stepping)
2845 /* A signal was successfully delivered, and the
2846  * saved sigmask was stored on the signal frame,
2847  * and will be restored by sigreturn. So we can
2848  * simply clear the restore sigmask flag. */
2849 clear_restore_sigmask();
2851 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2852 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2853 sigaddset(&blocked, ksig->sig);
2854 set_current_blocked(&blocked);
2855 tracehook_signal_handler(stepping);
2858 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2861 force_sigsegv(ksig->sig);
2863 signal_delivered(ksig, stepping);
2867 * It could be that complete_signal() picked us to notify about the
2868 * group-wide signal. Other threads should be notified now to take
2869 * the shared signals in @which since we will not.
2871 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2874 struct task_struct *t;
2876 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2877 if (sigisemptyset(&retarget))
2881 while_each_thread(tsk, t) {
2882 if (t->flags & PF_EXITING)
2885 if (!has_pending_signals(&retarget, &t->blocked))
2887 /* Remove the signals this thread can handle. */
2888 sigandsets(&retarget, &retarget, &t->blocked);
2890 if (!task_sigpending(t))
2891 signal_wake_up(t, 0);
2893 if (sigisemptyset(&retarget))
2898 void exit_signals(struct task_struct *tsk)
2904 * @tsk is about to have PF_EXITING set - lock out users which
2905 * expect stable threadgroup.
2907 cgroup_threadgroup_change_begin(tsk);
2909 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2910 tsk->flags |= PF_EXITING;
2911 cgroup_threadgroup_change_end(tsk);
2915 spin_lock_irq(&tsk->sighand->siglock);
2917 * From now this task is not visible for group-wide signals,
2918 * see wants_signal(), do_signal_stop().
2920 tsk->flags |= PF_EXITING;
2922 cgroup_threadgroup_change_end(tsk);
2924 if (!task_sigpending(tsk))
2927 unblocked = tsk->blocked;
2928 signotset(&unblocked);
2929 retarget_shared_pending(tsk, &unblocked);
2931 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2932 task_participate_group_stop(tsk))
2933 group_stop = CLD_STOPPED;
2935 spin_unlock_irq(&tsk->sighand->siglock);
2938 * If group stop has completed, deliver the notification. This
2939 * should always go to the real parent of the group leader.
2941 if (unlikely(group_stop)) {
2942 read_lock(&tasklist_lock);
2943 do_notify_parent_cldstop(tsk, false, group_stop);
2944 read_unlock(&tasklist_lock);
2949 * System call entry points.
2953 * sys_restart_syscall - restart a system call
2955 SYSCALL_DEFINE0(restart_syscall)
2957 struct restart_block *restart = &current->restart_block;
2958 return restart->fn(restart);
2961 long do_no_restart_syscall(struct restart_block *param)
2966 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2968 if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
2969 sigset_t newblocked;
2970 /* A set of now blocked but previously unblocked signals. */
2971 sigandnsets(&newblocked, newset, &current->blocked);
2972 retarget_shared_pending(tsk, &newblocked);
2974 tsk->blocked = *newset;
2975 recalc_sigpending();
2979 * set_current_blocked - change current->blocked mask
2982 * It is wrong to change ->blocked directly, this helper should be used
2983 * to ensure the process can't miss a shared signal we are going to block.
2985 void set_current_blocked(sigset_t *newset)
2987 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2988 __set_current_blocked(newset);
2991 void __set_current_blocked(const sigset_t *newset)
2993 struct task_struct *tsk = current;
2996 * In case the signal mask hasn't changed, there is nothing we need
2997 * to do. The current->blocked shouldn't be modified by other task.
2999 if (sigequalsets(&tsk->blocked, newset))
3002 spin_lock_irq(&tsk->sighand->siglock);
3003 __set_task_blocked(tsk, newset);
3004 spin_unlock_irq(&tsk->sighand->siglock);
3008 * This is also useful for kernel threads that want to temporarily
3009 * (or permanently) block certain signals.
3011 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
3012 * interface happily blocks "unblockable" signals like SIGKILL and friends.
3015 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3017 struct task_struct *tsk = current;
3020 /* Lockless, only current can change ->blocked, never from irq */
3022 *oldset = tsk->blocked;
3026 sigorsets(&newset, &tsk->blocked, set);
3029 sigandnsets(&newset, &tsk->blocked, set);
3038 __set_current_blocked(&newset);
3041 EXPORT_SYMBOL(sigprocmask);
3044 * The API helps set app-provided sigmasks.
3046 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3047 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3049 * Note that it does set_restore_sigmask() in advance, so it must be always
3050 * paired with restore_saved_sigmask_unless() before return from syscall.
3052 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3058 if (sigsetsize != sizeof(sigset_t))
3060 if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3063 set_restore_sigmask();
3064 current->saved_sigmask = current->blocked;
3065 set_current_blocked(&kmask);
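/*
 * Illustrative sketch (userspace, not kernel code): ppoll(2) is one of
 * the callers that hands a sigmask to the kernel, applied here via
 * set_user_sigmask() for the duration of the wait. fds and nfds are
 * assumptions of this example.
 *
 *	sigset_t mask;
 *	sigprocmask(SIG_BLOCK, NULL, &mask);	/* fetch the current mask */
 *	sigdelset(&mask, SIGINT);		/* allow SIGINT only while polling */
 *	ppoll(fds, nfds, NULL, &mask);
 */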
3070 #ifdef CONFIG_COMPAT
3071 int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3078 if (sigsetsize != sizeof(compat_sigset_t))
3080 if (get_compat_sigset(&kmask, umask))
3083 set_restore_sigmask();
3084 current->saved_sigmask = current->blocked;
3085 set_current_blocked(&kmask);
3092 * sys_rt_sigprocmask - change the list of currently blocked signals
3093 * @how: whether to add, remove, or set signals
3094 * @nset: new signal mask to apply according to @how, if non-null
3095 * @oset: previous value of signal mask if non-null
3096 * @sigsetsize: size of sigset_t type
3098 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3099 sigset_t __user *, oset, size_t, sigsetsize)
3101 sigset_t old_set, new_set;
3104 /* XXX: Don't preclude handling different sized sigset_t's. */
3105 if (sigsetsize != sizeof(sigset_t))
3108 old_set = current->blocked;
3111 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3113 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3115 error = sigprocmask(how, &new_set, NULL);
3121 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
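/*
 * Illustrative sketch (userspace, not kernel code): the sigprocmask(2)
 * wrapper reaches this entry point via rt_sigprocmask. Blocking SIGINT
 * around a critical section:
 *
 *	sigset_t set, old;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &old);
 *	/* ... critical section, a SIGINT stays pending ... */
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */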
3128 #ifdef CONFIG_COMPAT
3129 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3130 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3132 sigset_t old_set = current->blocked;
3134 /* XXX: Don't preclude handling different sized sigset_t's. */
3135 if (sigsetsize != sizeof(sigset_t))
3141 if (get_compat_sigset(&new_set, nset))
3143 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3145 error = sigprocmask(how, &new_set, NULL);
3149 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3153 static void do_sigpending(sigset_t *set)
3155 spin_lock_irq(&current->sighand->siglock);
3156 sigorsets(set, &current->pending.signal,
3157 &current->signal->shared_pending.signal);
3158 spin_unlock_irq(&current->sighand->siglock);
3160 /* Outside the lock because only this thread touches it. */
3161 sigandsets(set, &current->blocked, set);
3165 * sys_rt_sigpending - examine a pending signal that has been raised while blocked
3167 * @uset: stores pending signals
3168 * @sigsetsize: size of sigset_t type or larger
3170 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3174 if (sigsetsize > sizeof(*uset))
3177 do_sigpending(&set);
3179 if (copy_to_user(uset, &set, sigsetsize))
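/*
 * Illustrative sketch (userspace, not kernel code): sigpending(2)
 * reports signals raised while blocked, as computed by do_sigpending()
 * above:
 *
 *	sigset_t pending;
 *	sigpending(&pending);
 *	if (sigismember(&pending, SIGINT))
 *		;	/* a SIGINT arrived while we had it blocked */
 */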
3185 #ifdef CONFIG_COMPAT
3186 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3187 compat_size_t, sigsetsize)
3191 if (sigsetsize > sizeof(*uset))
3194 do_sigpending(&set);
3196 return put_compat_sigset(uset, &set, sigsetsize);
3200 static const struct {
3201 unsigned char limit, layout;
} sig_sicodes[] = {
3203 [SIGILL] = { NSIGILL, SIL_FAULT },
3204 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3205 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3206 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3207 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3209 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3211 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3212 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3213 [SIGSYS] = { NSIGSYS, SIL_SYS },
3216 static bool known_siginfo_layout(unsigned sig, int si_code)
3218 if (si_code == SI_KERNEL)
3220 else if (si_code > SI_USER) {
3221 if (sig_specific_sicodes(sig)) {
3222 if (si_code <= sig_sicodes[sig].limit)
3225 else if (si_code <= NSIGPOLL)
3228 else if (si_code >= SI_DETHREAD)
3230 else if (si_code == SI_ASYNCNL)
3235 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3237 enum siginfo_layout layout = SIL_KILL;
3238 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3239 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3240 (si_code <= sig_sicodes[sig].limit)) {
3241 layout = sig_sicodes[sig].layout;
3242 /* Handle the exceptions */
3243 if ((sig == SIGBUS) &&
3244 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3245 layout = SIL_FAULT_MCEERR;
3246 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3247 layout = SIL_FAULT_BNDERR;
3249 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3250 layout = SIL_FAULT_PKUERR;
3253 else if (si_code <= NSIGPOLL)
3256 if (si_code == SI_TIMER)
3258 else if (si_code == SI_SIGIO)
3260 else if (si_code < 0)
3266 static inline char __user *si_expansion(const siginfo_t __user *info)
3268 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3271 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3273 char __user *expansion = si_expansion(to);
3274 if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3276 if (clear_user(expansion, SI_EXPANSION_SIZE))
3281 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3282 const siginfo_t __user *from)
3284 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3285 char __user *expansion = si_expansion(from);
3286 char buf[SI_EXPANSION_SIZE];
3289 * An unknown si_code might need more than
3290 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3291 * extra bytes are 0. This guarantees copy_siginfo_to_user
3292 * will return this data to userspace exactly.
3294 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3296 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3304 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3305 const siginfo_t __user *from)
3307 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3309 to->si_signo = signo;
3310 return post_copy_siginfo_from_user(to, from);
3313 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3315 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3317 return post_copy_siginfo_from_user(to, from);
3320 #ifdef CONFIG_COMPAT
3322 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3323 * @to: compat siginfo destination
3324 * @from: kernel siginfo source
3326 * Note: This function does not work properly for the SIGCHLD on x32, but
3327 * fortunately it doesn't have to. The only valid callers for this function are
3328 * copy_siginfo_to_user32, which is overridden for x32 and the coredump code.
3329 * The latter does not care because SIGCHLD will never cause a coredump.
3331 void copy_siginfo_to_external32(struct compat_siginfo *to,
3332 const struct kernel_siginfo *from)
3334 memset(to, 0, sizeof(*to));
3336 to->si_signo = from->si_signo;
3337 to->si_errno = from->si_errno;
3338 to->si_code = from->si_code;
3339 switch (siginfo_layout(from->si_signo, from->si_code)) {
3341 to->si_pid = from->si_pid;
3342 to->si_uid = from->si_uid;
3345 to->si_tid = from->si_tid;
3346 to->si_overrun = from->si_overrun;
3347 to->si_int = from->si_int;
3350 to->si_band = from->si_band;
3351 to->si_fd = from->si_fd;
3354 to->si_addr = ptr_to_compat(from->si_addr);
3355 #ifdef __ARCH_SI_TRAPNO
3356 to->si_trapno = from->si_trapno;
3359 case SIL_FAULT_MCEERR:
3360 to->si_addr = ptr_to_compat(from->si_addr);
3361 #ifdef __ARCH_SI_TRAPNO
3362 to->si_trapno = from->si_trapno;
3364 to->si_addr_lsb = from->si_addr_lsb;
3366 case SIL_FAULT_BNDERR:
3367 to->si_addr = ptr_to_compat(from->si_addr);
3368 #ifdef __ARCH_SI_TRAPNO
3369 to->si_trapno = from->si_trapno;
3371 to->si_lower = ptr_to_compat(from->si_lower);
3372 to->si_upper = ptr_to_compat(from->si_upper);
3374 case SIL_FAULT_PKUERR:
3375 to->si_addr = ptr_to_compat(from->si_addr);
3376 #ifdef __ARCH_SI_TRAPNO
3377 to->si_trapno = from->si_trapno;
3379 to->si_pkey = from->si_pkey;
3382 to->si_pid = from->si_pid;
3383 to->si_uid = from->si_uid;
3384 to->si_status = from->si_status;
3385 to->si_utime = from->si_utime;
3386 to->si_stime = from->si_stime;
3389 to->si_pid = from->si_pid;
3390 to->si_uid = from->si_uid;
3391 to->si_int = from->si_int;
3394 to->si_call_addr = ptr_to_compat(from->si_call_addr);
3395 to->si_syscall = from->si_syscall;
3396 to->si_arch = from->si_arch;
3401 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3402 const struct kernel_siginfo *from)
3404 struct compat_siginfo new;
3406 copy_siginfo_to_external32(&new, from);
3407 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3412 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3413 const struct compat_siginfo *from)
3416 to->si_signo = from->si_signo;
3417 to->si_errno = from->si_errno;
3418 to->si_code = from->si_code;
3419 switch (siginfo_layout(from->si_signo, from->si_code)) {
3421 to->si_pid = from->si_pid;
3422 to->si_uid = from->si_uid;
3425 to->si_tid = from->si_tid;
3426 to->si_overrun = from->si_overrun;
3427 to->si_int = from->si_int;
3430 to->si_band = from->si_band;
3431 to->si_fd = from->si_fd;
3434 to->si_addr = compat_ptr(from->si_addr);
3435 #ifdef __ARCH_SI_TRAPNO
3436 to->si_trapno = from->si_trapno;
3439 case SIL_FAULT_MCEERR:
3440 to->si_addr = compat_ptr(from->si_addr);
3441 #ifdef __ARCH_SI_TRAPNO
3442 to->si_trapno = from->si_trapno;
3444 to->si_addr_lsb = from->si_addr_lsb;
3446 case SIL_FAULT_BNDERR:
3447 to->si_addr = compat_ptr(from->si_addr);
3448 #ifdef __ARCH_SI_TRAPNO
3449 to->si_trapno = from->si_trapno;
3451 to->si_lower = compat_ptr(from->si_lower);
3452 to->si_upper = compat_ptr(from->si_upper);
3454 case SIL_FAULT_PKUERR:
3455 to->si_addr = compat_ptr(from->si_addr);
3456 #ifdef __ARCH_SI_TRAPNO
3457 to->si_trapno = from->si_trapno;
3459 to->si_pkey = from->si_pkey;
3462 to->si_pid = from->si_pid;
3463 to->si_uid = from->si_uid;
3464 to->si_status = from->si_status;
3465 #ifdef CONFIG_X86_X32_ABI
3466 if (in_x32_syscall()) {
3467 to->si_utime = from->_sifields._sigchld_x32._utime;
3468 to->si_stime = from->_sifields._sigchld_x32._stime;
3472 to->si_utime = from->si_utime;
3473 to->si_stime = from->si_stime;
3477 to->si_pid = from->si_pid;
3478 to->si_uid = from->si_uid;
3479 to->si_int = from->si_int;
3482 to->si_call_addr = compat_ptr(from->si_call_addr);
3483 to->si_syscall = from->si_syscall;
3484 to->si_arch = from->si_arch;
3490 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3491 const struct compat_siginfo __user *ufrom)
3493 struct compat_siginfo from;
3495 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3498 from.si_signo = signo;
3499 return post_copy_siginfo_from_user32(to, &from);
3502 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3503 const struct compat_siginfo __user *ufrom)
3505 struct compat_siginfo from;
3507 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3510 return post_copy_siginfo_from_user32(to, &from);
3512 #endif /* CONFIG_COMPAT */
3515 * do_sigtimedwait - wait for queued signals specified in @which
3516 * @which: queued signals to wait for
3517 * @info: if non-null, the signal's siginfo is returned here
3518 * @ts: upper bound on process time suspension
3520 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3521 const struct timespec64 *ts)
3523 ktime_t *to = NULL, timeout = KTIME_MAX;
3524 struct task_struct *tsk = current;
3525 sigset_t mask = *which;
3529 if (!timespec64_valid(ts))
3531 timeout = timespec64_to_ktime(*ts);
3536 * Invert the set of allowed signals to get those we want to block.
3538 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3541 spin_lock_irq(&tsk->sighand->siglock);
3542 sig = dequeue_signal(tsk, &mask, info);
3543 if (!sig && timeout) {
3545 * None ready, temporarily unblock those we're interested in
3546 * while we are sleeping, so that we'll be awakened when
3547 * they arrive. Unblocking is always fine, we can avoid
3548 * set_current_blocked().
3550 tsk->real_blocked = tsk->blocked;
3551 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3552 recalc_sigpending();
3553 spin_unlock_irq(&tsk->sighand->siglock);
3555 __set_current_state(TASK_INTERRUPTIBLE);
3556 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3558 spin_lock_irq(&tsk->sighand->siglock);
3559 __set_task_blocked(tsk, &tsk->real_blocked);
3560 sigemptyset(&tsk->real_blocked);
3561 sig = dequeue_signal(tsk, &mask, info);
3563 spin_unlock_irq(&tsk->sighand->siglock);
3567 return ret ? -EINTR : -EAGAIN;
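/*
 * Illustrative sketch (userspace, not kernel code): sigtimedwait(2)
 * maps onto do_sigtimedwait(). The signal should already be blocked, or
 * it may be delivered to a handler instead of being dequeued here:
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5 };
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &si, &ts) == SIGUSR1)
 *		;	/* dequeued within 5s; returns -1/EAGAIN on timeout */
 */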
3571 * sys_rt_sigtimedwait - synchronously wait for queued signals specified in @uthese
3573 * @uthese: queued signals to wait for
3574 * @uinfo: if non-null, the signal's siginfo is returned here
3575 * @uts: upper bound on process time suspension
3576 * @sigsetsize: size of sigset_t type
3578 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3579 siginfo_t __user *, uinfo,
3580 const struct __kernel_timespec __user *, uts,
3584 struct timespec64 ts;
3585 kernel_siginfo_t info;
3588 /* XXX: Don't preclude handling different sized sigset_t's. */
3589 if (sigsetsize != sizeof(sigset_t))
3592 if (copy_from_user(&these, uthese, sizeof(these)))
3596 if (get_timespec64(&ts, uts))
3600 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3602 if (ret > 0 && uinfo) {
3603 if (copy_siginfo_to_user(uinfo, &info))
3610 #ifdef CONFIG_COMPAT_32BIT_TIME
3611 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3612 siginfo_t __user *, uinfo,
3613 const struct old_timespec32 __user *, uts,
3617 struct timespec64 ts;
3618 kernel_siginfo_t info;
3621 if (sigsetsize != sizeof(sigset_t))
3624 if (copy_from_user(&these, uthese, sizeof(these)))
3628 if (get_old_timespec32(&ts, uts))
3632 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3634 if (ret > 0 && uinfo) {
3635 if (copy_siginfo_to_user(uinfo, &info))
3643 #ifdef CONFIG_COMPAT
3644 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3645 struct compat_siginfo __user *, uinfo,
3646 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3649 struct timespec64 t;
3650 kernel_siginfo_t info;
3653 if (sigsetsize != sizeof(sigset_t))
3656 if (get_compat_sigset(&s, uthese))
3660 if (get_timespec64(&t, uts))
3664 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3666 if (ret > 0 && uinfo) {
3667 if (copy_siginfo_to_user32(uinfo, &info))
3674 #ifdef CONFIG_COMPAT_32BIT_TIME
3675 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3676 struct compat_siginfo __user *, uinfo,
3677 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3680 struct timespec64 t;
3681 kernel_siginfo_t info;
3684 if (sigsetsize != sizeof(sigset_t))
3687 if (get_compat_sigset(&s, uthese))
3691 if (get_old_timespec32(&t, uts))
3695 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3697 if (ret > 0 && uinfo) {
3698 if (copy_siginfo_to_user32(uinfo, &info))
3707 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3709 clear_siginfo(info);
3710 info->si_signo = sig;
3712 info->si_code = SI_USER;
3713 info->si_pid = task_tgid_vnr(current);
3714 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3718 * sys_kill - send a signal to a process
3719 * @pid: the PID of the process
3720 * @sig: signal to be sent
3722 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3724 struct kernel_siginfo info;
3726 prepare_kill_siginfo(sig, &info);
3728 return kill_something_info(sig, &info, pid);
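/*
 * Illustrative sketch (userspace, not kernel code): the pid argument of
 * kill(2) selects the scope resolved by kill_something_info():
 *
 *	kill(1234, SIGTERM);	/* pid > 0: one process */
 *	kill(0, SIGTERM);	/* pid == 0: caller's process group */
 *	kill(-5678, SIGTERM);	/* pid < -1: process group 5678 */
 *	kill(-1, SIGTERM);	/* pid == -1: everything we may signal */
 *	kill(1234, 0);		/* sig == 0: existence/permission probe only */
 */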
3732 * Verify that the signaler and signalee either are in the same pid namespace
3733 * or that the signaler's pid namespace is an ancestor of the signalee's pid namespace.
3736 static bool access_pidfd_pidns(struct pid *pid)
3738 struct pid_namespace *active = task_active_pid_ns(current);
3739 struct pid_namespace *p = ns_of_pid(pid);
3752 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3753 siginfo_t __user *info)
3755 #ifdef CONFIG_COMPAT
3757 * Avoid hooking up compat syscalls and instead handle necessary
3758 * conversions here. Note, this is a stop-gap measure and should not be
3759 * considered a generic solution.
3761 if (in_compat_syscall())
3762 return copy_siginfo_from_user32(
3763 kinfo, (struct compat_siginfo __user *)info);
3765 return copy_siginfo_from_user(kinfo, info);
3768 static struct pid *pidfd_to_pid(const struct file *file)
3772 pid = pidfd_pid(file);
3776 return tgid_pidfd_to_pid(file);
3780 * sys_pidfd_send_signal - Signal a process through a pidfd
3781 * @pidfd: file descriptor of the process
3782 * @sig: signal to send
3783 * @info: signal info
3784 * @flags: future flags
3786 * The syscall currently only signals via PIDTYPE_PID which covers
3787 * kill(<positive-pid>, <signal>). It does not signal threads or process groups.
3789 * In order to extend the syscall to threads and process groups the @flags
3790 * argument should be used. In essence, the @flags argument will determine
3791 * what is signaled and not the file descriptor itself. In other words,
3792 * grouping is a property of the flags argument, not a property of the file descriptor.
3795 * Return: 0 on success, negative errno on failure
3797 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3798 siginfo_t __user *, info, unsigned int, flags)
3803 kernel_siginfo_t kinfo;
3805 /* Enforce that flags is 0 until we add an extension. */
3813 /* Is this a pidfd? */
3814 pid = pidfd_to_pid(f.file);
3821 if (!access_pidfd_pidns(pid))
3825 ret = copy_siginfo_from_user_any(&kinfo, info);
3830 if (unlikely(sig != kinfo.si_signo))
3833 /* Only allow sending arbitrary signals to yourself. */
3835 if ((task_pid(current) != pid) &&
3836 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3839 prepare_kill_siginfo(sig, &kinfo);
3842 ret = kill_pid_info(sig, &kinfo, pid);
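/*
 * Illustrative sketch (userspace, not kernel code): signaling through a
 * pidfd avoids PID-reuse races. Raw syscall(2) is used on the
 * assumption that libc provides no wrappers; the syscall numbers come
 * from <sys/syscall.h>.
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *	if (pidfd >= 0) {
 *		syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
 *		close(pidfd);
 *	}
 */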
3850 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3852 struct task_struct *p;
3856 p = find_task_by_vpid(pid);
3857 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3858 error = check_kill_permission(sig, info, p);
3860 * The null signal is a permissions and process existence
3861 * probe. No signal is actually delivered.
3863 if (!error && sig) {
3864 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3866 * If lock_task_sighand() failed we pretend the task
3867 * dies after receiving the signal. The window is tiny,
3868 * and the signal is private anyway.
3870 if (unlikely(error == -ESRCH))
3879 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3881 struct kernel_siginfo info;
3883 clear_siginfo(&info);
3884 info.si_signo = sig;
3886 info.si_code = SI_TKILL;
3887 info.si_pid = task_tgid_vnr(current);
3888 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3890 return do_send_specific(tgid, pid, sig, &info);
3894 * sys_tgkill - send signal to one specific thread
3895 * @tgid: the thread group ID of the thread
3896 * @pid: the PID of the thread
3897 * @sig: signal to be sent
3899 * This syscall also checks the @tgid and returns -ESRCH even if the PID
3900 * exists but no longer belongs to the target process. This
3901 * method solves the problem of threads exiting and PIDs getting reused.
3903 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3905 /* This is only valid for single tasks */
3906 if (pid <= 0 || tgid <= 0)
3909 return do_tkill(tgid, pid, sig);
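/*
 * Illustrative sketch (userspace, not kernel code): tgkill(2) targets
 * one thread and, per the check above, fails with -ESRCH if the TID has
 * been recycled into another process. Assumes a libc recent enough to
 * provide a tgkill() wrapper; tid is an assumption of this example.
 *
 *	tgkill(getpid(), tid, SIGUSR1);	/* signal one specific thread */
 */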
3913 * sys_tkill - send signal to one specific task
3914 * @pid: the PID of the task
3915 * @sig: signal to be sent
3917 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3919 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3921 /* This is only valid for single tasks */
3925 return do_tkill(0, pid, sig);
3928 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3930 /* Not even root can pretend to send signals from the kernel.
3931 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3933 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3934 (task_pid_vnr(current) != pid))
3937 /* POSIX.1b doesn't mention process groups. */
3938 return kill_proc_info(sig, info, pid);
3942 * sys_rt_sigqueueinfo - send signal information to a process
3943 * @pid: the PID of the thread
3944 * @sig: signal to be sent
3945 * @uinfo: signal info to be sent
3947 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3948 siginfo_t __user *, uinfo)
3950 kernel_siginfo_t info;
3951 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3954 return do_rt_sigqueueinfo(pid, sig, &info);
3957 #ifdef CONFIG_COMPAT
3958 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3961 struct compat_siginfo __user *, uinfo)
3963 kernel_siginfo_t info;
3964 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3967 return do_rt_sigqueueinfo(pid, sig, &info);
3971 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
3973 /* This is only valid for single tasks */
3974 if (pid <= 0 || tgid <= 0)
3977 /* Not even root can pretend to send signals from the kernel.
3978 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3980 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3981 (task_pid_vnr(current) != pid))
3984 return do_send_specific(tgid, pid, sig, info);
3987 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3988 siginfo_t __user *, uinfo)
3990 kernel_siginfo_t info;
3991 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3994 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3997 #ifdef CONFIG_COMPAT
3998 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
4002 struct compat_siginfo __user *, uinfo)
4004 kernel_siginfo_t info;
4005 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4008 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4013 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
4015 void kernel_sigaction(int sig, __sighandler_t action)
4017 spin_lock_irq(&current->sighand->siglock);
4018 current->sighand->action[sig - 1].sa.sa_handler = action;
4019 if (action == SIG_IGN) {
4023 sigaddset(&mask, sig);
4025 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
4026 flush_sigqueue_mask(&mask, &current->pending);
4027 recalc_sigpending();
4029 spin_unlock_irq(&current->sighand->siglock);
4031 EXPORT_SYMBOL(kernel_sigaction);
4033 void __weak sigaction_compat_abi(struct k_sigaction *act,
4034 struct k_sigaction *oact)
4038 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4040 struct task_struct *p = current, *t;
4041 struct k_sigaction *k;
4044 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4047 k = &p->sighand->action[sig-1];
4049 spin_lock_irq(&p->sighand->siglock);
4054 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4055 * e.g. by having an architecture use the bit in their uapi.
4057 BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4060 * Clear unknown flag bits in order to allow userspace to detect missing
4061 * support for flag bits and to allow the kernel to use non-uapi bits internally.
4065 act->sa.sa_flags &= UAPI_SA_FLAGS;
4067 oact->sa.sa_flags &= UAPI_SA_FLAGS;
4069 sigaction_compat_abi(act, oact);
4072 sigdelsetmask(&act->sa.sa_mask,
4073 sigmask(SIGKILL) | sigmask(SIGSTOP));
4077 * "Setting a signal action to SIG_IGN for a signal that is
4078 * pending shall cause the pending signal to be discarded,
4079 * whether or not it is blocked."
4081 * "Setting a signal action to SIG_DFL for a signal that is
4082 * pending and whose default action is to ignore the signal
4083 * (for example, SIGCHLD), shall cause the pending signal to
4084 * be discarded, whether or not it is blocked"
4086 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4088 sigaddset(&mask, sig);
4089 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4090 for_each_thread(p, t)
4091 flush_sigqueue_mask(&mask, &t->pending);
4095 spin_unlock_irq(&p->sighand->siglock);
4100 do_sigaltstack(const stack_t *ss, stack_t *oss, unsigned long sp,
4103 struct task_struct *t = current;
4106 memset(oss, 0, sizeof(stack_t));
4107 oss->ss_sp = (void __user *) t->sas_ss_sp;
4108 oss->ss_size = t->sas_ss_size;
4109 oss->ss_flags = sas_ss_flags(sp) |
4110 (current->sas_ss_flags & SS_FLAG_BITS);
4114 void __user *ss_sp = ss->ss_sp;
4115 size_t ss_size = ss->ss_size;
4116 unsigned ss_flags = ss->ss_flags;
4119 if (unlikely(on_sig_stack(sp)))
4122 ss_mode = ss_flags & ~SS_FLAG_BITS;
4123 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4127 if (ss_mode == SS_DISABLE) {
4131 if (unlikely(ss_size < min_ss_size))
4135 t->sas_ss_sp = (unsigned long) ss_sp;
4136 t->sas_ss_size = ss_size;
4137 t->sas_ss_flags = ss_flags;
4142 SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
4146 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4148 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4149 current_user_stack_pointer(),
4151 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
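/*
 * Illustrative sketch (userspace, not kernel code): pairing
 * sigaltstack(2) with an SA_ONSTACK handler so a stack-overflow SIGSEGV
 * can still run its handler. segv_handler is a hypothetical function.
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),
 *		.ss_size = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *	struct sigaction sa = { 0 };
 *	sa.sa_handler = segv_handler;
 *	sa.sa_flags = SA_ONSTACK;
 *	sigaltstack(&ss, NULL);
 *	sigaction(SIGSEGV, &sa, NULL);
 */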
4156 int restore_altstack(const stack_t __user *uss)
4159 if (copy_from_user(&new, uss, sizeof(stack_t)))
4161 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4163 /* squash all but EFAULT for now */
4167 int __save_altstack(stack_t __user *uss, unsigned long sp)
4169 struct task_struct *t = current;
4170 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4171 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4172 __put_user(t->sas_ss_size, &uss->ss_size);
4175 if (t->sas_ss_flags & SS_AUTODISARM)
4180 #ifdef CONFIG_COMPAT
4181 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4182 compat_stack_t __user *uoss_ptr)
4188 compat_stack_t uss32;
4189 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4191 uss.ss_sp = compat_ptr(uss32.ss_sp);
4192 uss.ss_flags = uss32.ss_flags;
4193 uss.ss_size = uss32.ss_size;
4195 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4196 compat_user_stack_pointer(),
4197 COMPAT_MINSIGSTKSZ);
4198 if (ret >= 0 && uoss_ptr) {
4200 memset(&old, 0, sizeof(old));
4201 old.ss_sp = ptr_to_compat(uoss.ss_sp);
4202 old.ss_flags = uoss.ss_flags;
4203 old.ss_size = uoss.ss_size;
4204 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4210 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4211 const compat_stack_t __user *, uss_ptr,
4212 compat_stack_t __user *, uoss_ptr)
4214 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4217 int compat_restore_altstack(const compat_stack_t __user *uss)
4219 int err = do_compat_sigaltstack(uss, NULL);
4220 /* squash all but -EFAULT for now */
4221 return err == -EFAULT ? err : 0;
4224 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4227 struct task_struct *t = current;
4228 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4230 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4231 __put_user(t->sas_ss_size, &uss->ss_size);
4234 if (t->sas_ss_flags & SS_AUTODISARM)
4240 #ifdef __ARCH_WANT_SYS_SIGPENDING
4243 * sys_sigpending - examine pending signals
4244 * @uset: where the mask of pending signals is returned
4246 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4250 if (sizeof(old_sigset_t) > sizeof(*uset))
4253 do_sigpending(&set);
4255 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4261 #ifdef CONFIG_COMPAT
4262 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4266 do_sigpending(&set);
4268 return put_user(set.sig[0], set32);
4274 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4276 * sys_sigprocmask - examine and change blocked signals
4277 * @how: whether to add, remove, or set signals
4278 * @nset: signals to add or remove (if non-null)
4279 * @oset: previous value of signal mask if non-null
4281 * Some platforms have their own version with special arguments;
4282 * others support only sys_rt_sigprocmask.
4285 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4286 old_sigset_t __user *, oset)
4288 old_sigset_t old_set, new_set;
4289 sigset_t new_blocked;
4291 old_set = current->blocked.sig[0];
4294 if (copy_from_user(&new_set, nset, sizeof(*nset)))
4297 new_blocked = current->blocked;
4301 sigaddsetmask(&new_blocked, new_set);
4304 sigdelsetmask(&new_blocked, new_set);
4307 new_blocked.sig[0] = new_set;
4313 set_current_blocked(&new_blocked);
4317 if (copy_to_user(oset, &old_set, sizeof(*oset)))
4323 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4325 #ifndef CONFIG_ODD_RT_SIGACTION
4327 * sys_rt_sigaction - alter an action taken by a process
4328 * @sig: signal to be sent
4329 * @act: new sigaction
4330 * @oact: used to save the previous sigaction
4331 * @sigsetsize: size of sigset_t type
4333 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4334 const struct sigaction __user *, act,
4335 struct sigaction __user *, oact,
4338 struct k_sigaction new_sa, old_sa;
4341 /* XXX: Don't preclude handling different sized sigset_t's. */
4342 if (sigsetsize != sizeof(sigset_t))
4345 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4348 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4352 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
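/*
 * Illustrative sketch (userspace, not kernel code): sigaction(2) is the
 * portable front end of this entry point. Installing a SIGCHLD handler
 * that restarts interrupted syscalls; reap_children is a hypothetical
 * function.
 *
 *	struct sigaction sa = { 0 };
 *	sa.sa_handler = reap_children;
 *	sa.sa_flags = SA_RESTART;
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGCHLD, &sa, NULL);
 */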
4357 #ifdef CONFIG_COMPAT
4358 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4359 const struct compat_sigaction __user *, act,
4360 struct compat_sigaction __user *, oact,
4361 compat_size_t, sigsetsize)
4363 struct k_sigaction new_ka, old_ka;
4364 #ifdef __ARCH_HAS_SA_RESTORER
4365 compat_uptr_t restorer;
4369 /* XXX: Don't preclude handling different sized sigset_t's. */
4370 if (sigsetsize != sizeof(compat_sigset_t))
4374 compat_uptr_t handler;
4375 ret = get_user(handler, &act->sa_handler);
4376 new_ka.sa.sa_handler = compat_ptr(handler);
4377 #ifdef __ARCH_HAS_SA_RESTORER
4378 ret |= get_user(restorer, &act->sa_restorer);
4379 new_ka.sa.sa_restorer = compat_ptr(restorer);
4381 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4382 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4387 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4389 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4391 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4392 sizeof(oact->sa_mask));
4393 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4394 #ifdef __ARCH_HAS_SA_RESTORER
4395 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4396 &oact->sa_restorer);
4402 #endif /* !CONFIG_ODD_RT_SIGACTION */
4404 #ifdef CONFIG_OLD_SIGACTION
4405 SYSCALL_DEFINE3(sigaction, int, sig,
4406 const struct old_sigaction __user *, act,
4407 struct old_sigaction __user *, oact)
4409 struct k_sigaction new_ka, old_ka;
4414 if (!access_ok(act, sizeof(*act)) ||
4415 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4416 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4417 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4418 __get_user(mask, &act->sa_mask))
4420 #ifdef __ARCH_HAS_KA_RESTORER
4421 new_ka.ka_restorer = NULL;
4423 siginitset(&new_ka.sa.sa_mask, mask);
4426 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4429 if (!access_ok(oact, sizeof(*oact)) ||
4430 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4431 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4432 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4433 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4440 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4441 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4442 const struct compat_old_sigaction __user *, act,
4443 struct compat_old_sigaction __user *, oact)
4445 struct k_sigaction new_ka, old_ka;
4447 compat_old_sigset_t mask;
4448 compat_uptr_t handler, restorer;
4451 if (!access_ok(act, sizeof(*act)) ||
4452 __get_user(handler, &act->sa_handler) ||
4453 __get_user(restorer, &act->sa_restorer) ||
4454 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4455 __get_user(mask, &act->sa_mask))
4458 #ifdef __ARCH_HAS_KA_RESTORER
4459 new_ka.ka_restorer = NULL;
4461 new_ka.sa.sa_handler = compat_ptr(handler);
4462 new_ka.sa.sa_restorer = compat_ptr(restorer);
4463 siginitset(&new_ka.sa.sa_mask, mask);
4466 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4469 if (!access_ok(oact, sizeof(*oact)) ||
4470 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4471 &oact->sa_handler) ||
4472 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4473 &oact->sa_restorer) ||
4474 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4475 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4482 #ifdef CONFIG_SGETMASK_SYSCALL
4485 * For backwards compatibility. Functionality superseded by sigprocmask.
4487 SYSCALL_DEFINE0(sgetmask)
4490 return current->blocked.sig[0];
4493 SYSCALL_DEFINE1(ssetmask, int, newmask)
4495 int old = current->blocked.sig[0];
4498 siginitset(&newset, newmask);
4499 set_current_blocked(&newset);
4503 #endif /* CONFIG_SGETMASK_SYSCALL */
4505 #ifdef __ARCH_WANT_SYS_SIGNAL
4507 * For backwards compatibility. Functionality superseded by sigaction.
4509 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4511 struct k_sigaction new_sa, old_sa;
4514 new_sa.sa.sa_handler = handler;
4515 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4516 sigemptyset(&new_sa.sa.sa_mask);
4518 ret = do_sigaction(sig, &new_sa, &old_sa);
4520 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4522 #endif /* __ARCH_WANT_SYS_SIGNAL */
4524 #ifdef __ARCH_WANT_SYS_PAUSE
4526 SYSCALL_DEFINE0(pause)
4528 while (!signal_pending(current)) {
4529 __set_current_state(TASK_INTERRUPTIBLE);
4532 return -ERESTARTNOHAND;
4537 static int sigsuspend(sigset_t *set)
4539 current->saved_sigmask = current->blocked;
4540 set_current_blocked(set);
4542 while (!signal_pending(current)) {
4543 __set_current_state(TASK_INTERRUPTIBLE);
4546 set_restore_sigmask();
4547 return -ERESTARTNOHAND;
4551 * sys_rt_sigsuspend - replace the signal mask with the
4552 * @unewset value until a signal is received
4553 * @unewset: new signal mask value
4554 * @sigsetsize: size of sigset_t type
4556 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4560 /* XXX: Don't preclude handling different sized sigset_t's. */
4561 if (sigsetsize != sizeof(sigset_t))
4564 if (copy_from_user(&newset, unewset, sizeof(newset)))
4566 return sigsuspend(&newset);
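/*
 * Illustrative sketch (userspace, not kernel code): the classic
 * race-free wait pattern sigsuspend(2) exists for - block the signal,
 * test the flag, then atomically unblock and sleep. got_usr1 is a
 * hypothetical flag set by the handler.
 *
 *	sigset_t block, old;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	while (!got_usr1)
 *		sigsuspend(&old);	/* sleep with SIGUSR1 atomically unblocked */
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */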
4569 #ifdef CONFIG_COMPAT
4570 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4574 /* XXX: Don't preclude handling different sized sigset_t's. */
4575 if (sigsetsize != sizeof(sigset_t))
4578 if (get_compat_sigset(&newset, unewset))
4580 return sigsuspend(&newset);
4584 #ifdef CONFIG_OLD_SIGSUSPEND
4585 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4588 siginitset(&blocked, mask);
4589 return sigsuspend(&blocked);
4592 #ifdef CONFIG_OLD_SIGSUSPEND3
4593 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4596 siginitset(&blocked, mask);
4597 return sigsuspend(&blocked);
4601 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4606 static inline void siginfo_buildtime_checks(void)
4608 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4610 /* Verify the offsets in the two siginfos match */
4611 #define CHECK_OFFSET(field) \
4612 BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4615 CHECK_OFFSET(si_pid);
4616 CHECK_OFFSET(si_uid);
4619 CHECK_OFFSET(si_tid);
4620 CHECK_OFFSET(si_overrun);
4621 CHECK_OFFSET(si_value);
4624 CHECK_OFFSET(si_pid);
4625 CHECK_OFFSET(si_uid);
4626 CHECK_OFFSET(si_value);
4629 CHECK_OFFSET(si_pid);
4630 CHECK_OFFSET(si_uid);
4631 CHECK_OFFSET(si_status);
4632 CHECK_OFFSET(si_utime);
4633 CHECK_OFFSET(si_stime);
4636 CHECK_OFFSET(si_addr);
4637 CHECK_OFFSET(si_addr_lsb);
4638 CHECK_OFFSET(si_lower);
4639 CHECK_OFFSET(si_upper);
4640 CHECK_OFFSET(si_pkey);
4643 CHECK_OFFSET(si_band);
4644 CHECK_OFFSET(si_fd);
4647 CHECK_OFFSET(si_call_addr);
4648 CHECK_OFFSET(si_syscall);
4649 CHECK_OFFSET(si_arch);
4653 BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4654 offsetof(struct siginfo, si_addr));
4655 if (sizeof(int) == sizeof(void __user *)) {
4656 BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4657 sizeof(void __user *));
4659 BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4660 sizeof_field(struct siginfo, si_uid)) !=
4661 sizeof(void __user *));
4662 BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4663 offsetof(struct siginfo, si_uid));
4665 #ifdef CONFIG_COMPAT
4666 BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4667 offsetof(struct compat_siginfo, si_addr));
4668 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4669 sizeof(compat_uptr_t));
4670 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4671 sizeof_field(struct siginfo, si_pid));
4675 void __init signals_init(void)
4677 siginfo_buildtime_checks();
4679 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
4682 #ifdef CONFIG_KGDB_KDB
4683 #include <linux/kdb.h>
4685 * kdb_send_sig - Allows kdb to send signals without exposing
4686 * signal internals. This function checks if the required locks are
4687 * available before calling the main signal code, to avoid kdb deadlocks.
4690 void kdb_send_sig(struct task_struct *t, int sig)
4692 static struct task_struct *kdb_prev_t;
4694 if (!spin_trylock(&t->sighand->siglock)) {
4695 kdb_printf("Can't do kill command now.\n"
4696 "The sigmask lock is held somewhere else in "
4697 "kernel, try again later\n");
4700 new_t = kdb_prev_t != t;
4702 if (t->state != TASK_RUNNING && new_t) {
4703 spin_unlock(&t->sighand->siglock);
4704 kdb_printf("Process is not RUNNING, sending a signal from "
4705 "kdb risks deadlock\n"
4706 "on the run queue locks. "
4707 "The signal has _not_ been sent.\n"
4708 "Reissue the kill command if you want to risk "
4712 ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4713 spin_unlock(&t->sighand->siglock);
4715 kdb_printf("Fail to deliver Signal %d to process %d.\n",
4718 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4720 #endif /* CONFIG_KGDB_KDB */