diff --git a/kernel/signal.c b/kernel/signal.c
index db442c59219ea204da585b25aa0fb919a4159e7c..72bb4f51f9634c448ad6eb4986d8ed7f35deb1f1 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -533,6 +533,7 @@ static int rm_from_queue(unsigned long mask, struct sigpending *s)
 static int check_kill_permission(int sig, struct siginfo *info,
                                 struct task_struct *t)
 {
+       struct pid *sid;
        int error;
 
        if (!valid_signal(sig))
@@ -545,11 +546,22 @@ static int check_kill_permission(int sig, struct siginfo *info,
        if (error)
                return error;
 
-       if (((sig != SIGCONT) || (task_session_nr(current) != task_session_nr(t)))
-           && (current->euid ^ t->suid) && (current->euid ^ t->uid)
-           && (current->uid ^ t->suid) && (current->uid ^ t->uid)
-           && !capable(CAP_KILL))
-               return -EPERM;
+       if ((current->euid ^ t->suid) && (current->euid ^ t->uid) &&
+           (current->uid  ^ t->suid) && (current->uid  ^ t->uid) &&
+           !capable(CAP_KILL)) {
+               switch (sig) {
+               case SIGCONT:
+                       sid = task_session(t);
+                       /*
+                        * We don't return the error if sid == NULL. The
+                        * task was unhashed, the caller must notice this.
+                        */
+                       if (!sid || sid == task_session(current))
+                               break;
+               default:
+                       return -EPERM;
+               }
+       }
 
        return security_task_kill(t, info, sig, 0);
 }
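
[Editor's note: the permission check keeps the kernel's historical `(a ^ b)` idiom, which as a truth value is simply `a != b`, so the chain above means "none of the sender's uid/euid matches the target's uid/suid". A minimal standalone restatement, with a hypothetical helper name that is not part of the patch:

        #include <stdbool.h>
        #include <sys/types.h>

        /* Equivalent of the four `^` tests in check_kill_permission(). */
        static bool no_uid_match(uid_t s_euid, uid_t s_uid,
                                 uid_t t_uid, uid_t t_suid)
        {
                return (s_euid != t_suid) && (s_euid != t_uid) &&
                       (s_uid  != t_suid) && (s_uid  != t_uid);
        }
]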
@@ -558,24 +570,25 @@ static int check_kill_permission(int sig, struct siginfo *info,
 static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
 
 /*
- * Handle magic process-wide effects of stop/continue signals.
- * Unlike the signal actions, these happen immediately at signal-generation
+ * Handle magic process-wide effects of stop/continue signals. Unlike
+ * the signal actions, these happen immediately at signal-generation
  * time regardless of blocking, ignoring, or handling.  This does the
  * actual continuing for SIGCONT, but not the actual stopping for stop
- * signals.  The process stop is done as a signal action for SIG_DFL.
+ * signals. The process stop is done as a signal action for SIG_DFL.
+ *
+ * Returns true if the signal should be actually delivered, otherwise
+ * it should be dropped.
  */
-static void handle_stop_signal(int sig, struct task_struct *p)
+static int prepare_signal(int sig, struct task_struct *p)
 {
        struct signal_struct *signal = p->signal;
        struct task_struct *t;
 
-       if (signal->flags & SIGNAL_GROUP_EXIT)
+       if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
                /*
-                * The process is in the middle of dying already.
+                * The process is in the middle of dying, nothing to do.
                 */
-               return;
-
-       if (sig_kernel_stop(sig)) {
+       } else if (sig_kernel_stop(sig)) {
                /*
                 * This is a stop signal.  Remove SIGCONT from all queues.
                 */
@@ -632,6 +645,11 @@ static void handle_stop_signal(int sig, struct task_struct *p)
                        why |= SIGNAL_CLD_STOPPED;
 
                if (why) {
+                       /*
+                        * The first thread which returns from finish_stop()
+                        * will take ->siglock, notice SIGNAL_CLD_MASK, and
+                        * notify its parent. See get_signal_to_deliver().
+                        */
                        signal->flags = why | SIGNAL_STOP_CONTINUED;
                        signal->group_stop_count = 0;
                        signal->group_exit_code = 0;
@@ -643,13 +661,105 @@ static void handle_stop_signal(int sig, struct task_struct *p)
                         */
                        signal->flags &= ~SIGNAL_STOP_DEQUEUED;
                }
-       } else if (sig == SIGKILL) {
+       }
+
+       return !sig_ignored(p, sig);
+}
+
+/*
+ * Test if P wants to take SIG.  After we've checked all threads with this,
+ * it's equivalent to finding no threads not blocking SIG.  Any threads not
+ * blocking SIG were ruled out because they are not running and already
+ * have pending signals.  Such threads will dequeue from the shared queue
+ * as soon as they're available, so putting the signal on the shared queue
+ * will be equivalent to sending it to one such thread.
+ */
+static inline int wants_signal(int sig, struct task_struct *p)
+{
+       if (sigismember(&p->blocked, sig))
+               return 0;
+       if (p->flags & PF_EXITING)
+               return 0;
+       if (sig == SIGKILL)
+               return 1;
+       if (task_is_stopped_or_traced(p))
+               return 0;
+       return task_curr(p) || !signal_pending(p);
+}
+
+static void complete_signal(int sig, struct task_struct *p, int group)
+{
+       struct signal_struct *signal = p->signal;
+       struct task_struct *t;
+
+       /*
+        * Now find a thread we can wake up to take the signal off the queue.
+        *
+        * If the main thread wants the signal, it gets first crack.
+        * Probably the least surprising to the average bear.
+        */
+       if (wants_signal(sig, p))
+               t = p;
+       else if (!group || thread_group_empty(p))
+               /*
+                * There is just one thread and it does not need to be woken.
+                * It will dequeue unblocked signals before it runs again.
+                */
+               return;
+       else {
+               /*
+                * Otherwise try to find a suitable thread.
+                */
+               t = signal->curr_target;
+               while (!wants_signal(sig, t)) {
+                       t = next_thread(t);
+                       if (t == signal->curr_target)
+                               /*
+                                * No thread needs to be woken.
+                                * Any eligible threads will see
+                                * the signal in the queue soon.
+                                */
+                               return;
+               }
+               signal->curr_target = t;
+       }
+
+       /*
+        * Found a killable thread.  If the signal will be fatal,
+        * then start taking the whole group down immediately.
+        */
+       if (sig_fatal(p, sig) &&
+           !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
+           !sigismember(&t->real_blocked, sig) &&
+           (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
                /*
-                * Make sure that any pending stop signal already dequeued
-                * is undone by the wakeup for SIGKILL.
+                * This signal will be fatal to the whole group.
                 */
-               signal->flags &= ~SIGNAL_STOP_DEQUEUED;
+               if (!sig_kernel_coredump(sig)) {
+                       /*
+                        * Start a group exit and wake everybody up.
+                        * This way we don't have other threads
+                        * running and doing things after a slower
+                        * thread has the fatal signal pending.
+                        */
+                       signal->flags = SIGNAL_GROUP_EXIT;
+                       signal->group_exit_code = sig;
+                       signal->group_stop_count = 0;
+                       t = p;
+                       do {
+                               sigaddset(&t->pending.signal, SIGKILL);
+                               signal_wake_up(t, 1);
+                       } while_each_thread(p, t);
+                       return;
+               }
        }
+
+       /*
+        * The signal is already in the shared-pending queue.
+        * Tell the chosen thread to wake up and dequeue it.
+        */
+       signal_wake_up(t, sig == SIGKILL);
+       return;
 }
 
 static inline int legacy_queue(struct sigpending *signals, int sig)
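
[Editor's note: the loop in complete_signal() that walks the thread group starting at signal->curr_target is a plain round-robin scan over a circular list. A standalone sketch of the same pattern, with made-up names and not kernel code, may make the termination condition easier to see:

        #include <stdbool.h>
        #include <stddef.h>

        struct node {
                struct node *next;      /* circular list, like a thread group */
                bool wants_it;          /* stand-in for wants_signal() */
        };

        /* Start at the previously chosen target; give up once we wrap around. */
        static struct node *pick_target(struct node **curr_target)
        {
                struct node *t = *curr_target;

                while (!t->wants_it) {
                        t = t->next;
                        if (t == *curr_target)
                                return NULL;    /* nobody needs waking */
                }
                *curr_target = t;               /* remember for next time */
                return t;
        }
]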
@@ -658,24 +768,23 @@ static inline int legacy_queue(struct sigpending *signals, int sig)
 }
 
 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
-                       struct sigpending *signals)
+                       int group)
 {
-       struct sigqueue * q = NULL;
+       struct sigpending *pending;
+       struct sigqueue *q;
 
+       assert_spin_locked(&t->sighand->siglock);
+       if (!prepare_signal(sig, t))
+               return 0;
+
+       pending = group ? &t->signal->shared_pending : &t->pending;
        /*
         * Short-circuit ignored signals and support queuing
         * exactly one non-rt signal, so that we can get more
         * detailed information about the cause of the signal.
         */
-       if (sig_ignored(t, sig) || legacy_queue(signals, sig))
+       if (legacy_queue(pending, sig))
                return 0;
-
-       /*
-        * Deliver the signal to listening signalfds. This must be called
-        * with the sighand lock held.
-        */
-       signalfd_notify(t, sig);
-
        /*
         * fast-pathed signals for kernel-internal things like SIGSTOP
         * or SIGKILL.
@@ -695,7 +804,7 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
                                             (is_si_special(info) ||
                                              info->si_code >= 0)));
        if (q) {
-               list_add_tail(&q->list, &signals->list);
+               list_add_tail(&q->list, &pending->list);
                switch ((unsigned long) info) {
                case (unsigned long) SEND_SIG_NOINFO:
                        q->info.si_signo = sig;
@@ -725,8 +834,10 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
        }
 
 out_set:
-       sigaddset(&signals->signal, sig);
-       return 1;
+       signalfd_notify(t, sig);
+       sigaddset(&pending->signal, sig);
+       complete_signal(sig, t, group);
+       return 0;
 }
 
 int print_fatal_signals;
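
[Editor's note: the legacy_queue() check above preserves the POSIX rule that a classic (non-realtime) signal pends at most once per queue. A small userspace demonstration of the visible effect, illustrative only and not part of the patch:

        #include <signal.h>
        #include <stdio.h>

        static volatile sig_atomic_t hits;

        static void handler(int sig)
        {
                (void)sig;
                hits++;
        }

        int main(void)
        {
                sigset_t set;

                signal(SIGUSR1, handler);
                sigemptyset(&set);
                sigaddset(&set, SIGUSR1);

                sigprocmask(SIG_BLOCK, &set, NULL);
                raise(SIGUSR1);
                raise(SIGUSR1);         /* collapses with the one already pending */
                sigprocmask(SIG_UNBLOCK, &set, NULL);

                printf("delivered %d time(s)\n", (int)hits);    /* prints 1 */
                return 0;
        }
]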
@@ -761,21 +872,16 @@ static int __init setup_print_fatal_signals(char *str)
 
 __setup("print-fatal-signals=", setup_print_fatal_signals);
 
+int
+__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
+{
+       return send_signal(sig, info, p, 1);
+}
+
 static int
 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
 {
-       int ret;
-
-       BUG_ON(!irqs_disabled());
-       assert_spin_locked(&t->sighand->siglock);
-
-       ret = send_signal(sig, info, t, &t->pending);
-       if (ret <= 0)
-               return ret;
-
-       if (!sigismember(&t->blocked, sig))
-               signal_wake_up(t, sig == SIGKILL);
-       return 0;
+       return send_signal(sig, info, t, 0);
 }
 
 /*
@@ -786,7 +892,8 @@ specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
  * since we do not want to have a signal handler that was blocked
  * be invoked when user space had explicitly blocked it.
  *
- * We don't want to have recursive SIGSEGV's etc, for example.
+ * We don't want to have recursive SIGSEGV's etc, for example,
+ * that is why we also clear SIGNAL_UNKILLABLE.
  */
 int
 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
@@ -806,6 +913,8 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
                        recalc_sigpending_and_wake(t);
                }
        }
+       if (action->sa.sa_handler == SIG_DFL)
+               t->signal->flags &= ~SIGNAL_UNKILLABLE;
        ret = specific_send_sig_info(sig, info, t);
        spin_unlock_irqrestore(&t->sighand->siglock, flags);
 
@@ -818,127 +927,6 @@ force_sig_specific(int sig, struct task_struct *t)
        force_sig_info(sig, SEND_SIG_FORCED, t);
 }
 
-/*
- * Test if P wants to take SIG.  After we've checked all threads with this,
- * it's equivalent to finding no threads not blocking SIG.  Any threads not
- * blocking SIG were ruled out because they are not running and already
- * have pending signals.  Such threads will dequeue from the shared queue
- * as soon as they're available, so putting the signal on the shared queue
- * will be equivalent to sending it to one such thread.
- */
-static inline int wants_signal(int sig, struct task_struct *p)
-{
-       if (sigismember(&p->blocked, sig))
-               return 0;
-       if (p->flags & PF_EXITING)
-               return 0;
-       if (sig == SIGKILL)
-               return 1;
-       if (task_is_stopped_or_traced(p))
-               return 0;
-       return task_curr(p) || !signal_pending(p);
-}
-
-static void
-__group_complete_signal(int sig, struct task_struct *p)
-{
-       struct signal_struct *signal = p->signal;
-       struct task_struct *t;
-
-       /*
-        * Now find a thread we can wake up to take the signal off the queue.
-        *
-        * If the main thread wants the signal, it gets first crack.
-        * Probably the least surprising to the average bear.
-        */
-       if (wants_signal(sig, p))
-               t = p;
-       else if (thread_group_empty(p))
-               /*
-                * There is just one thread and it does not need to be woken.
-                * It will dequeue unblocked signals before it runs again.
-                */
-               return;
-       else {
-               /*
-                * Otherwise try to find a suitable thread.
-                */
-               t = signal->curr_target;
-               if (t == NULL)
-                       /* restart balancing at this thread */
-                       t = signal->curr_target = p;
-
-               while (!wants_signal(sig, t)) {
-                       t = next_thread(t);
-                       if (t == signal->curr_target)
-                               /*
-                                * No thread needs to be woken.
-                                * Any eligible threads will see
-                                * the signal in the queue soon.
-                                */
-                               return;
-               }
-               signal->curr_target = t;
-       }
-
-       /*
-        * Found a killable thread.  If the signal will be fatal,
-        * then start taking the whole group down immediately.
-        */
-       if (sig_fatal(p, sig) && !(signal->flags & SIGNAL_GROUP_EXIT) &&
-           !sigismember(&t->real_blocked, sig) &&
-           (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
-               /*
-                * This signal will be fatal to the whole group.
-                */
-               if (!sig_kernel_coredump(sig)) {
-                       /*
-                        * Start a group exit and wake everybody up.
-                        * This way we don't have other threads
-                        * running and doing things after a slower
-                        * thread has the fatal signal pending.
-                        */
-                       signal->flags = SIGNAL_GROUP_EXIT;
-                       signal->group_exit_code = sig;
-                       signal->group_stop_count = 0;
-                       t = p;
-                       do {
-                               sigaddset(&t->pending.signal, SIGKILL);
-                               signal_wake_up(t, 1);
-                       } while_each_thread(p, t);
-                       return;
-               }
-       }
-
-       /*
-        * The signal is already in the shared-pending queue.
-        * Tell the chosen thread to wake up and dequeue it.
-        */
-       signal_wake_up(t, sig == SIGKILL);
-       return;
-}
-
-int
-__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
-{
-       int ret;
-
-       assert_spin_locked(&p->sighand->siglock);
-       handle_stop_signal(sig, p);
-
-       /*
-        * Put this signal on the shared-pending queue, or fail with EAGAIN.
-        * We always use the shared queue for process-wide signals,
-        * to avoid several races.
-        */
-       ret = send_signal(sig, info, p, &p->signal->shared_pending);
-       if (ret <= 0)
-               return ret;
-
-       __group_complete_signal(sig, p);
-       return 0;
-}
-
 /*
  * Nuke all other threads in the group.
  */
@@ -1142,8 +1130,7 @@ static int kill_something_info(int sig, struct siginfo *info, int pid)
  */
 
 /*
- * These two are the most common entry points.  They send a signal
- * just to the specific thread.
+ * The caller must ensure the task can't exit.
  */
 int
 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
@@ -1158,17 +1145,9 @@ send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
        if (!valid_signal(sig))
                return -EINVAL;
 
-       /*
-        * We need the tasklist lock even for the specific
-        * thread case (when we don't need to follow the group
-        * lists) in order to avoid races with "p->sighand"
-        * going away or changing from under us.
-        */
-       read_lock(&tasklist_lock);  
        spin_lock_irqsave(&p->sighand->siglock, flags);
        ret = specific_send_sig_info(sig, info, p);
        spin_unlock_irqrestore(&p->sighand->siglock, flags);
-       read_unlock(&tasklist_lock);
        return ret;
 }
 
@@ -1274,76 +1253,42 @@ void sigqueue_free(struct sigqueue *q)
        __sigqueue_free(q);
 }
 
-static int do_send_sigqueue(int sig, struct sigqueue *q, struct task_struct *t,
-                               struct sigpending *pending)
+int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
 {
-       handle_stop_signal(sig, t);
+       int sig = q->info.si_signo;
+       struct sigpending *pending;
+       unsigned long flags;
+       int ret;
+
+       BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
+
+       ret = -1;
+       if (!likely(lock_task_sighand(t, &flags)))
+               goto ret;
 
+       ret = 1; /* the signal is ignored */
+       if (!prepare_signal(sig, t))
+               goto out;
+
+       ret = 0;
        if (unlikely(!list_empty(&q->list))) {
                /*
                 * If an SI_TIMER entry is already queue just increment
                 * the overrun count.
                 */
-
                BUG_ON(q->info.si_code != SI_TIMER);
                q->info.si_overrun++;
-               return 0;
+               goto out;
        }
 
-       if (sig_ignored(t, sig))
-               return 1;
-
        signalfd_notify(t, sig);
+       pending = group ? &t->signal->shared_pending : &t->pending;
        list_add_tail(&q->list, &pending->list);
        sigaddset(&pending->signal, sig);
-       return 0;
-}
-
-int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
-{
-       unsigned long flags;
-       int ret = -1;
-
-       BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
-
-       /*
-        * The rcu based delayed sighand destroy makes it possible to
-        * run this without tasklist lock held. The task struct itself
-        * cannot go away as create_timer did get_task_struct().
-        *
-        * We return -1, when the task is marked exiting, so
-        * posix_timer_event can redirect it to the group leader
-        */
-       if (!likely(lock_task_sighand(p, &flags)))
-               goto out_err;
-
-       ret = do_send_sigqueue(sig, q, p, &p->pending);
-
-       if (!sigismember(&p->blocked, sig))
-               signal_wake_up(p, sig == SIGKILL);
-
-       unlock_task_sighand(p, &flags);
-out_err:
-       return ret;
-}
-
-int
-send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
-{
-       unsigned long flags;
-       int ret;
-
-       BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
-
-       /* Since it_lock is held, p->sighand cannot be NULL. */
-       spin_lock_irqsave(&p->sighand->siglock, flags);
-
-       ret = do_send_sigqueue(sig, q, p, &p->signal->shared_pending);
-
-       __group_complete_signal(sig, p);
-
-       spin_unlock_irqrestore(&p->sighand->siglock, flags);
-
+       complete_signal(sig, t, group);
+out:
+       unlock_task_sighand(t, &flags);
+ret:
        return ret;
 }
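
[Editor's note: the merged send_sigqueue() distinguishes three outcomes: -1 when the target's sighand could not be locked (the task is exiting), 1 when prepare_signal() says the signal is ignored, and 0 when it was queued or an already-pending SI_TIMER got its overrun bumped. A hedged caller sketch, with made-up names, of how a posix-timer style user might react:

        /* Hypothetical caller, not from the patch. */
        ret = send_sigqueue(timer_sigq, target, shared);
        if (ret < 0) {
                /* target is exiting; retarget, e.g. to the group leader */
        } else if (ret > 0) {
                /* the signal is ignored; nothing was queued */
        } else {
                /* queued, or an already-pending SI_TIMER got an overrun */
        }
]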
 
@@ -1674,7 +1619,8 @@ static int do_signal_stop(int signr)
        } else {
                struct task_struct *t;
 
-               if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
+               if (unlikely((sig->flags & (SIGNAL_STOP_DEQUEUED | SIGNAL_UNKILLABLE))
+                                        != SIGNAL_STOP_DEQUEUED) ||
                    unlikely(signal_group_exit(sig)))
                        return 0;
                /*
@@ -1764,7 +1710,11 @@ relock:
        try_to_freeze();
 
        spin_lock_irq(&sighand->siglock);
-
+       /*
+        * Every stopped thread goes here after wakeup. Check to see if
+        * we should notify the parent, prepare_signal(SIGCONT) encodes
+        * the CLD_ si_code into SIGNAL_CLD_MASK bits.
+        */
        if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
                int why = (signal->flags & SIGNAL_STOP_CONTINUED)
                                ? CLD_CONTINUED : CLD_STOPPED;
@@ -1816,7 +1766,8 @@ relock:
                /*
                 * Global init gets no signals it doesn't want.
                 */
-               if (is_global_init(current))
+               if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
+                   !signal_group_exit(signal))
                        continue;
 
                if (sig_kernel_stop(signr)) {
@@ -1859,9 +1810,10 @@ relock:
                 * Anything else is fatal, maybe with a core dump.
                 */
                current->flags |= PF_SIGNALED;
-               if ((signr != SIGKILL) && print_fatal_signals)
-                       print_fatal_signal(regs, signr);
+
                if (sig_kernel_coredump(signr)) {
+                       if (print_fatal_signals)
+                               print_fatal_signal(regs, signr);
                        /*
                         * If it was able to dump core, this kills all
                         * other threads in the group and synchronizes with
@@ -2223,6 +2175,7 @@ static int do_tkill(int tgid, int pid, int sig)
        int error;
        struct siginfo info;
        struct task_struct *p;
+       unsigned long flags;
 
        error = -ESRCH;
        info.si_signo = sig;
@@ -2231,22 +2184,24 @@ static int do_tkill(int tgid, int pid, int sig)
        info.si_pid = task_tgid_vnr(current);
        info.si_uid = current->uid;
 
-       read_lock(&tasklist_lock);
+       rcu_read_lock();
        p = find_task_by_vpid(pid);
        if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
                error = check_kill_permission(sig, &info, p);
                /*
                 * The null signal is a permissions and process existence
                 * probe.  No signal is actually delivered.
+                *
+                * If lock_task_sighand() fails we pretend the task dies
+                * after receiving the signal. The window is tiny, and the
+                * signal is private anyway.
                 */
-               if (!error && sig && p->sighand) {
-                       spin_lock_irq(&p->sighand->siglock);
-                       handle_stop_signal(sig, p);
+               if (!error && sig && lock_task_sighand(p, &flags)) {
                        error = specific_send_sig_info(sig, &info, p);
-                       spin_unlock_irq(&p->sighand->siglock);
+                       unlock_task_sighand(p, &flags);
                }
        }
-       read_unlock(&tasklist_lock);
+       rcu_read_unlock();
 
        return error;
 }
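
[Editor's note: the tkill path now relies on RCU plus lock_task_sighand() instead of tasklist_lock: the RCU read-side section keeps the task_struct returned by find_task_by_vpid() from going away, and lock_task_sighand() fails cleanly if the task has already released its signal handlers. A condensed sketch of that pattern, same calls as above, trimmed for illustration:

        rcu_read_lock();
        p = find_task_by_vpid(pid);
        if (p && lock_task_sighand(p, &flags)) {
                /* ->sighand is pinned here; queuing a private signal is safe */
                unlock_task_sighand(p, &flags);
        }
        rcu_read_unlock();
]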
@@ -2586,7 +2541,7 @@ asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
 
        current->state = TASK_INTERRUPTIBLE;
        schedule();
-       set_thread_flag(TIF_RESTORE_SIGMASK);
+       set_restore_sigmask();
        return -ERESTARTNOHAND;
 }
 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */