Merge branch 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jgarzi...
[sfrench/cifs-2.6.git] / kernel / signal.c
index 657aa16d97cbfa860b73463bee90a15830fa8b31..84917fe507f77b8ff949af7f958e11ac2ed423b7 100644
@@ -733,13 +733,13 @@ static void print_fatal_signal(struct pt_regs *regs, int signr)
                current->comm, task_pid_nr(current), signr);
 
 #if defined(__i386__) && !defined(__arch_um__)
-       printk("code at %08lx: ", regs->eip);
+       printk("code at %08lx: ", regs->ip);
        {
                int i;
                for (i = 0; i < 16; i++) {
                        unsigned char insn;
 
-                       __get_user(insn, (unsigned char *)(regs->eip + i));
+                       __get_user(insn, (unsigned char *)(regs->ip + i));
                        printk("%02x ", insn);
                }
        }
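
The change above is only the rename from regs->eip to the unified regs->ip field of x86 pt_regs; the dump logic is untouched: fetch the 16 bytes at the faulting instruction pointer one at a time with __get_user(), so a fault on any single byte cannot abort the whole dump. A rough userspace analogue of the output (my sketch: it reads a function's own code bytes directly, where the kernel must probe with __get_user() because any fetch may fault):

    #include <stdio.h>

    int main(void)
    {
        /* Print "code at <addr>:" plus 16 opcode bytes, the way
         * print_fatal_signal() dumps the bytes at regs->ip. */
        const unsigned char *ip = (const unsigned char *)&main;
        int i;

        printf("code at %p: ", (void *)ip);
        for (i = 0; i < 16; i++)
            printf("%02x ", ip[i]);
        printf("\n");
        return 0;
    }
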
@@ -911,27 +911,6 @@ __group_complete_signal(int sig, struct task_struct *p)
                        } while_each_thread(p, t);
                        return;
                }
-
-               /*
-                * There will be a core dump.  We make all threads other
-                * than the chosen one go into a group stop so that nothing
-                * happens until it gets scheduled, takes the signal off
-                * the shared queue, and does the core dump.  This is a
-                * little more complicated than strictly necessary, but it
-                * keeps the signal state that winds up in the core dump
-                * unchanged from the death state, e.g. which thread had
-                * the core-dump signal unblocked.
-                */
-               rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
-               rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
-               p->signal->group_stop_count = 0;
-               p->signal->group_exit_task = t;
-               p = t;
-               do {
-                       p->signal->group_stop_count++;
-                       signal_wake_up(t, t == p);
-               } while_each_thread(p, t);
-               return;
        }
 
        /*
@@ -978,7 +957,6 @@ void zap_other_threads(struct task_struct *p)
 {
        struct task_struct *t;
 
-       p->signal->flags = SIGNAL_GROUP_EXIT;
        p->signal->group_stop_count = 0;
 
        for (t = next_thread(p); t != p; t = next_thread(t)) {
@@ -994,10 +972,11 @@ void zap_other_threads(struct task_struct *p)
        }
 }
 
-int fastcall __fatal_signal_pending(struct task_struct *tsk)
+int __fatal_signal_pending(struct task_struct *tsk)
 {
        return sigismember(&tsk->pending.signal, SIGKILL);
 }
+EXPORT_SYMBOL(__fatal_signal_pending);
 
 /*
  * Must be called under rcu_read_lock() or with tasklist_lock read-held.
@@ -1039,7 +1018,7 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 }
 
 /*
- * kill_pgrp_info() sends a signal to a process group: this is what the tty
+ * __kill_pgrp_info() sends a signal to a process group: this is what the tty
  * control characters do (^C, ^Z etc)
  */
 
@@ -1058,30 +1037,28 @@ int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
        return success ? 0 : retval;
 }
 
-int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
-{
-       int retval;
-
-       read_lock(&tasklist_lock);
-       retval = __kill_pgrp_info(sig, info, pgrp);
-       read_unlock(&tasklist_lock);
-
-       return retval;
-}
-
 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
 {
-       int error;
+       int error = -ESRCH;
        struct task_struct *p;
 
        rcu_read_lock();
        if (unlikely(sig_needs_tasklist(sig)))
                read_lock(&tasklist_lock);
 
+retry:
        p = pid_task(pid, PIDTYPE_PID);
-       error = -ESRCH;
-       if (p)
+       if (p) {
                error = group_send_sig_info(sig, info, p);
+               if (unlikely(error == -ESRCH))
+                       /*
+                        * The task was unhashed in between; try again.
+                        * If it is dead, pid_task() will return NULL;
+                        * if we race with de_thread(), it will find
+                        * the new leader.
+                        */
+                       goto retry;
+       }
 
        if (unlikely(sig_needs_tasklist(sig)))
                read_unlock(&tasklist_lock);
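
The retry closes a narrow race: group_send_sig_info() can return -ESRCH when the task is unhashed between the pid_task() lookup and the send. Redoing the lookup finds the new leader after a concurrent de_thread(), or NULL once the task is truly dead. The generic shape of the pattern, as a compilable sketch with hypothetical lookup()/act() stubs (not kernel API):

    #include <errno.h>
    #include <stddef.h>

    struct obj;

    static struct obj *lookup(void) { return NULL; }          /* stub: gone */
    static int act(struct obj *o) { (void)o; return -ESRCH; } /* stub */

    int main(void)
    {
        int error = -ESRCH;
        struct obj *o;

    retry:
        o = lookup();          /* NULL once the target really is gone */
        if (o) {
            error = act(o);
            if (error == -ESRCH)
                goto retry;    /* vanished in between: look it up again */
        }
        return error == -ESRCH ? 0 : 1;
    }
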
@@ -1146,14 +1123,22 @@ EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
 static int kill_something_info(int sig, struct siginfo *info, int pid)
 {
        int ret;
-       rcu_read_lock();
-       if (!pid) {
-               ret = kill_pgrp_info(sig, info, task_pgrp(current));
-       } else if (pid == -1) {
+
+       if (pid > 0) {
+               rcu_read_lock();
+               ret = kill_pid_info(sig, info, find_vpid(pid));
+               rcu_read_unlock();
+               return ret;
+       }
+
+       read_lock(&tasklist_lock);
+       if (pid != -1) {
+               ret = __kill_pgrp_info(sig, info,
+                               pid ? find_vpid(-pid) : task_pgrp(current));
+       } else {
                int retval = 0, count = 0;
                struct task_struct * p;
 
-               read_lock(&tasklist_lock);
                for_each_process(p) {
                        if (p->pid > 1 && !same_thread_group(p, current)) {
                                int err = group_send_sig_info(sig, info, p);
@@ -1162,14 +1147,10 @@ static int kill_something_info(int sig, struct siginfo *info, int pid)
                                        retval = err;
                        }
                }
-               read_unlock(&tasklist_lock);
                ret = count ? retval : -ESRCH;
-       } else if (pid < 0) {
-               ret = kill_pgrp_info(sig, info, find_vpid(-pid));
-       } else {
-               ret = kill_pid_info(sig, info, find_vpid(pid));
        }
-       rcu_read_unlock();
+       read_unlock(&tasklist_lock);
+
        return ret;
 }
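
The rewritten dispatch mirrors kill(2)'s pid convention: pid > 0 signals one process (now under RCU alone), pid == 0 the caller's process group and pid < -1 the group -pid (both via __kill_pgrp_info() under tasklist_lock), and pid == -1 every process the caller may signal except init and the caller's own thread group. The same convention seen from userspace, as a small runnable example (signal 0 only probes for existence and permission):

    #include <signal.h>
    #include <stdio.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
        pid_t child = fork();

        if (child == 0) {          /* child: wait to be signalled */
            pause();
            _exit(0);
        }

        kill(child, SIGTERM);      /* pid > 0: exactly one process */
        kill(0, 0);                /* pid == 0: our process group  */
        kill(-getpgrp(), 0);       /* pid < -1: the group -pid     */
        /* kill(-1, sig) would reach everything we may signal. */

        waitpid(child, NULL, 0);
        printf("signalled child %d, probed group %d\n", child, getpgrp());
        return 0;
    }
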
 
@@ -1217,20 +1198,6 @@ send_sig(int sig, struct task_struct *p, int priv)
        return send_sig_info(sig, __si_special(priv), p);
 }
 
-/*
- * This is the entry point for "process-wide" signals.
- * They will go to an appropriate thread in the thread group.
- */
-int
-send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
-{
-       int ret;
-       read_lock(&tasklist_lock);
-       ret = group_send_sig_info(sig, info, p);
-       read_unlock(&tasklist_lock);
-       return ret;
-}
-
 void
 force_sig(int sig, struct task_struct *p)
 {
@@ -1258,7 +1225,13 @@ force_sigsegv(int sig, struct task_struct *p)
 
 int kill_pgrp(struct pid *pid, int sig, int priv)
 {
-       return kill_pgrp_info(sig, __si_special(priv), pid);
+       int ret;
+
+       read_lock(&tasklist_lock);
+       ret = __kill_pgrp_info(sig, __si_special(priv), pid);
+       read_unlock(&tasklist_lock);
+
+       return ret;
 }
 EXPORT_SYMBOL(kill_pgrp);
 
@@ -1577,11 +1550,6 @@ static inline int may_ptrace_stop(void)
 {
        if (!likely(current->ptrace & PT_PTRACED))
                return 0;
-
-       if (unlikely(current->parent == current->real_parent &&
-                   (current->ptrace & PT_ATTACHED)))
-               return 0;
-
        /*
         * Are we in the middle of do_coredump?
         * If so and our tracer is also part of the coredump stopping
@@ -1598,6 +1566,17 @@ static inline int may_ptrace_stop(void)
        return 1;
 }
 
+/*
+ * Return nonzero if there is a SIGKILL that should be waking us up.
+ * Called with the siglock held.
+ */
+static int sigkill_pending(struct task_struct *tsk)
+{
+       return ((sigismember(&tsk->pending.signal, SIGKILL) ||
+                sigismember(&tsk->signal->shared_pending.signal, SIGKILL)) &&
+               !unlikely(sigismember(&tsk->blocked, SIGKILL)));
+}
+
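
sigkill_pending() computes "SIGKILL pending, in the per-thread or the shared set, and not blocked". A userspace illustration of that predicate shape (my analogue: SIGUSR1 stands in for SIGKILL, which can never be blocked or left pending from userspace):

    #include <signal.h>
    #include <stdio.h>

    int main(void)
    {
        sigset_t mask, pending;

        sigemptyset(&mask);
        sigaddset(&mask, SIGUSR1);
        sigprocmask(SIG_BLOCK, &mask, NULL);  /* block, then queue one */
        raise(SIGUSR1);
        sigpending(&pending);

        /* pending && !blocked: here 1 && !1 == 0, so no wakeup. */
        printf("pending=%d blocked=%d -> wake=%d\n",
               sigismember(&pending, SIGUSR1),
               sigismember(&mask, SIGUSR1),
               sigismember(&pending, SIGUSR1) &&
               !sigismember(&mask, SIGUSR1));
        return 0;
    }
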
 /*
  * This must be called with current->sighand->siglock held.
  *
@@ -1606,11 +1585,31 @@ static inline int may_ptrace_stop(void)
  * That makes it a way to test a stopped process for
  * being ptrace-stopped vs being job-control-stopped.
  *
- * If we actually decide not to stop at all because the tracer is gone,
- * we leave nostop_code in current->exit_code.
+ * If we actually decide not to stop at all because the tracer
+ * is gone, we keep current->exit_code unless clear_code is set.
  */
-static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
+static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
 {
+       int killed = 0;
+
+       if (arch_ptrace_stop_needed(exit_code, info)) {
+               /*
+                * The arch code has something special to do before a
+                * ptrace stop.  This is allowed to block, e.g. for faults
+                * on user stack pages.  We can't keep the siglock while
+                * calling arch_ptrace_stop, so we must release it now.
+                * To preserve proper semantics, we must do this before
+                * any signal bookkeeping like checking group_stop_count.
+                * Meanwhile, a SIGKILL could come in before we retake the
+                * siglock.  That must prevent us from sleeping in TASK_TRACED.
+                * So after regaining the lock, we must check for SIGKILL.
+                */
+               spin_unlock_irq(&current->sighand->siglock);
+               arch_ptrace_stop(exit_code, info);
+               spin_lock_irq(&current->sighand->siglock);
+               killed = sigkill_pending(current);
+       }
+
        /*
         * If there is a group stop in progress,
         * we must participate in the bookkeeping.
@@ -1622,22 +1621,23 @@ static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
        current->exit_code = exit_code;
 
        /* Let the debugger run.  */
-       set_current_state(TASK_TRACED);
+       __set_current_state(TASK_TRACED);
        spin_unlock_irq(&current->sighand->siglock);
        try_to_freeze();
        read_lock(&tasklist_lock);
-       if (may_ptrace_stop()) {
+       if (!unlikely(killed) && may_ptrace_stop()) {
                do_notify_parent_cldstop(current, CLD_TRAPPED);
                read_unlock(&tasklist_lock);
                schedule();
        } else {
                /*
                 * By the time we got the lock, our tracer went away.
-                * Don't stop here.
+                * Don't drop the lock yet; another tracer may come.
                 */
+               __set_current_state(TASK_RUNNING);
+               if (clear_code)
+                       current->exit_code = 0;
                read_unlock(&tasklist_lock);
-               set_current_state(TASK_RUNNING);
-               current->exit_code = nostop_code;
        }
 
        /*
@@ -1670,7 +1670,7 @@ void ptrace_notify(int exit_code)
 
        /* Let the debugger run.  */
        spin_lock_irq(&current->sighand->siglock);
-       ptrace_stop(exit_code, 0, &info);
+       ptrace_stop(exit_code, 1, &info);
        spin_unlock_irq(&current->sighand->siglock);
 }
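
The arch_ptrace_stop() handling added to ptrace_stop() above is the classic drop-the-lock-to-block pattern: release the siglock around a call that may sleep, retake it, and revalidate whatever may have changed while unlocked (here via sigkill_pending()) before committing to sleep in TASK_TRACED. A generic pthreads sketch of that shape (mine, not the kernel code):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static bool killed;    /* written elsewhere, under the lock */

    static void blocking_hook(void)
    {
        /* may sleep, like arch_ptrace_stop() */
    }

    int main(void)
    {
        bool saw_kill;

        pthread_mutex_lock(&lock);
        /* Can't block while holding the lock: drop it, call, retake it. */
        pthread_mutex_unlock(&lock);
        blocking_hook();
        pthread_mutex_lock(&lock);
        /* A "kill" may have arrived while we were unlocked: recheck
         * before sleeping, as ptrace_stop() does with sigkill_pending(). */
        saw_kill = killed;
        pthread_mutex_unlock(&lock);

        if (saw_kill)
            return 0;    /* bail out instead of stopping */
        /* ... otherwise proceed to stop ... */
        return 0;
    }
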
 
@@ -1708,9 +1708,6 @@ static int do_signal_stop(int signr)
        struct signal_struct *sig = current->signal;
        int stop_count;
 
-       if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
-               return 0;
-
        if (sig->group_stop_count > 0) {
                /*
                 * There is a group stop in progress.  We don't need to
@@ -1718,12 +1715,15 @@ static int do_signal_stop(int signr)
                 */
                stop_count = --sig->group_stop_count;
        } else {
+               struct task_struct *t;
+
+               if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
+                   unlikely(sig->group_exit_task))
+                       return 0;
                /*
                 * There is no group stop already in progress.
                 * We must initiate one now.
                 */
-               struct task_struct *t;
-
                sig->group_exit_code = signr;
 
                stop_count = 0;
@@ -1733,7 +1733,7 @@ static int do_signal_stop(int signr)
                         * stop is always done with the siglock held,
                         * so this check has no races.
                         */
-                       if (!t->exit_state &&
+                       if (!(t->flags & PF_EXITING) &&
                            !task_is_stopped_or_traced(t)) {
                                stop_count++;
                                signal_wake_up(t, 0);
@@ -1751,47 +1751,6 @@ static int do_signal_stop(int signr)
        return 1;
 }
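
Group stop is a countdown: the initiator counts each peer that is neither exiting (now tested with PF_EXITING instead of exit_state, so a thread is excluded as soon as exit_signals() marks it) nor already stopped or traced into group_stop_count and wakes it; every participant decrements the count, and whoever reaches zero flips the group to SIGNAL_STOP_STOPPED. A minimal countdown analogue (mine, with C11 atomics; the kernel serializes with the siglock instead):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int group_stop_count;
    static atomic_bool group_stopped;

    static void participate_in_stop(void)
    {
        /* The last decrementer marks the whole group stopped. */
        if (atomic_fetch_sub(&group_stop_count, 1) == 1)
            atomic_store(&group_stopped, true);
    }

    int main(void)
    {
        atomic_store(&group_stop_count, 2);   /* initiator's count */
        participate_in_stop();
        participate_in_stop();
        printf("stopped=%d\n", (int)atomic_load(&group_stopped));
        return 0;
    }
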
 
-/*
- * Do appropriate magic when group_stop_count > 0.
- * We return nonzero if we stopped, after releasing the siglock.
- * We return zero if we still hold the siglock and should look
- * for another signal without checking group_stop_count again.
- */
-static int handle_group_stop(void)
-{
-       int stop_count;
-
-       if (current->signal->group_exit_task == current) {
-               /*
-                * Group stop is so we can do a core dump,
-                * We are the initiating thread, so get on with it.
-                */
-               current->signal->group_exit_task = NULL;
-               return 0;
-       }
-
-       if (current->signal->flags & SIGNAL_GROUP_EXIT)
-               /*
-                * Group stop is so another thread can do a core dump,
-                * or else we are racing against a death signal.
-                * Just punt the stop so we can get the next signal.
-                */
-               return 0;
-
-       /*
-        * There is a group stop in progress.  We stop
-        * without any associated signal being in our queue.
-        */
-       stop_count = --current->signal->group_stop_count;
-       if (stop_count == 0)
-               current->signal->flags = SIGNAL_STOP_STOPPED;
-       current->exit_code = current->signal->group_exit_code;
-       set_current_state(TASK_STOPPED);
-       spin_unlock_irq(&current->sighand->siglock);
-       finish_stop(stop_count);
-       return 1;
-}
-
 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
                          struct pt_regs *regs, void *cookie)
 {
@@ -1806,7 +1765,7 @@ relock:
                struct k_sigaction *ka;
 
                if (unlikely(current->signal->group_stop_count > 0) &&
-                   handle_group_stop())
+                   do_signal_stop(0))
                        goto relock;
 
                signr = dequeue_signal(current, mask, info);
@@ -1818,7 +1777,7 @@ relock:
                        ptrace_signal_deliver(regs, cookie);
 
                        /* Let the debugger run.  */
-                       ptrace_stop(signr, signr, info);
+                       ptrace_stop(signr, 0, info);
 
                        /* We're back.  Did the debugger cancel the sig?  */
                        signr = current->exit_code;
@@ -1935,6 +1894,48 @@ relock:
        return signr;
 }
 
+void exit_signals(struct task_struct *tsk)
+{
+       int group_stop = 0;
+       struct task_struct *t;
+
+       if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
+               tsk->flags |= PF_EXITING;
+               return;
+       }
+
+       spin_lock_irq(&tsk->sighand->siglock);
+       /*
+        * From now this task is not visible for group-wide signals,
+        * see wants_signal(), do_signal_stop().
+        */
+       tsk->flags |= PF_EXITING;
+       if (!signal_pending(tsk))
+               goto out;
+
+       /* It could be that __group_complete_signal() chose us to
+        * notify about a group-wide signal.  Another thread should
+        * be woken now to take the signal, since we will not.
+        */
+       for (t = tsk; (t = next_thread(t)) != tsk; )
+               if (!signal_pending(t) && !(t->flags & PF_EXITING))
+                       recalc_sigpending_and_wake(t);
+
+       if (unlikely(tsk->signal->group_stop_count) &&
+                       !--tsk->signal->group_stop_count) {
+               tsk->signal->flags = SIGNAL_STOP_STOPPED;
+               group_stop = 1;
+       }
+out:
+       spin_unlock_irq(&tsk->sighand->siglock);
+
+       if (unlikely(group_stop)) {
+               read_lock(&tasklist_lock);
+               do_notify_parent_cldstop(tsk, CLD_STOPPED);
+               read_unlock(&tasklist_lock);
+       }
+}
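
exit_signals() is meant to replace a bare "tsk->flags |= PF_EXITING" early in do_exit(): the flag is set under the siglock, notification duty for any pending group-wide signal is handed to a surviving thread through recalc_sigpending_and_wake(), and the exiting thread still pays its share of an in-progress group stop. A hedged sketch of that call site (kernel-context fragment, not standalone-buildable; the rest of the exit path is elided):

    void do_exit_sketch(struct task_struct *tsk)
    {
        exit_signals(tsk);    /* sets PF_EXITING */

        /* From here on the task is invisible to group-wide signal
         * delivery; wants_signal() and do_signal_stop() skip it. */
    }
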
+
 EXPORT_SYMBOL(recalc_sigpending);
 EXPORT_SYMBOL_GPL(dequeue_signal);
 EXPORT_SYMBOL(flush_signals);