Linux 6.9-rc4
[sfrench/cifs-2.6.git] / kernel / ptrace.c
index 5710d07e67cf7f0f94342b0194939d479871b821..d5f89f9ef29f65f137ef8a6978e6dbe75564056b 100644 (file)
 #include <linux/cn_proc.h>
 #include <linux/compat.h>
 #include <linux/sched/signal.h>
+#include <linux/minmax.h>
+#include <linux/syscall_user_dispatch.h>
+
+#include <asm/syscall.h>       /* for syscall_get_* */
 
 /*
  * Access another process' address space via ptrace.
@@ -55,7 +59,7 @@ int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
                return 0;
        }
 
-       ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
+       ret = access_remote_vm(mm, addr, buf, len, gup_flags);
        mmput(mm);
 
        return ret;
@@ -79,9 +83,7 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent,
  */
 static void ptrace_link(struct task_struct *child, struct task_struct *new_parent)
 {
-       rcu_read_lock();
-       __ptrace_link(child, new_parent, __task_cred(new_parent));
-       rcu_read_unlock();
+       __ptrace_link(child, new_parent, current_cred());
 }
 
 /**
@@ -117,7 +119,10 @@ void __ptrace_unlink(struct task_struct *child)
        const struct cred *old_cred;
        BUG_ON(!child->ptrace);
 
-       clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+       clear_task_syscall_work(child, SYSCALL_TRACE);
+#if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU)
+       clear_task_syscall_work(child, SYSCALL_EMU);
+#endif
 
        child->parent = child->real_parent;
        list_del_init(&child->ptrace_entry);
@@ -140,20 +145,9 @@ void __ptrace_unlink(struct task_struct *child)
         */
        if (!(child->flags & PF_EXITING) &&
            (child->signal->flags & SIGNAL_STOP_STOPPED ||
-            child->signal->group_stop_count)) {
+            child->signal->group_stop_count))
                child->jobctl |= JOBCTL_STOP_PENDING;
 
-               /*
-                * This is only possible if this thread was cloned by the
-                * traced task running in the stopped group, set the signal
-                * for the future reports.
-                * FIXME: we should change ptrace_init_task() to handle this
-                * case.
-                */
-               if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
-                       child->jobctl |= SIGSTOP;
-       }
-
        /*
         * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
         * @child in the butt.  Note that @resume should be used iff @child
@@ -166,7 +160,27 @@ void __ptrace_unlink(struct task_struct *child)
        spin_unlock(&child->sighand->siglock);
 }
 
-/* Ensure that nothing can wake it up, even SIGKILL */
+static bool looks_like_a_spurious_pid(struct task_struct *task)
+{
+       if (task->exit_code != ((PTRACE_EVENT_EXEC << 8) | SIGTRAP))
+               return false;
+
+       if (task_pid_vnr(task) == task->ptrace_message)
+               return false;
+       /*
+        * The tracee changed its pid but the PTRACE_EVENT_EXEC event
+        * was not wait()'ed, most probably the debugger targets the
+        * old leader, which was destroyed in de_thread().
+        */
+       return true;
+}
+
+/*
+ * Ensure that nothing can wake it up, even SIGKILL
+ *
+ * A task is switched to this state while a ptrace operation is in progress,
+ * so that the ptrace operation is uninterruptible.
+ */
 static bool ptrace_freeze_traced(struct task_struct *task)
 {
        bool ret = false;
@@ -176,8 +190,9 @@ static bool ptrace_freeze_traced(struct task_struct *task)
                return ret;
 
        spin_lock_irq(&task->sighand->siglock);
-       if (task_is_traced(task) && !__fatal_signal_pending(task)) {
-               task->state = __TASK_TRACED;
+       if (task_is_traced(task) && !looks_like_a_spurious_pid(task) &&
+           !__fatal_signal_pending(task)) {
+               task->jobctl |= JOBCTL_PTRACE_FROZEN;
                ret = true;
        }
        spin_unlock_irq(&task->sighand->siglock);
@@ -187,23 +202,21 @@ static bool ptrace_freeze_traced(struct task_struct *task)
 
 static void ptrace_unfreeze_traced(struct task_struct *task)
 {
-       if (task->state != __TASK_TRACED)
-               return;
-
-       WARN_ON(!task->ptrace || task->parent != current);
+       unsigned long flags;
 
        /*
-        * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
-        * Recheck state under the lock to close this race.
+        * The child may be awake and may have cleared
+        * JOBCTL_PTRACE_FROZEN (see ptrace_resume).  The child will
+        * not set JOBCTL_PTRACE_FROZEN or enter __TASK_TRACED anew.
         */
-       spin_lock_irq(&task->sighand->siglock);
-       if (task->state == __TASK_TRACED) {
-               if (__fatal_signal_pending(task))
+       if (lock_task_sighand(task, &flags)) {
+               task->jobctl &= ~JOBCTL_PTRACE_FROZEN;
+               if (__fatal_signal_pending(task)) {
+                       task->jobctl &= ~JOBCTL_TRACED;
                        wake_up_state(task, __TASK_TRACED);
-               else
-                       task->state = TASK_TRACED;
+               }
+               unlock_task_sighand(task, &flags);
        }
-       spin_unlock_irq(&task->sighand->siglock);
 }
 
 /**
@@ -236,7 +249,6 @@ static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
         */
        read_lock(&tasklist_lock);
        if (child->ptrace && child->parent == current) {
-               WARN_ON(child->state == __TASK_TRACED);
                /*
                 * child->sighand can't be NULL, release_task()
                 * does ptrace_unlink() before __exit_signal().
@@ -246,27 +258,18 @@ static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
        }
        read_unlock(&tasklist_lock);
 
-       if (!ret && !ignore_state) {
-               if (!wait_task_inactive(child, __TASK_TRACED)) {
-                       /*
-                        * This can only happen if may_ptrace_stop() fails and
-                        * ptrace_stop() changes ->state back to TASK_RUNNING,
-                        * so we should not worry about leaking __TASK_TRACED.
-                        */
-                       WARN_ON(child->state == __TASK_TRACED);
-                       ret = -ESRCH;
-               }
-       }
+       if (!ret && !ignore_state &&
+           WARN_ON_ONCE(!wait_task_inactive(child, __TASK_TRACED|TASK_FROZEN)))
+               ret = -ESRCH;
 
        return ret;
 }
 
-static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
+static bool ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
 {
        if (mode & PTRACE_MODE_NOAUDIT)
-               return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
-       else
-               return has_ns_capability(current, ns, CAP_SYS_PTRACE);
+               return ns_capable_noaudit(ns, CAP_SYS_PTRACE);
+       return ns_capable(ns, CAP_SYS_PTRACE);
 }
 
 /* Returns 0 on success, -errno on denial. */
@@ -324,6 +327,16 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
        return -EPERM;
 ok:
        rcu_read_unlock();
+       /*
+        * If a task drops privileges and becomes nondumpable (through a syscall
+        * like setresuid()) while we are trying to access it, we must ensure
+        * that the dumpability is read after the credentials; otherwise,
+        * we may be able to attach to a task that we shouldn't be able to
+        * attach to (as if the task had dropped privileges without becoming
+        * nondumpable).
+        * Pairs with a write barrier in commit_creds().
+        */
+       smp_rmb();
        mm = task->mm;
        if (mm &&
            ((get_dumpable(mm) != SUID_DUMP_USER) &&
@@ -342,66 +355,33 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
        return !err;
 }
 
-static int ptrace_attach(struct task_struct *task, long request,
-                        unsigned long addr,
-                        unsigned long flags)
+static int check_ptrace_options(unsigned long data)
 {
-       bool seize = (request == PTRACE_SEIZE);
-       int retval;
-
-       retval = -EIO;
-       if (seize) {
-               if (addr != 0)
-                       goto out;
-               if (flags & ~(unsigned long)PTRACE_O_MASK)
-                       goto out;
-               flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
-       } else {
-               flags = PT_PTRACED;
-       }
-
-       audit_ptrace(task);
-
-       retval = -EPERM;
-       if (unlikely(task->flags & PF_KTHREAD))
-               goto out;
-       if (same_thread_group(task, current))
-               goto out;
-
-       /*
-        * Protect exec's credential calculations against our interference;
-        * SUID, SGID and LSM creds get determined differently
-        * under ptrace.
-        */
-       retval = -ERESTARTNOINTR;
-       if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
-               goto out;
+       if (data & ~(unsigned long)PTRACE_O_MASK)
+               return -EINVAL;
 
-       task_lock(task);
-       retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
-       task_unlock(task);
-       if (retval)
-               goto unlock_creds;
+       if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) {
+               if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) ||
+                   !IS_ENABLED(CONFIG_SECCOMP))
+                       return -EINVAL;
 
-       write_lock_irq(&tasklist_lock);
-       retval = -EPERM;
-       if (unlikely(task->exit_state))
-               goto unlock_tasklist;
-       if (task->ptrace)
-               goto unlock_tasklist;
+               if (!capable(CAP_SYS_ADMIN))
+                       return -EPERM;
 
-       if (seize)
-               flags |= PT_SEIZED;
-       task->ptrace = flags;
+               if (seccomp_mode(&current->seccomp) != SECCOMP_MODE_DISABLED ||
+                   current->ptrace & PT_SUSPEND_SECCOMP)
+                       return -EPERM;
+       }
+       return 0;
+}
 
-       ptrace_link(task, current);
+static inline void ptrace_set_stopped(struct task_struct *task, bool seize)
+{
+       guard(spinlock)(&task->sighand->siglock);
 
        /* SEIZE doesn't trap tracee on attach */
        if (!seize)
-               send_sig_info(SIGSTOP, SEND_SIG_PRIV, task);
-
-       spin_lock(&task->sighand->siglock);
-
+               send_signal_locked(SIGSTOP, SEND_SIG_PRIV, task, PIDTYPE_PID);
        /*
         * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
         * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
@@ -420,30 +400,82 @@ static int ptrace_attach(struct task_struct *task, long request,
         * in and out of STOPPED are protected by siglock.
         */
        if (task_is_stopped(task) &&
-           task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
+           task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING)) {
+               task->jobctl &= ~JOBCTL_STOPPED;
                signal_wake_up_state(task, __TASK_STOPPED);
+       }
+}
 
-       spin_unlock(&task->sighand->siglock);
+static int ptrace_attach(struct task_struct *task, long request,
+                        unsigned long addr,
+                        unsigned long flags)
+{
+       bool seize = (request == PTRACE_SEIZE);
+       int retval;
 
-       retval = 0;
-unlock_tasklist:
-       write_unlock_irq(&tasklist_lock);
-unlock_creds:
-       mutex_unlock(&task->signal->cred_guard_mutex);
-out:
-       if (!retval) {
+       if (seize) {
+               if (addr != 0)
+                       return -EIO;
                /*
-                * We do not bother to change retval or clear JOBCTL_TRAPPING
-                * if wait_on_bit() was interrupted by SIGKILL. The tracer will
-                * not return to user-mode, it will exit and clear this bit in
-                * __ptrace_unlink() if it wasn't already cleared by the tracee;
-                * and until then nobody can ptrace this task.
+                * This duplicates the check in check_ptrace_options() because
+                * ptrace_attach() and ptrace_setoptions() have historically
+                * used different error codes for unknown ptrace options.
                 */
-               wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, TASK_KILLABLE);
-               proc_ptrace_connector(task, PTRACE_ATTACH);
+               if (flags & ~(unsigned long)PTRACE_O_MASK)
+                       return -EIO;
+
+               retval = check_ptrace_options(flags);
+               if (retval)
+                       return retval;
+               flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
+       } else {
+               flags = PT_PTRACED;
+       }
+
+       audit_ptrace(task);
+
+       if (unlikely(task->flags & PF_KTHREAD))
+               return -EPERM;
+       if (same_thread_group(task, current))
+               return -EPERM;
+
+       /*
+        * Protect exec's credential calculations against our interference;
+        * SUID, SGID and LSM creds get determined differently
+        * under ptrace.
+        */
+       scoped_cond_guard (mutex_intr, return -ERESTARTNOINTR,
+                          &task->signal->cred_guard_mutex) {
+
+               scoped_guard (task_lock, task) {
+                       retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
+                       if (retval)
+                               return retval;
+               }
+
+               scoped_guard (write_lock_irq, &tasklist_lock) {
+                       if (unlikely(task->exit_state))
+                               return -EPERM;
+                       if (task->ptrace)
+                               return -EPERM;
+
+                       task->ptrace = flags;
+                       ptrace_link(task, current);
+                       ptrace_set_stopped(task, seize);
+               }
        }
 
-       return retval;
+       /*
+        * We do not bother to change retval or clear JOBCTL_TRAPPING
+        * if wait_on_bit() was interrupted by SIGKILL. The tracer will
+        * not return to user-mode, it will exit and clear this bit in
+        * __ptrace_unlink() if it wasn't already cleared by the tracee;
+        * and until then nobody can ptrace this task.
+        */
+       wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, TASK_KILLABLE);
+       proc_ptrace_connector(task, PTRACE_ATTACH);
+
+       return 0;
 }
 
 /**
@@ -627,22 +659,11 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
 static int ptrace_setoptions(struct task_struct *child, unsigned long data)
 {
        unsigned flags;
+       int ret;
 
-       if (data & ~(unsigned long)PTRACE_O_MASK)
-               return -EINVAL;
-
-       if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) {
-               if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) ||
-                   !IS_ENABLED(CONFIG_SECCOMP))
-                       return -EINVAL;
-
-               if (!capable(CAP_SYS_ADMIN))
-                       return -EPERM;
-
-               if (seccomp_mode(&current->seccomp) != SECCOMP_MODE_DISABLED ||
-                   current->ptrace & PT_SUSPEND_SECCOMP)
-                       return -EPERM;
-       }
+       ret = check_ptrace_options(data);
+       if (ret)
+               return ret;
 
        /* Avoid intermediate state when all opts are cleared */
        flags = child->ptrace;
@@ -705,6 +726,10 @@ static int ptrace_peek_siginfo(struct task_struct *child,
        if (arg.nr < 0)
                return -EINVAL;
 
+       /* Ensure arg.off fits in an unsigned long */
+       if (arg.off > ULONG_MAX)
+               return 0;
+
        if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
                pending = &child->signal->shared_pending;
        else
@@ -712,18 +737,20 @@ static int ptrace_peek_siginfo(struct task_struct *child,
 
        for (i = 0; i < arg.nr; ) {
                kernel_siginfo_t info;
-               s32 off = arg.off + i;
+               unsigned long off = arg.off + i;
+               bool found = false;
 
                spin_lock_irq(&child->sighand->siglock);
                list_for_each_entry(q, &pending->list, list) {
                        if (!off--) {
+                               found = true;
                                copy_siginfo(&info, &q->info);
                                break;
                        }
                }
                spin_unlock_irq(&child->sighand->siglock);
 
-               if (off >= 0) /* beyond the end of the list */
+               if (!found) /* beyond the end of the list */
                        break;
 
 #ifdef CONFIG_COMPAT
@@ -761,12 +788,26 @@ static int ptrace_peek_siginfo(struct task_struct *child,
        return ret;
 }
 
-#ifdef PTRACE_SINGLESTEP
-#define is_singlestep(request)         ((request) == PTRACE_SINGLESTEP)
-#else
-#define is_singlestep(request)         0
+#ifdef CONFIG_RSEQ
+static long ptrace_get_rseq_configuration(struct task_struct *task,
+                                         unsigned long size, void __user *data)
+{
+       struct ptrace_rseq_configuration conf = {
+               .rseq_abi_pointer = (u64)(uintptr_t)task->rseq,
+               .rseq_abi_size = task->rseq_len,
+               .signature = task->rseq_sig,
+               .flags = 0,
+       };
+
+       size = min_t(unsigned long, size, sizeof(conf));
+       if (copy_to_user(data, &conf, size))
+               return -EFAULT;
+       return sizeof(conf);
+}
 #endif
 
+#define is_singlestep(request)         ((request) == PTRACE_SINGLESTEP)
+
 #ifdef PTRACE_SINGLEBLOCK
 #define is_singleblock(request)                ((request) == PTRACE_SINGLEBLOCK)
 #else
@@ -782,21 +823,19 @@ static int ptrace_peek_siginfo(struct task_struct *child,
 static int ptrace_resume(struct task_struct *child, long request,
                         unsigned long data)
 {
-       bool need_siglock;
-
        if (!valid_signal(data))
                return -EIO;
 
        if (request == PTRACE_SYSCALL)
-               set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+               set_task_syscall_work(child, SYSCALL_TRACE);
        else
-               clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+               clear_task_syscall_work(child, SYSCALL_TRACE);
 
-#ifdef TIF_SYSCALL_EMU
+#if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU)
        if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
-               set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
+               set_task_syscall_work(child, SYSCALL_EMU);
        else
-               clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
+               clear_task_syscall_work(child, SYSCALL_EMU);
 #endif
 
        if (is_singleblock(request)) {
@@ -819,18 +858,12 @@ static int ptrace_resume(struct task_struct *child, long request,
         * Note that we need siglock even if ->exit_code == data and/or this
         * status was not reported yet, the new status must not be cleared by
         * wait_task_stopped() after resume.
-        *
-        * If data == 0 we do not care if wait_task_stopped() reports the old
-        * status and clears the code too; this can't race with the tracee, it
-        * takes siglock after resume.
         */
-       need_siglock = data && !thread_group_empty(current);
-       if (need_siglock)
-               spin_lock_irq(&child->sighand->siglock);
+       spin_lock_irq(&child->sighand->siglock);
        child->exit_code = data;
+       child->jobctl &= ~JOBCTL_TRACED;
        wake_up_state(child, __TASK_TRACED);
-       if (need_siglock)
-               spin_unlock_irq(&child->sighand->siglock);
+       spin_unlock_irq(&child->sighand->siglock);
 
        return 0;
 }
@@ -880,7 +913,100 @@ static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
  * to ensure no machine forgets it.
  */
 EXPORT_SYMBOL_GPL(task_user_regset_view);
-#endif
+
+static unsigned long
+ptrace_get_syscall_info_entry(struct task_struct *child, struct pt_regs *regs,
+                             struct ptrace_syscall_info *info)
+{
+       unsigned long args[ARRAY_SIZE(info->entry.args)];
+       int i;
+
+       info->op = PTRACE_SYSCALL_INFO_ENTRY;
+       info->entry.nr = syscall_get_nr(child, regs);
+       syscall_get_arguments(child, regs, args);
+       for (i = 0; i < ARRAY_SIZE(args); i++)
+               info->entry.args[i] = args[i];
+
+       /* args is the last field in struct ptrace_syscall_info.entry */
+       return offsetofend(struct ptrace_syscall_info, entry.args);
+}
+
+static unsigned long
+ptrace_get_syscall_info_seccomp(struct task_struct *child, struct pt_regs *regs,
+                               struct ptrace_syscall_info *info)
+{
+       /*
+        * As struct ptrace_syscall_info.entry is currently a subset
+        * of struct ptrace_syscall_info.seccomp, it makes sense to
+        * initialize that subset using ptrace_get_syscall_info_entry().
+        * This can be reconsidered in the future if these structures
+        * diverge significantly enough.
+        */
+       ptrace_get_syscall_info_entry(child, regs, info);
+       info->op = PTRACE_SYSCALL_INFO_SECCOMP;
+       info->seccomp.ret_data = child->ptrace_message;
+
+       /* ret_data is the last field in struct ptrace_syscall_info.seccomp */
+       return offsetofend(struct ptrace_syscall_info, seccomp.ret_data);
+}
+
+static unsigned long
+ptrace_get_syscall_info_exit(struct task_struct *child, struct pt_regs *regs,
+                            struct ptrace_syscall_info *info)
+{
+       info->op = PTRACE_SYSCALL_INFO_EXIT;
+       info->exit.rval = syscall_get_error(child, regs);
+       info->exit.is_error = !!info->exit.rval;
+       if (!info->exit.is_error)
+               info->exit.rval = syscall_get_return_value(child, regs);
+
+       /* is_error is the last field in struct ptrace_syscall_info.exit */
+       return offsetofend(struct ptrace_syscall_info, exit.is_error);
+}
+
+static int
+ptrace_get_syscall_info(struct task_struct *child, unsigned long user_size,
+                       void __user *datavp)
+{
+       struct pt_regs *regs = task_pt_regs(child);
+       struct ptrace_syscall_info info = {
+               .op = PTRACE_SYSCALL_INFO_NONE,
+               .arch = syscall_get_arch(child),
+               .instruction_pointer = instruction_pointer(regs),
+               .stack_pointer = user_stack_pointer(regs),
+       };
+       unsigned long actual_size = offsetof(struct ptrace_syscall_info, entry);
+       unsigned long write_size;
+
+       /*
+        * This does not need lock_task_sighand() to access
+        * child->last_siginfo because ptrace_freeze_traced()
+        * called earlier by ptrace_check_attach() ensures that
+        * the tracee cannot go away and clear its last_siginfo.
+        */
+       switch (child->last_siginfo ? child->last_siginfo->si_code : 0) {
+       case SIGTRAP | 0x80:
+               switch (child->ptrace_message) {
+               case PTRACE_EVENTMSG_SYSCALL_ENTRY:
+                       actual_size = ptrace_get_syscall_info_entry(child, regs,
+                                                                   &info);
+                       break;
+               case PTRACE_EVENTMSG_SYSCALL_EXIT:
+                       actual_size = ptrace_get_syscall_info_exit(child, regs,
+                                                                  &info);
+                       break;
+               }
+               break;
+       case SIGTRAP | (PTRACE_EVENT_SECCOMP << 8):
+               actual_size = ptrace_get_syscall_info_seccomp(child, regs,
+                                                             &info);
+               break;
+       }
+
+       write_size = min(actual_size, user_size);
+       return copy_to_user(datavp, &info, write_size) ? -EFAULT : actual_size;
+}
+#endif /* CONFIG_HAVE_ARCH_TRACEHOOK */
 
 int ptrace_request(struct task_struct *child, long request,
                   unsigned long addr, unsigned long data)
@@ -1060,9 +1186,7 @@ int ptrace_request(struct task_struct *child, long request,
        }
 #endif
 
-#ifdef PTRACE_SINGLESTEP
        case PTRACE_SINGLESTEP:
-#endif
 #ifdef PTRACE_SINGLEBLOCK
        case PTRACE_SINGLEBLOCK:
 #endif
@@ -1075,9 +1199,8 @@ int ptrace_request(struct task_struct *child, long request,
                return ptrace_resume(child, request, data);
 
        case PTRACE_KILL:
-               if (child->exit_state)  /* already dead */
-                       return 0;
-               return ptrace_resume(child, request, SIGKILL);
+               send_sig_info(SIGKILL, SEND_SIG_NOINFO, child);
+               return 0;
 
 #ifdef CONFIG_HAVE_ARCH_TRACEHOOK
        case PTRACE_GETREGSET:
@@ -1097,6 +1220,10 @@ int ptrace_request(struct task_struct *child, long request,
                        ret = __put_user(kiov.iov_len, &uiov->iov_len);
                break;
        }
+
+       case PTRACE_GET_SYSCALL_INFO:
+               ret = ptrace_get_syscall_info(child, addr, datavp);
+               break;
 #endif
 
        case PTRACE_SECCOMP_GET_FILTER:
@@ -1107,6 +1234,20 @@ int ptrace_request(struct task_struct *child, long request,
                ret = seccomp_get_metadata(child, addr, datavp);
                break;
 
+#ifdef CONFIG_RSEQ
+       case PTRACE_GET_RSEQ_CONFIGURATION:
+               ret = ptrace_get_rseq_configuration(child, addr, datavp);
+               break;
+#endif
+
+       case PTRACE_SET_SYSCALL_USER_DISPATCH_CONFIG:
+               ret = syscall_user_dispatch_set_config(child, addr, datavp);
+               break;
+
+       case PTRACE_GET_SYSCALL_USER_DISPATCH_CONFIG:
+               ret = syscall_user_dispatch_get_config(child, addr, datavp);
+               break;
+
        default:
                break;
        }
@@ -1114,10 +1255,6 @@ int ptrace_request(struct task_struct *child, long request,
        return ret;
 }
 
-#ifndef arch_ptrace_attach
-#define arch_ptrace_attach(child)      do { } while (0)
-#endif
-
 SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
                unsigned long, data)
 {
@@ -1126,8 +1263,6 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
 
        if (request == PTRACE_TRACEME) {
                ret = ptrace_traceme();
-               if (!ret)
-                       arch_ptrace_attach(current);
                goto out;
        }
 
@@ -1139,12 +1274,6 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
 
        if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
                ret = ptrace_attach(child, request, addr, data);
-               /*
-                * Some architectures need to do book-keeping after
-                * a ptrace attach.
-                */
-               if (!ret)
-                       arch_ptrace_attach(child);
                goto out_put_task_struct;
        }
 
@@ -1284,12 +1413,6 @@ COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
 
        if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
                ret = ptrace_attach(child, request, addr, data);
-               /*
-                * Some architectures need to do book-keeping after
-                * a ptrace attach.
-                */
-               if (!ret)
-                       arch_ptrace_attach(child);
                goto out_put_task_struct;
        }