Merge tag 'timers-core-2020-03-30' of git://git.kernel.org/pub/scm/linux/kernel/git...
[sfrench/cifs-2.6.git] / kernel/time/posix-cpu-timers.c
index 2c48a7233b1960d2593207741909bd4c13eba0d9..2fd3b3fa68bf11013e7e107d20f8f64b4ddad9df 100644
@@ -118,6 +118,16 @@ static inline int validate_clock_permissions(const clockid_t clock)
        return __get_task_for_clock(clock, false, false) ? 0 : -EINVAL;
 }
 
+static inline enum pid_type cpu_timer_pid_type(struct k_itimer *timer)
+{
+       return CPUCLOCK_PERTHREAD(timer->it_clock) ? PIDTYPE_PID : PIDTYPE_TGID;
+}
+
+static inline struct task_struct *cpu_timer_task_rcu(struct k_itimer *timer)
+{
+       return pid_task(timer->it.cpu.pid, cpu_timer_pid_type(timer));
+}
+
 /*
  * Update expiry time from increment, and increase overrun count,
  * given the current clock sample.
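
Note (editorial, not part of the patch): the two helpers added above fix the lookup rule for the rest of the file. A per-thread clock stores and resolves its target as PIDTYPE_PID, a process-wide clock as PIDTYPE_TGID, and pid_task() returns NULL once the target has been reaped, so every caller below must cope with a missing task. The _rcu suffix matters: pid_task() is only safe inside an RCU read-side critical section. A minimal sketch of the lookup pattern the later hunks adopt (identifiers taken from this diff; the error value varies per call site):

        rcu_read_lock();
        p = cpu_timer_task_rcu(timer);  /* NULL once the target was reaped */
        if (!p) {
                rcu_read_unlock();
                return -ESRCH;          /* del/get instead drop or skip the timer */
        }
        /* ... sample clocks; take sighand only where the timer list is touched ... */
        rcu_read_unlock();
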
@@ -336,9 +346,7 @@ static void __thread_group_cputime(struct task_struct *tsk, u64 *samples)
 /*
  * Sample a process (thread group) clock for the given task clkid. If the
  * group's cputime accounting is already enabled, read the atomic
- * store. Otherwise a full update is required.  Task's sighand lock must be
- * held to protect the task traversal on a full update. clkid is already
- * validated.
+ * store. Otherwise a full update is required.  clkid is already validated.
  */
 static u64 cpu_clock_sample_group(const clockid_t clkid, struct task_struct *p,
                                  bool start)
@@ -393,7 +401,12 @@ static int posix_cpu_timer_create(struct k_itimer *new_timer)
 
        new_timer->kclock = &clock_posix_cpu;
        timerqueue_init(&new_timer->it.cpu.node);
-       new_timer->it.cpu.task = p;
+       new_timer->it.cpu.pid = get_task_pid(p, cpu_timer_pid_type(new_timer));
+       /*
+        * get_task_for_clock() took a reference on @p. Drop it as the timer
+        * holds a reference on the pid of @p.
+        */
+       put_task_struct(p);
        return 0;
 }
 
@@ -406,13 +419,15 @@ static int posix_cpu_timer_create(struct k_itimer *new_timer)
 static int posix_cpu_timer_del(struct k_itimer *timer)
 {
        struct cpu_timer *ctmr = &timer->it.cpu;
-       struct task_struct *p = ctmr->task;
        struct sighand_struct *sighand;
+       struct task_struct *p;
        unsigned long flags;
        int ret = 0;
 
-       if (WARN_ON_ONCE(!p))
-               return -EINVAL;
+       rcu_read_lock();
+       p = cpu_timer_task_rcu(timer);
+       if (!p)
+               goto out;
 
        /*
         * Protect against sighand release/switch in exit/exec and process/
@@ -434,8 +449,10 @@ static int posix_cpu_timer_del(struct k_itimer *timer)
                unlock_task_sighand(p, &flags);
        }
 
+out:
+       rcu_read_unlock();
        if (!ret)
-               put_task_struct(p);
+               put_pid(ctmr->pid);
 
        return ret;
 }
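
The create hunk above and this delete path bracket the new reference lifecycle: get_task_pid() takes its own reference on the struct pid, so the task reference from get_task_for_clock() is dropped right away and the timer never keeps a task_struct alive. The struct pid, which does outlive the task, is released here via put_pid() even when the target was already reaped (the failed lookup leaves ret at 0). A simplified sketch of that balance, not literal kernel code:

        /* posix_cpu_timer_create(): +1 on the struct pid, task unpinned */
        timer->it.cpu.pid = get_task_pid(p, cpu_timer_pid_type(timer));
        put_task_struct(p);

        /* every later operation re-resolves the task from the pid */
        p = cpu_timer_task_rcu(timer);          /* may be NULL after reaping */

        /* posix_cpu_timer_del(): -1, possibly freeing the struct pid */
        put_pid(timer->it.cpu.pid);
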
@@ -484,12 +501,11 @@ void posix_cpu_timers_exit_group(struct task_struct *tsk)
  * Insert the timer on the appropriate list before any timers that
  * expire later.  This must be called with the sighand lock held.
  */
-static void arm_timer(struct k_itimer *timer)
+static void arm_timer(struct k_itimer *timer, struct task_struct *p)
 {
        int clkidx = CPUCLOCK_WHICH(timer->it_clock);
        struct cpu_timer *ctmr = &timer->it.cpu;
        u64 newexp = cpu_timer_getexpires(ctmr);
-       struct task_struct *p = ctmr->task;
        struct posix_cputimer_base *base;
 
        if (CPUCLOCK_PERTHREAD(timer->it_clock))
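
arm_timer() now takes the resolved task explicitly because the timer no longer caches a task pointer it could dereference. A short sketch of the caller contract implied by the hunks below (assumed: p came from cpu_timer_task_rcu() inside the caller's RCU section, and sighand serializes the timer list updates):

        if (lock_task_sighand(p, &flags)) {     /* NULL sighand: target exited */
                arm_timer(timer, p);            /* timer list access needs sighand */
                unlock_task_sighand(p, &flags);
        }
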
@@ -564,13 +580,21 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
        clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
        u64 old_expires, new_expires, old_incr, val;
        struct cpu_timer *ctmr = &timer->it.cpu;
-       struct task_struct *p = ctmr->task;
        struct sighand_struct *sighand;
+       struct task_struct *p;
        unsigned long flags;
        int ret = 0;
 
-       if (WARN_ON_ONCE(!p))
-               return -EINVAL;
+       rcu_read_lock();
+       p = cpu_timer_task_rcu(timer);
+       if (!p) {
+               /*
+                * If p has just been reaped, we can no
+                * longer get any information about it at all.
+                */
+               rcu_read_unlock();
+               return -ESRCH;
+       }
 
        /*
         * Use the to_ktime conversion because that clamps the maximum
@@ -587,8 +611,10 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
         * If p has just been reaped, we can no
         * longer get any information about it at all.
         */
-       if (unlikely(sighand == NULL))
+       if (unlikely(sighand == NULL)) {
+               rcu_read_unlock();
                return -ESRCH;
+       }
 
        /*
         * Disarm any old timer after extracting its expiry time.
@@ -662,7 +688,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
         */
        cpu_timer_setexpires(ctmr, new_expires);
        if (new_expires != 0 && val < new_expires) {
-               arm_timer(timer);
+               arm_timer(timer, p);
        }
 
        unlock_task_sighand(p, &flags);
@@ -693,6 +719,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
 
        ret = 0;
  out:
+       rcu_read_unlock();
        if (old)
                old->it_interval = ns_to_timespec64(old_incr);
 
@@ -704,10 +731,12 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp
        clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
        struct cpu_timer *ctmr = &timer->it.cpu;
        u64 now, expires = cpu_timer_getexpires(ctmr);
-       struct task_struct *p = ctmr->task;
+       struct task_struct *p;
 
-       if (WARN_ON_ONCE(!p))
-               return;
+       rcu_read_lock();
+       p = cpu_timer_task_rcu(timer);
+       if (!p)
+               goto out;
 
        /*
         * Easy part: convert the reload time.
@@ -715,36 +744,15 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp
        itp->it_interval = ktime_to_timespec64(timer->it_interval);
 
        if (!expires)
-               return;
+               goto out;
 
        /*
         * Sample the clock to take the difference with the expiry time.
         */
-       if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
+       if (CPUCLOCK_PERTHREAD(timer->it_clock))
                now = cpu_clock_sample(clkid, p);
-       } else {
-               struct sighand_struct *sighand;
-               unsigned long flags;
-
-               /*
-                * Protect against sighand release/switch in exit/exec and
-                * also make timer sampling safe if it ends up calling
-                * thread_group_cputime().
-                */
-               sighand = lock_task_sighand(p, &flags);
-               if (unlikely(sighand == NULL)) {
-                       /*
-                        * The process has been reaped.
-                        * We can't even collect a sample any more.
-                        * Disarm the timer, nothing else to do.
-                        */
-                       cpu_timer_setexpires(ctmr, 0);
-                       return;
-               } else {
-                       now = cpu_clock_sample_group(clkid, p, false);
-                       unlock_task_sighand(p, &flags);
-               }
-       }
+       else
+               now = cpu_clock_sample_group(clkid, p, false);
 
        if (now < expires) {
                itp->it_value = ns_to_timespec64(expires - now);
@@ -756,6 +764,8 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp
                itp->it_value.tv_nsec = 1;
                itp->it_value.tv_sec = 0;
        }
+out:
+       rcu_read_unlock();
 }
 
 #define MAX_COLLECTED  20
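
The sighand locking removed from posix_cpu_timer_get() only existed to cover a full thread-group sample; the group walk in __thread_group_cputime() uses for_each_thread() under its own rcu_read_lock(), so a read-only sample needs nothing beyond the RCU section this function already holds (the comment change in the second hunk above records the same point). A rough sketch of that traversal, with accumulate_sample() as a hypothetical stand-in for the real utime/stime/rtime accounting:

        rcu_read_lock();
        for_each_thread(tsk, t)
                accumulate_sample(t, samples);  /* hypothetical accumulator */
        rcu_read_unlock();
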
@@ -976,56 +986,38 @@ static void check_process_timers(struct task_struct *tsk,
 static void posix_cpu_timer_rearm(struct k_itimer *timer)
 {
        clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
-       struct cpu_timer *ctmr = &timer->it.cpu;
-       struct task_struct *p = ctmr->task;
+       struct task_struct *p;
        struct sighand_struct *sighand;
        unsigned long flags;
        u64 now;
 
-       if (WARN_ON_ONCE(!p))
-               return;
+       rcu_read_lock();
+       p = cpu_timer_task_rcu(timer);
+       if (!p)
+               goto out;
 
        /*
         * Fetch the current sample and update the timer's expiry time.
         */
-       if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
+       if (CPUCLOCK_PERTHREAD(timer->it_clock))
                now = cpu_clock_sample(clkid, p);
-               bump_cpu_timer(timer, now);
-               if (unlikely(p->exit_state))
-                       return;
-
-               /* Protect timer list r/w in arm_timer() */
-               sighand = lock_task_sighand(p, &flags);
-               if (!sighand)
-                       return;
-       } else {
-               /*
-                * Protect arm_timer() and timer sampling in case of call to
-                * thread_group_cputime().
-                */
-               sighand = lock_task_sighand(p, &flags);
-               if (unlikely(sighand == NULL)) {
-                       /*
-                        * The process has been reaped.
-                        * We can't even collect a sample any more.
-                        */
-                       cpu_timer_setexpires(ctmr, 0);
-                       return;
-               } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
-                       /* If the process is dying, no need to rearm */
-                       goto unlock;
-               }
+       else
                now = cpu_clock_sample_group(clkid, p, true);
-               bump_cpu_timer(timer, now);
-               /* Leave the sighand locked for the call below.  */
-       }
+
+       bump_cpu_timer(timer, now);
+
+       /* Protect timer list r/w in arm_timer() */
+       sighand = lock_task_sighand(p, &flags);
+       if (unlikely(sighand == NULL))
+               goto out;
 
        /*
         * Now re-arm for the new expiry time.
         */
-       arm_timer(timer);
-unlock:
+       arm_timer(timer, p);
        unlock_task_sighand(p, &flags);
+out:
+       rcu_read_unlock();
 }
 
 /**
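
Taken together, the series makes the new behaviour visible from userspace: a CPU-time timer can outlive the thread it targets, re-arming it then fails while deleting it still succeeds. A small illustrative program, written against the documented POSIX timer API rather than taken from this patch (compile with -pthread, plus -lrt on older glibc; the short sleep papers over the fact that the kernel reaps the thread slightly after pthread_join() returns):

        #include <errno.h>
        #include <pthread.h>
        #include <signal.h>
        #include <stdio.h>
        #include <time.h>
        #include <unistd.h>

        static void *worker(void *arg)
        {
                (void)arg;
                return NULL;                    /* exit immediately */
        }

        int main(void)
        {
                struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL,
                                        .sigev_signo  = SIGALRM };
                struct itimerspec its = { .it_value = { .tv_sec = 1 } };
                pthread_t thr;
                clockid_t clk;
                timer_t tid;

                pthread_create(&thr, NULL, worker, NULL);
                pthread_getcpuclockid(thr, &clk);       /* per-thread CPU clock */
                timer_create(clk, &sev, &tid);          /* timer holds a pid reference only */

                pthread_join(thr, NULL);                /* worker exits ... */
                usleep(10 * 1000);                      /* ... and gets reaped */

                if (timer_settime(tid, 0, &its, NULL) < 0 && errno == ESRCH)
                        puts("target reaped: timer_settime() fails with ESRCH");

                timer_delete(tid);                      /* still succeeds, drops the pid */
                return 0;
        }
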