fork: convert task/signal/sighand reference counts to refcount_t and mm->pinned_vm to atomic64_t
[sfrench/cifs-2.6.git] / kernel / fork.c
index b69248e6f0e024c0407df16dfdc8a4919b590c78..9dcd18aa210b5fe930691c11ce3b920f5a92737d 100644 (file)
@@ -77,7 +77,6 @@
 #include <linux/blkdev.h>
 #include <linux/fs_struct.h>
 #include <linux/magic.h>
-#include <linux/sched/mm.h>
 #include <linux/perf_event.h>
 #include <linux/posix-timers.h>
 #include <linux/user-return-notifier.h>
@@ -429,7 +428,7 @@ static void release_task_stack(struct task_struct *tsk)
 #ifdef CONFIG_THREAD_INFO_IN_TASK
 void put_task_stack(struct task_struct *tsk)
 {
-       if (atomic_dec_and_test(&tsk->stack_refcount))
+       if (refcount_dec_and_test(&tsk->stack_refcount))
                release_task_stack(tsk);
 }
 #endif
@@ -447,7 +446,7 @@ void free_task(struct task_struct *tsk)
         * If the task had a separate stack allocation, it should be gone
         * by now.
         */
-       WARN_ON_ONCE(atomic_read(&tsk->stack_refcount) != 0);
+       WARN_ON_ONCE(refcount_read(&tsk->stack_refcount) != 0);
 #endif
        rt_mutex_debug_task_free(tsk);
        ftrace_graph_exit_task(tsk);
@@ -710,14 +709,14 @@ static inline void free_signal_struct(struct signal_struct *sig)
 
 static inline void put_signal_struct(struct signal_struct *sig)
 {
-       if (atomic_dec_and_test(&sig->sigcnt))
+       if (refcount_dec_and_test(&sig->sigcnt))
                free_signal_struct(sig);
 }
 
 void __put_task_struct(struct task_struct *tsk)
 {
        WARN_ON(!tsk->exit_state);
-       WARN_ON(atomic_read(&tsk->usage));
+       WARN_ON(refcount_read(&tsk->usage));
        WARN_ON(tsk == current);
 
        cgroup_free(tsk);
@@ -867,7 +866,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
        tsk->stack_vm_area = stack_vm_area;
 #endif
 #ifdef CONFIG_THREAD_INFO_IN_TASK
-       atomic_set(&tsk->stack_refcount, 1);
+       refcount_set(&tsk->stack_refcount, 1);
 #endif
 
        if (err)
@@ -896,7 +895,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
         * One for us, one for whoever does the "release_task()" (usually
         * parent)
         */
-       atomic_set(&tsk->usage, 2);
+       refcount_set(&tsk->usage, 2);
 #ifdef CONFIG_BLK_DEV_IO_TRACE
        tsk->btrace_seq = 0;
 #endif
@@ -981,7 +980,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
        mm_pgtables_bytes_init(mm);
        mm->map_count = 0;
        mm->locked_vm = 0;
-       mm->pinned_vm = 0;
+       atomic64_set(&mm->pinned_vm, 0);
        memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
        spin_lock_init(&mm->page_table_lock);
        spin_lock_init(&mm->arg_lock);
@@ -1463,7 +1462,7 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
        struct sighand_struct *sig;
 
        if (clone_flags & CLONE_SIGHAND) {
-               atomic_inc(&current->sighand->count);
+               refcount_inc(&current->sighand->count);
                return 0;
        }
        sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
@@ -1471,7 +1470,7 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
        if (!sig)
                return -ENOMEM;
 
-       atomic_set(&sig->count, 1);
+       refcount_set(&sig->count, 1);
        spin_lock_irq(&current->sighand->siglock);
        memcpy(sig->action, current->sighand->action, sizeof(sig->action));
        spin_unlock_irq(&current->sighand->siglock);
@@ -1480,7 +1479,7 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
 
 void __cleanup_sighand(struct sighand_struct *sighand)
 {
-       if (atomic_dec_and_test(&sighand->count)) {
+       if (refcount_dec_and_test(&sighand->count)) {
                signalfd_cleanup(sighand);
                /*
                 * sighand_cachep is SLAB_TYPESAFE_BY_RCU so we can free it
@@ -1527,7 +1526,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 
        sig->nr_threads = 1;
        atomic_set(&sig->live, 1);
-       atomic_set(&sig->sigcnt, 1);
+       refcount_set(&sig->sigcnt, 1);
 
        /* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
        sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
@@ -2082,7 +2081,7 @@ static __latent_entropy struct task_struct *copy_process(
                } else {
                        current->signal->nr_threads++;
                        atomic_inc(&current->signal->live);
-                       atomic_inc(&current->signal->sigcnt);
+                       refcount_inc(&current->signal->sigcnt);
                        task_join_group_stop(p);
                        list_add_tail_rcu(&p->thread_group,
                                          &p->group_leader->thread_group);
@@ -2439,7 +2438,7 @@ static int check_unshare_flags(unsigned long unshare_flags)
                        return -EINVAL;
        }
        if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) {
-               if (atomic_read(&current->sighand->count) > 1)
+               if (refcount_read(&current->sighand->count) > 1)
                        return -EINVAL;
        }
        if (unshare_flags & CLONE_VM) {