/*
 * kernel/power/process.c - Functions for starting/stopping processes on
 *                          suspend transitions.
 *
 * Originally from swsusp.
 */


#undef DEBUG

#include <linux/interrupt.h>
#include <linux/oom.h>
#include <linux/suspend.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/freezer.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/kmod.h>
#include <trace/events/power.h>

/*
 * Timeout for stopping processes, in milliseconds (20 seconds by default).
 * Writable at run time via /sys/power/pm_freeze_timeout.
 */
unsigned int __read_mostly freeze_timeout_msecs = 20 * MSEC_PER_SEC;

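/**
 * try_to_freeze_tasks - Ask freezable tasks to enter the refrigerator.
 * @user_only: Consider only user space tasks; leave kernel threads and
 *             freezable workqueues alone.
 *
 * Repeatedly walk the task list, calling freeze_task() on every task except
 * the current one, and sleep between passes with exponential backoff (1 ms
 * up to 8 ms) until all tasks are frozen, a wakeup event becomes pending,
 * or freeze_timeout_msecs expires.
 *
 * Return: 0 on success, -EBUSY if some tasks could not be frozen in time.
 */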
static int try_to_freeze_tasks(bool user_only)
{
        struct task_struct *g, *p;
        unsigned long end_time;
        unsigned int todo;
        bool wq_busy = false;
        struct timeval start, end;
        u64 elapsed_msecs64;
        unsigned int elapsed_msecs;
        bool wakeup = false;
        int sleep_usecs = USEC_PER_MSEC;

        do_gettimeofday(&start);

        end_time = jiffies + msecs_to_jiffies(freeze_timeout_msecs);

        if (!user_only)
                freeze_workqueues_begin();

        while (true) {
                todo = 0;
                read_lock(&tasklist_lock);
                do_each_thread(g, p) {
                        if (p == current || !freeze_task(p))
                                continue;

                        if (!freezer_should_skip(p))
                                todo++;
                } while_each_thread(g, p);
                read_unlock(&tasklist_lock);

                if (!user_only) {
                        wq_busy = freeze_workqueues_busy();
                        todo += wq_busy;
                }

                if (!todo || time_after(jiffies, end_time))
                        break;

                if (pm_wakeup_pending()) {
                        wakeup = true;
                        break;
                }

                /*
                 * We need to retry, but first give the freezing tasks some
                 * time to enter the refrigerator.  Start with an initial
                 * 1 ms sleep followed by exponential backoff until 8 ms.
                 */
                usleep_range(sleep_usecs / 2, sleep_usecs);
                if (sleep_usecs < 8 * USEC_PER_MSEC)
                        sleep_usecs *= 2;
        }

        do_gettimeofday(&end);
        elapsed_msecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
        do_div(elapsed_msecs64, NSEC_PER_MSEC);
        elapsed_msecs = elapsed_msecs64;

        if (todo) {
                printk("\n");
                printk(KERN_ERR "Freezing of tasks %s after %d.%03d seconds "
                       "(%d tasks refusing to freeze, wq_busy=%d):\n",
                       wakeup ? "aborted" : "failed",
                       elapsed_msecs / 1000, elapsed_msecs % 1000,
                       todo - wq_busy, wq_busy);

                if (!wakeup) {
                        read_lock(&tasklist_lock);
                        do_each_thread(g, p) {
                                if (p != current && !freezer_should_skip(p)
                                    && freezing(p) && !frozen(p))
                                        sched_show_task(p);
                        } while_each_thread(g, p);
                        read_unlock(&tasklist_lock);
                }
        } else {
                printk("(elapsed %d.%03d seconds) ", elapsed_msecs / 1000,
                        elapsed_msecs % 1000);
        }

        return todo ? -EBUSY : 0;
}

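/*
 * Illustrative sketch, not part of this file: a freezable kernel thread
 * typically cooperates with try_to_freeze_tasks() by marking itself
 * freezable and polling try_to_freeze() in its main loop:
 *
 *	set_freezable();
 *	while (!kthread_should_stop()) {
 *		try_to_freeze();
 *		... do the actual work or sleep ...
 *	}
 */
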
/**
 * freeze_processes - Signal user space processes to enter the refrigerator.
 * The current thread will not be frozen.  The same process that calls
 * freeze_processes must later call thaw_processes.
 *
 * On success, returns 0.  On failure, -errno and system is fully thawed.
 */
int freeze_processes(void)
{
        int error;

        error = __usermodehelper_disable(UMH_FREEZING);
        if (error)
                return error;

        /* Make sure this task doesn't get frozen */
        current->flags |= PF_SUSPEND_TASK;

        if (!pm_freezing)
                atomic_inc(&system_freezing_cnt);

        pm_wakeup_clear();
        printk("Freezing user space processes ... ");
        pm_freezing = true;
        error = try_to_freeze_tasks(true);
        if (!error) {
                printk("done.");
                __usermodehelper_set_disable_depth(UMH_DISABLED);
                oom_killer_disable();
        }
        printk("\n");
        BUG_ON(in_atomic());

        if (error)
                thaw_processes();
        return error;
}

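/*
 * Illustrative sketch, not verbatim from the suspend core: a typical
 * suspend or hibernation path chains these calls roughly as follows,
 * thawing user space itself if only the kernel-thread step fails:
 *
 *	error = freeze_processes();
 *	if (error)
 *		return error;
 *
 *	error = freeze_kernel_threads();
 *	if (error)
 *		thaw_processes();
 *
 *	return error;
 */
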
/**
 * freeze_kernel_threads - Make freezable kernel threads go to the refrigerator.
 *
 * On success, returns 0.  On failure, -errno is returned and only the kernel
 * threads are thawed, giving the caller a chance to do additional cleanups
 * (if any) before thawing the user space tasks itself, when the time is right.
 */
int freeze_kernel_threads(void)
{
        int error;

        printk("Freezing remaining freezable tasks ... ");
        pm_nosig_freezing = true;
        error = try_to_freeze_tasks(false);
        if (!error)
                printk("done.");

        printk("\n");
        BUG_ON(in_atomic());

        if (error)
                thaw_kernel_threads();
        return error;
}

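/**
 * thaw_processes - Wake up all frozen tasks.
 *
 * Counterpart of freeze_processes().  Clears the global freezing state,
 * re-enables the OOM killer and usermode helpers, thaws workqueues and all
 * tasks, and drops the PF_SUSPEND_TASK flag set by freeze_processes().
 */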
void thaw_processes(void)
{
        struct task_struct *g, *p;
        struct task_struct *curr = current;

        trace_suspend_resume(TPS("thaw_processes"), 0, true);
        if (pm_freezing)
                atomic_dec(&system_freezing_cnt);
        pm_freezing = false;
        pm_nosig_freezing = false;

        oom_killer_enable();

        printk("Restarting tasks ... ");

        __usermodehelper_set_disable_depth(UMH_FREEZING);
        thaw_workqueues();

        read_lock(&tasklist_lock);
        do_each_thread(g, p) {
                /* No other threads should have PF_SUSPEND_TASK set */
                WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK));
                __thaw_task(p);
        } while_each_thread(g, p);
        read_unlock(&tasklist_lock);

        WARN_ON(!(curr->flags & PF_SUSPEND_TASK));
        curr->flags &= ~PF_SUSPEND_TASK;

        usermodehelper_enable();

        schedule();
        printk("done.\n");
        trace_suspend_resume(TPS("thaw_processes"), 0, false);
}

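/**
 * thaw_kernel_threads - Wake up frozen kernel threads and workqueue workers.
 *
 * Counterpart of freeze_kernel_threads().  Thaws only tasks with PF_KTHREAD
 * or PF_WQ_WORKER set, leaving user space tasks frozen.
 */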
void thaw_kernel_threads(void)
{
        struct task_struct *g, *p;

        pm_nosig_freezing = false;
        printk("Restarting kernel threads ... ");

        thaw_workqueues();

        read_lock(&tasklist_lock);
        do_each_thread(g, p) {
                if (p->flags & (PF_KTHREAD | PF_WQ_WORKER))
                        __thaw_task(p);
        } while_each_thread(g, p);
        read_unlock(&tasklist_lock);

        schedule();
        printk("done.\n");
}