/*
 * transition.c - Kernel Live Patching transition functions
 *
 * Copyright (C) 2015-2016 Josh Poimboeuf <jpoimboe@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/stacktrace.h>
#include "core.h"
#include "patch.h"
#include "transition.h"
#include "../sched/sched.h"

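/*
 * MAX_STACK_ENTRIES bounds the stack trace saved for each task in
 * klp_check_stack().  STACK_ERR_BUF_SIZE sizes the buffer used to defer
 * debug messages until the task's rq lock has been released.
 */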
#define MAX_STACK_ENTRIES  100
#define STACK_ERR_BUF_SIZE 128

struct klp_patch *klp_transition_patch;

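/*
 * klp_target_state is the patch state (KLP_PATCHED or KLP_UNPATCHED) that all
 * tasks are being migrated towards.  It remains KLP_UNDEFINED whenever no
 * transition is in progress.
 */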
static int klp_target_state = KLP_UNDEFINED;

/*
 * This work can be performed periodically to finish patching or unpatching any
 * "straggler" tasks which failed to transition in the first attempt.
 */
static void klp_transition_work_fn(struct work_struct *work)
{
	mutex_lock(&klp_mutex);

	if (klp_transition_patch)
		klp_try_complete_transition();

	mutex_unlock(&klp_mutex);
}
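/*
 * klp_transition_work is scheduled from klp_try_complete_transition() when
 * some tasks could not be switched; the retry happens roughly once a second.
 */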
static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);

/*
 * This function is just a stub to implement a hard force
 * of synchronize_sched(). This requires synchronizing
 * tasks even in userspace and idle.
 */
static void klp_sync(struct work_struct *work)
{
}

/*
 * We also allow patching functions where RCU is not watching,
 * e.g. before user_exit().  We cannot rely on the RCU infrastructure
 * to do the synchronization; instead we hard force the sched synchronization.
 *
 * This approach allows RCU functions to be used safely for manipulating
 * func_stack.
 */
static void klp_synchronize_transition(void)
{
	schedule_on_each_cpu(klp_sync);
}

/*
 * The transition to the target patch state is complete.  Clean up the data
 * structures.
 */
static void klp_complete_transition(void)
{
	struct klp_object *obj;
	struct klp_func *func;
	struct task_struct *g, *task;
	unsigned int cpu;
	bool immediate_func = false;

	pr_debug("'%s': completing %s transition\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	if (klp_target_state == KLP_UNPATCHED) {
		/*
		 * All tasks have transitioned to KLP_UNPATCHED so we can now
		 * remove the new functions from the func_stack.
		 */
		klp_unpatch_objects(klp_transition_patch);

		/*
		 * Make sure klp_ftrace_handler() can no longer see functions
		 * from this patch on the ops->func_stack.  Otherwise, after
		 * func->transition gets cleared, the handler may choose a
		 * removed function.
		 */
		klp_synchronize_transition();
	}

	if (klp_transition_patch->immediate)
		goto done;

	klp_for_each_object(klp_transition_patch, obj) {
		klp_for_each_func(obj, func) {
			func->transition = false;
			if (func->immediate)
				immediate_func = true;
		}
	}

	/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
	if (klp_target_state == KLP_PATCHED)
		klp_synchronize_transition();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}

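	/*
	 * The post-(un)patch callbacks, the completion notice and the module
	 * reference handling below run for both the normal and the
	 * 'immediate' paths.
	 */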
done:
	klp_for_each_object(klp_transition_patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;
		if (klp_target_state == KLP_PATCHED)
			klp_post_patch_callback(obj);
		else if (klp_target_state == KLP_UNPATCHED)
			klp_post_unpatch_callback(obj);
	}

	pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * See complementary comment in __klp_enable_patch() for why we
	 * keep the module reference for immediate patches.
	 */
	if (!klp_transition_patch->immediate && !immediate_func &&
	    klp_target_state == KLP_UNPATCHED) {
		module_put(klp_transition_patch->mod);
	}

	klp_target_state = KLP_UNDEFINED;
	klp_transition_patch = NULL;
}

/*
 * This is called in the error path, to cancel a transition before it has
 * started, i.e. klp_init_transition() has been called but
 * klp_start_transition() hasn't.  If the transition *has* been started,
 * klp_reverse_transition() should be used instead.
 */
void klp_cancel_transition(void)
{
	if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
		return;

	pr_debug("'%s': canceling patching transition, going to unpatch\n",
		 klp_transition_patch->mod->name);

	klp_target_state = KLP_UNPATCHED;
	klp_complete_transition();
}

/*
 * Switch the patched state of the task to the set of functions in the target
 * patch state.
 *
 * NOTE: If task is not 'current', the caller must ensure the task is inactive.
 * Otherwise klp_ftrace_handler() might read the wrong 'patch_state' value.
 */
void klp_update_patch_state(struct task_struct *task)
{
	/*
	 * A variant of synchronize_sched() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	/*
	 * This test_and_clear_tsk_thread_flag() call also serves as a read
	 * barrier (smp_rmb) for two cases:
	 *
	 * 1) Enforce the order of the TIF_PATCH_PENDING read and the
	 *    klp_target_state read.  The corresponding write barrier is in
	 *    klp_init_transition().
	 *
	 * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
	 *    of func->transition, if klp_ftrace_handler() is called later on
	 *    the same CPU.  See __klp_disable_patch().
	 */
	if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
		task->patch_state = READ_ONCE(klp_target_state);

	preempt_enable_notrace();
}

/*
 * Determine whether the given stack trace includes any references to a
 * to-be-patched or to-be-unpatched function.
 */
static int klp_check_stack_func(struct klp_func *func,
				struct stack_trace *trace)
{
	unsigned long func_addr, func_size, address;
	struct klp_ops *ops;
	int i;

	if (func->immediate)
		return 0;

	for (i = 0; i < trace->nr_entries; i++) {
		address = trace->entries[i];

		if (klp_target_state == KLP_UNPATCHED) {
			/*
			 * Check for the to-be-unpatched function
			 * (the func itself).
			 */
			func_addr = (unsigned long)func->new_func;
			func_size = func->new_size;
		} else {
			/*
			 * Check for the to-be-patched function
			 * (the previous func).
			 */
			ops = klp_find_ops(func->old_addr);

			if (list_is_singular(&ops->func_stack)) {
				/* original function */
				func_addr = func->old_addr;
				func_size = func->old_size;
			} else {
				/* previously patched function */
				struct klp_func *prev;

				prev = list_next_entry(func, stack_node);
				func_addr = (unsigned long)prev->new_func;
				func_size = prev->new_size;
			}
		}

		if (address >= func_addr && address < func_addr + func_size)
			return -EAGAIN;
	}

	return 0;
}

/*
 * Determine whether it's safe to transition the task to the target patch state
 * by looking for any to-be-patched or to-be-unpatched functions on its stack.
 */
static int klp_check_stack(struct task_struct *task, char *err_buf)
{
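	/*
	 * The entries buffer is static to keep it off the kernel stack;
	 * accesses are serialized because the transition code runs under
	 * klp_mutex (via klp_try_complete_transition()).
	 */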
	static unsigned long entries[MAX_STACK_ENTRIES];
	struct stack_trace trace;
	struct klp_object *obj;
	struct klp_func *func;
	int ret;

	trace.skip = 0;
	trace.nr_entries = 0;
	trace.max_entries = MAX_STACK_ENTRIES;
	trace.entries = entries;
	ret = save_stack_trace_tsk_reliable(task, &trace);
	WARN_ON_ONCE(ret == -ENOSYS);
	if (ret) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d has an unreliable stack\n",
			 __func__, task->comm, task->pid);
		return ret;
	}

	klp_for_each_object(klp_transition_patch, obj) {
		if (!obj->patched)
			continue;
		klp_for_each_func(obj, func) {
			ret = klp_check_stack_func(func, &trace);
			if (ret) {
				snprintf(err_buf, STACK_ERR_BUF_SIZE,
					 "%s: %s:%d is sleeping on function %s\n",
					 __func__, task->comm, task->pid,
					 func->old_name);
				return ret;
			}
		}
	}

	return 0;
}

/*
 * Try to safely switch a task to the target patch state.  If it's currently
 * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
 * if the stack is unreliable, return false.
 */
static bool klp_try_switch_task(struct task_struct *task)
{
	struct rq *rq;
	struct rq_flags flags;
	int ret;
	bool success = false;
	char err_buf[STACK_ERR_BUF_SIZE];

	err_buf[0] = '\0';

	/* check if this task has already switched over */
	if (task->patch_state == klp_target_state)
		return true;

	/*
	 * For arches which don't have reliable stack traces, we have to rely
	 * on other methods (e.g., switching tasks at kernel exit).
	 */
	if (!klp_have_reliable_stack())
		return false;

	/*
	 * Now try to check the stack for any to-be-patched or to-be-unpatched
	 * functions.  If all goes well, switch the task to the target patch
	 * state.
	 */
	rq = task_rq_lock(task, &flags);

	if (task_running(rq, task) && task != current) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d is running\n", __func__, task->comm,
			 task->pid);
		goto done;
	}

	ret = klp_check_stack(task, err_buf);
	if (ret)
		goto done;

	success = true;

	clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	task->patch_state = klp_target_state;

done:
	task_rq_unlock(rq, task, &flags);

	/*
	 * Due to console deadlock issues, pr_debug() can't be used while
	 * holding the task rq lock.  Instead we have to use a temporary buffer
	 * and print the debug message after releasing the lock.
	 */
	if (err_buf[0] != '\0')
		pr_debug("%s", err_buf);

	return success;
}

/*
 * Try to switch all remaining tasks to the target patch state by walking the
 * stacks of sleeping tasks and looking for any to-be-patched or
 * to-be-unpatched functions.  If such functions are found, the task can't be
 * switched yet.
 *
 * If any tasks are still stuck in the initial patch state, schedule a retry.
 */
void klp_try_complete_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;
	bool complete = true;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	/*
	 * If the patch can be applied or reverted immediately, skip the
	 * per-task transitions.
	 */
	if (klp_transition_patch->immediate)
		goto success;

	/*
	 * Try to switch the tasks to the target patch state by walking their
	 * stacks and looking for any to-be-patched or to-be-unpatched
	 * functions.  If such functions are found on a stack, or if the stack
	 * is deemed unreliable, the task can't be switched yet.
	 *
	 * Usually this will transition most (or all) of the tasks on a system
	 * unless the patch includes changes to a very common function.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (!klp_try_switch_task(task))
			complete = false;
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	get_online_cpus();
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (cpu_online(cpu)) {
			if (!klp_try_switch_task(task))
				complete = false;
		} else if (task->patch_state != klp_target_state) {
			/* offline idle tasks can be switched immediately */
			clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
			task->patch_state = klp_target_state;
		}
	}
	put_online_cpus();

	if (!complete) {
		/*
		 * Some tasks weren't able to be switched over.  Try again
		 * later and/or wait for other methods like kernel exit
		 * switching.
		 */
		schedule_delayed_work(&klp_transition_work,
				      round_jiffies_relative(HZ));
		return;
	}

success:
	/* we're done, now cleanup the data structures */
	klp_complete_transition();
}

/*
 * Start the transition to the specified target patch state so tasks can begin
 * switching to it.
 */
void klp_start_transition(void)
{
	struct task_struct *g, *task;
	unsigned int cpu;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	pr_notice("'%s': starting %s transition\n",
		  klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * If the patch can be applied or reverted immediately, skip the
	 * per-task transitions.
	 */
	if (klp_transition_patch->immediate)
		return;

	/*
	 * Mark all normal tasks as needing a patch state update.  They'll
	 * switch either in klp_try_complete_transition() or as they exit the
	 * kernel.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	/*
	 * Mark all idle tasks as needing a patch state update.  They'll switch
	 * either in klp_try_complete_transition() or at the idle loop switch
	 * point.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	}
}

/*
 * Initialize the global target patch state and all tasks to the initial patch
 * state, and initialize all function transition states to true in preparation
 * for patching or unpatching.
 */
void klp_init_transition(struct klp_patch *patch, int state)
{
	struct task_struct *g, *task;
	unsigned int cpu;
	struct klp_object *obj;
	struct klp_func *func;
	int initial_state = !state;

	WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);

	klp_transition_patch = patch;

	/*
	 * Set the global target patch state which tasks will switch to.  This
	 * has no effect until the TIF_PATCH_PENDING flags get set later.
	 */
	klp_target_state = state;

	pr_debug("'%s': initializing %s transition\n", patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * If the patch can be applied or reverted immediately, skip the
	 * per-task transitions.
	 */
	if (patch->immediate)
		return;

	/*
	 * Initialize all tasks to the initial patch state to prepare them for
	 * switching to the target state.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}

	/*
	 * Enforce the order of the task->patch_state initializations and the
	 * func->transition updates to ensure that klp_ftrace_handler() doesn't
	 * see a func in transition with a task->patch_state of KLP_UNDEFINED.
	 *
	 * Also enforce the order of the klp_target_state write and future
	 * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() doesn't
	 * set a task->patch_state to KLP_UNDEFINED.
	 */
	smp_wmb();

	/*
	 * Set the func transition states so klp_ftrace_handler() will know to
	 * switch to the transition logic.
	 *
	 * When patching, the funcs aren't yet in the func_stack and will be
	 * made visible to the ftrace handler shortly by the calls to
	 * klp_patch_object().
	 *
	 * When unpatching, the funcs are already in the func_stack and so are
	 * already visible to the ftrace handler.
	 */
	klp_for_each_object(patch, obj)
		klp_for_each_func(obj, func)
			func->transition = true;
}

/*
 * This function can be called in the middle of an existing transition to
 * reverse the direction of the target patch state.  This can be done to
 * effectively cancel an existing enable or disable operation if there are any
 * tasks which are stuck in the initial patch state.
 */
void klp_reverse_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;

	pr_debug("'%s': reversing transition from %s\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching to unpatching" :
						   "unpatching to patching");

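	/*
	 * Reverse the direction: the patch's 'enabled' flag and the global
	 * target state are flipped together so they stay consistent.
	 */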
	klp_transition_patch->enabled = !klp_transition_patch->enabled;

	klp_target_state = !klp_target_state;

	/*
	 * Clear all TIF_PATCH_PENDING flags to prevent races caused by
	 * klp_update_patch_state() running in parallel with
	 * klp_start_transition().
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);

	/* Let any remaining calls to klp_update_patch_state() complete */
	klp_synchronize_transition();

	klp_start_transition();
}

/* Called from copy_process() during fork */
void klp_copy_process(struct task_struct *child)
{
	child->patch_state = current->patch_state;

	/* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
}