Merge branch 'for-4.15/callbacks' into for-linus
[sfrench/cifs-2.6.git] / kernel / livepatch / transition.c
index adc0cc64aa4b6ae199e5f4a5b2af0ad59c382175..56add6327736e834b9f925b7d1473e70fb1f7051 100644 (file)
@@ -48,6 +48,28 @@ static void klp_transition_work_fn(struct work_struct *work)
 }
 static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);
 
+/*
+ * This stub exists only so that schedule_on_each_cpu() can be
+ * used to hard-force a synchronize_sched(): this synchronizes
+ * tasks even when they are in userspace or idle.
+ */
+static void klp_sync(struct work_struct *work)
+{
+}
+
+/*
+ * We also allow patching functions where RCU is not watching,
+ * e.g. before user_exit(). We cannot rely on the RCU infrastructure
+ * to do the synchronization. Instead, hard-force the sched synchronization.
+ *
+ * This approach allows RCU functions to be used for manipulating func_stack
+ * safely.
+ */
+static void klp_synchronize_transition(void)
+{
+       schedule_on_each_cpu(klp_sync);
+}
+
 /*
  * The transition to the target patch state is complete.  Clean up the data
  * structures.
@@ -60,6 +82,10 @@ static void klp_complete_transition(void)
        unsigned int cpu;
        bool immediate_func = false;
 
+       pr_debug("'%s': completing %s transition\n",
+                klp_transition_patch->mod->name,
+                klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
+
        if (klp_target_state == KLP_UNPATCHED) {
                /*
                 * All tasks have transitioned to KLP_UNPATCHED so we can now
@@ -73,7 +99,7 @@ static void klp_complete_transition(void)
                 * func->transition gets cleared, the handler may choose a
                 * removed function.
                 */
-               synchronize_rcu();
+               klp_synchronize_transition();
        }
 
        if (klp_transition_patch->immediate)
@@ -87,12 +113,9 @@ static void klp_complete_transition(void)
                }
        }
 
-       if (klp_target_state == KLP_UNPATCHED && !immediate_func)
-               module_put(klp_transition_patch->mod);
-
        /* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
        if (klp_target_state == KLP_PATCHED)
-               synchronize_rcu();
+               klp_synchronize_transition();
 
        read_lock(&tasklist_lock);
        for_each_process_thread(g, task) {
@@ -108,6 +131,27 @@ static void klp_complete_transition(void)
        }
 
 done:
+       klp_for_each_object(klp_transition_patch, obj) {
+               if (!klp_is_object_loaded(obj))
+                       continue;
+               if (klp_target_state == KLP_PATCHED)
+                       klp_post_patch_callback(obj);
+               else if (klp_target_state == KLP_UNPATCHED)
+                       klp_post_unpatch_callback(obj);
+       }
+
+       pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
+                 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
+
+       /*
+        * See complementary comment in __klp_enable_patch() for why we
+        * keep the module reference for immediate patches.
+        */
+       if (!klp_transition_patch->immediate && !immediate_func &&
+           klp_target_state == KLP_UNPATCHED) {
+               module_put(klp_transition_patch->mod);
+       }
+
        klp_target_state = KLP_UNDEFINED;
        klp_transition_patch = NULL;
 }
@@ -123,6 +167,9 @@ void klp_cancel_transition(void)
        if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
                return;
 
+       pr_debug("'%s': canceling patching transition, going to unpatch\n",
+                klp_transition_patch->mod->name);
+
        klp_target_state = KLP_UNPATCHED;
        klp_complete_transition();
 }
@@ -136,7 +183,11 @@ void klp_cancel_transition(void)
  */
 void klp_update_patch_state(struct task_struct *task)
 {
-       rcu_read_lock();
+       /*
+        * A variant of synchronize_sched() is used to allow patching functions
+        * where RCU is not watching, see klp_synchronize_transition().
+        */
+       preempt_disable_notrace();
 
        /*
         * This test_and_clear_tsk_thread_flag() call also serves as a read
@@ -153,7 +204,7 @@ void klp_update_patch_state(struct task_struct *task)
        if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
                task->patch_state = READ_ONCE(klp_target_state);
 
-       rcu_read_unlock();
+       preempt_enable_notrace();
 }
 
 /*
@@ -382,9 +433,6 @@ void klp_try_complete_transition(void)
        }
 
 success:
-       pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
-                 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
-
        /* we're done, now cleanup the data structures */
        klp_complete_transition();
 }
@@ -400,7 +448,8 @@ void klp_start_transition(void)
 
        WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);
 
-       pr_notice("'%s': %s...\n", klp_transition_patch->mod->name,
+       pr_notice("'%s': starting %s transition\n",
+                 klp_transition_patch->mod->name,
                  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
 
        /*
@@ -456,6 +505,9 @@ void klp_init_transition(struct klp_patch *patch, int state)
         */
        klp_target_state = state;
 
+       pr_debug("'%s': initializing %s transition\n", patch->mod->name,
+                klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
+
        /*
         * If the patch can be applied or reverted immediately, skip the
         * per-task transitions.
@@ -521,6 +573,11 @@ void klp_reverse_transition(void)
        unsigned int cpu;
        struct task_struct *g, *task;
 
+       pr_debug("'%s': reversing transition from %s\n",
+                klp_transition_patch->mod->name,
+                klp_target_state == KLP_PATCHED ? "patching to unpatching" :
+                                                  "unpatching to patching");
+
        klp_transition_patch->enabled = !klp_transition_patch->enabled;
 
        klp_target_state = !klp_target_state;
@@ -539,7 +596,7 @@ void klp_reverse_transition(void)
                clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);
 
        /* Let any remaining calls to klp_update_patch_state() complete */
-       synchronize_rcu();
+       klp_synchronize_transition();
 
        klp_start_transition();
 }