diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 0bfa10f4410c5d80049b94126ed97dab1b19010a..d9c822bbffb8d3c8977d410e683bb8a7c578f3c4 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -19,7 +19,7 @@
 #include <linux/cpu.h>
 #include <asm/sections.h>
 
-/* mutex to protect coming/going of the the jump_label table */
+/* mutex to protect coming/going of the jump_label table */
 static DEFINE_MUTEX(jump_label_mutex);
 
 void jump_label_lock(void)
@@ -37,12 +37,26 @@ static int jump_label_cmp(const void *a, const void *b)
        const struct jump_entry *jea = a;
        const struct jump_entry *jeb = b;
 
+       /*
+        * Entries are sorted by key.
+        */
        if (jump_entry_key(jea) < jump_entry_key(jeb))
                return -1;
 
        if (jump_entry_key(jea) > jump_entry_key(jeb))
                return 1;
 
+       /*
+        * In batch mode, entries with the same key are also sorted by
+        * their code address within the already key-sorted list, so a
+        * bsearch on the vector can locate an entry by code.
+        */
+       if (jump_entry_code(jea) < jump_entry_code(jeb))
+               return -1;
+
+       if (jump_entry_code(jea) > jump_entry_code(jeb))
+               return 1;
+
        return 0;
 }
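
The ordering established here is two-level: entries are grouped by key, and within one key's run they are ordered by code address. A minimal user-space sketch (illustrative only, not kernel code) of the same comparator pattern and the bsearch() it enables:

/* User-space sketch of the two-level ordering (not kernel code). */
#include <stdlib.h>

struct entry {
	unsigned long key;	/* address of the static key */
	unsigned long code;	/* address of the jump site */
};

static int entry_cmp(const void *a, const void *b)
{
	const struct entry *ea = a, *eb = b;

	if (ea->key != eb->key)
		return ea->key < eb->key ? -1 : 1;
	if (ea->code != eb->code)
		return ea->code < eb->code ? -1 : 1;
	return 0;
}

/*
 * After qsort(entries, n, sizeof(*entries), entry_cmp), all entries of
 * one key are contiguous and sorted by code, so bsearch() with the same
 * comparator can locate an entry by (key, code) in O(log n).
 */
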
 
@@ -99,11 +113,40 @@ int static_key_count(struct static_key *key)
 }
 EXPORT_SYMBOL_GPL(static_key_count);
 
-void static_key_slow_inc_cpuslocked(struct static_key *key)
+/**
+ * static_key_fast_inc_not_disabled - adds a user for a static key
+ * @key: static key that must be already enabled
+ *
+ * The caller must make sure that the static key can't get disabled while
+ * in this function. It doesn't patch jump labels, only adds a user to
+ * an already enabled static key.
+ *
+ * Returns true if the increment was done. Unlike refcount_t, the ref
+ * counter is not saturated, but will fail to increment on overflow.
+ */
+bool static_key_fast_inc_not_disabled(struct static_key *key)
 {
-       int v, v1;
+       int v;
 
        STATIC_KEY_CHECK_USE(key);
+       /*
+        * Negative key->enabled has a special meaning: it sends
+        * static_key_slow_inc() down the slow path, and it is non-zero
+        * so it counts as "enabled" in jump_label_update().  Note that
+        * atomic_inc_unless_negative() checks >= 0, so roll our own.
+        */
+       v = atomic_read(&key->enabled);
+       do {
+               if (v <= 0 || (v + 1) < 0)
+                       return false;
+       } while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v + 1)));
+
+       return true;
+}
+EXPORT_SYMBOL_GPL(static_key_fast_inc_not_disabled);
+
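
The loop above is open-coded because atomic_inc_unless_negative() accepts v == 0 (it checks >= 0), while here 0 ("disabled") must fail, and because an unconditional increment could overflow INT_MAX into the negative "setup in progress" range. A user-space C11 analogue of the same loop, as a sketch (fast_inc_not_disabled() is illustrative):

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>

static bool fast_inc_not_disabled(atomic_int *enabled)
{
	int v = atomic_load(enabled);

	do {
		/*
		 * 0 ("disabled") and negative ("setup in progress")
		 * must both fail; so must INT_MAX, which would wrap
		 * into the negative range.  (The kernel builds with
		 * -fno-strict-overflow and tests v + 1 < 0 directly.)
		 */
		if (v <= 0 || v == INT_MAX)
			return false;
		/* on failure, v is reloaded with the current value */
	} while (!atomic_compare_exchange_weak(enabled, &v, v + 1));

	return true;
}
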
+bool static_key_slow_inc_cpuslocked(struct static_key *key)
+{
        lockdep_assert_cpus_held();
 
        /*
@@ -112,17 +155,9 @@ void static_key_slow_inc_cpuslocked(struct static_key *key)
         * jump_label_update() process.  At the same time, however,
         * the jump_label_update() call below wants to see
         * static_key_enabled(&key) for jumps to be updated properly.
-        *
-        * So give a special meaning to negative key->enabled: it sends
-        * static_key_slow_inc() down the slow path, and it is non-zero
-        * so it counts as "enabled" in jump_label_update().  Note that
-        * atomic_inc_unless_negative() checks >= 0, so roll our own.
         */
-       for (v = atomic_read(&key->enabled); v > 0; v = v1) {
-               v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
-               if (likely(v1 == v))
-                       return;
-       }
+       if (static_key_fast_inc_not_disabled(key))
+               return true;
 
        jump_label_lock();
        if (atomic_read(&key->enabled) == 0) {
@@ -134,16 +169,23 @@ void static_key_slow_inc_cpuslocked(struct static_key *key)
                 */
                atomic_set_release(&key->enabled, 1);
        } else {
-               atomic_inc(&key->enabled);
+               if (WARN_ON_ONCE(!static_key_fast_inc_not_disabled(key))) {
+                       jump_label_unlock();
+                       return false;
+               }
        }
        jump_label_unlock();
+       return true;
 }
 
-void static_key_slow_inc(struct static_key *key)
+bool static_key_slow_inc(struct static_key *key)
 {
+       bool ret;
+
        cpus_read_lock();
-       static_key_slow_inc_cpuslocked(key);
+       ret = static_key_slow_inc_cpuslocked(key);
        cpus_read_unlock();
+       return ret;
 }
 EXPORT_SYMBOL_GPL(static_key_slow_inc);
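
With the bool return, a caller can refuse to take a new reference when the counter would overflow instead of silently corrupting the count. A hypothetical caller, sketch only (my_feature_key and my_feature_add_user() are illustrative, not part of this patch):

/* Hypothetical caller; my_feature_key is not part of this patch. */
static struct static_key my_feature_key = STATIC_KEY_INIT_FALSE;

static int my_feature_add_user(void)
{
	if (!static_key_slow_inc(&my_feature_key))
		return -EBUSY;	/* counter would overflow: refuse the user */
	return 0;
}
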
 
@@ -295,38 +337,36 @@ EXPORT_SYMBOL_GPL(jump_label_rate_limit);
 static int addr_conflict(struct jump_entry *entry, void *start, void *end)
 {
        if (jump_entry_code(entry) <= (unsigned long)end &&
-           jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
+           jump_entry_code(entry) + jump_entry_size(entry) > (unsigned long)start)
                return 1;
 
        return 0;
 }
 
 static int __jump_label_text_reserved(struct jump_entry *iter_start,
-               struct jump_entry *iter_stop, void *start, void *end)
+               struct jump_entry *iter_stop, void *start, void *end, bool init)
 {
        struct jump_entry *iter;
 
        iter = iter_start;
        while (iter < iter_stop) {
-               if (addr_conflict(iter, start, end))
-                       return 1;
+               if (init || !jump_entry_is_init(iter)) {
+                       if (addr_conflict(iter, start, end))
+                               return 1;
+               }
                iter++;
        }
 
        return 0;
 }
 
-/*
- * Update code which is definitely not currently executing.
- * Architectures which need heavyweight synchronization to modify
- * running code can override this to make the non-live update case
- * cheaper.
- */
-void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
-                                           enum jump_label_type type)
+#ifndef arch_jump_label_transform_static
+static void arch_jump_label_transform_static(struct jump_entry *entry,
+                                            enum jump_label_type type)
 {
-       arch_jump_label_transform(entry, type);
+       /* nothing to do on most architectures */
 }
+#endif
 
 static inline struct jump_entry *static_key_entries(struct static_key *key)
 {
@@ -384,25 +424,65 @@ static enum jump_label_type jump_label_type(struct jump_entry *entry)
        return enabled ^ branch;
 }
 
+static bool jump_label_can_update(struct jump_entry *entry, bool init)
+{
+       /*
+        * Cannot update code that was in an init text area.
+        */
+       if (!init && jump_entry_is_init(entry))
+               return false;
+
+       if (!kernel_text_address(jump_entry_code(entry))) {
+               /*
+                * This skips patching built-in __exit code, which
+                * init_section_contains() covers but
+                * kernel_text_address() does not.
+                *
+                * Skipping built-in __exit is fine since it
+                * will never be executed.
+                */
+               WARN_ONCE(!jump_entry_is_init(entry),
+                         "can't patch jump_label at %pS",
+                         (void *)jump_entry_code(entry));
+               return false;
+       }
+
+       return true;
+}
+
+#ifndef HAVE_JUMP_LABEL_BATCH
 static void __jump_label_update(struct static_key *key,
                                struct jump_entry *entry,
                                struct jump_entry *stop,
                                bool init)
 {
        for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
-               /*
-                * An entry->code of 0 indicates an entry which has been
-                * disabled because it was in an init text area.
-                */
-               if (init || !jump_entry_is_init(entry)) {
-                       if (kernel_text_address(jump_entry_code(entry)))
-                               arch_jump_label_transform(entry, jump_label_type(entry));
-                       else
-                               WARN_ONCE(1, "can't patch jump_label at %pS",
-                                         (void *)jump_entry_code(entry));
+               if (jump_label_can_update(entry, init))
+                       arch_jump_label_transform(entry, jump_label_type(entry));
+       }
+}
+#else
+static void __jump_label_update(struct static_key *key,
+                               struct jump_entry *entry,
+                               struct jump_entry *stop,
+                               bool init)
+{
+       for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
+
+               if (!jump_label_can_update(entry, init))
+                       continue;
+
+               if (!arch_jump_label_transform_queue(entry, jump_label_type(entry))) {
+                       /*
+                        * Queue is full: Apply the current queue and try again.
+                        */
+                       arch_jump_label_transform_apply();
+                       BUG_ON(!arch_jump_label_transform_queue(entry, jump_label_type(entry)));
                }
        }
+       arch_jump_label_transform_apply();
 }
+#endif
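
With HAVE_JUMP_LABEL_BATCH, an architecture pays the expensive synchronization once per batch instead of once per patched site. A schematic, hypothetical arch-side skeleton of the queue/apply contract (x86's real implementation batches through its text-poking machinery):

/* Hypothetical arch-side skeleton; sizes and names are illustrative. */
struct queued_patch {
	struct jump_entry *entry;
	enum jump_label_type type;
};

#define PATCH_VEC_MAX	32		/* illustrative batch size */
static struct queued_patch patch_vec[PATCH_VEC_MAX];
static int patch_vec_nr;

bool arch_jump_label_transform_queue(struct jump_entry *entry,
				     enum jump_label_type type)
{
	if (patch_vec_nr == PATCH_VEC_MAX)
		return false;	/* full: core applies the queue, then retries */

	patch_vec[patch_vec_nr].entry = entry;
	patch_vec[patch_vec_nr].type  = type;
	patch_vec_nr++;
	return true;
}

void arch_jump_label_transform_apply(void)
{
	int i;

	for (i = 0; i < patch_vec_nr; i++)
		arch_jump_label_transform(patch_vec[i].entry,
					  patch_vec[i].type);
	/*
	 * A real arch would instead emit all queued patches under a
	 * single serializing synchronization (e.g. one IPI round).
	 */
	patch_vec_nr = 0;
}
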
 
 void __init jump_label_init(void)
 {
@@ -429,13 +509,14 @@ void __init jump_label_init(void)
 
        for (iter = iter_start; iter < iter_stop; iter++) {
                struct static_key *iterk;
+               bool in_init;
 
                /* rewrite NOPs */
                if (jump_label_type(iter) == JUMP_LABEL_NOP)
                        arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
 
-               if (init_section_contains((void *)jump_entry_code(iter), 1))
-                       jump_entry_set_init(iter);
+               in_init = init_section_contains((void *)jump_entry_code(iter), 1);
+               jump_entry_set_init(iter, in_init);
 
                iterk = jump_entry_key(iter);
                if (iterk == key)
@@ -451,7 +532,7 @@ void __init jump_label_init(void)
 
 #ifdef CONFIG_MODULES
 
-static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
+enum jump_label_type jump_label_init_type(struct jump_entry *entry)
 {
        struct static_key *key = jump_entry_key(entry);
        bool type = static_key_type(key);
@@ -493,19 +574,25 @@ static void static_key_set_mod(struct static_key *key,
 static int __jump_label_mod_text_reserved(void *start, void *end)
 {
        struct module *mod;
+       int ret;
 
        preempt_disable();
        mod = __module_text_address((unsigned long)start);
        WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
+       if (!try_module_get(mod))
+               mod = NULL;
        preempt_enable();
 
        if (!mod)
                return 0;
 
-
-       return __jump_label_text_reserved(mod->jump_entries,
+       ret = __jump_label_text_reserved(mod->jump_entries,
                                mod->jump_entries + mod->num_jump_entries,
-                               start, end);
+                               start, end, mod->state == MODULE_STATE_COMING);
+
+       module_put(mod);
+
+       return ret;
 }
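
The preempt_disable() window only makes the __module_text_address() lookup itself safe; it does not keep the module alive once preemption is re-enabled, so the module must be pinned with a reference before its entries are walked. The same pattern, distilled into a sketch (pin_module_by_text_addr() is a hypothetical helper):

/* Hypothetical helper showing the pin-then-walk pattern used above. */
static struct module *pin_module_by_text_addr(unsigned long addr)
{
	struct module *mod;

	preempt_disable();
	mod = __module_text_address(addr);
	if (!try_module_get(mod))	/* tolerates NULL and a dying module */
		mod = NULL;
	preempt_enable();

	return mod;	/* caller does module_put() when done */
}
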
 
 static void __jump_label_mod_update(struct static_key *key)
@@ -533,31 +620,6 @@ static void __jump_label_mod_update(struct static_key *key)
        }
 }
 
-/***
- * apply_jump_label_nops - patch module jump labels with arch_get_jump_label_nop()
- * @mod: module to patch
- *
- * Allow for run-time selection of the optimal nops. Before the module
- * loads patch these with arch_get_jump_label_nop(), which is specified by
- * the arch specific jump label code.
- */
-void jump_label_apply_nops(struct module *mod)
-{
-       struct jump_entry *iter_start = mod->jump_entries;
-       struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
-       struct jump_entry *iter;
-
-       /* if the module doesn't have jump label entries, just return */
-       if (iter_start == iter_stop)
-               return;
-
-       for (iter = iter_start; iter < iter_stop; iter++) {
-               /* Only write NOPs for arch_branch_static(). */
-               if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
-                       arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
-       }
-}
-
 static int jump_label_add_module(struct module *mod)
 {
        struct jump_entry *iter_start = mod->jump_entries;
@@ -574,9 +636,10 @@ static int jump_label_add_module(struct module *mod)
 
        for (iter = iter_start; iter < iter_stop; iter++) {
                struct static_key *iterk;
+               bool in_init;
 
-               if (within_module_init(jump_entry_code(iter), mod))
-                       jump_entry_set_init(iter);
+               in_init = within_module_init(jump_entry_code(iter), mod);
+               jump_entry_set_init(iter, in_init);
 
                iterk = jump_entry_key(iter);
                if (iterk == key)
@@ -726,8 +789,9 @@ early_initcall(jump_label_init_module);
  */
 int jump_label_text_reserved(void *start, void *end)
 {
+       bool init = system_state < SYSTEM_RUNNING;
        int ret = __jump_label_text_reserved(__start___jump_table,
-                       __stop___jump_table, start, end);
+                       __stop___jump_table, start, end, init);
 
        if (ret)
                return ret;
@@ -741,6 +805,7 @@ int jump_label_text_reserved(void *start, void *end)
 static void jump_label_update(struct static_key *key)
 {
        struct jump_entry *stop = __stop___jump_table;
+       bool init = system_state < SYSTEM_RUNNING;
        struct jump_entry *entry;
 #ifdef CONFIG_MODULES
        struct module *mod;
@@ -752,15 +817,16 @@ static void jump_label_update(struct static_key *key)
 
        preempt_disable();
        mod = __module_address((unsigned long)key);
-       if (mod)
+       if (mod) {
                stop = mod->jump_entries + mod->num_jump_entries;
+               init = mod->state == MODULE_STATE_COMING;
+       }
        preempt_enable();
 #endif
        entry = static_key_entries(key);
        /* if there are no users, entry can be NULL */
        if (entry)
-               __jump_label_update(key, entry, stop,
-                                   system_state < SYSTEM_RUNNING);
+               __jump_label_update(key, entry, stop, init);
 }
 
 #ifdef CONFIG_STATIC_KEYS_SELFTEST