rcu: Refactor kvfree_call_rcu() and high-level helpers
authorUladzislau Rezki (Sony) <urezki@gmail.com>
Tue, 25 Oct 2022 14:46:12 +0000 (16:46 +0200)
committerPaul E. McKenney <paulmck@kernel.org>
Wed, 4 Jan 2023 01:48:40 +0000 (17:48 -0800)
Currently, kvfree_call_rcu() takes an offset within a structure as
its second parameter, so a helper such as kvfree_rcu_arg_2() has to
convert the rcu_head and the freed pointer to an offset in order to
pass it. That leads to an extra conversion on macro entry.

Instead of converting, refactor the code in a way that the pointer
that has to be freed is passed directly to kvfree_call_rcu().

This patch does not make any functional change and is transparent to
all kvfree_rcu() users.

Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
include/linux/rcupdate.h
include/linux/rcutiny.h
include/linux/rcutree.h
kernel/rcu/tiny.c
kernel/rcu/tree.c

index 03abf883a281b7f83020334638a13d66902294b8..f38d4469d7f30f5a2078ac3b2f64a2a48203286d 100644 (file)
@@ -1011,8 +1011,7 @@ do {                                                                      \
                                                                        \
        if (___p) {                                                                     \
                BUILD_BUG_ON(!__is_kvfree_rcu_offset(offsetof(typeof(*(ptr)), rhf)));   \
-               kvfree_call_rcu(&((___p)->rhf), (rcu_callback_t)(unsigned long)         \
-                       (offsetof(typeof(*(ptr)), rhf)));                               \
+               kvfree_call_rcu(&((___p)->rhf), (void *) (___p));                       \
        }                                                                               \
 } while (0)
 
@@ -1021,7 +1020,7 @@ do {                                                              \
        typeof(ptr) ___p = (ptr);                               \
                                                                \
        if (___p)                                               \
-               kvfree_call_rcu(NULL, (rcu_callback_t) (___p)); \
+               kvfree_call_rcu(NULL, (void *) (___p));         \
 } while (0)
 
 /*
index 68f9070aa1110a659c6c17bc037a92c89fa21632..7f17acf29dda75db431db1b13763c5d1e86fc917 100644 (file)
@@ -98,25 +98,25 @@ static inline void synchronize_rcu_expedited(void)
  */
 extern void kvfree(const void *addr);
 
-static inline void __kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
+static inline void __kvfree_call_rcu(struct rcu_head *head, void *ptr)
 {
        if (head) {
-               call_rcu(head, func);
+               call_rcu(head, (rcu_callback_t) ((void *) head - ptr));
                return;
        }
 
        // kvfree_rcu(one_arg) call.
        might_sleep();
        synchronize_rcu();
-       kvfree((void *) func);
+       kvfree(ptr);
 }
 
 #ifdef CONFIG_KASAN_GENERIC
-void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
+void kvfree_call_rcu(struct rcu_head *head, void *ptr);
 #else
-static inline void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
+static inline void kvfree_call_rcu(struct rcu_head *head, void *ptr)
 {
-       __kvfree_call_rcu(head, func);
+       __kvfree_call_rcu(head, ptr);
 }
 #endif
 
index 4003bf6cfa1c275c6a5abe6cf25a5bda89d884f1..56bccb5a8fdea77ad20fadd1cf2a434b47c719cc 100644 (file)
@@ -33,7 +33,7 @@ static inline void rcu_virt_note_context_switch(void)
 }
 
 void synchronize_rcu_expedited(void);
-void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
+void kvfree_call_rcu(struct rcu_head *head, void *ptr);
 
 void rcu_barrier(void);
 bool rcu_eqs_special_set(int cpu);
index 72913ce21258b15e19ea75eb54359bbec8d3f6e7..42f7589e51e09e7c1e7c6537b3f14018d85c1b20 100644 (file)
@@ -246,15 +246,12 @@ bool poll_state_synchronize_rcu(unsigned long oldstate)
 EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
 
 #ifdef CONFIG_KASAN_GENERIC
-void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
+void kvfree_call_rcu(struct rcu_head *head, void *ptr)
 {
-       if (head) {
-               void *ptr = (void *) head - (unsigned long) func;
-
+       if (head)
                kasan_record_aux_stack_noalloc(ptr);
-       }
 
-       __kvfree_call_rcu(head, func);
+       __kvfree_call_rcu(head, ptr);
 }
 EXPORT_SYMBOL_GPL(kvfree_call_rcu);
 #endif
index cf34a961821ade1b05ab4053e02606227eebaf00..7d222acd85bfd5f56da9e3b6c2b52af522dd5015 100644 (file)
@@ -3103,8 +3103,8 @@ static void kfree_rcu_work(struct work_struct *work)
         * This list is named "Channel 3".
         */
        for (; head; head = next) {
-               unsigned long offset = (unsigned long)head->func;
-               void *ptr = (void *)head - offset;
+               void *ptr = (void *) head->func;
+               unsigned long offset = (void *) head - ptr;
 
                next = head->next;
                debug_rcu_head_unqueue((struct rcu_head *)ptr);
@@ -3342,26 +3342,21 @@ add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
  * be free'd in workqueue context. This allows us to: batch requests together to
  * reduce the number of grace periods during heavy kfree_rcu()/kvfree_rcu() load.
  */
-void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
+void kvfree_call_rcu(struct rcu_head *head, void *ptr)
 {
        unsigned long flags;
        struct kfree_rcu_cpu *krcp;
        bool success;
-       void *ptr;
 
-       if (head) {
-               ptr = (void *) head - (unsigned long) func;
-       } else {
-               /*
-                * Please note there is a limitation for the head-less
-                * variant, that is why there is a clear rule for such
-                * objects: it can be used from might_sleep() context
-                * only. For other places please embed an rcu_head to
-                * your data.
-                */
+       /*
+        * Please note there is a limitation for the head-less
+        * variant, that is why there is a clear rule for such
+        * objects: it can be used from might_sleep() context
+        * only. For other places please embed an rcu_head to
+        * your data.
+        */
+       if (!head)
                might_sleep();
-               ptr = (unsigned long *) func;
-       }
 
        // Queue the object but don't yet schedule the batch.
        if (debug_rcu_head_queue(ptr)) {
@@ -3382,7 +3377,7 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
                        // Inline if kvfree_rcu(one_arg) call.
                        goto unlock_return;
 
-               head->func = func;
+               head->func = ptr;
                head->next = krcp->head;
                krcp->head = head;
                success = true;