vmbus: use rcu for per-cpu channel list
author Stephen Hemminger <stephen@networkplumber.org>
Sun, 5 Mar 2017 01:13:57 +0000 (18:13 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 16 Mar 2017 07:42:00 +0000 (16:42 +0900)
The per-cpu channel list is now referenced in the interrupt
routine. This is mostly safe since the host will not normally
generate an interrupt while a channel is being deleted, but if it
did there would be a use-after-free problem.

To solve this, use RCU protection on the per-cpu list.
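
For illustration, a minimal sketch of the pattern this change adopts,
using hypothetical names (struct item, item_process, etc.) rather than
the driver's own: readers walk the list under rcu_read_lock(), writers
unlink with list_del_rcu() and defer the free with kfree_rcu() until a
grace period has elapsed.

  #include <linux/types.h>
  #include <linux/rculist.h>
  #include <linux/rcupdate.h>
  #include <linux/slab.h>

  struct item {
          u32 relid;
          struct list_head node;
          struct rcu_head rcu;    /* lets kfree_rcu() defer the free */
  };

  static LIST_HEAD(item_list);

  /* Reader (e.g. interrupt/tasklet context): no lock, only rcu_read_lock(). */
  static void item_process(u32 relid)
  {
          struct item *it;

          rcu_read_lock();
          list_for_each_entry_rcu(it, &item_list, node) {
                  if (it->relid != relid)
                          continue;
                  /* use 'it' only inside the read-side critical section */
                  break;
          }
          rcu_read_unlock();
  }

  /* Writers are assumed to be serialized by the caller. */
  static void item_add(struct item *it)
  {
          list_add_tail_rcu(&it->node, &item_list);
  }

  static void item_remove(struct item *it)
  {
          list_del_rcu(&it->node);
          kfree_rcu(it, rcu);     /* freed only after all readers finish */
  }

The driver's per-cpu lists follow the same shape below:
percpu_channel_enq()/percpu_channel_deq() are the writers,
vmbus_chan_sched() is the reader, and free_channel() defers the kfree()
via the rcu_head added to struct vmbus_channel.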

Fixes: 631e63a9f346 ("vmbus: change to per channel tasklet")
Signed-off-by: Stephen Hemminger <sthemmin@microsoft.com>
Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/hv/channel_mgmt.c
drivers/hv/vmbus_drv.c
include/linux/hyperv.h

index f33465d78a025680d7515978fdbcc432d51a8062..d2cfa3eb71a24e30627255400a6fdb165db2a38e 100644 (file)
@@ -350,7 +350,8 @@ static struct vmbus_channel *alloc_channel(void)
 static void free_channel(struct vmbus_channel *channel)
 {
        tasklet_kill(&channel->callback_event);
-       kfree(channel);
+
+       kfree_rcu(channel, rcu);
 }
 
 static void percpu_channel_enq(void *arg)
@@ -359,14 +360,14 @@ static void percpu_channel_enq(void *arg)
        struct hv_per_cpu_context *hv_cpu
                = this_cpu_ptr(hv_context.cpu_context);
 
-       list_add_tail(&channel->percpu_list, &hv_cpu->chan_list);
+       list_add_tail_rcu(&channel->percpu_list, &hv_cpu->chan_list);
 }
 
 static void percpu_channel_deq(void *arg)
 {
        struct vmbus_channel *channel = arg;
 
-       list_del(&channel->percpu_list);
+       list_del_rcu(&channel->percpu_list);
 }
 
 
index da6b59ba594039e6d490e8cc9903f41f99032900..8370b9dc6037c17959abc3d9c45c9b233bd0004f 100644 (file)
@@ -939,8 +939,10 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
                if (relid == 0)
                        continue;
 
+               rcu_read_lock();
+
                /* Find channel based on relid */
-               list_for_each_entry(channel, &hv_cpu->chan_list, percpu_list) {
+               list_for_each_entry_rcu(channel, &hv_cpu->chan_list, percpu_list) {
                        if (channel->offermsg.child_relid != relid)
                                continue;
 
@@ -956,6 +958,8 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
                                tasklet_schedule(&channel->callback_event);
                        }
                }
+
+               rcu_read_unlock();
        }
 }
 
index 62bbf3c1aa4a04409fac1a001b03a87cf0162fd1..c4c7ae91f9d1df12187178842de1085a04ddd05d 100644 (file)
@@ -845,6 +845,13 @@ struct vmbus_channel {
         * link up channels based on their CPU affinity.
         */
        struct list_head percpu_list;
+
+       /*
+        * Defer freeing channel until after all cpu's have
+        * gone through grace period.
+        */
+       struct rcu_head rcu;
+
        /*
         * For performance critical channels (storage, networking
         * etc,), Hyper-V has a mechanism to enhance the throughput