net: sched: act_nat: remove dependency on rtnl lock
author Vlad Buslov <vladbu@mellanox.com>
Mon, 3 Sep 2018 07:09:20 +0000 (10:09 +0300)
committer David S. Miller <davem@davemloft.net>
Sat, 8 Sep 2018 17:18:25 +0000 (10:18 -0700)
According to the new locking rule, we have to take tcf_lock for both
->init() and ->dump(), as the dependency on RTNL lock is being removed.

Use tcf spinlock to protect private nat action data from concurrent
modification during dump. (Nat init already takes tcf spinlock when
changing action state.)
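
For context, a minimal sketch of the init-side pattern the parenthetical
refers to (illustrative only, not part of this patch; assumes parm is the
netlink-parsed struct tc_nat, with allocation and error handling omitted):

    /* Writer side: init updates the same fields under the per-action
     * tcf_lock, so dump only needs to take the lock to read a
     * consistent snapshot of them.
     */
    spin_lock_bh(&p->tcf_lock);
    p->old_addr = parm->old_addr;
    p->new_addr = parm->new_addr;
    p->mask     = parm->mask;
    p->flags    = parm->flags;
    p->tcf_action = parm->action;
    spin_unlock_bh(&p->tcf_lock);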

Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/sched/act_nat.c

index d98f33fdffe2bd994376c7e5381146d5a8c926a8..c5c1e23add77c09f9496ca8e68e7b188a80c6f12 100644
@@ -256,28 +256,31 @@ static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_nat *p = to_tcf_nat(a);
        struct tc_nat opt = {
-               .old_addr = p->old_addr,
-               .new_addr = p->new_addr,
-               .mask     = p->mask,
-               .flags    = p->flags,
-
                .index    = p->tcf_index,
-               .action   = p->tcf_action,
                .refcnt   = refcount_read(&p->tcf_refcnt) - ref,
                .bindcnt  = atomic_read(&p->tcf_bindcnt) - bind,
        };
        struct tcf_t t;
 
+       spin_lock_bh(&p->tcf_lock);
+       opt.old_addr = p->old_addr;
+       opt.new_addr = p->new_addr;
+       opt.mask = p->mask;
+       opt.flags = p->flags;
+       opt.action = p->tcf_action;
+
        if (nla_put(skb, TCA_NAT_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;
 
        tcf_tm_dump(&t, &p->tcf_tm);
        if (nla_put_64bit(skb, TCA_NAT_TM, sizeof(t), &t, TCA_NAT_PAD))
                goto nla_put_failure;
+       spin_unlock_bh(&p->tcf_lock);
 
        return skb->len;
 
 nla_put_failure:
+       spin_unlock_bh(&p->tcf_lock);
        nlmsg_trim(skb, b);
        return -1;
 }
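
A note on the dump hunk above (an observation, not part of the commit
message): fields that are immutable after creation (index) or already read
through their own atomic/refcount accessors (refcnt, bindcnt) stay in the
struct initializer, while everything init can rewrite is copied under
tcf_lock. The _bh variant matches the other tcf_lock users, since the same
lock can also be taken from the packet-processing (softirq) path.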