/*
 * net/sched/cls_api.c  Packet classifier API.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>

extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
        const struct tcf_proto_ops *t, *res = NULL;

        if (kind) {
                read_lock(&cls_mod_lock);
                list_for_each_entry(t, &tcf_proto_base, head) {
                        if (strcmp(kind, t->kind) == 0) {
                                if (try_module_get(t->owner))
                                        res = t;
                                break;
                        }
                }
                read_unlock(&cls_mod_lock);
        }
        return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
                     struct netlink_ext_ack *extack)
{
        const struct tcf_proto_ops *ops;

        ops = __tcf_proto_lookup_ops(kind);
        if (ops)
                return ops;
#ifdef CONFIG_MODULES
        if (rtnl_held)
                rtnl_unlock();
        request_module("cls_%s", kind);
        if (rtnl_held)
                rtnl_lock();
        ops = __tcf_proto_lookup_ops(kind);
        /* We may have dropped the RTNL semaphore in order to perform
         * the module load. So, even if we succeeded in loading
         * the module, we have to replay the request. We indicate
         * this using -EAGAIN.
         */
        if (ops) {
                module_put(ops->owner);
                return ERR_PTR(-EAGAIN);
        }
#endif
        NL_SET_ERR_MSG(extack, "TC classifier not found");
        return ERR_PTR(-ENOENT);
}
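
/*
 * Illustrative sketch (not from this file): callers propagate the
 * -EAGAIN above so that the whole request gets replayed once the
 * module has been loaded, roughly:
 *
 *      ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
 *      if (IS_ERR(ops) && PTR_ERR(ops) == -EAGAIN)
 *              goto replay;
 *
 * where "replay" is a hypothetical label that restarts request
 * processing from scratch.
 */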

/* Register (unregister) a new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
        struct tcf_proto_ops *t;
        int rc = -EEXIST;

        write_lock(&cls_mod_lock);
        list_for_each_entry(t, &tcf_proto_base, head)
                if (!strcmp(ops->kind, t->kind))
                        goto out;

        list_add_tail(&ops->head, &tcf_proto_base);
        rc = 0;
out:
        write_unlock(&cls_mod_lock);
        return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

static struct workqueue_struct *tc_filter_wq;

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
        struct tcf_proto_ops *t;
        int rc = -ENOENT;

        /* Wait for outstanding call_rcu()s, if any, from a
         * tcf_proto_ops's destroy() handler.
         */
        rcu_barrier();
        flush_workqueue(tc_filter_wq);

        write_lock(&cls_mod_lock);
        list_for_each_entry(t, &tcf_proto_base, head) {
                if (t == ops) {
                        list_del(&t->head);
                        rc = 0;
                        break;
                }
        }
        write_unlock(&cls_mod_lock);
        return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);
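
/*
 * Example usage (illustrative sketch, not from this file): a
 * classifier module registers its ops from module_init() and removes
 * them from module_exit(). "example_cls_ops" and its callbacks are
 * hypothetical.
 *
 *      static struct tcf_proto_ops example_cls_ops __read_mostly = {
 *              .kind           = "example",
 *              .owner          = THIS_MODULE,
 *              .classify       = example_classify,
 *              .init           = example_init,
 *              .destroy        = example_destroy,
 *      };
 *
 *      static int __init example_module_init(void)
 *      {
 *              return register_tcf_proto_ops(&example_cls_ops);
 *      }
 *
 *      static void __exit example_module_exit(void)
 *      {
 *              unregister_tcf_proto_ops(&example_cls_ops);
 *      }
 */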

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
        INIT_RCU_WORK(rwork, func);
        return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);
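
/*
 * Example usage (illustrative sketch): classifiers typically defer
 * filter freeing past an RCU grace period by embedding a struct
 * rcu_work in the filter and queueing it here. "example_filter" and
 * "example_destroy_work" are hypothetical.
 *
 *      static void example_destroy_work(struct work_struct *work)
 *      {
 *              struct example_filter *f = container_of(to_rcu_work(work),
 *                                                      struct example_filter,
 *                                                      rwork);
 *              kfree(f);
 *      }
 *      ...
 *      tcf_queue_work(&f->rwork, example_destroy_work);
 */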

/* Select a new prio value from the range managed by the kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
        u32 first = TC_H_MAKE(0xC0000000U, 0U);

        if (tp)
                first = tp->prio - 1;

        return TC_H_MAJ(first);
}
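
/*
 * Worked example (illustrative): with no existing filter (tp == NULL),
 * the auto-selected prio is TC_H_MAJ(0xC0000000) == 0xC0000000, i.e.
 * user-visible priority 0xC000. If the current head has prio
 * 0x00020000 (user priority 2), then tp->prio - 1 == 0x0001ffff and
 * TC_H_MAJ() of that yields 0x00010000, i.e. user priority 1.
 */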

static bool tcf_proto_is_unlocked(const char *kind)
{
        const struct tcf_proto_ops *ops;
        bool ret;

        ops = tcf_proto_lookup_ops(kind, false, NULL);
        /* On error return false to take the rtnl lock. Proto lookup/create
         * functions will perform the lookup again and properly handle errors.
         */
        if (IS_ERR(ops))
                return false;

        ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
        module_put(ops->owner);
        return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
                                          u32 prio, struct tcf_chain *chain,
                                          bool rtnl_held,
                                          struct netlink_ext_ack *extack)
{
        struct tcf_proto *tp;
        int err;

        tp = kzalloc(sizeof(*tp), GFP_KERNEL);
        if (!tp)
                return ERR_PTR(-ENOBUFS);

        tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
        if (IS_ERR(tp->ops)) {
                err = PTR_ERR(tp->ops);
                goto errout;
        }
        tp->classify = tp->ops->classify;
        tp->protocol = protocol;
        tp->prio = prio;
        tp->chain = chain;
        spin_lock_init(&tp->lock);
        refcount_set(&tp->refcnt, 1);

        err = tp->ops->init(tp);
        if (err) {
                module_put(tp->ops->owner);
                goto errout;
        }
        return tp;

errout:
        kfree(tp);
        return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
        refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
                              struct netlink_ext_ack *extack)
{
        tp->ops->destroy(tp, rtnl_held, extack);
        tcf_chain_put(tp->chain);
        module_put(tp->ops->owner);
        kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
                          struct netlink_ext_ack *extack)
{
        if (refcount_dec_and_test(&tp->refcnt))
                tcf_proto_destroy(tp, rtnl_held, extack);
}

static int walker_check_empty(struct tcf_proto *tp, void *fh,
                              struct tcf_walker *arg)
{
        if (fh) {
                arg->nonempty = true;
                return -1;
        }
        return 0;
}

static bool tcf_proto_is_empty(struct tcf_proto *tp, bool rtnl_held)
{
        struct tcf_walker walker = { .fn = walker_check_empty, };

        if (tp->ops->walk) {
                tp->ops->walk(tp, &walker, rtnl_held);
                return !walker.nonempty;
        }
        return true;
}

static bool tcf_proto_check_delete(struct tcf_proto *tp, bool rtnl_held)
{
        spin_lock(&tp->lock);
        if (tcf_proto_is_empty(tp, rtnl_held))
                tp->deleting = true;
        spin_unlock(&tp->lock);
        return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
        spin_lock(&tp->lock);
        tp->deleting = true;
        spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
        bool deleting;

        spin_lock(&tp->lock);
        deleting = tp->deleting;
        spin_unlock(&tp->lock);

        return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)                                      \
        lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
        struct list_head list;
        tcf_chain_head_change_t *chain_head_change;
        void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
                                          u32 chain_index)
{
        struct tcf_chain *chain;

        ASSERT_BLOCK_LOCKED(block);

        chain = kzalloc(sizeof(*chain), GFP_KERNEL);
        if (!chain)
                return NULL;
        list_add_tail(&chain->list, &block->chain_list);
        mutex_init(&chain->filter_chain_lock);
        chain->block = block;
        chain->index = chain_index;
        chain->refcnt = 1;
        if (!chain->index)
                block->chain0.chain = chain;
        return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
                                       struct tcf_proto *tp_head)
{
        if (item->chain_head_change)
                item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
                                   struct tcf_proto *tp_head)
{
        struct tcf_filter_chain_list_item *item;
        struct tcf_block *block = chain->block;

        if (chain->index)
                return;

        mutex_lock(&block->lock);
        list_for_each_entry(item, &block->chain0.filter_chain_list, list)
                tcf_chain_head_change_item(item, tp_head);
        mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
        struct tcf_block *block = chain->block;

        ASSERT_BLOCK_LOCKED(block);

        list_del(&chain->list);
        if (!chain->index)
                block->chain0.chain = NULL;

        if (list_empty(&block->chain_list) &&
            refcount_read(&block->refcnt) == 0)
                return true;

        return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
        mutex_destroy(&block->lock);
        kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
        struct tcf_block *block = chain->block;

        mutex_destroy(&chain->filter_chain_lock);
        kfree_rcu(chain, rcu);
        if (free_block)
                tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
        ASSERT_BLOCK_LOCKED(chain->block);

        ++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
        ASSERT_BLOCK_LOCKED(chain->block);

        /* In case all the references are action references, this
         * chain should not be shown to the user.
         */
        return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
                                          u32 chain_index)
{
        struct tcf_chain *chain;

        ASSERT_BLOCK_LOCKED(block);

        list_for_each_entry(chain, &block->chain_list, list) {
                if (chain->index == chain_index)
                        return chain;
        }
        return NULL;
}

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
                           u32 seq, u16 flags, int event, bool unicast);

static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
                                         u32 chain_index, bool create,
                                         bool by_act)
{
        struct tcf_chain *chain = NULL;
        bool is_first_reference;

        mutex_lock(&block->lock);
        chain = tcf_chain_lookup(block, chain_index);
        if (chain) {
                tcf_chain_hold(chain);
        } else {
                if (!create)
                        goto errout;
                chain = tcf_chain_create(block, chain_index);
                if (!chain)
                        goto errout;
        }

        if (by_act)
                ++chain->action_refcnt;
        is_first_reference = chain->refcnt - chain->action_refcnt == 1;
        mutex_unlock(&block->lock);

        /* Send a notification only in case we got the first
         * non-action reference. Until then, the chain acts only as
         * a placeholder for actions pointing to it and the user ought
         * not to know about them.
         */
        if (is_first_reference && !by_act)
                tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
                                RTM_NEWCHAIN, false);

        return chain;

errout:
        mutex_unlock(&block->lock);
        return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
                                       bool create)
{
        return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
        return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
                               void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
                                  void *tmplt_priv, u32 chain_index,
                                  struct tcf_block *block, struct sk_buff *oskb,
                                  u32 seq, u16 flags, bool unicast);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
                            bool explicitly_created)
{
        struct tcf_block *block = chain->block;
        const struct tcf_proto_ops *tmplt_ops;
        bool free_block = false;
        unsigned int refcnt;
        void *tmplt_priv;

        mutex_lock(&block->lock);
        if (explicitly_created) {
                if (!chain->explicitly_created) {
                        mutex_unlock(&block->lock);
                        return;
                }
                chain->explicitly_created = false;
        }

        if (by_act)
                chain->action_refcnt--;

        /* tc_chain_notify_delete can't be called while holding block lock.
         * However, when block is unlocked chain can be changed concurrently, so
         * save these to temporary variables.
         */
        refcnt = --chain->refcnt;
        tmplt_ops = chain->tmplt_ops;
        tmplt_priv = chain->tmplt_priv;

        /* The last dropped non-action reference will trigger notification. */
        if (refcnt - chain->action_refcnt == 0 && !by_act) {
                tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
                                       block, NULL, 0, 0, false);
                /* Last reference to chain, no need to lock. */
                chain->flushing = false;
        }

        if (refcnt == 0)
                free_block = tcf_chain_detach(chain);
        mutex_unlock(&block->lock);

        if (refcnt == 0) {
                tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
                tcf_chain_destroy(chain, free_block);
        }
}

static void tcf_chain_put(struct tcf_chain *chain)
{
        __tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
        __tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);
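
/*
 * Example usage (illustrative sketch): an action that jumps to a chain
 * (e.g. "goto chain") holds a long-lived reference on its target chain
 * and drops it when the action is freed. Roughly:
 *
 *      chain = tcf_chain_get_by_act(block, chain_index);
 *      if (!chain)
 *              return -ENOMEM;
 *      ...
 *      tcf_chain_put_by_act(chain);
 */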

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
        __tcf_chain_put(chain, false, true);
}

static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
        struct tcf_proto *tp, *tp_next;

        mutex_lock(&chain->filter_chain_lock);
        tp = tcf_chain_dereference(chain->filter_chain, chain);
        RCU_INIT_POINTER(chain->filter_chain, NULL);
        tcf_chain0_head_change(chain, NULL);
        chain->flushing = true;
        mutex_unlock(&chain->filter_chain_lock);

        while (tp) {
                tp_next = rcu_dereference_protected(tp->next, 1);
                tcf_proto_put(tp, rtnl_held, NULL);
                tp = tp_next;
        }
}

static struct tcf_block *tc_dev_ingress_block(struct net_device *dev)
{
        const struct Qdisc_class_ops *cops;
        struct Qdisc *qdisc;

        if (!dev_ingress_queue(dev))
                return NULL;

        qdisc = dev_ingress_queue(dev)->qdisc_sleeping;
        if (!qdisc)
                return NULL;

        cops = qdisc->ops->cl_ops;
        if (!cops)
                return NULL;

        if (!cops->tcf_block)
                return NULL;

        return cops->tcf_block(qdisc, TC_H_MIN_INGRESS, NULL);
}

static struct rhashtable indr_setup_block_ht;

struct tc_indr_block_dev {
        struct rhash_head ht_node;
        struct net_device *dev;
        unsigned int refcnt;
        struct list_head cb_list;
        struct tcf_block *block;
};

struct tc_indr_block_cb {
        struct list_head list;
        void *cb_priv;
        tc_indr_block_bind_cb_t *cb;
        void *cb_ident;
};

static const struct rhashtable_params tc_indr_setup_block_ht_params = {
        .key_offset     = offsetof(struct tc_indr_block_dev, dev),
        .head_offset    = offsetof(struct tc_indr_block_dev, ht_node),
        .key_len        = sizeof(struct net_device *),
};

static struct tc_indr_block_dev *
tc_indr_block_dev_lookup(struct net_device *dev)
{
        return rhashtable_lookup_fast(&indr_setup_block_ht, &dev,
                                      tc_indr_setup_block_ht_params);
}

static struct tc_indr_block_dev *tc_indr_block_dev_get(struct net_device *dev)
{
        struct tc_indr_block_dev *indr_dev;

        indr_dev = tc_indr_block_dev_lookup(dev);
        if (indr_dev)
                goto inc_ref;

        indr_dev = kzalloc(sizeof(*indr_dev), GFP_KERNEL);
        if (!indr_dev)
                return NULL;

        INIT_LIST_HEAD(&indr_dev->cb_list);
        indr_dev->dev = dev;
        indr_dev->block = tc_dev_ingress_block(dev);
        if (rhashtable_insert_fast(&indr_setup_block_ht, &indr_dev->ht_node,
                                   tc_indr_setup_block_ht_params)) {
                kfree(indr_dev);
                return NULL;
        }

inc_ref:
        indr_dev->refcnt++;
        return indr_dev;
}

static void tc_indr_block_dev_put(struct tc_indr_block_dev *indr_dev)
{
        if (--indr_dev->refcnt)
                return;

        rhashtable_remove_fast(&indr_setup_block_ht, &indr_dev->ht_node,
                               tc_indr_setup_block_ht_params);
        kfree(indr_dev);
}

static struct tc_indr_block_cb *
tc_indr_block_cb_lookup(struct tc_indr_block_dev *indr_dev,
                        tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
        struct tc_indr_block_cb *indr_block_cb;

        list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
                if (indr_block_cb->cb == cb &&
                    indr_block_cb->cb_ident == cb_ident)
                        return indr_block_cb;
        return NULL;
}

static struct tc_indr_block_cb *
tc_indr_block_cb_add(struct tc_indr_block_dev *indr_dev, void *cb_priv,
                     tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
        struct tc_indr_block_cb *indr_block_cb;

        indr_block_cb = tc_indr_block_cb_lookup(indr_dev, cb, cb_ident);
        if (indr_block_cb)
                return ERR_PTR(-EEXIST);

        indr_block_cb = kzalloc(sizeof(*indr_block_cb), GFP_KERNEL);
        if (!indr_block_cb)
                return ERR_PTR(-ENOMEM);

        indr_block_cb->cb_priv = cb_priv;
        indr_block_cb->cb = cb;
        indr_block_cb->cb_ident = cb_ident;
        list_add(&indr_block_cb->list, &indr_dev->cb_list);

        return indr_block_cb;
}

static void tc_indr_block_cb_del(struct tc_indr_block_cb *indr_block_cb)
{
        list_del(&indr_block_cb->list);
        kfree(indr_block_cb);
}

static void tc_indr_block_ing_cmd(struct tc_indr_block_dev *indr_dev,
                                  struct tc_indr_block_cb *indr_block_cb,
                                  enum tc_block_command command)
{
        struct tc_block_offload bo = {
                .command        = command,
                .binder_type    = TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
                .block          = indr_dev->block,
        };

        if (!indr_dev->block)
                return;

        indr_block_cb->cb(indr_dev->dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
                          &bo);
}

int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
                                tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
        struct tc_indr_block_cb *indr_block_cb;
        struct tc_indr_block_dev *indr_dev;
        int err;

        indr_dev = tc_indr_block_dev_get(dev);
        if (!indr_dev)
                return -ENOMEM;

        indr_block_cb = tc_indr_block_cb_add(indr_dev, cb_priv, cb, cb_ident);
        err = PTR_ERR_OR_ZERO(indr_block_cb);
        if (err)
                goto err_dev_put;

        tc_indr_block_ing_cmd(indr_dev, indr_block_cb, TC_BLOCK_BIND);
        return 0;

err_dev_put:
        tc_indr_block_dev_put(indr_dev);
        return err;
}
EXPORT_SYMBOL_GPL(__tc_indr_block_cb_register);

int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
                              tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
        int err;

        rtnl_lock();
        err = __tc_indr_block_cb_register(dev, cb_priv, cb, cb_ident);
        rtnl_unlock();

        return err;
}
EXPORT_SYMBOL_GPL(tc_indr_block_cb_register);

void __tc_indr_block_cb_unregister(struct net_device *dev,
                                   tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
        struct tc_indr_block_cb *indr_block_cb;
        struct tc_indr_block_dev *indr_dev;

        indr_dev = tc_indr_block_dev_lookup(dev);
        if (!indr_dev)
                return;

        indr_block_cb = tc_indr_block_cb_lookup(indr_dev, cb, cb_ident);
        if (!indr_block_cb)
                return;

        /* Send unbind message if required to free any block cbs. */
        tc_indr_block_ing_cmd(indr_dev, indr_block_cb, TC_BLOCK_UNBIND);
        tc_indr_block_cb_del(indr_block_cb);
        tc_indr_block_dev_put(indr_dev);
}
EXPORT_SYMBOL_GPL(__tc_indr_block_cb_unregister);

void tc_indr_block_cb_unregister(struct net_device *dev,
                                 tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
        rtnl_lock();
        __tc_indr_block_cb_unregister(dev, cb, cb_ident);
        rtnl_unlock();
}
EXPORT_SYMBOL_GPL(tc_indr_block_cb_unregister);
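
/*
 * Example usage (illustrative sketch): a driver that can offload
 * filters installed on a foreign device (e.g. a tunnel netdev it can
 * decapsulate for) may register an indirect block callback for that
 * device. "example_indr_setup_tc_cb", "priv" and "tunnel_dev" are
 * hypothetical.
 *
 *      err = tc_indr_block_cb_register(tunnel_dev, priv,
 *                                      example_indr_setup_tc_cb, priv);
 *      if (err)
 *              return err;
 *      ...
 *      tc_indr_block_cb_unregister(tunnel_dev,
 *                                  example_indr_setup_tc_cb, priv);
 */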

static void tc_indr_block_call(struct tcf_block *block, struct net_device *dev,
                               struct tcf_block_ext_info *ei,
                               enum tc_block_command command,
                               struct netlink_ext_ack *extack)
{
        struct tc_indr_block_cb *indr_block_cb;
        struct tc_indr_block_dev *indr_dev;
        struct tc_block_offload bo = {
                .command        = command,
                .binder_type    = ei->binder_type,
                .block          = block,
                .extack         = extack,
        };

        indr_dev = tc_indr_block_dev_lookup(dev);
        if (!indr_dev)
                return;

        indr_dev->block = command == TC_BLOCK_BIND ? block : NULL;

        list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
                indr_block_cb->cb(dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
                                  &bo);
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
        return block->offloadcnt;
}

static int tcf_block_offload_cmd(struct tcf_block *block,
                                 struct net_device *dev,
                                 struct tcf_block_ext_info *ei,
                                 enum tc_block_command command,
                                 struct netlink_ext_ack *extack)
{
        struct tc_block_offload bo = {};

        bo.command = command;
        bo.binder_type = ei->binder_type;
        bo.block = block;
        bo.extack = extack;
        return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
}

static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
                                  struct tcf_block_ext_info *ei,
                                  struct netlink_ext_ack *extack)
{
        struct net_device *dev = q->dev_queue->dev;
        int err;

        if (!dev->netdev_ops->ndo_setup_tc)
                goto no_offload_dev_inc;

        /* If the tc offload feature is disabled and the block we try to bind
         * to already has some offloaded filters, forbid the bind.
         */
        if (!tc_can_offload(dev) && tcf_block_offload_in_use(block)) {
                NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
                return -EOPNOTSUPP;
        }

        err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_BIND, extack);
        if (err == -EOPNOTSUPP)
                goto no_offload_dev_inc;
        if (err)
                return err;

        tc_indr_block_call(block, dev, ei, TC_BLOCK_BIND, extack);
        return 0;

no_offload_dev_inc:
        if (tcf_block_offload_in_use(block))
                return -EOPNOTSUPP;
        block->nooffloaddevcnt++;
        tc_indr_block_call(block, dev, ei, TC_BLOCK_BIND, extack);
        return 0;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
                                     struct tcf_block_ext_info *ei)
{
        struct net_device *dev = q->dev_queue->dev;
        int err;

        tc_indr_block_call(block, dev, ei, TC_BLOCK_UNBIND, NULL);

        if (!dev->netdev_ops->ndo_setup_tc)
                goto no_offload_dev_dec;
        err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_UNBIND, NULL);
        if (err == -EOPNOTSUPP)
                goto no_offload_dev_dec;
        return;

no_offload_dev_dec:
        WARN_ON(block->nooffloaddevcnt-- == 0);
}

static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
                              struct tcf_block_ext_info *ei,
                              struct netlink_ext_ack *extack)
{
        struct tcf_filter_chain_list_item *item;
        struct tcf_chain *chain0;

        item = kmalloc(sizeof(*item), GFP_KERNEL);
        if (!item) {
                NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
                return -ENOMEM;
        }
        item->chain_head_change = ei->chain_head_change;
        item->chain_head_change_priv = ei->chain_head_change_priv;

        mutex_lock(&block->lock);
        chain0 = block->chain0.chain;
        if (chain0)
                tcf_chain_hold(chain0);
        else
                list_add(&item->list, &block->chain0.filter_chain_list);
        mutex_unlock(&block->lock);

        if (chain0) {
                struct tcf_proto *tp_head;

                mutex_lock(&chain0->filter_chain_lock);

                tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
                if (tp_head)
                        tcf_chain_head_change_item(item, tp_head);

                mutex_lock(&block->lock);
                list_add(&item->list, &block->chain0.filter_chain_list);
                mutex_unlock(&block->lock);

                mutex_unlock(&chain0->filter_chain_lock);
                tcf_chain_put(chain0);
        }

        return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
                              struct tcf_block_ext_info *ei)
{
        struct tcf_filter_chain_list_item *item;

        mutex_lock(&block->lock);
        list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
                if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
                    (item->chain_head_change == ei->chain_head_change &&
                     item->chain_head_change_priv == ei->chain_head_change_priv)) {
                        if (block->chain0.chain)
                                tcf_chain_head_change_item(item, NULL);
                        list_del(&item->list);
                        mutex_unlock(&block->lock);

                        kfree(item);
                        return;
                }
        }
        mutex_unlock(&block->lock);
        WARN_ON(1);
}

struct tcf_net {
        spinlock_t idr_lock; /* Protects idr */
        struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
                            struct netlink_ext_ack *extack)
{
        struct tcf_net *tn = net_generic(net, tcf_net_id);
        int err;

        idr_preload(GFP_KERNEL);
        spin_lock(&tn->idr_lock);
        err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
                            GFP_NOWAIT);
        spin_unlock(&tn->idr_lock);
        idr_preload_end();

        return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
        struct tcf_net *tn = net_generic(net, tcf_net_id);

        spin_lock(&tn->idr_lock);
        idr_remove(&tn->idr, block->index);
        spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
                                          u32 block_index,
                                          struct netlink_ext_ack *extack)
{
        struct tcf_block *block;

        block = kzalloc(sizeof(*block), GFP_KERNEL);
        if (!block) {
                NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
                return ERR_PTR(-ENOMEM);
        }
        mutex_init(&block->lock);
        INIT_LIST_HEAD(&block->chain_list);
        INIT_LIST_HEAD(&block->cb_list);
        INIT_LIST_HEAD(&block->owner_list);
        INIT_LIST_HEAD(&block->chain0.filter_chain_list);

        refcount_set(&block->refcnt, 1);
        block->net = net;
        block->index = block_index;

        /* Don't store q pointer for blocks which are shared */
        if (!tcf_block_shared(block))
                block->q = q;
        return block;
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
        struct tcf_net *tn = net_generic(net, tcf_net_id);

        return idr_find(&tn->idr, block_index);
}

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
        struct tcf_block *block;

        rcu_read_lock();
        block = tcf_block_lookup(net, block_index);
        if (block && !refcount_inc_not_zero(&block->refcnt))
                block = NULL;
        rcu_read_unlock();

        return block;
}

static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
        mutex_lock(&block->lock);
        if (chain)
                chain = list_is_last(&chain->list, &block->chain_list) ?
                        NULL : list_next_entry(chain, list);
        else
                chain = list_first_entry_or_null(&block->chain_list,
                                                 struct tcf_chain, list);

        /* skip all action-only chains */
        while (chain && tcf_chain_held_by_acts_only(chain))
                chain = list_is_last(&chain->list, &block->chain_list) ?
                        NULL : list_next_entry(chain, list);

        if (chain)
                tcf_chain_hold(chain);
        mutex_unlock(&block->lock);

        return chain;
}

/* Function to be used by all clients that want to iterate over all chains on
 * a block. It properly obtains block->lock and takes a reference to the chain
 * before returning it. Users of this function must be tolerant to concurrent
 * chain insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
        struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

        if (chain)
                tcf_chain_put(chain);

        return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);
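
/*
 * Example usage (illustrative sketch): iterating all chains on a block.
 * The iterator itself releases the previous chain's reference, so the
 * loop body must not touch a chain after asking for the next one.
 * "use_chain()" is hypothetical.
 *
 *      struct tcf_chain *chain;
 *
 *      for (chain = tcf_get_next_chain(block, NULL);
 *           chain;
 *           chain = tcf_get_next_chain(block, chain))
 *              use_chain(chain);
 */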

static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
        u32 prio = 0;

        ASSERT_RTNL();
        mutex_lock(&chain->filter_chain_lock);

        if (!tp) {
                tp = tcf_chain_dereference(chain->filter_chain, chain);
        } else if (tcf_proto_is_deleting(tp)) {
                /* 'deleting' flag is set and chain->filter_chain_lock was
                 * unlocked, which means next pointer could be invalid. Restart
                 * search.
                 */
                prio = tp->prio + 1;
                tp = tcf_chain_dereference(chain->filter_chain, chain);

                for (; tp; tp = tcf_chain_dereference(tp->next, chain))
                        if (!tp->deleting && tp->prio >= prio)
                                break;
        } else {
                tp = tcf_chain_dereference(tp->next, chain);
        }

        if (tp)
                tcf_proto_get(tp);

        mutex_unlock(&chain->filter_chain_lock);

        return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on
 * a chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp,
                   bool rtnl_held)
{
        struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

        if (tp)
                tcf_proto_put(tp, rtnl_held, NULL);

        return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);
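
/*
 * Example usage (illustrative sketch): iterating all classifiers on a
 * chain, mirroring the tcf_get_next_chain() loop above. "use_proto()"
 * is hypothetical; rtnl_held reflects whether the caller holds rtnl.
 *
 *      struct tcf_proto *tp;
 *
 *      for (tp = tcf_get_next_proto(chain, NULL, rtnl_held);
 *           tp;
 *           tp = tcf_get_next_proto(chain, tp, rtnl_held))
 *              use_proto(tp);
 */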

static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
        struct tcf_chain *chain;

        /* Last reference to block. At this point chains cannot be added or
         * removed concurrently.
         */
        for (chain = tcf_get_next_chain(block, NULL);
             chain;
             chain = tcf_get_next_chain(block, chain)) {
                tcf_chain_put_explicitly_created(chain);
                tcf_chain_flush(chain, rtnl_held);
        }
}

/* Look up the Qdisc and increment its reference counter.
 * Set parent, if necessary.
 */

static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
                            u32 *parent, int ifindex, bool rtnl_held,
                            struct netlink_ext_ack *extack)
{
        const struct Qdisc_class_ops *cops;
        struct net_device *dev;
        int err = 0;

        if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
                return 0;

        rcu_read_lock();

        /* Find link */
        dev = dev_get_by_index_rcu(net, ifindex);
        if (!dev) {
                rcu_read_unlock();
                return -ENODEV;
        }

        /* Find qdisc */
        if (!*parent) {
                *q = dev->qdisc;
                *parent = (*q)->handle;
        } else {
                *q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
                if (!*q) {
                        NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
                        err = -EINVAL;
                        goto errout_rcu;
                }
        }

        *q = qdisc_refcount_inc_nz(*q);
        if (!*q) {
                NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
                err = -EINVAL;
                goto errout_rcu;
        }

        /* Is it classful? */
        cops = (*q)->ops->cl_ops;
        if (!cops) {
                NL_SET_ERR_MSG(extack, "Qdisc not classful");
                err = -EINVAL;
                goto errout_qdisc;
        }

        if (!cops->tcf_block) {
                NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
                err = -EOPNOTSUPP;
                goto errout_qdisc;
        }

errout_rcu:
        /* At this point we know that qdisc is not noop_qdisc,
         * which means that qdisc holds a reference to net_device
         * and we hold a reference to qdisc, so it is safe to release
         * rcu read lock.
         */
        rcu_read_unlock();
        return err;

errout_qdisc:
        rcu_read_unlock();

        if (rtnl_held)
                qdisc_put(*q);
        else
                qdisc_put_unlocked(*q);
        *q = NULL;

        return err;
}

static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
                               int ifindex, struct netlink_ext_ack *extack)
{
        if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
                return 0;

        /* Do we search for a filter attached to a class? */
        if (TC_H_MIN(parent)) {
                const struct Qdisc_class_ops *cops = q->ops->cl_ops;

                *cl = cops->find(q, parent);
                if (*cl == 0) {
                        NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
                        return -ENOENT;
                }
        }

        return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
                                          unsigned long cl, int ifindex,
                                          u32 block_index,
                                          struct netlink_ext_ack *extack)
{
        struct tcf_block *block;

        if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
                block = tcf_block_refcnt_get(net, block_index);
                if (!block) {
                        NL_SET_ERR_MSG(extack, "Block of given index was not found");
                        return ERR_PTR(-EINVAL);
                }
        } else {
                const struct Qdisc_class_ops *cops = q->ops->cl_ops;

                block = cops->tcf_block(q, cl, extack);
                if (!block)
                        return ERR_PTR(-EINVAL);

                if (tcf_block_shared(block)) {
                        NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
                        return ERR_PTR(-EOPNOTSUPP);
                }

                /* Always take a reference to the block in order to support
                 * execution of the rules update path of cls API without the
                 * rtnl lock. The caller must release the block when it is
                 * finished using it. The 'if' branch of this conditional
                 * obtains its reference by calling tcf_block_refcnt_get().
                 */
                refcount_inc(&block->refcnt);
        }

        return block;
}

static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
                            struct tcf_block_ext_info *ei, bool rtnl_held)
{
        if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
                /* Flushing/putting all chains will cause the block to be
                 * deallocated when the last chain is freed. However, if
                 * chain_list is empty, the block has to be manually
                 * deallocated. After the block reference counter has reached
                 * 0, it is no longer possible to increment it or add new
                 * chains to the block.
                 */
                bool free_block = list_empty(&block->chain_list);

                mutex_unlock(&block->lock);
                if (tcf_block_shared(block))
                        tcf_block_remove(block, block->net);

                if (q)
                        tcf_block_offload_unbind(block, q, ei);

                if (free_block)
                        tcf_block_destroy(block);
                else
                        tcf_block_flush_all_chains(block, rtnl_held);
        } else if (q) {
                tcf_block_offload_unbind(block, q, ei);
        }
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
        __tcf_block_put(block, NULL, NULL, rtnl_held);
}

/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
                                        u32 *parent, unsigned long *cl,
                                        int ifindex, u32 block_index,
                                        struct netlink_ext_ack *extack)
{
        struct tcf_block *block;
        int err = 0;

        ASSERT_RTNL();

        err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
        if (err)
                goto errout;

        err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
        if (err)
                goto errout_qdisc;

        block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
        if (IS_ERR(block)) {
                err = PTR_ERR(block);
                goto errout_qdisc;
        }

        return block;

errout_qdisc:
        if (*q)
                qdisc_put(*q);
errout:
        *q = NULL;
        return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
                              bool rtnl_held)
{
        if (!IS_ERR_OR_NULL(block))
                tcf_block_refcnt_put(block, rtnl_held);

        if (q) {
                if (rtnl_held)
                        qdisc_put(q);
                else
                        qdisc_put_unlocked(q);
        }
}

struct tcf_block_owner_item {
        struct list_head list;
        struct Qdisc *q;
        enum tcf_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
                               struct Qdisc *q,
                               enum tcf_block_binder_type binder_type)
{
        if (block->keep_dst &&
            binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
            binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
                netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
        struct tcf_block_owner_item *item;

        block->keep_dst = true;
        list_for_each_entry(item, &block->owner_list, list)
                tcf_block_owner_netif_keep_dst(block, item->q,
                                               item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
                               struct Qdisc *q,
                               enum tcf_block_binder_type binder_type)
{
        struct tcf_block_owner_item *item;

        item = kmalloc(sizeof(*item), GFP_KERNEL);
        if (!item)
                return -ENOMEM;
        item->q = q;
        item->binder_type = binder_type;
        list_add(&item->list, &block->owner_list);
        return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
                                struct Qdisc *q,
                                enum tcf_block_binder_type binder_type)
{
        struct tcf_block_owner_item *item;

        list_for_each_entry(item, &block->owner_list, list) {
                if (item->q == q && item->binder_type == binder_type) {
                        list_del(&item->list);
                        kfree(item);
                        return;
                }
        }
        WARN_ON(1);
}

int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
                      struct tcf_block_ext_info *ei,
                      struct netlink_ext_ack *extack)
{
        struct net *net = qdisc_net(q);
        struct tcf_block *block = NULL;
        int err;

        if (ei->block_index)
                /* block_index not 0 means the shared block is requested */
                block = tcf_block_refcnt_get(net, ei->block_index);

        if (!block) {
                block = tcf_block_create(net, q, ei->block_index, extack);
                if (IS_ERR(block))
                        return PTR_ERR(block);
                if (tcf_block_shared(block)) {
                        err = tcf_block_insert(block, net, extack);
                        if (err)
                                goto err_block_insert;
                }
        }

        err = tcf_block_owner_add(block, q, ei->binder_type);
        if (err)
                goto err_block_owner_add;

        tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

        err = tcf_chain0_head_change_cb_add(block, ei, extack);
        if (err)
                goto err_chain0_head_change_cb_add;

        err = tcf_block_offload_bind(block, q, ei, extack);
        if (err)
                goto err_block_offload_bind;

        *p_block = block;
        return 0;

err_block_offload_bind:
        tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
        tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
        tcf_block_refcnt_put(block, true);
        return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
        struct tcf_proto __rcu **p_filter_chain = priv;

        rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
                  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
                  struct netlink_ext_ack *extack)
{
        struct tcf_block_ext_info ei = {
                .chain_head_change = tcf_chain_head_change_dflt,
                .chain_head_change_priv = p_filter_chain,
        };

        WARN_ON(!p_filter_chain);
        return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);

/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should be all removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
                       struct tcf_block_ext_info *ei)
{
        if (!block)
                return;
        tcf_chain0_head_change_cb_del(block, ei);
        tcf_block_owner_del(block, q, ei->binder_type);

        __tcf_block_put(block, q, ei, true);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
        struct tcf_block_ext_info ei = {0, };

        if (!block)
                return;
        tcf_block_put_ext(block, block->q, &ei);
}
EXPORT_SYMBOL(tcf_block_put);
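
/*
 * Example usage (illustrative sketch): a classful qdisc typically
 * acquires a block per class in its ->init() path and releases it on
 * destroy. "sch", "cl" and their members are hypothetical here.
 *
 *      err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
 *      if (err)
 *              return err;
 *      ...
 *      tcf_block_put(cl->block);
 */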

struct tcf_block_cb {
        struct list_head list;
        tc_setup_cb_t *cb;
        void *cb_ident;
        void *cb_priv;
        unsigned int refcnt;
};

void *tcf_block_cb_priv(struct tcf_block_cb *block_cb)
{
        return block_cb->cb_priv;
}
EXPORT_SYMBOL(tcf_block_cb_priv);

struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
                                         tc_setup_cb_t *cb, void *cb_ident)
{
        struct tcf_block_cb *block_cb;

        list_for_each_entry(block_cb, &block->cb_list, list)
                if (block_cb->cb == cb && block_cb->cb_ident == cb_ident)
                        return block_cb;
        return NULL;
}
EXPORT_SYMBOL(tcf_block_cb_lookup);

void tcf_block_cb_incref(struct tcf_block_cb *block_cb)
{
        block_cb->refcnt++;
}
EXPORT_SYMBOL(tcf_block_cb_incref);

unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
{
        return --block_cb->refcnt;
}
EXPORT_SYMBOL(tcf_block_cb_decref);

static int
tcf_block_playback_offloads(struct tcf_block *block, tc_setup_cb_t *cb,
                            void *cb_priv, bool add, bool offload_in_use,
                            struct netlink_ext_ack *extack)
{
        struct tcf_chain *chain, *chain_prev;
        struct tcf_proto *tp, *tp_prev;
        int err;

        for (chain = __tcf_get_next_chain(block, NULL);
             chain;
             chain_prev = chain,
                     chain = __tcf_get_next_chain(block, chain),
                     tcf_chain_put(chain_prev)) {
                for (tp = __tcf_get_next_proto(chain, NULL); tp;
                     tp_prev = tp,
                             tp = __tcf_get_next_proto(chain, tp),
                             tcf_proto_put(tp_prev, true, NULL)) {
                        if (tp->ops->reoffload) {
                                err = tp->ops->reoffload(tp, add, cb, cb_priv,
                                                         extack);
                                if (err && add)
                                        goto err_playback_remove;
                        } else if (add && offload_in_use) {
                                err = -EOPNOTSUPP;
                                NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
                                goto err_playback_remove;
                        }
                }
        }

        return 0;

err_playback_remove:
        tcf_proto_put(tp, true, NULL);
        tcf_chain_put(chain);
        tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
                                    extack);
        return err;
}
1577
1578 struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
1579                                              tc_setup_cb_t *cb, void *cb_ident,
1580                                              void *cb_priv,
1581                                              struct netlink_ext_ack *extack)
1582 {
1583         struct tcf_block_cb *block_cb;
1584         int err;
1585
1586         /* Replay any already present rules */
1587         err = tcf_block_playback_offloads(block, cb, cb_priv, true,
1588                                           tcf_block_offload_in_use(block),
1589                                           extack);
1590         if (err)
1591                 return ERR_PTR(err);
1592
1593         block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
1594         if (!block_cb)
1595                 return ERR_PTR(-ENOMEM);
1596         block_cb->cb = cb;
1597         block_cb->cb_ident = cb_ident;
1598         block_cb->cb_priv = cb_priv;
1599         list_add(&block_cb->list, &block->cb_list);
1600         return block_cb;
1601 }
1602 EXPORT_SYMBOL(__tcf_block_cb_register);
1603
1604 int tcf_block_cb_register(struct tcf_block *block,
1605                           tc_setup_cb_t *cb, void *cb_ident,
1606                           void *cb_priv, struct netlink_ext_ack *extack)
1607 {
1608         struct tcf_block_cb *block_cb;
1609
1610         block_cb = __tcf_block_cb_register(block, cb, cb_ident, cb_priv,
1611                                            extack);
1612         return PTR_ERR_OR_ZERO(block_cb);
1613 }
1614 EXPORT_SYMBOL(tcf_block_cb_register);
1615
1616 void __tcf_block_cb_unregister(struct tcf_block *block,
1617                                struct tcf_block_cb *block_cb)
1618 {
1619         tcf_block_playback_offloads(block, block_cb->cb, block_cb->cb_priv,
1620                                     false, tcf_block_offload_in_use(block),
1621                                     NULL);
1622         list_del(&block_cb->list);
1623         kfree(block_cb);
1624 }
1625 EXPORT_SYMBOL(__tcf_block_cb_unregister);
1626
1627 void tcf_block_cb_unregister(struct tcf_block *block,
1628                              tc_setup_cb_t *cb, void *cb_ident)
1629 {
1630         struct tcf_block_cb *block_cb;
1631
1632         block_cb = tcf_block_cb_lookup(block, cb, cb_ident);
1633         if (!block_cb)
1634                 return;
1635         __tcf_block_cb_unregister(block, block_cb);
1636 }
1637 EXPORT_SYMBOL(tcf_block_cb_unregister);
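
/* Example: how a driver is expected to drive the block callback API above
 * from its TC_SETUP_BLOCK handler. A non-compiled, illustrative sketch;
 * the "my_drv_*" names and the driver-private struct are hypothetical and
 * not part of this file.
 */
#if 0
static int my_drv_setup_tc_block_cb(enum tc_setup_type type,
				    void *type_data, void *cb_priv)
{
	struct my_drv_priv *priv = cb_priv;	/* hypothetical driver state */

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return my_drv_setup_flower(priv, type_data);	/* hypothetical */
	default:
		return -EOPNOTSUPP;
	}
}

static int my_drv_setup_tc_block(struct my_drv_priv *priv,
				 struct tc_block_offload *f)
{
	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		/* Registering replays rules already present on the block
		 * via tcf_block_playback_offloads() above.
		 */
		return tcf_block_cb_register(f->block,
					     my_drv_setup_tc_block_cb,
					     priv, priv, f->extack);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, my_drv_setup_tc_block_cb,
					priv);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
#endif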
1638
1639 /* Main classifier routine: scans classifier chain attached
1640  * to this qdisc, (optionally) tests for protocol and asks
1641  * specific classifiers.
1642  */
1643 int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
1644                  struct tcf_result *res, bool compat_mode)
1645 {
1646 #ifdef CONFIG_NET_CLS_ACT
1647         const int max_reclassify_loop = 4;
1648         const struct tcf_proto *orig_tp = tp;
1649         const struct tcf_proto *first_tp;
1650         int limit = 0;
1651
1652 reclassify:
1653 #endif
1654         for (; tp; tp = rcu_dereference_bh(tp->next)) {
1655                 __be16 protocol = tc_skb_protocol(skb);
1656                 int err;
1657
1658                 if (tp->protocol != protocol &&
1659                     tp->protocol != htons(ETH_P_ALL))
1660                         continue;
1661
1662                 err = tp->classify(skb, tp, res);
1663 #ifdef CONFIG_NET_CLS_ACT
1664                 if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
1665                         first_tp = orig_tp;
1666                         goto reset;
1667                 } else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
1668                         first_tp = res->goto_tp;
1669                         goto reset;
1670                 }
1671 #endif
1672                 if (err >= 0)
1673                         return err;
1674         }
1675
1676         return TC_ACT_UNSPEC; /* signal: continue lookup */
1677 #ifdef CONFIG_NET_CLS_ACT
1678 reset:
1679         if (unlikely(limit++ >= max_reclassify_loop)) {
1680                 net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
1681                                        tp->chain->block->index,
1682                                        tp->prio >> 16,
1683                                        ntohs(tp->protocol));
1684                 return TC_ACT_SHOT;
1685         }
1686
1687         tp = first_tp;
1688         goto reclassify;
1689 #endif
1690 }
1691 EXPORT_SYMBOL(tcf_classify);
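
/* Example: how a classful qdisc typically drives tcf_classify() from its
 * enqueue path. A non-compiled, illustrative sketch; the "my_*" names are
 * hypothetical and not part of this file.
 */
#if 0
static struct my_class *my_qdisc_classify(struct sk_buff *skb,
					  struct Qdisc *sch)
{
	struct my_sched_data *q = qdisc_priv(sch);	/* hypothetical priv */
	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
	struct tcf_result res;
	int err;

	err = tcf_classify(skb, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
	switch (err) {
	case TC_ACT_QUEUED:
	case TC_ACT_STOLEN:
	case TC_ACT_TRAP:
	case TC_ACT_SHOT:
		return NULL;	/* skb was consumed or must be dropped */
	}
#endif
	if (err >= 0)
		return my_class_lookup(q, res.classid);	/* hypothetical */
	/* TC_ACT_UNSPEC: no filter matched, fall back to a default class */
	return my_default_class(q);			/* hypothetical */
}
#endif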
1692
1693 struct tcf_chain_info {
1694         struct tcf_proto __rcu **pprev;
1695         struct tcf_proto __rcu *next;
1696 };
1697
1698 static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
1699                                            struct tcf_chain_info *chain_info)
1700 {
1701         return tcf_chain_dereference(*chain_info->pprev, chain);
1702 }
1703
1704 static int tcf_chain_tp_insert(struct tcf_chain *chain,
1705                                struct tcf_chain_info *chain_info,
1706                                struct tcf_proto *tp)
1707 {
1708         if (chain->flushing)
1709                 return -EAGAIN;
1710
1711         if (*chain_info->pprev == chain->filter_chain)
1712                 tcf_chain0_head_change(chain, tp);
1713         tcf_proto_get(tp);
1714         RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
1715         rcu_assign_pointer(*chain_info->pprev, tp);
1716
1717         return 0;
1718 }
1719
1720 static void tcf_chain_tp_remove(struct tcf_chain *chain,
1721                                 struct tcf_chain_info *chain_info,
1722                                 struct tcf_proto *tp)
1723 {
1724         struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);
1725
1726         tcf_proto_mark_delete(tp);
1727         if (tp == chain->filter_chain)
1728                 tcf_chain0_head_change(chain, next);
1729         RCU_INIT_POINTER(*chain_info->pprev, next);
1730 }
1731
1732 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1733                                            struct tcf_chain_info *chain_info,
1734                                            u32 protocol, u32 prio,
1735                                            bool prio_allocate);
1736
1737 /* Try to insert new proto.
1738  * If proto with specified priority already exists, free new proto
1739  * and return existing one.
1740  */
1741
1742 static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
1743                                                     struct tcf_proto *tp_new,
1744                                                     u32 protocol, u32 prio,
1745                                                     bool rtnl_held)
1746 {
1747         struct tcf_chain_info chain_info;
1748         struct tcf_proto *tp;
1749         int err = 0;
1750
1751         mutex_lock(&chain->filter_chain_lock);
1752
1753         tp = tcf_chain_tp_find(chain, &chain_info,
1754                                protocol, prio, false);
1755         if (!tp)
1756                 err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
1757         mutex_unlock(&chain->filter_chain_lock);
1758
1759         if (tp) {
1760                 tcf_proto_destroy(tp_new, rtnl_held, NULL);
1761                 tp_new = tp;
1762         } else if (err) {
1763                 tcf_proto_destroy(tp_new, rtnl_held, NULL);
1764                 tp_new = ERR_PTR(err);
1765         }
1766
1767         return tp_new;
1768 }
1769
1770 static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
1771                                       struct tcf_proto *tp, bool rtnl_held,
1772                                       struct netlink_ext_ack *extack)
1773 {
1774         struct tcf_chain_info chain_info;
1775         struct tcf_proto *tp_iter;
1776         struct tcf_proto **pprev;
1777         struct tcf_proto *next;
1778
1779         mutex_lock(&chain->filter_chain_lock);
1780
1781         /* Atomically find and remove tp from chain. */
1782         for (pprev = &chain->filter_chain;
1783              (tp_iter = tcf_chain_dereference(*pprev, chain));
1784              pprev = &tp_iter->next) {
1785                 if (tp_iter == tp) {
1786                         chain_info.pprev = pprev;
1787                         chain_info.next = tp_iter->next;
1788                         WARN_ON(tp_iter->deleting);
1789                         break;
1790                 }
1791         }
1792         /* Verify that tp still exists and no new filters were inserted
1793          * concurrently.
1794          * Mark tp for deletion if it is empty.
1795          */
1796         if (!tp_iter || !tcf_proto_check_delete(tp, rtnl_held)) {
1797                 mutex_unlock(&chain->filter_chain_lock);
1798                 return;
1799         }
1800
1801         next = tcf_chain_dereference(chain_info.next, chain);
1802         if (tp == chain->filter_chain)
1803                 tcf_chain0_head_change(chain, next);
1804         RCU_INIT_POINTER(*chain_info.pprev, next);
1805         mutex_unlock(&chain->filter_chain_lock);
1806
1807         tcf_proto_put(tp, rtnl_held, extack);
1808 }
1809
1810 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1811                                            struct tcf_chain_info *chain_info,
1812                                            u32 protocol, u32 prio,
1813                                            bool prio_allocate)
1814 {
1815         struct tcf_proto **pprev;
1816         struct tcf_proto *tp;
1817
1818         /* Check the chain for existence of proto-tcf with this priority */
1819         for (pprev = &chain->filter_chain;
1820              (tp = tcf_chain_dereference(*pprev, chain));
1821              pprev = &tp->next) {
1822                 if (tp->prio >= prio) {
1823                         if (tp->prio == prio) {
1824                                 if (prio_allocate ||
1825                                     (tp->protocol != protocol && protocol))
1826                                         return ERR_PTR(-EINVAL);
1827                         } else {
1828                                 tp = NULL;
1829                         }
1830                         break;
1831                 }
1832         }
1833         chain_info->pprev = pprev;
1834         if (tp) {
1835                 chain_info->next = tp->next;
1836                 tcf_proto_get(tp);
1837         } else {
1838                 chain_info->next = NULL;
1839         }
1840         return tp;
1841 }
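
/* Illustrative note on the lookup above: the chain is kept sorted by
 * ascending prio. On a chain holding prios {1, 10}, a find for prio 5
 * stops at the prio-10 entry and returns NULL with chain_info->pprev
 * pointing at the insertion slot, while a find for prio 10 with a
 * mismatched non-zero protocol returns ERR_PTR(-EINVAL).
 */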
1842
1843 static int tcf_fill_node(struct net *net, struct sk_buff *skb,
1844                          struct tcf_proto *tp, struct tcf_block *block,
1845                          struct Qdisc *q, u32 parent, void *fh,
1846                          u32 portid, u32 seq, u16 flags, int event,
1847                          bool rtnl_held)
1848 {
1849         struct tcmsg *tcm;
1850         struct nlmsghdr  *nlh;
1851         unsigned char *b = skb_tail_pointer(skb);
1852
1853         nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1854         if (!nlh)
1855                 goto out_nlmsg_trim;
1856         tcm = nlmsg_data(nlh);
1857         tcm->tcm_family = AF_UNSPEC;
1858         tcm->tcm__pad1 = 0;
1859         tcm->tcm__pad2 = 0;
1860         if (q) {
1861                 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1862                 tcm->tcm_parent = parent;
1863         } else {
1864                 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
1865                 tcm->tcm_block_index = block->index;
1866         }
1867         tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
1868         if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
1869                 goto nla_put_failure;
1870         if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
1871                 goto nla_put_failure;
1872         if (!fh) {
1873                 tcm->tcm_handle = 0;
1874         } else {
1875                 if (tp->ops->dump &&
1876                     tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
1877                         goto nla_put_failure;
1878         }
1879         nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1880         return skb->len;
1881
1882 out_nlmsg_trim:
1883 nla_put_failure:
1884         nlmsg_trim(skb, b);
1885         return -1;
1886 }
1887
1888 static int tfilter_notify(struct net *net, struct sk_buff *oskb,
1889                           struct nlmsghdr *n, struct tcf_proto *tp,
1890                           struct tcf_block *block, struct Qdisc *q,
1891                           u32 parent, void *fh, int event, bool unicast,
1892                           bool rtnl_held)
1893 {
1894         struct sk_buff *skb;
1895         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1896         int err = 0;
1897
1898         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1899         if (!skb)
1900                 return -ENOBUFS;
1901
1902         if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1903                           n->nlmsg_seq, n->nlmsg_flags, event,
1904                           rtnl_held) <= 0) {
1905                 kfree_skb(skb);
1906                 return -EINVAL;
1907         }
1908
1909         if (unicast)
1910                 err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
1911         else
1912                 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1913                                      n->nlmsg_flags & NLM_F_ECHO);
1914
1915         if (err > 0)
1916                 err = 0;
1917         return err;
1918 }
1919
1920 static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
1921                               struct nlmsghdr *n, struct tcf_proto *tp,
1922                               struct tcf_block *block, struct Qdisc *q,
1923                               u32 parent, void *fh, bool unicast, bool *last,
1924                               bool rtnl_held, struct netlink_ext_ack *extack)
1925 {
1926         struct sk_buff *skb;
1927         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1928         int err;
1929
1930         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1931         if (!skb)
1932                 return -ENOBUFS;
1933
1934         if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1935                           n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
1936                           rtnl_held) <= 0) {
1937                 NL_SET_ERR_MSG(extack, "Failed to build del event notification");
1938                 kfree_skb(skb);
1939                 return -EINVAL;
1940         }
1941
1942         err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
1943         if (err) {
1944                 kfree_skb(skb);
1945                 return err;
1946         }
1947
1948         if (unicast)
1949                 err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
1950         else
1951                 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1952                                      n->nlmsg_flags & NLM_F_ECHO);
1953         if (err < 0)
1954                 NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
1955
1956         if (err > 0)
1957                 err = 0;
1958         return err;
1959 }
1960
1961 static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
1962                                  struct tcf_block *block, struct Qdisc *q,
1963                                  u32 parent, struct nlmsghdr *n,
1964                                  struct tcf_chain *chain, int event,
1965                                  bool rtnl_held)
1966 {
1967         struct tcf_proto *tp;
1968
1969         for (tp = tcf_get_next_proto(chain, NULL, rtnl_held);
1970              tp; tp = tcf_get_next_proto(chain, tp, rtnl_held))
1971                 tfilter_notify(net, oskb, n, tp, block,
1972                                q, parent, NULL, event, false, rtnl_held);
1973 }
1974
1975 static void tfilter_put(struct tcf_proto *tp, void *fh)
1976 {
1977         if (tp->ops->put && fh)
1978                 tp->ops->put(tp, fh);
1979 }
1980
1981 static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
1982                           struct netlink_ext_ack *extack)
1983 {
1984         struct net *net = sock_net(skb->sk);
1985         struct nlattr *tca[TCA_MAX + 1];
1986         struct tcmsg *t;
1987         u32 protocol;
1988         u32 prio;
1989         bool prio_allocate;
1990         u32 parent;
1991         u32 chain_index;
1992         struct Qdisc *q = NULL;
1993         struct tcf_chain_info chain_info;
1994         struct tcf_chain *chain = NULL;
1995         struct tcf_block *block;
1996         struct tcf_proto *tp;
1997         unsigned long cl;
1998         void *fh;
1999         int err;
2000         int tp_created;
2001         bool rtnl_held = false;
2002
2003         if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2004                 return -EPERM;
2005
2006 replay:
2007         tp_created = 0;
2008
2009         err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
2010         if (err < 0)
2011                 return err;
2012
2013         t = nlmsg_data(n);
2014         protocol = TC_H_MIN(t->tcm_info);
2015         prio = TC_H_MAJ(t->tcm_info);
2016         prio_allocate = false;
2017         parent = t->tcm_parent;
2018         tp = NULL;
2019         cl = 0;
2020         block = NULL;
2021
2022         if (prio == 0) {
2023                 /* If no priority is provided by the user,
2024                  * we allocate one.
2025                  */
2026                 if (n->nlmsg_flags & NLM_F_CREATE) {
2027                         prio = TC_H_MAKE(0x80000000U, 0U);
2028                         prio_allocate = true;
2029                 } else {
2030                         NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2031                         return -ENOENT;
2032                 }
2033         }
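
        /* Illustrative example of the tcm_info decoding above: for
         * "tc filter add ... protocol ip prio 10", userspace encodes
         * tcm_info = TC_H_MAKE(10 << 16, htons(ETH_P_IP)), so TC_H_MAJ()
         * recovers the (still shifted) priority and TC_H_MIN() the
         * protocol.
         */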
2034
2035         /* Find head of filter chain. */
2036
2037         err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2038         if (err)
2039                 return err;
2040
2041         /* Take the rtnl mutex if any of these holds: rtnl_held was set on a
2042          * previous iteration; the block is shared (no qdisc); the qdisc or
2043          * classifier is not unlocked; the classifier type is not specified.
2044          */
2045         if (rtnl_held ||
2046             (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2047             !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
2048                 rtnl_held = true;
2049                 rtnl_lock();
2050         }
2051
2052         err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2053         if (err)
2054                 goto errout;
2055
2056         block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2057                                  extack);
2058         if (IS_ERR(block)) {
2059                 err = PTR_ERR(block);
2060                 goto errout;
2061         }
2062
2063         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2064         if (chain_index > TC_ACT_EXT_VAL_MASK) {
2065                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2066                 err = -EINVAL;
2067                 goto errout;
2068         }
2069         chain = tcf_chain_get(block, chain_index, true);
2070         if (!chain) {
2071                 NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
2072                 err = -ENOMEM;
2073                 goto errout;
2074         }
2075
2076         mutex_lock(&chain->filter_chain_lock);
2077         tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2078                                prio, prio_allocate);
2079         if (IS_ERR(tp)) {
2080                 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2081                 err = PTR_ERR(tp);
2082                 goto errout_locked;
2083         }
2084
2085         if (tp == NULL) {
2086                 struct tcf_proto *tp_new = NULL;
2087
2088                 if (chain->flushing) {
2089                         err = -EAGAIN;
2090                         goto errout_locked;
2091                 }
2092
2093                 /* Proto-tcf does not exist, create new one */
2094
2095                 if (tca[TCA_KIND] == NULL || !protocol) {
2096                         NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
2097                         err = -EINVAL;
2098                         goto errout_locked;
2099                 }
2100
2101                 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2102                         NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2103                         err = -ENOENT;
2104                         goto errout_locked;
2105                 }
2106
2107                 if (prio_allocate)
2108                         prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
2109                                                                &chain_info));
2110
2111                 mutex_unlock(&chain->filter_chain_lock);
2112                 tp_new = tcf_proto_create(nla_data(tca[TCA_KIND]),
2113                                           protocol, prio, chain, rtnl_held,
2114                                           extack);
2115                 if (IS_ERR(tp_new)) {
2116                         err = PTR_ERR(tp_new);
2117                         goto errout_tp;
2118                 }
2119
2120                 tp_created = 1;
2121                 tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
2122                                                 rtnl_held);
2123                 if (IS_ERR(tp)) {
2124                         err = PTR_ERR(tp);
2125                         goto errout_tp;
2126                 }
2127         } else {
2128                 mutex_unlock(&chain->filter_chain_lock);
2129         }
2130
2131         if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2132                 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2133                 err = -EINVAL;
2134                 goto errout;
2135         }
2136
2137         fh = tp->ops->get(tp, t->tcm_handle);
2138
2139         if (!fh) {
2140                 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2141                         NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2142                         err = -ENOENT;
2143                         goto errout;
2144                 }
2145         } else if (n->nlmsg_flags & NLM_F_EXCL) {
2146                 tfilter_put(tp, fh);
2147                 NL_SET_ERR_MSG(extack, "Filter already exists");
2148                 err = -EEXIST;
2149                 goto errout;
2150         }
2151
2152         if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
2153                 NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
2154                 err = -EINVAL;
2155                 goto errout;
2156         }
2157
2158         err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
2159                               n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE,
2160                               rtnl_held, extack);
2161         if (err == 0) {
2162                 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2163                                RTM_NEWTFILTER, false, rtnl_held);
2164                 tfilter_put(tp, fh);
2165         }
2166
2167 errout:
2168         if (err && tp_created)
2169                 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
2170 errout_tp:
2171         if (chain) {
2172                 if (tp && !IS_ERR(tp))
2173                         tcf_proto_put(tp, rtnl_held, NULL);
2174                 if (!tp_created)
2175                         tcf_chain_put(chain);
2176         }
2177         tcf_block_release(q, block, rtnl_held);
2178
2179         if (rtnl_held)
2180                 rtnl_unlock();
2181
2182         if (err == -EAGAIN) {
2183                 /* Take rtnl lock in case EAGAIN is caused by concurrent flush
2184                  * of target chain.
2185                  */
2186                 rtnl_held = true;
2187                 /* Replay the request. */
2188                 goto replay;
2189         }
2190         return err;
2191
2192 errout_locked:
2193         mutex_unlock(&chain->filter_chain_lock);
2194         goto errout;
2195 }
2196
2197 static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2198                           struct netlink_ext_ack *extack)
2199 {
2200         struct net *net = sock_net(skb->sk);
2201         struct nlattr *tca[TCA_MAX + 1];
2202         struct tcmsg *t;
2203         u32 protocol;
2204         u32 prio;
2205         u32 parent;
2206         u32 chain_index;
2207         struct Qdisc *q = NULL;
2208         struct tcf_chain_info chain_info;
2209         struct tcf_chain *chain = NULL;
2210         struct tcf_block *block = NULL;
2211         struct tcf_proto *tp = NULL;
2212         unsigned long cl = 0;
2213         void *fh = NULL;
2214         int err;
2215         bool rtnl_held = false;
2216
2217         if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2218                 return -EPERM;
2219
2220         err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
2221         if (err < 0)
2222                 return err;
2223
2224         t = nlmsg_data(n);
2225         protocol = TC_H_MIN(t->tcm_info);
2226         prio = TC_H_MAJ(t->tcm_info);
2227         parent = t->tcm_parent;
2228
2229         if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
2230                 NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
2231                 return -ENOENT;
2232         }
2233
2234         /* Find head of filter chain. */
2235
2236         err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2237         if (err)
2238                 return err;
2239
2240         /* Take the rtnl mutex if any of these holds: the whole chain is being
2241          * flushed; the block is shared (no qdisc); the qdisc or classifier is
2242          * not unlocked; the classifier type is not specified.
2243          */
2244         if (!prio ||
2245             (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2246             !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
2247                 rtnl_held = true;
2248                 rtnl_lock();
2249         }
2250
2251         err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2252         if (err)
2253                 goto errout;
2254
2255         block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2256                                  extack);
2257         if (IS_ERR(block)) {
2258                 err = PTR_ERR(block);
2259                 goto errout;
2260         }
2261
2262         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2263         if (chain_index > TC_ACT_EXT_VAL_MASK) {
2264                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2265                 err = -EINVAL;
2266                 goto errout;
2267         }
2268         chain = tcf_chain_get(block, chain_index, false);
2269         if (!chain) {
2270                 /* User requested flush on non-existent chain. Nothing to do,
2271                  * so just return success.
2272                  */
2273                 if (prio == 0) {
2274                         err = 0;
2275                         goto errout;
2276                 }
2277                 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2278                 err = -ENOENT;
2279                 goto errout;
2280         }
2281
2282         if (prio == 0) {
2283                 tfilter_notify_chain(net, skb, block, q, parent, n,
2284                                      chain, RTM_DELTFILTER, rtnl_held);
2285                 tcf_chain_flush(chain, rtnl_held);
2286                 err = 0;
2287                 goto errout;
2288         }
2289
2290         mutex_lock(&chain->filter_chain_lock);
2291         tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2292                                prio, false);
2293         if (!tp || IS_ERR(tp)) {
2294                 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2295                 err = tp ? PTR_ERR(tp) : -ENOENT;
2296                 goto errout_locked;
2297         } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2298                 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2299                 err = -EINVAL;
2300                 goto errout_locked;
2301         } else if (t->tcm_handle == 0) {
2302                 tcf_chain_tp_remove(chain, &chain_info, tp);
2303                 mutex_unlock(&chain->filter_chain_lock);
2304
2305                 tcf_proto_put(tp, rtnl_held, NULL);
2306                 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2307                                RTM_DELTFILTER, false, rtnl_held);
2308                 err = 0;
2309                 goto errout;
2310         }
2311         mutex_unlock(&chain->filter_chain_lock);
2312
2313         fh = tp->ops->get(tp, t->tcm_handle);
2314
2315         if (!fh) {
2316                 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2317                 err = -ENOENT;
2318         } else {
2319                 bool last;
2320
2321                 err = tfilter_del_notify(net, skb, n, tp, block,
2322                                          q, parent, fh, false, &last,
2323                                          rtnl_held, extack);
2324
2325                 if (err)
2326                         goto errout;
2327                 if (last)
2328                         tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
2329         }
2330
2331 errout:
2332         if (chain) {
2333                 if (tp && !IS_ERR(tp))
2334                         tcf_proto_put(tp, rtnl_held, NULL);
2335                 tcf_chain_put(chain);
2336         }
2337         tcf_block_release(q, block, rtnl_held);
2338
2339         if (rtnl_held)
2340                 rtnl_unlock();
2341
2342         return err;
2343
2344 errout_locked:
2345         mutex_unlock(&chain->filter_chain_lock);
2346         goto errout;
2347 }
2348
2349 static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2350                           struct netlink_ext_ack *extack)
2351 {
2352         struct net *net = sock_net(skb->sk);
2353         struct nlattr *tca[TCA_MAX + 1];
2354         struct tcmsg *t;
2355         u32 protocol;
2356         u32 prio;
2357         u32 parent;
2358         u32 chain_index;
2359         struct Qdisc *q = NULL;
2360         struct tcf_chain_info chain_info;
2361         struct tcf_chain *chain = NULL;
2362         struct tcf_block *block = NULL;
2363         struct tcf_proto *tp = NULL;
2364         unsigned long cl = 0;
2365         void *fh = NULL;
2366         int err;
2367         bool rtnl_held = false;
2368
2369         err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
2370         if (err < 0)
2371                 return err;
2372
2373         t = nlmsg_data(n);
2374         protocol = TC_H_MIN(t->tcm_info);
2375         prio = TC_H_MAJ(t->tcm_info);
2376         parent = t->tcm_parent;
2377
2378         if (prio == 0) {
2379                 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2380                 return -ENOENT;
2381         }
2382
2383         /* Find head of filter chain. */
2384
2385         err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2386         if (err)
2387                 return err;
2388
2389         /* Take the rtnl mutex if any of these holds: the block is shared (no
2390          * qdisc); the qdisc or classifier is not unlocked; the classifier
2391          * type is not specified.
2392          */
2393         if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2394             !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
2395                 rtnl_held = true;
2396                 rtnl_lock();
2397         }
2398
2399         err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2400         if (err)
2401                 goto errout;
2402
2403         block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2404                                  extack);
2405         if (IS_ERR(block)) {
2406                 err = PTR_ERR(block);
2407                 goto errout;
2408         }
2409
2410         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2411         if (chain_index > TC_ACT_EXT_VAL_MASK) {
2412                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2413                 err = -EINVAL;
2414                 goto errout;
2415         }
2416         chain = tcf_chain_get(block, chain_index, false);
2417         if (!chain) {
2418                 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2419                 err = -EINVAL;
2420                 goto errout;
2421         }
2422
2423         mutex_lock(&chain->filter_chain_lock);
2424         tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2425                                prio, false);
2426         mutex_unlock(&chain->filter_chain_lock);
2427         if (!tp || IS_ERR(tp)) {
2428                 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2429                 err = tp ? PTR_ERR(tp) : -ENOENT;
2430                 goto errout;
2431         } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2432                 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2433                 err = -EINVAL;
2434                 goto errout;
2435         }
2436
2437         fh = tp->ops->get(tp, t->tcm_handle);
2438
2439         if (!fh) {
2440                 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2441                 err = -ENOENT;
2442         } else {
2443                 err = tfilter_notify(net, skb, n, tp, block, q, parent,
2444                                      fh, RTM_NEWTFILTER, true, rtnl_held);
2445                 if (err < 0)
2446                         NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
2447         }
2448
2449         tfilter_put(tp, fh);
2450 errout:
2451         if (chain) {
2452                 if (tp && !IS_ERR(tp))
2453                         tcf_proto_put(tp, rtnl_held, NULL);
2454                 tcf_chain_put(chain);
2455         }
2456         tcf_block_release(q, block, rtnl_held);
2457
2458         if (rtnl_held)
2459                 rtnl_unlock();
2460
2461         return err;
2462 }
2463
2464 struct tcf_dump_args {
2465         struct tcf_walker w;
2466         struct sk_buff *skb;
2467         struct netlink_callback *cb;
2468         struct tcf_block *block;
2469         struct Qdisc *q;
2470         u32 parent;
2471 };
2472
2473 static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2474 {
2475         struct tcf_dump_args *a = (void *)arg;
2476         struct net *net = sock_net(a->skb->sk);
2477
2478         return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
2479                              n, NETLINK_CB(a->cb->skb).portid,
2480                              a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2481                              RTM_NEWTFILTER, true);
2482 }
2483
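/* Dump the filters on one chain. Resume state across dump calls lives in
 * cb->args[]: args[0] holds the flat tp index already processed (managed
 * by the tc_dump_tfilter() caller below), args[1] is the per-tp walker
 * position plus one (zero meaning the tp header itself still has to be
 * dumped), and args[2] carries the classifier's opaque walk cookie.
 */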
2484 static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2485                            struct sk_buff *skb, struct netlink_callback *cb,
2486                            long index_start, long *p_index)
2487 {
2488         struct net *net = sock_net(skb->sk);
2489         struct tcf_block *block = chain->block;
2490         struct tcmsg *tcm = nlmsg_data(cb->nlh);
2491         struct tcf_proto *tp, *tp_prev;
2492         struct tcf_dump_args arg;
2493
2494         for (tp = __tcf_get_next_proto(chain, NULL);
2495              tp;
2496              tp_prev = tp,
2497                      tp = __tcf_get_next_proto(chain, tp),
2498                      tcf_proto_put(tp_prev, true, NULL),
2499                      (*p_index)++) {
2500                 if (*p_index < index_start)
2501                         continue;
2502                 if (TC_H_MAJ(tcm->tcm_info) &&
2503                     TC_H_MAJ(tcm->tcm_info) != tp->prio)
2504                         continue;
2505                 if (TC_H_MIN(tcm->tcm_info) &&
2506                     TC_H_MIN(tcm->tcm_info) != tp->protocol)
2507                         continue;
2508                 if (*p_index > index_start)
2509                         memset(&cb->args[1], 0,
2510                                sizeof(cb->args) - sizeof(cb->args[0]));
2511                 if (cb->args[1] == 0) {
2512                         if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
2513                                           NETLINK_CB(cb->skb).portid,
2514                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
2515                                           RTM_NEWTFILTER, true) <= 0)
2516                                 goto errout;
2517                         cb->args[1] = 1;
2518                 }
2519                 if (!tp->ops->walk)
2520                         continue;
2521                 arg.w.fn = tcf_node_dump;
2522                 arg.skb = skb;
2523                 arg.cb = cb;
2524                 arg.block = block;
2525                 arg.q = q;
2526                 arg.parent = parent;
2527                 arg.w.stop = 0;
2528                 arg.w.skip = cb->args[1] - 1;
2529                 arg.w.count = 0;
2530                 arg.w.cookie = cb->args[2];
2531                 tp->ops->walk(tp, &arg.w, true);
2532                 cb->args[2] = arg.w.cookie;
2533                 cb->args[1] = arg.w.count + 1;
2534                 if (arg.w.stop)
2535                         goto errout;
2536         }
2537         return true;
2538
2539 errout:
2540         tcf_proto_put(tp, true, NULL);
2541         return false;
2542 }
2543
2544 /* called with RTNL */
2545 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2546 {
2547         struct tcf_chain *chain, *chain_prev;
2548         struct net *net = sock_net(skb->sk);
2549         struct nlattr *tca[TCA_MAX + 1];
2550         struct Qdisc *q = NULL;
2551         struct tcf_block *block;
2552         struct tcmsg *tcm = nlmsg_data(cb->nlh);
2553         long index_start;
2554         long index;
2555         u32 parent;
2556         int err;
2557
2558         if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2559                 return skb->len;
2560
2561         err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL,
2562                           cb->extack);
2563         if (err)
2564                 return err;
2565
2566         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2567                 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2568                 if (!block)
2569                         goto out;
2570                 /* If we work with block index, q is NULL and parent value
2571                  * will never be used in the following code. The check
2572                  * in tcf_fill_node prevents it. However, compiler does not
2573                  * see that far, so set parent to zero to silence the warning
2574                  * about parent being uninitialized.
2575                  */
2576                 parent = 0;
2577         } else {
2578                 const struct Qdisc_class_ops *cops;
2579                 struct net_device *dev;
2580                 unsigned long cl = 0;
2581
2582                 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2583                 if (!dev)
2584                         return skb->len;
2585
2586                 parent = tcm->tcm_parent;
2587                 if (!parent) {
2588                         q = dev->qdisc;
2589                         parent = q->handle;
2590                 } else {
2591                         q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2592                 }
2593                 if (!q)
2594                         goto out;
2595                 cops = q->ops->cl_ops;
2596                 if (!cops)
2597                         goto out;
2598                 if (!cops->tcf_block)
2599                         goto out;
2600                 if (TC_H_MIN(tcm->tcm_parent)) {
2601                         cl = cops->find(q, tcm->tcm_parent);
2602                         if (cl == 0)
2603                                 goto out;
2604                 }
2605                 block = cops->tcf_block(q, cl, NULL);
2606                 if (!block)
2607                         goto out;
2608                 if (tcf_block_shared(block))
2609                         q = NULL;
2610         }
2611
2612         index_start = cb->args[0];
2613         index = 0;
2614
2615         for (chain = __tcf_get_next_chain(block, NULL);
2616              chain;
2617              chain_prev = chain,
2618                      chain = __tcf_get_next_chain(block, chain),
2619                      tcf_chain_put(chain_prev)) {
2620                 if (tca[TCA_CHAIN] &&
2621                     nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2622                         continue;
2623                 if (!tcf_chain_dump(chain, q, parent, skb, cb,
2624                                     index_start, &index)) {
2625                         tcf_chain_put(chain);
2626                         err = -EMSGSIZE;
2627                         break;
2628                 }
2629         }
2630
2631         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2632                 tcf_block_refcnt_put(block, true);
2633         cb->args[0] = index;
2634
2635 out:
2636         /* If we made no progress, the error (EMSGSIZE) is real */
2637         if (skb->len == 0 && err)
2638                 return err;
2639         return skb->len;
2640 }
2641
2642 static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2643                               void *tmplt_priv, u32 chain_index,
2644                               struct net *net, struct sk_buff *skb,
2645                               struct tcf_block *block,
2646                               u32 portid, u32 seq, u16 flags, int event)
2647 {
2648         unsigned char *b = skb_tail_pointer(skb);
2649         const struct tcf_proto_ops *ops;
2650         struct nlmsghdr *nlh;
2651         struct tcmsg *tcm;
2652         void *priv;
2653
2654         ops = tmplt_ops;
2655         priv = tmplt_priv;
2656
2657         nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2658         if (!nlh)
2659                 goto out_nlmsg_trim;
2660         tcm = nlmsg_data(nlh);
2661         tcm->tcm_family = AF_UNSPEC;
2662         tcm->tcm__pad1 = 0;
2663         tcm->tcm__pad2 = 0;
2664         tcm->tcm_handle = 0;
2665         if (block->q) {
2666                 tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2667                 tcm->tcm_parent = block->q->handle;
2668         } else {
2669                 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2670                 tcm->tcm_block_index = block->index;
2671         }
2672
2673         if (nla_put_u32(skb, TCA_CHAIN, chain_index))
2674                 goto nla_put_failure;
2675
2676         if (ops) {
2677                 if (nla_put_string(skb, TCA_KIND, ops->kind))
2678                         goto nla_put_failure;
2679                 if (ops->tmplt_dump(skb, net, priv) < 0)
2680                         goto nla_put_failure;
2681         }
2682
2683         nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2684         return skb->len;
2685
2686 out_nlmsg_trim:
2687 nla_put_failure:
2688         nlmsg_trim(skb, b);
2689         return -EMSGSIZE;
2690 }
2691
2692 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2693                            u32 seq, u16 flags, int event, bool unicast)
2694 {
2695         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2696         struct tcf_block *block = chain->block;
2697         struct net *net = block->net;
2698         struct sk_buff *skb;
2699         int err = 0;
2700
2701         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2702         if (!skb)
2703                 return -ENOBUFS;
2704
2705         if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2706                                chain->index, net, skb, block, portid,
2707                                seq, flags, event) <= 0) {
2708                 kfree_skb(skb);
2709                 return -EINVAL;
2710         }
2711
2712         if (unicast)
2713                 err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
2714         else
2715                 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2716                                      flags & NLM_F_ECHO);
2717
2718         if (err > 0)
2719                 err = 0;
2720         return err;
2721 }
2722
2723 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
2724                                   void *tmplt_priv, u32 chain_index,
2725                                   struct tcf_block *block, struct sk_buff *oskb,
2726                                   u32 seq, u16 flags, bool unicast)
2727 {
2728         u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2729         struct net *net = block->net;
2730         struct sk_buff *skb;
2731
2732         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2733         if (!skb)
2734                 return -ENOBUFS;
2735
2736         if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
2737                                block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
2738                 kfree_skb(skb);
2739                 return -EINVAL;
2740         }
2741
2742         if (unicast)
2743                 return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
2744
2745         return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
2746 }
2747
2748 static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
2749                               struct nlattr **tca,
2750                               struct netlink_ext_ack *extack)
2751 {
2752         const struct tcf_proto_ops *ops;
2753         void *tmplt_priv;
2754
2755         /* If kind is not set, user did not specify template. */
2756         if (!tca[TCA_KIND])
2757                 return 0;
2758
2759         ops = tcf_proto_lookup_ops(nla_data(tca[TCA_KIND]), true, extack);
2760         if (IS_ERR(ops))
2761                 return PTR_ERR(ops);
2762         if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
2763                 NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
2764                 return -EOPNOTSUPP;
2765         }
2766
2767         tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
2768         if (IS_ERR(tmplt_priv)) {
2769                 module_put(ops->owner);
2770                 return PTR_ERR(tmplt_priv);
2771         }
2772         chain->tmplt_ops = ops;
2773         chain->tmplt_priv = tmplt_priv;
2774         return 0;
2775 }
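
/* Example: the hooks a classifier must provide for tc_chain_tmplt_add()
 * above to accept a chain template (cls_flower is the in-tree user). A
 * non-compiled, illustrative sketch; the "my_cls_*" names are
 * hypothetical.
 */
#if 0
static void *my_cls_tmplt_create(struct net *net, struct tcf_chain *chain,
				 struct nlattr **tca,
				 struct netlink_ext_ack *extack)
{
	/* Parse TCA_OPTIONS into a template object or return an ERR_PTR. */
	return ERR_PTR(-EOPNOTSUPP);
}

static void my_cls_tmplt_destroy(void *tmplt_priv)
{
	/* Free the object returned by ->tmplt_create(). */
}

static int my_cls_tmplt_dump(struct sk_buff *skb, struct net *net,
			     void *tmplt_priv)
{
	/* Fill TCA_OPTIONS describing the template; return <0 on error. */
	return 0;
}

static struct tcf_proto_ops my_cls_ops __read_mostly = {
	.kind		= "my_cls",
	.owner		= THIS_MODULE,
	/* .classify/.init/.change/... elided for brevity */
	.tmplt_create	= my_cls_tmplt_create,
	.tmplt_destroy	= my_cls_tmplt_destroy,
	.tmplt_dump	= my_cls_tmplt_dump,
};
#endif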
2776
2777 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
2778                                void *tmplt_priv)
2779 {
2780         /* If template ops are not set, there is no template and nothing to do. */
2781         if (!tmplt_ops)
2782                 return;
2783
2784         tmplt_ops->tmplt_destroy(tmplt_priv);
2785         module_put(tmplt_ops->owner);
2786 }
2787
2788 /* Add/delete/get a chain */
2789
2790 static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
2791                         struct netlink_ext_ack *extack)
2792 {
2793         struct net *net = sock_net(skb->sk);
2794         struct nlattr *tca[TCA_MAX + 1];
2795         struct tcmsg *t;
2796         u32 parent;
2797         u32 chain_index;
2798         struct Qdisc *q = NULL;
2799         struct tcf_chain *chain = NULL;
2800         struct tcf_block *block;
2801         unsigned long cl;
2802         int err;
2803
2804         if (n->nlmsg_type != RTM_GETCHAIN &&
2805             !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2806                 return -EPERM;
2807
2808 replay:
2809         err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
2810         if (err < 0)
2811                 return err;
2812
2813         t = nlmsg_data(n);
2814         parent = t->tcm_parent;
2815         cl = 0;
2816
2817         block = tcf_block_find(net, &q, &parent, &cl,
2818                                t->tcm_ifindex, t->tcm_block_index, extack);
2819         if (IS_ERR(block))
2820                 return PTR_ERR(block);
2821
2822         chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2823         if (chain_index > TC_ACT_EXT_VAL_MASK) {
2824                 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2825                 err = -EINVAL;
2826                 goto errout_block;
2827         }
2828
2829         mutex_lock(&block->lock);
2830         chain = tcf_chain_lookup(block, chain_index);
2831         if (n->nlmsg_type == RTM_NEWCHAIN) {
2832                 if (chain) {
2833                         if (tcf_chain_held_by_acts_only(chain)) {
2834                                 /* The chain exists only because there is
2835                                  * some action referencing it.
2836                                  */
2837                                 tcf_chain_hold(chain);
2838                         } else {
2839                                 NL_SET_ERR_MSG(extack, "Filter chain already exists");
2840                                 err = -EEXIST;
2841                                 goto errout_block_locked;
2842                         }
2843                 } else {
2844                         if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2845                                 NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
2846                                 err = -ENOENT;
2847                                 goto errout_block_locked;
2848                         }
2849                         chain = tcf_chain_create(block, chain_index);
2850                         if (!chain) {
2851                                 NL_SET_ERR_MSG(extack, "Failed to create filter chain");
2852                                 err = -ENOMEM;
2853                                 goto errout_block_locked;
2854                         }
2855                 }
2856         } else {
2857                 if (!chain || tcf_chain_held_by_acts_only(chain)) {
2858                         NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2859                         err = -EINVAL;
2860                         goto errout_block_locked;
2861                 }
2862                 tcf_chain_hold(chain);
2863         }
2864
2865         if (n->nlmsg_type == RTM_NEWCHAIN) {
2866                 /* Modifying chain requires holding parent block lock. In case
2867                  * the chain was successfully added, take a reference to the
2868                  * chain. This ensures that an empty chain does not disappear at
2869                  * the end of this function.
2870                  */
2871                 tcf_chain_hold(chain);
2872                 chain->explicitly_created = true;
2873         }
2874         mutex_unlock(&block->lock);
2875
2876         switch (n->nlmsg_type) {
2877         case RTM_NEWCHAIN:
2878                 err = tc_chain_tmplt_add(chain, net, tca, extack);
2879                 if (err) {
2880                         tcf_chain_put_explicitly_created(chain);
2881                         goto errout;
2882                 }
2883
2884                 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
2885                                 RTM_NEWCHAIN, false);
2886                 break;
2887         case RTM_DELCHAIN:
2888                 tfilter_notify_chain(net, skb, block, q, parent, n,
2889                                      chain, RTM_DELTFILTER, true);
2890                 /* Flush the chain first as the user requested chain removal. */
2891                 tcf_chain_flush(chain, true);
2892                 /* In case the chain was successfully deleted, put a reference
2893                  * to the chain previously taken during addition.
2894                  */
2895                 tcf_chain_put_explicitly_created(chain);
2896                 break;
2897         case RTM_GETCHAIN:
2898                 err = tc_chain_notify(chain, skb, n->nlmsg_seq,
2899                                       n->nlmsg_flags, n->nlmsg_type, true);
2900                 if (err < 0)
2901                         NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
2902                 break;
2903         default:
2904                 err = -EOPNOTSUPP;
2905                 NL_SET_ERR_MSG(extack, "Unsupported message type");
2906                 goto errout;
2907         }
2908
2909 errout:
2910         tcf_chain_put(chain);
2911 errout_block:
2912         tcf_block_release(q, block, true);
2913         if (err == -EAGAIN)
2914                 /* Replay the request. */
2915                 goto replay;
2916         return err;
2917
2918 errout_block_locked:
2919         mutex_unlock(&block->lock);
2920         goto errout_block;
2921 }
2922
2923 /* called with RTNL */
2924 static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
2925 {
2926         struct net *net = sock_net(skb->sk);
2927         struct nlattr *tca[TCA_MAX + 1];
2928         struct Qdisc *q = NULL;
2929         struct tcf_block *block;
2930         struct tcmsg *tcm = nlmsg_data(cb->nlh);
2931         struct tcf_chain *chain;
2932         long index_start;
2933         long index;
2934         u32 parent;
2935         int err;
2936
2937         if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2938                 return skb->len;
2939
2940         err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
2941                           cb->extack);
2942         if (err)
2943                 return err;
2944
2945         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2946                 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2947                 if (!block)
2948                         goto out;
2949                 /* If we work with block index, q is NULL and parent value
2950                  * will never be used in the following code. The check
2951                  * in tcf_fill_node prevents it. However, compiler does not
2952                  * see that far, so set parent to zero to silence the warning
2953                  * about parent being uninitialized.
2954                  */
2955                 parent = 0;
2956         } else {
2957                 const struct Qdisc_class_ops *cops;
2958                 struct net_device *dev;
2959                 unsigned long cl = 0;
2960
2961                 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2962                 if (!dev)
2963                         return skb->len;
2964
2965                 parent = tcm->tcm_parent;
2966                 if (!parent) {
2967                         q = dev->qdisc;
2968                         parent = q->handle;
2969                 } else {
2970                         q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2971                 }
2972                 if (!q)
2973                         goto out;
2974                 cops = q->ops->cl_ops;
2975                 if (!cops)
2976                         goto out;
2977                 if (!cops->tcf_block)
2978                         goto out;
2979                 if (TC_H_MIN(tcm->tcm_parent)) {
2980                         cl = cops->find(q, tcm->tcm_parent);
2981                         if (cl == 0)
2982                                 goto out;
2983                 }
2984                 block = cops->tcf_block(q, cl, NULL);
2985                 if (!block)
2986                         goto out;
2987                 if (tcf_block_shared(block))
2988                         q = NULL;
2989         }
2990
2991         index_start = cb->args[0];
2992         index = 0;
2993
2994         mutex_lock(&block->lock);
2995         list_for_each_entry(chain, &block->chain_list, list) {
2996                 if ((tca[TCA_CHAIN] &&
2997                      nla_get_u32(tca[TCA_CHAIN]) != chain->index))
2998                         continue;
2999                 if (index < index_start) {
3000                         index++;
3001                         continue;
3002                 }
3003                 if (tcf_chain_held_by_acts_only(chain))
3004                         continue;
3005                 err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
3006                                          chain->index, net, skb, block,
3007                                          NETLINK_CB(cb->skb).portid,
3008                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
3009                                          RTM_NEWCHAIN);
3010                 if (err <= 0)
3011                         break;
3012                 index++;
3013         }
3014         mutex_unlock(&block->lock);
3015
3016         if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
3017                 tcf_block_refcnt_put(block, true);
3018         cb->args[0] = index;
3019
3020 out:
3021         /* If we made no progress, the error (EMSGSIZE) is real */
3022         if (skb->len == 0 && err)
3023                 return err;
3024         return skb->len;
3025 }
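
/* A minimal sketch (not part of this file) of the multi-part dump
 * protocol tc_dump_chain() implements above: restart the walk from the
 * head on every callback invocation, skip the first cb->args[0]
 * entries, stop once the skb fills up, and save the position so the
 * next invocation retries the entry that did not fit. All "foo" names
 * are hypothetical.
 */
struct foo_item {
        struct list_head list;
        u32 id;
};

static int foo_dump_items(struct sk_buff *skb, struct netlink_callback *cb,
                          struct list_head *items)
{
        long index_start = cb->args[0];
        struct foo_item *item;
        long index = 0;

        list_for_each_entry(item, items, list) {
                if (index < index_start) {
                        index++;
                        continue;
                }
                if (nla_put_u32(skb, 1 /* hypothetical attr type */, item->id))
                        break;  /* skb full: retry this entry next call */
                index++;
        }
        cb->args[0] = index;
        return skb->len;
}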
3026
3027 void tcf_exts_destroy(struct tcf_exts *exts)
3028 {
3029 #ifdef CONFIG_NET_CLS_ACT
3030         tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
3031         kfree(exts->actions);
3032         exts->nr_actions = 0;
3033 #endif
3034 }
3035 EXPORT_SYMBOL(tcf_exts_destroy);
3036
3037 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3038                       struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr,
3039                       bool rtnl_held, struct netlink_ext_ack *extack)
3040 {
3041 #ifdef CONFIG_NET_CLS_ACT
3042         {
3043                 struct tc_action *act;
3044                 size_t attr_size = 0;
3045
3046                 if (exts->police && tb[exts->police]) {
3047                         act = tcf_action_init_1(net, tp, tb[exts->police],
3048                                                 rate_tlv, "police", ovr,
3049                                                 TCA_ACT_BIND, rtnl_held,
3050                                                 extack);
3051                         if (IS_ERR(act))
3052                                 return PTR_ERR(act);
3053
3054                         act->type = exts->type = TCA_OLD_COMPAT;
3055                         exts->actions[0] = act;
3056                         exts->nr_actions = 1;
3057                 } else if (exts->action && tb[exts->action]) {
3058                         int err;
3059
3060                         err = tcf_action_init(net, tp, tb[exts->action],
3061                                               rate_tlv, NULL, ovr, TCA_ACT_BIND,
3062                                               exts->actions, &attr_size,
3063                                               rtnl_held, extack);
3064                         if (err < 0)
3065                                 return err;
3066                         exts->nr_actions = err;
3067                 }
3068         }
3069 #else
3070         if ((exts->action && tb[exts->action]) ||
3071             (exts->police && tb[exts->police])) {
3072                 NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
3073                 return -EOPNOTSUPP;
3074         }
3075 #endif
3076
3077         return 0;
3078 }
3079 EXPORT_SYMBOL(tcf_exts_validate);
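
/* A hedged sketch (not part of this file) of how a classifier's
 * ->change() path is expected to drive tcf_exts_init() and
 * tcf_exts_validate(); the TCA_FOO_* attribute IDs and struct
 * foo_filter are hypothetical.
 */
enum {
        TCA_FOO_UNSPEC,
        TCA_FOO_ACT,
        TCA_FOO_POLICE,
};

struct foo_filter {
        struct tcf_exts exts;
};

static int foo_set_parms(struct net *net, struct tcf_proto *tp,
                         struct foo_filter *f, struct nlattr **tb,
                         struct nlattr *est, bool ovr,
                         struct netlink_ext_ack *extack)
{
        int err;

        /* Reserve action storage keyed by this classifier's attr IDs. */
        err = tcf_exts_init(&f->exts, TCA_FOO_ACT, TCA_FOO_POLICE);
        if (err < 0)
                return err;

        /* Parse, create and bind the actions; rtnl assumed held here. */
        err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, true,
                                extack);
        if (err < 0) {
                tcf_exts_destroy(&f->exts);
                return err;
        }
        return 0;
}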
3080
3081 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
3082 {
3083 #ifdef CONFIG_NET_CLS_ACT
3084         struct tcf_exts old = *dst;
3085
3086         *dst = *src;
3087         tcf_exts_destroy(&old);
3088 #endif
3089 }
3090 EXPORT_SYMBOL(tcf_exts_change);
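
/* Sketch of the commit step that pairs with tcf_exts_change():
 * validate into a temporary tcf_exts first, then swap it in, so the
 * old actions are released only after the new ones are fully set up.
 * Reuses the hypothetical foo_filter from the sketch above.
 */
static int foo_replace_exts(struct net *net, struct tcf_proto *tp,
                            struct foo_filter *f, struct nlattr **tb,
                            struct nlattr *est, bool ovr,
                            struct netlink_ext_ack *extack)
{
        struct tcf_exts e;
        int err;

        err = tcf_exts_init(&e, TCA_FOO_ACT, TCA_FOO_POLICE);
        if (err < 0)
                return err;
        err = tcf_exts_validate(net, tp, tb, est, &e, ovr, true, extack);
        if (err < 0) {
                tcf_exts_destroy(&e);
                return err;
        }
        /* Commit: old actions in f->exts are destroyed and unbound. */
        tcf_exts_change(&f->exts, &e);
        return 0;
}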
3091
3092 #ifdef CONFIG_NET_CLS_ACT
3093 static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
3094 {
3095         if (exts->nr_actions == 0)
3096                 return NULL;
3097         else
3098                 return exts->actions[0];
3099 }
3100 #endif
3101
3102 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
3103 {
3104 #ifdef CONFIG_NET_CLS_ACT
3105         struct nlattr *nest;
3106
3107         if (exts->action && tcf_exts_has_actions(exts)) {
3108                 /*
3109                  * Again for backward-compatible mode - we want to
3110                  * work with both old and new modes of entering tc
3111                  * data even if iproute2 is newer - jhs
3112                  */
3113                 if (exts->type != TCA_OLD_COMPAT) {
3114                         nest = nla_nest_start(skb, exts->action);
3115                 if (!nest)
3116                                 goto nla_put_failure;
3117
3118                         if (tcf_action_dump(skb, exts->actions, 0, 0) < 0)
3119                                 goto nla_put_failure;
3120                         nla_nest_end(skb, nest);
3121                 } else if (exts->police) {
3122                         struct tc_action *act = tcf_exts_first_act(exts);
3123                         nest = nla_nest_start(skb, exts->police);
3124                         if (!nest || !act)
3125                                 goto nla_put_failure;
3126                         if (tcf_action_dump_old(skb, act, 0, 0) < 0)
3127                                 goto nla_put_failure;
3128                         nla_nest_end(skb, nest);
3129                 }
3130         }
3131         return 0;
3132
3133 nla_put_failure:
3134         nla_nest_cancel(skb, nest);
3135         return -1;
3136 #else
3137         return 0;
3138 #endif
3139 }
3140 EXPORT_SYMBOL(tcf_exts_dump);
3141
3143 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
3144 {
3145 #ifdef CONFIG_NET_CLS_ACT
3146         struct tc_action *a = tcf_exts_first_act(exts);
3147         if (a && tcf_action_copy_stats(skb, a, 1) < 0)
3148                 return -1;
3149 #endif
3150         return 0;
3151 }
3152 EXPORT_SYMBOL(tcf_exts_dump_stats);
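
/* Sketch of the dump side (hypothetical "foo" names, and assuming this
 * tree's ->dump() signature with the trailing rtnl_held flag): nest the
 * classifier's own attributes under TCA_OPTIONS, let tcf_exts_dump()
 * emit the actions inside the nest, and append the action statistics
 * after closing it.
 */
static int foo_dump(struct net *net, struct tcf_proto *tp, void *fh,
                    struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
        struct foo_filter *f = fh;
        struct nlattr *nest;

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (!nest)
                goto nla_put_failure;
        if (tcf_exts_dump(skb, &f->exts) < 0)
                goto nla_put_failure;
        nla_nest_end(skb, nest);
        if (tcf_exts_dump_stats(skb, &f->exts) < 0)
                goto nla_put_failure;
        return skb->len;

nla_put_failure:
        nla_nest_cancel(skb, nest);   /* NULL-safe: nlmsg_trim checks mark */
        return -1;
}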
3153
3154 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3155                      void *type_data, bool err_stop)
3156 {
3157         struct tcf_block_cb *block_cb;
3158         int ok_count = 0;
3159         int err;
3160
3161         /* Make sure all netdevs sharing this block are offload-capable. */
3162         if (block->nooffloaddevcnt && err_stop)
3163                 return -EOPNOTSUPP;
3164
3165         list_for_each_entry(block_cb, &block->cb_list, list) {
3166                 err = block_cb->cb(type, type_data, block_cb->cb_priv);
3167                 if (err) {
3168                         if (err_stop)
3169                                 return err;
3170                 } else {
3171                         ok_count++;
3172                 }
3173         }
3174         return ok_count;
3175 }
3176 EXPORT_SYMBOL(tc_setup_cb_call);
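
/* Sketch of the receiving end of tc_setup_cb_call(): a driver registers
 * a tc_setup_cb_t callback on the block (e.g. via
 * tcf_block_cb_register()) and gets invoked once per call above. The
 * "foo" names are hypothetical; the flower rule programming itself is
 * elided.
 */
struct foo_priv {
        struct net_device *dev;
};

static int foo_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
                                 void *cb_priv)
{
        struct foo_priv *priv = cb_priv;

        switch (type) {
        case TC_SETUP_CLSFLOWER:
                /* type_data is a struct tc_cls_flower_offload here. */
                netdev_dbg(priv->dev, "flower offload requested\n");
                return 0;
        default:
                /* Unsupported types must report -EOPNOTSUPP. */
                return -EOPNOTSUPP;
        }
}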
3177
3178 int tc_setup_flow_action(struct flow_action *flow_action,
3179                          const struct tcf_exts *exts)
3180 {
3181         const struct tc_action *act;
3182         int i, j, k;
3183
3184         if (!exts)
3185                 return 0;
3186
3187         j = 0;
3188         tcf_exts_for_each_action(i, act, exts) {
3189                 struct flow_action_entry *entry;
3190
3191                 entry = &flow_action->entries[j];
3192                 if (is_tcf_gact_ok(act)) {
3193                         entry->id = FLOW_ACTION_ACCEPT;
3194                 } else if (is_tcf_gact_shot(act)) {
3195                         entry->id = FLOW_ACTION_DROP;
3196                 } else if (is_tcf_gact_trap(act)) {
3197                         entry->id = FLOW_ACTION_TRAP;
3198                 } else if (is_tcf_gact_goto_chain(act)) {
3199                         entry->id = FLOW_ACTION_GOTO;
3200                         entry->chain_index = tcf_gact_goto_chain_index(act);
3201                 } else if (is_tcf_mirred_egress_redirect(act)) {
3202                         entry->id = FLOW_ACTION_REDIRECT;
3203                         entry->dev = tcf_mirred_dev(act);
3204                 } else if (is_tcf_mirred_egress_mirror(act)) {
3205                         entry->id = FLOW_ACTION_MIRRED;
3206                         entry->dev = tcf_mirred_dev(act);
3207                 } else if (is_tcf_vlan(act)) {
3208                         switch (tcf_vlan_action(act)) {
3209                         case TCA_VLAN_ACT_PUSH:
3210                                 entry->id = FLOW_ACTION_VLAN_PUSH;
3211                                 entry->vlan.vid = tcf_vlan_push_vid(act);
3212                                 entry->vlan.proto = tcf_vlan_push_proto(act);
3213                                 entry->vlan.prio = tcf_vlan_push_prio(act);
3214                                 break;
3215                         case TCA_VLAN_ACT_POP:
3216                                 entry->id = FLOW_ACTION_VLAN_POP;
3217                                 break;
3218                         case TCA_VLAN_ACT_MODIFY:
3219                                 entry->id = FLOW_ACTION_VLAN_MANGLE;
3220                                 entry->vlan.vid = tcf_vlan_push_vid(act);
3221                                 entry->vlan.proto = tcf_vlan_push_proto(act);
3222                                 entry->vlan.prio = tcf_vlan_push_prio(act);
3223                                 break;
3224                         default:
3225                                 goto err_out;
3226                         }
3227                 } else if (is_tcf_tunnel_set(act)) {
3228                         entry->id = FLOW_ACTION_TUNNEL_ENCAP;
3229                         entry->tunnel = tcf_tunnel_info(act);
3230                 } else if (is_tcf_tunnel_release(act)) {
3231                         entry->id = FLOW_ACTION_TUNNEL_DECAP;
3232                         entry->tunnel = tcf_tunnel_info(act);
3233                 } else if (is_tcf_pedit(act)) {
3234                         for (k = 0; k < tcf_pedit_nkeys(act); k++) {
3235                                 switch (tcf_pedit_cmd(act, k)) {
3236                                 case TCA_PEDIT_KEY_EX_CMD_SET:
3237                                         entry->id = FLOW_ACTION_MANGLE;
3238                                         break;
3239                                 case TCA_PEDIT_KEY_EX_CMD_ADD:
3240                                         entry->id = FLOW_ACTION_ADD;
3241                                         break;
3242                                 default:
3243                                         goto err_out;
3244                                 }
3245                                 entry->mangle.htype = tcf_pedit_htype(act, k);
3246                                 entry->mangle.mask = tcf_pedit_mask(act, k);
3247                                 entry->mangle.val = tcf_pedit_val(act, k);
3248                                 entry->mangle.offset = tcf_pedit_offset(act, k);
3249                                 entry = &flow_action->entries[++j];
3250                         }
3251                 } else if (is_tcf_csum(act)) {
3252                         entry->id = FLOW_ACTION_CSUM;
3253                         entry->csum_flags = tcf_csum_update_flags(act);
3254                 } else if (is_tcf_skbedit_mark(act)) {
3255                         entry->id = FLOW_ACTION_MARK;
3256                         entry->mark = tcf_skbedit_mark(act);
3257                 } else {
3258                         goto err_out;
3259                 }
3260
3261                 if (!is_tcf_pedit(act))
3262                         j++;
3263         }
3264         return 0;
3265 err_out:
3266         return -EOPNOTSUPP;
3267 }
3268 EXPORT_SYMBOL(tc_setup_flow_action);
3269
3270 unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
3271 {
3272         unsigned int num_acts = 0;
3273         struct tc_action *act;
3274         int i;
3275
3276         tcf_exts_for_each_action(i, act, exts) {
3277                 if (is_tcf_pedit(act))
3278                         num_acts += tcf_pedit_nkeys(act);
3279                 else
3280                         num_acts++;
3281         }
3282         return num_acts;
3283 }
3284 EXPORT_SYMBOL(tcf_exts_num_actions);
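
/* A hedged sketch tying the two helpers above together: size the entry
 * array with tcf_exts_num_actions() (one entry per action, plus one per
 * pedit key), translate with tc_setup_flow_action(), then walk the
 * result. foo_build_rule() is hypothetical; flow_rule_alloc() and
 * flow_action_for_each() come from <net/flow_offload.h>.
 */
static struct flow_rule *foo_build_rule(struct tcf_exts *exts)
{
        struct flow_action_entry *entry;
        struct flow_rule *rule;
        int i;

        rule = flow_rule_alloc(tcf_exts_num_actions(exts));
        if (!rule)
                return NULL;

        if (tc_setup_flow_action(&rule->action, exts) < 0) {
                kfree(rule);
                return NULL;
        }

        flow_action_for_each(i, entry, &rule->action)
                pr_debug("translated flow action id %d\n", entry->id);

        return rule;
}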
3285
3286 static __net_init int tcf_net_init(struct net *net)
3287 {
3288         struct tcf_net *tn = net_generic(net, tcf_net_id);
3289
3290         spin_lock_init(&tn->idr_lock);
3291         idr_init(&tn->idr);
3292         return 0;
3293 }
3294
3295 static void __net_exit tcf_net_exit(struct net *net)
3296 {
3297         struct tcf_net *tn = net_generic(net, tcf_net_id);
3298
3299         idr_destroy(&tn->idr);
3300 }
3301
3302 static struct pernet_operations tcf_net_ops = {
3303         .init = tcf_net_init,
3304         .exit = tcf_net_exit,
3305         .id   = &tcf_net_id,
3306         .size = sizeof(struct tcf_net),
3307 };
3308
3309 static int __init tc_filter_init(void)
3310 {
3311         int err;
3312
3313         tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
3314         if (!tc_filter_wq)
3315                 return -ENOMEM;
3316
3317         err = register_pernet_subsys(&tcf_net_ops);
3318         if (err)
3319                 goto err_register_pernet_subsys;
3320
3321         err = rhashtable_init(&indr_setup_block_ht,
3322                               &tc_indr_setup_block_ht_params);
3323         if (err)
3324                 goto err_rhash_setup_block_ht;
3325
3326         rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
3327                       RTNL_FLAG_DOIT_UNLOCKED);
3328         rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
3329                       RTNL_FLAG_DOIT_UNLOCKED);
3330         rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
3331                       tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
3332         rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
3333         rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
3334         rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
3335                       tc_dump_chain, 0);
3336
3337         return 0;
3338
3339 err_rhash_setup_block_ht:
3340         unregister_pernet_subsys(&tcf_net_ops);
3341 err_register_pernet_subsys:
3342         destroy_workqueue(tc_filter_wq);
3343         return err;
3344 }
3345
3346 subsys_initcall(tc_filter_init);