/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
	const struct tcf_proto_ops *t, *res = NULL;

	read_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (strcmp(kind, t->kind) == 0) {
			if (try_module_get(t->owner))
	read_unlock(&cls_mod_lock);

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, struct netlink_ext_ack *extack)
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	request_module("cls_%s", kind);
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);

/* Register(unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
	struct tcf_proto_ops *t;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
	list_add_tail(&ops->head, &tcf_proto_base);
	write_unlock(&cls_mod_lock);
EXPORT_SYMBOL(register_tcf_proto_ops);
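
/* Usage sketch (illustrative only; the "foo" classifier and its callbacks
 * are hypothetical): a classifier module registers its ops on load and
 * unregisters them on unload.  A real classifier must fill in the complete
 * struct tcf_proto_ops contract (classify, init, destroy, get, change,
 * delete, walk, dump, ...).
 */
#if 0
static struct tcf_proto_ops cls_foo_ops __read_mostly = {
	.kind		= "foo",
	.classify	= foo_classify,
	.init		= foo_init,
	.destroy	= foo_destroy,
	.get		= foo_get,
	.change		= foo_change,
	.delete		= foo_delete,
	.walk		= foo_walk,
	.dump		= foo_dump,
	.owner		= THIS_MODULE,
};

static int __init cls_foo_init(void)
{
	return register_tcf_proto_ops(&cls_foo_ops);
}

static void __exit cls_foo_exit(void)
{
	unregister_tcf_proto_ops(&cls_foo_ops);
}

module_init(cls_foo_init);
module_exit(cls_foo_exit);
#endif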
static struct workqueue_struct *tc_filter_wq;

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
	struct tcf_proto_ops *t;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
	write_unlock(&cls_mod_lock);
EXPORT_SYMBOL(unregister_tcf_proto_ops);

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
EXPORT_SYMBOL(tcf_queue_work);
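
/* Usage sketch (illustrative only; foo_filter and its helpers are
 * hypothetical): classifiers typically free per-filter state through
 * tcf_queue_work() so that the free runs after an RCU grace period and can
 * take RTNL itself.
 */
#if 0
struct foo_filter {
	struct tcf_exts exts;
	struct rcu_work rwork;
};

static void foo_delete_filter_work(struct work_struct *work)
{
	struct foo_filter *f = container_of(to_rcu_work(work),
					    struct foo_filter, rwork);

	rtnl_lock();
	tcf_exts_destroy(&f->exts);
	kfree(f);
	rtnl_unlock();
}

/* Called from the classifier's ->delete()/->destroy() with RTNL held. */
static void foo_delete_filter(struct foo_filter *f)
{
	tcf_queue_work(&f->rwork, foo_delete_filter_work);
}
#endif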
/* Select new prio value from the range, managed by kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	first = tp->prio - 1;

	return TC_H_MAJ(first);

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  struct netlink_ext_ack *extack)
	struct tcf_proto *tp;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;

	err = tp->ops->init(tp);
		module_put(tp->ops->owner);

static void tcf_proto_destroy(struct tcf_proto *tp,
			      struct netlink_ext_ack *extack)
	tp->ops->destroy(tp, extack);
	module_put(tp->ops->owner);

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
	struct tcf_chain *chain;

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	list_add_tail(&chain->list, &block->chain_list);
	chain->block = block;
	chain->index = chain_index;
		block->chain0.chain = chain;

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);

static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);

static void tcf_chain_destroy(struct tcf_chain *chain)
	struct tcf_block *block = chain->block;

	list_del(&chain->list);
		block->chain0.chain = NULL;
	if (list_empty(&block->chain_list) && !refcount_read(&block->refcnt))
		kfree_rcu(block, rcu);

static void tcf_chain_hold(struct tcf_chain *chain)

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
	/* In case all the references are action references, this
	 * chain should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
	struct tcf_chain *chain;

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast);

static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
	struct tcf_chain *chain = tcf_chain_lookup(block, chain_index);

		tcf_chain_hold(chain);
	chain = tcf_chain_create(block, chain_index);
		++chain->action_refcnt;

	/* Send notification only in case we got the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it and user ought
	 * not know about them.
	 */
	if (chain->refcnt - chain->action_refcnt == 1 && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
	return __tcf_chain_get(block, chain_index, create, false);

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
	return __tcf_chain_get(block, chain_index, true, true);
EXPORT_SYMBOL(tcf_chain_get_by_act);
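
/* Usage sketch (illustrative only; the foo_ helpers are hypothetical): an
 * action implementing "goto chain <index>" pins the destination chain with
 * an action reference while the action exists, and releases it with
 * tcf_chain_put_by_act() when the action is torn down.
 */
#if 0
static struct tcf_chain *foo_goto_chain_get(struct tcf_proto *tp, u32 tcfa_action)
{
	u32 chain_index = tcfa_action & TC_ACT_EXT_VAL_MASK;

	return tcf_chain_get_by_act(tp->chain->block, chain_index);
}

static void foo_goto_chain_put(struct tcf_chain *chain)
{
	tcf_chain_put_by_act(chain);
}
#endif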
static void tc_chain_tmplt_del(struct tcf_chain *chain);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act)
		chain->action_refcnt--;

	/* The last dropped non-action reference will trigger notification. */
	if (chain->refcnt - chain->action_refcnt == 0 && !by_act)
		tc_chain_notify(chain, NULL, 0, 0, RTM_DELCHAIN, false);

	if (chain->refcnt == 0) {
		tc_chain_tmplt_del(chain);
		tcf_chain_destroy(chain);

static void tcf_chain_put(struct tcf_chain *chain)
	__tcf_chain_put(chain, false);

void tcf_chain_put_by_act(struct tcf_chain *chain)
	__tcf_chain_put(chain, true);
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
	if (chain->explicitly_created)
		tcf_chain_put(chain);

static void tcf_chain_flush(struct tcf_chain *chain)
	struct tcf_proto *tp = rtnl_dereference(chain->filter_chain);

	tcf_chain0_head_change(chain, NULL);
		RCU_INIT_POINTER(chain->filter_chain, tp->next);
		tcf_proto_destroy(tp, NULL);
		tp = rtnl_dereference(chain->filter_chain);
		tcf_chain_put(chain);

static struct tcf_block *tc_dev_ingress_block(struct net_device *dev)
	const struct Qdisc_class_ops *cops;

	if (!dev_ingress_queue(dev))

	qdisc = dev_ingress_queue(dev)->qdisc_sleeping;

	cops = qdisc->ops->cl_ops;

	if (!cops->tcf_block)

	return cops->tcf_block(qdisc, TC_H_MIN_INGRESS, NULL);

static struct rhashtable indr_setup_block_ht;

struct tc_indr_block_dev {
	struct rhash_head ht_node;
	struct net_device *dev;
	struct list_head cb_list;
	struct tcf_block *block;

struct tc_indr_block_cb {
	struct list_head list;
	tc_indr_block_bind_cb_t *cb;

static const struct rhashtable_params tc_indr_setup_block_ht_params = {
	.key_offset = offsetof(struct tc_indr_block_dev, dev),
	.head_offset = offsetof(struct tc_indr_block_dev, ht_node),
	.key_len = sizeof(struct net_device *),

static struct tc_indr_block_dev *
tc_indr_block_dev_lookup(struct net_device *dev)
	return rhashtable_lookup_fast(&indr_setup_block_ht, &dev,
				      tc_indr_setup_block_ht_params);

static struct tc_indr_block_dev *tc_indr_block_dev_get(struct net_device *dev)
	struct tc_indr_block_dev *indr_dev;

	indr_dev = tc_indr_block_dev_lookup(dev);

	indr_dev = kzalloc(sizeof(*indr_dev), GFP_KERNEL);

	INIT_LIST_HEAD(&indr_dev->cb_list);
	indr_dev->block = tc_dev_ingress_block(dev);
	if (rhashtable_insert_fast(&indr_setup_block_ht, &indr_dev->ht_node,
				   tc_indr_setup_block_ht_params)) {

static void tc_indr_block_dev_put(struct tc_indr_block_dev *indr_dev)
	if (--indr_dev->refcnt)

	rhashtable_remove_fast(&indr_setup_block_ht, &indr_dev->ht_node,
			       tc_indr_setup_block_ht_params);

static struct tc_indr_block_cb *
tc_indr_block_cb_lookup(struct tc_indr_block_dev *indr_dev,
			tc_indr_block_bind_cb_t *cb, void *cb_ident)
	struct tc_indr_block_cb *indr_block_cb;

	list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
		if (indr_block_cb->cb == cb &&
		    indr_block_cb->cb_ident == cb_ident)
			return indr_block_cb;

static struct tc_indr_block_cb *
tc_indr_block_cb_add(struct tc_indr_block_dev *indr_dev, void *cb_priv,
		     tc_indr_block_bind_cb_t *cb, void *cb_ident)
	struct tc_indr_block_cb *indr_block_cb;

	indr_block_cb = tc_indr_block_cb_lookup(indr_dev, cb, cb_ident);
		return ERR_PTR(-EEXIST);

	indr_block_cb = kzalloc(sizeof(*indr_block_cb), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	indr_block_cb->cb_priv = cb_priv;
	indr_block_cb->cb = cb;
	indr_block_cb->cb_ident = cb_ident;
	list_add(&indr_block_cb->list, &indr_dev->cb_list);

	return indr_block_cb;

static void tc_indr_block_cb_del(struct tc_indr_block_cb *indr_block_cb)
	list_del(&indr_block_cb->list);
	kfree(indr_block_cb);

static void tc_indr_block_ing_cmd(struct tc_indr_block_dev *indr_dev,
				  struct tc_indr_block_cb *indr_block_cb,
				  enum tc_block_command command)
	struct tc_block_offload bo = {
		.binder_type = TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
		.block = indr_dev->block,

	if (!indr_dev->block)

	indr_block_cb->cb(indr_dev->dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,

int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				tc_indr_block_bind_cb_t *cb, void *cb_ident)
	struct tc_indr_block_cb *indr_block_cb;
	struct tc_indr_block_dev *indr_dev;

	indr_dev = tc_indr_block_dev_get(dev);

	indr_block_cb = tc_indr_block_cb_add(indr_dev, cb_priv, cb, cb_ident);
	err = PTR_ERR_OR_ZERO(indr_block_cb);

	tc_indr_block_ing_cmd(indr_dev, indr_block_cb, TC_BLOCK_BIND);

	tc_indr_block_dev_put(indr_dev);
EXPORT_SYMBOL_GPL(__tc_indr_block_cb_register);

int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
			      tc_indr_block_bind_cb_t *cb, void *cb_ident)

	err = __tc_indr_block_cb_register(dev, cb_priv, cb, cb_ident);

EXPORT_SYMBOL_GPL(tc_indr_block_cb_register);
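
/* Usage sketch (illustrative only; all foo_ names are hypothetical): a
 * driver that wants to see blocks bound to netdevs it does not own (for
 * example tunnel devices) registers an indirect block callback per such
 * netdev and tears it down again when it stops tracking the device.
 */
#if 0
static int foo_indr_setup_tc_cb(struct net_device *dev, void *cb_priv,
				enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		/* Inspect the struct tc_block_offload in type_data and
		 * bind/unbind per-block callbacks accordingly.
		 */
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int foo_track_netdev(struct foo_priv *priv, struct net_device *netdev)
{
	return tc_indr_block_cb_register(netdev, priv,
					 foo_indr_setup_tc_cb, priv);
}

static void foo_untrack_netdev(struct foo_priv *priv, struct net_device *netdev)
{
	tc_indr_block_cb_unregister(netdev, foo_indr_setup_tc_cb, priv);
}
#endif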
void __tc_indr_block_cb_unregister(struct net_device *dev,
				   tc_indr_block_bind_cb_t *cb, void *cb_ident)
	struct tc_indr_block_cb *indr_block_cb;
	struct tc_indr_block_dev *indr_dev;

	indr_dev = tc_indr_block_dev_lookup(dev);

	indr_block_cb = tc_indr_block_cb_lookup(indr_dev, cb, cb_ident);

	/* Send unbind message if required to free any block cbs. */
	tc_indr_block_ing_cmd(indr_dev, indr_block_cb, TC_BLOCK_UNBIND);
	tc_indr_block_cb_del(indr_block_cb);
	tc_indr_block_dev_put(indr_dev);
EXPORT_SYMBOL_GPL(__tc_indr_block_cb_unregister);

void tc_indr_block_cb_unregister(struct net_device *dev,
				 tc_indr_block_bind_cb_t *cb, void *cb_ident)
	__tc_indr_block_cb_unregister(dev, cb, cb_ident);
EXPORT_SYMBOL_GPL(tc_indr_block_cb_unregister);

static void tc_indr_block_call(struct tcf_block *block, struct net_device *dev,
			       struct tcf_block_ext_info *ei,
			       enum tc_block_command command,
			       struct netlink_ext_ack *extack)
	struct tc_indr_block_cb *indr_block_cb;
	struct tc_indr_block_dev *indr_dev;
	struct tc_block_offload bo = {
		.binder_type = ei->binder_type,

	indr_dev = tc_indr_block_dev_lookup(dev);

	indr_dev->block = command == TC_BLOCK_BIND ? block : NULL;

	list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
		indr_block_cb->cb(dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,

static bool tcf_block_offload_in_use(struct tcf_block *block)
	return block->offloadcnt;

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev,
				 struct tcf_block_ext_info *ei,
				 enum tc_block_command command,
				 struct netlink_ext_ack *extack)
	struct tc_block_offload bo = {};

	bo.command = command;
	bo.binder_type = ei->binder_type;
	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);

static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
	struct net_device *dev = q->dev_queue->dev;

	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_inc;

	/* If tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, forbid to bind.
	 */
	if (!tc_can_offload(dev) && tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");

	err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;

	tc_indr_block_call(block, dev, ei, TC_BLOCK_BIND, extack);

	if (tcf_block_offload_in_use(block))
	block->nooffloaddevcnt++;
	tc_indr_block_call(block, dev, ei, TC_BLOCK_BIND, extack);

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
	struct net_device *dev = q->dev_queue->dev;

	tc_indr_block_call(block, dev, ei, TC_BLOCK_UNBIND, NULL);

	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_dec;
	err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;

	WARN_ON(block->nooffloaddevcnt-- == 0);
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
	struct tcf_chain *chain0 = block->chain0.chain;
	struct tcf_filter_chain_list_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;
	if (chain0 && chain0->filter_chain)
		tcf_chain_head_change_item(item, chain0->filter_chain);
	list_add(&item->list, &block->chain0.filter_chain_list);

tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
	struct tcf_chain *chain0 = block->chain0.chain;
	struct tcf_filter_chain_list_item *item;

	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);

	spinlock_t idr_lock; /* Protects idr */

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
	spin_unlock(&tn->idr_lock);

static void tcf_block_remove(struct tcf_block *block, struct net *net)
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  struct netlink_ext_ack *extack)
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->cb_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->index = block_index;

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
	struct tcf_block *block;

	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))

static void tcf_block_flush_all_chains(struct tcf_block *block)
	struct tcf_chain *chain;

	/* Hold a refcnt for all chains, so that they don't disappear
	 * while we are iterating.
	 */
	list_for_each_entry(chain, &block->chain_list, list)
		tcf_chain_hold(chain);

	list_for_each_entry(chain, &block->chain_list, list)
		tcf_chain_flush(chain);

static void tcf_block_put_all_chains(struct tcf_block *block)
	struct tcf_chain *chain, *tmp;

	/* At this point, all the chains should have refcnt >= 1. */
	list_for_each_entry_safe(chain, tmp, &block->chain_list, list) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_put(chain);

static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei)
	if (refcount_dec_and_test(&block->refcnt)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when last chain is freed. However, if chain_list
		 * is empty, block has to be manually deallocated. After block
		 * reference counter reached 0, it is no longer possible to
		 * increment it or add new chains to block.
		 */
		bool free_block = list_empty(&block->chain_list);

		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

			tcf_block_flush_all_chains(block);

			tcf_block_offload_unbind(block, q, ei);

			kfree_rcu(block, rcu);
			tcf_block_put_all_chains(block);
		tcf_block_offload_unbind(block, q, ei);

static void tcf_block_refcnt_put(struct tcf_block *block)
	__tcf_block_put(block, NULL, NULL);
/* Set q, parent, cl when appropriate. */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		const struct Qdisc_class_ops *cops;
		struct net_device *dev;

		dev = dev_get_by_index_rcu(net, ifindex);
			return ERR_PTR(-ENODEV);

			*parent = (*q)->handle;
			*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
				NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists");

		*q = qdisc_refcount_inc_nz(*q);
			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists");

		/* Is it classful? */
		cops = (*q)->ops->cl_ops;
			NL_SET_ERR_MSG(extack, "Qdisc not classful");

		if (!cops->tcf_block) {
			NL_SET_ERR_MSG(extack, "Class doesn't support blocks");

		/* At this point we know that qdisc is not noop_qdisc,
		 * which means that qdisc holds a reference to net_device
		 * and we hold a reference to qdisc, so it is safe to release
		 * rcu read lock.
		 */

		/* Do we search for filter, attached to class? */
		if (TC_H_MIN(*parent)) {
			*cl = cops->find(*q, *parent);
				NL_SET_ERR_MSG(extack, "Specified class doesn't exist");

		/* And the last stroke */
		block = cops->tcf_block(*q, *cl, extack);
		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");

		/* Always take reference to block in order to support execution
		 * of rules update path of cls API without rtnl lock. Caller
		 * must release block when it is finished using it. 'if' block
		 * of this conditional obtain reference to block by calling
		 * tcf_block_refcnt_get().
		 */
		refcount_inc(&block->refcnt);

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block)
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block);

struct tcf_block_owner_item {
	struct list_head list;
	enum tcf_block_binder_type binder_type;

tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       enum tcf_block_binder_type binder_type)
	if (block->keep_dst &&
	    binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));

void tcf_block_netif_keep_dst(struct tcf_block *block)
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
			       enum tcf_block_binder_type binder_type)
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);

static void tcf_block_owner_del(struct tcf_block *block,
				enum tcf_block_binder_type binder_type)
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);

int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;

	if (ei->block_index)
		/* block_index not 0 means the shared block is requested */
		block = tcf_block_refcnt_get(net, ei->block_index);

		block = tcf_block_create(net, q, ei->block_index, extack);
			return PTR_ERR(block);
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
				goto err_block_insert;

	err = tcf_block_owner_add(block, q, ei->binder_type);
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain0_head_change_cb_add(block, ei, extack);
		goto err_chain0_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei, extack);
		goto err_block_offload_bind;

err_block_offload_bind:
	tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
	tcf_block_refcnt_put(block);
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
EXPORT_SYMBOL(tcf_block_get);
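
/* Usage sketch (illustrative only; foo_sched_data is hypothetical): a
 * classful qdisc obtains a block for its filters in ->init() and releases
 * it in ->destroy().  The filter_list pointer is kept up to date through
 * the default chain0 head-change callback installed by tcf_block_get().
 */
#if 0
struct foo_sched_data {
	struct tcf_proto __rcu *filter_list;
	struct tcf_block *block;
};

static int foo_qdisc_init(struct Qdisc *sch, struct nlattr *opt,
			  struct netlink_ext_ack *extack)
{
	struct foo_sched_data *q = qdisc_priv(sch);

	return tcf_block_get(&q->block, &q->filter_list, sch, extack);
}

static void foo_qdisc_destroy(struct Qdisc *sch)
{
	struct foo_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
}
#endif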
/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should be all removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
	tcf_chain0_head_change_cb_del(block, ei);
	tcf_block_owner_del(block, q, ei->binder_type);

	__tcf_block_put(block, q, ei);
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
	struct tcf_block_ext_info ei = {0, };

	tcf_block_put_ext(block, block->q, &ei);
EXPORT_SYMBOL(tcf_block_put);

struct tcf_block_cb {
	struct list_head list;
	unsigned int refcnt;

void *tcf_block_cb_priv(struct tcf_block_cb *block_cb)
	return block_cb->cb_priv;
EXPORT_SYMBOL(tcf_block_cb_priv);

struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident)
{
	struct tcf_block_cb *block_cb;

	list_for_each_entry(block_cb, &block->cb_list, list)
		if (block_cb->cb == cb && block_cb->cb_ident == cb_ident)
EXPORT_SYMBOL(tcf_block_cb_lookup);

void tcf_block_cb_incref(struct tcf_block_cb *block_cb)
EXPORT_SYMBOL(tcf_block_cb_incref);

unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
	return --block_cb->refcnt;
EXPORT_SYMBOL(tcf_block_cb_decref);

tcf_block_playback_offloads(struct tcf_block *block, tc_setup_cb_t *cb,
			    void *cb_priv, bool add, bool offload_in_use,
			    struct netlink_ext_ack *extack)
	struct tcf_chain *chain;
	struct tcf_proto *tp;

	list_for_each_entry(chain, &block->chain_list, list) {
		for (tp = rtnl_dereference(chain->filter_chain); tp;
		     tp = rtnl_dereference(tp->next)) {
			if (tp->ops->reoffload) {
				err = tp->ops->reoffload(tp, add, cb, cb_priv,
					goto err_playback_remove;
			} else if (add && offload_in_use) {
				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
				goto err_playback_remove;

err_playback_remove:
	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,

struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     struct netlink_ext_ack *extack)
	struct tcf_block_cb *block_cb;

	/* Replay any already present rules */
	err = tcf_block_playback_offloads(block, cb, cb_priv, true,
					  tcf_block_offload_in_use(block),
		return ERR_PTR(err);

	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);
	block_cb->cb_ident = cb_ident;
	block_cb->cb_priv = cb_priv;
	list_add(&block_cb->list, &block->cb_list);
EXPORT_SYMBOL(__tcf_block_cb_register);

int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv, struct netlink_ext_ack *extack)
	struct tcf_block_cb *block_cb;

	block_cb = __tcf_block_cb_register(block, cb, cb_ident, cb_priv,
	return PTR_ERR_OR_ZERO(block_cb);
EXPORT_SYMBOL(tcf_block_cb_register);
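
/* Usage sketch (illustrative only; the foo_ names are hypothetical): a
 * driver binds per-block callbacks from its ndo_setup_tc() handler when it
 * receives TC_SETUP_BLOCK, and unbinds them again on TC_BLOCK_UNBIND.
 */
#if 0
static int foo_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				 void *cb_priv)
{
	/* Translate classifier offload requests (e.g. TC_SETUP_CLSFLOWER)
	 * into hardware programming for this foo_priv instance.
	 */
	return -EOPNOTSUPP;
}

static int foo_setup_tc_block(struct foo_priv *priv,
			      struct tc_block_offload *f)
{
	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block, foo_setup_tc_block_cb,
					     priv, priv, f->extack);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, foo_setup_tc_block_cb, priv);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
#endif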
void __tcf_block_cb_unregister(struct tcf_block *block,
			       struct tcf_block_cb *block_cb)
	tcf_block_playback_offloads(block, block_cb->cb, block_cb->cb_priv,
				    false, tcf_block_offload_in_use(block),
	list_del(&block_cb->list);
EXPORT_SYMBOL(__tcf_block_cb_unregister);

void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident)
	struct tcf_block_cb *block_cb;

	block_cb = tcf_block_cb_lookup(block, cb, cb_ident);
	__tcf_block_cb_unregister(block, block_cb);
EXPORT_SYMBOL(tcf_block_cb_unregister);

/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
	__be16 protocol = tc_skb_protocol(skb);
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 4;
	const struct tcf_proto *orig_tp = tp;
	const struct tcf_proto *first_tp;

	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))

		err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->chain->block->index,
				       ntohs(tp->protocol));

	protocol = tc_skb_protocol(skb);
EXPORT_SYMBOL(tcf_classify);
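
/* Usage sketch (illustrative only; foo_ names are hypothetical, and the
 * action-result handling is trimmed): a qdisc runs its filter chain from
 * the enqueue path under the RCU-BH read side and maps the returned
 * classid to one of its classes.
 */
#if 0
static struct foo_class *foo_classify_skb(struct sk_buff *skb,
					  struct Qdisc *sch, int *qerr)
{
	struct foo_sched_data *q = qdisc_priv(sch);
	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
	struct tcf_result res;
	int result;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	if (!fl)
		return foo_default_class(q);

	result = tcf_classify(skb, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
	switch (result) {
	case TC_ACT_QUEUED:
	case TC_ACT_STOLEN:
	case TC_ACT_TRAP:
		*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		/* fall through */
	case TC_ACT_SHOT:
		return NULL;
	}
#endif
	return foo_find_class(q, res.classid);
}
#endif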
struct tcf_chain_info {
	struct tcf_proto __rcu **pprev;
	struct tcf_proto __rcu *next;

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain_info *chain_info)
	return rtnl_dereference(*chain_info->pprev);

static void tcf_chain_tp_insert(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
	if (*chain_info->pprev == chain->filter_chain)
		tcf_chain0_head_change(chain, tp);
	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain_info));
	rcu_assign_pointer(*chain_info->pprev, tp);
	tcf_chain_hold(chain);

static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
	struct tcf_proto *next = rtnl_dereference(chain_info->next);

	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
	tcf_chain_put(chain);

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
	struct tcf_proto **pprev;
	struct tcf_proto *tp;

	/* Check the chain for existence of proto-tcf with this priority */
	for (pprev = &chain->filter_chain;
	     (tp = rtnl_dereference(*pprev)); pprev = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (prio_allocate ||
				    (tp->protocol != protocol && protocol))
					return ERR_PTR(-EINVAL);
	chain_info->pprev = pprev;
	chain_info->next = tp ? tp->next : NULL;

static int tcf_fill_node(struct net *net, struct sk_buff *skb,
			 struct tcf_proto *tp, struct tcf_block *block,
			 struct Qdisc *q, u32 parent, void *fh,
			 u32 portid, u32 seq, u16 flags, int event)
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
		tcm->tcm_parent = parent;
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
		goto nla_put_failure;
		tcm->tcm_handle = 0;
		if (tp->ops->dump && tp->ops->dump(net, tp, fh, skb, tcm) < 0)
			goto nla_put_failure;
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;

static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  struct tcf_block *block, struct Qdisc *q,
			  u32 parent, void *fh, int event, bool unicast)
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, event) <= 0) {

		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);

static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
			      struct nlmsghdr *n, struct tcf_proto *tp,
			      struct tcf_block *block, struct Qdisc *q,
			      u32 parent, void *fh, bool unicast, bool *last,
			      struct netlink_ext_ack *extack)
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to build del event notification");

	err = tp->ops->delete(tp, fh, last, extack);

		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");

static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
				 struct tcf_block *block, struct Qdisc *q,
				 u32 parent, struct nlmsghdr *n,
				 struct tcf_chain *chain, int event)
	struct tcf_proto *tp;

	for (tp = rtnl_dereference(chain->filter_chain);
	     tp; tp = rtnl_dereference(tp->next))
		tfilter_notify(net, oskb, n, tp, block,
			       q, parent, NULL, event, false);
static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	struct tcf_proto *tp;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))

	err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);

	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	prio_allocate = false;
	parent = t->tcm_parent;

		/* If no priority is provided by the user,
		 * we allocate one.
		 */
		if (n->nlmsg_flags & NLM_F_CREATE) {
			prio = TC_H_MAKE(0x80000000U, 0U);
			prio_allocate = true;
			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");

	/* Find head of filter chain. */

	block = tcf_block_find(net, &q, &parent, &cl,
			       t->tcm_ifindex, t->tcm_block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
	chain = tcf_chain_get(block, chain_index, true);
		NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");

	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, prio_allocate);
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");

		/* Proto-tcf does not exist, create new one */

		if (tca[TCA_KIND] == NULL || !protocol) {
			NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");

		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");

			prio = tcf_auto_prio(tcf_chain_tp_prev(&chain_info));

		tp = tcf_proto_create(nla_data(tca[TCA_KIND]),
				      protocol, prio, chain, extack);
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");

	fh = tp->ops->get(tp, t->tcm_handle);

		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
	} else if (n->nlmsg_flags & NLM_F_EXCL) {
		NL_SET_ERR_MSG(extack, "Filter already exists");

	if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
		NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");

	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
			      n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE,
			tcf_chain_tp_insert(chain, &chain_info, tp);
		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
			       RTM_NEWTFILTER, false);
			tcf_proto_destroy(tp, NULL);

		tcf_chain_put(chain);
	tcf_block_release(q, block);

		/* Replay the request. */

static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	struct tcf_proto *tp = NULL;
	unsigned long cl = 0;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))

	err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);

	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	parent = t->tcm_parent;

	if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
		NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");

	/* Find head of filter chain. */

	block = tcf_block_find(net, &q, &parent, &cl,
			       t->tcm_ifindex, t->tcm_block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
	chain = tcf_chain_get(block, chain_index, false);
		/* User requested flush on non-existent chain. Nothing to do,
		 * so just return success.
		 */
		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");

		tfilter_notify_chain(net, skb, block, q, parent, n,
				     chain, RTM_DELTFILTER);
		tcf_chain_flush(chain);

	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
	if (!tp || IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = tp ? PTR_ERR(tp) : -ENOENT;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");

	fh = tp->ops->get(tp, t->tcm_handle);

		if (t->tcm_handle == 0) {
			tcf_chain_tp_remove(chain, &chain_info, tp);
			tfilter_notify(net, skb, n, tp, block, q, parent, fh,
				       RTM_DELTFILTER, false);
			tcf_proto_destroy(tp, extack);
			NL_SET_ERR_MSG(extack, "Specified filter handle not found");

		err = tfilter_del_notify(net, skb, n, tp, block,
					 q, parent, fh, false, &last,

			tcf_chain_tp_remove(chain, &chain_info, tp);
			tcf_proto_destroy(tp, extack);

		tcf_chain_put(chain);
	tcf_block_release(q, block);
static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	struct tcf_proto *tp = NULL;
	unsigned long cl = 0;

	err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);

	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	parent = t->tcm_parent;

		NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");

	/* Find head of filter chain. */

	block = tcf_block_find(net, &q, &parent, &cl,
			       t->tcm_ifindex, t->tcm_block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
	chain = tcf_chain_get(block, chain_index, false);
		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");

	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
	if (!tp || IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = tp ? PTR_ERR(tp) : -ENOENT;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");

	fh = tp->ops->get(tp, t->tcm_handle);

		NL_SET_ERR_MSG(extack, "Specified filter handle not found");

		err = tfilter_notify(net, skb, n, tp, block, q, parent,
				     fh, RTM_NEWTFILTER, true);
			NL_SET_ERR_MSG(extack, "Failed to send filter notify message");

		tcf_chain_put(chain);
	tcf_block_release(q, block);

struct tcf_dump_args {
	struct tcf_walker w;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	struct tcf_block *block;

static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
	struct tcf_dump_args *a = (void *)arg;
	struct net *net = sock_net(a->skb->sk);

	return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
			     n, NETLINK_CB(a->cb->skb).portid,
			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,

static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
			   struct sk_buff *skb, struct netlink_callback *cb,
			   long index_start, long *p_index)
	struct net *net = sock_net(skb->sk);
	struct tcf_block *block = chain->block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct tcf_dump_args arg;
	struct tcf_proto *tp;

	for (tp = rtnl_dereference(chain->filter_chain);
	     tp; tp = rtnl_dereference(tp->next), (*p_index)++) {
		if (*p_index < index_start)
		if (TC_H_MAJ(tcm->tcm_info) &&
		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
		if (TC_H_MIN(tcm->tcm_info) &&
		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
		if (*p_index > index_start)
			memset(&cb->args[1], 0,
			       sizeof(cb->args) - sizeof(cb->args[0]));
		if (cb->args[1] == 0) {
			if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
					  NETLINK_CB(cb->skb).portid,
					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
					  RTM_NEWTFILTER) <= 0)

		arg.w.fn = tcf_node_dump;
		arg.parent = parent;
		arg.w.skip = cb->args[1] - 1;
		arg.w.cookie = cb->args[2];
		tp->ops->walk(tp, &arg.w);
		cb->args[2] = arg.w.cookie;
		cb->args[1] = arg.w.count + 1;
/* called with RTNL */
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct Qdisc *q = NULL;
	struct tcf_block *block;
	struct tcf_chain *chain;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))

	err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL,

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
		/* If we work with block index, q is NULL and parent value
		 * will never be used in the following code. The check
		 * in tcf_fill_node prevents it. However, compiler does not
		 * see that far, so set parent to zero to silence the warning
		 * about parent being uninitialized.
		 */
		const struct Qdisc_class_ops *cops;
		struct net_device *dev;
		unsigned long cl = 0;

		dev = __dev_get_by_index(net, tcm->tcm_ifindex);

		parent = tcm->tcm_parent;
			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
		cops = q->ops->cl_ops;
		if (!cops->tcf_block)
		if (TC_H_MIN(tcm->tcm_parent)) {
			cl = cops->find(q, tcm->tcm_parent);
		block = cops->tcf_block(q, cl, NULL);
		if (tcf_block_shared(block))

	index_start = cb->args[0];

	list_for_each_entry(chain, &block->chain_list, list) {
		if (tca[TCA_CHAIN] &&
		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
		if (!tcf_chain_dump(chain, q, parent, skb, cb,
				    index_start, &index)) {

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		tcf_block_refcnt_put(block);
	cb->args[0] = index;

	/* If we did no progress, the error (EMSGSIZE) is real */
	if (skb->len == 0 && err)

static int tc_chain_fill_node(struct tcf_chain *chain, struct net *net,
			      struct sk_buff *skb, struct tcf_block *block,
			      u32 portid, u32 seq, u16 flags, int event)
	unsigned char *b = skb_tail_pointer(skb);
	const struct tcf_proto_ops *ops;
	struct nlmsghdr *nlh;

	ops = chain->tmplt_ops;
	priv = chain->tmplt_priv;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm_handle = 0;
		tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
		tcm->tcm_parent = block->q->handle;
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;

	if (nla_put_u32(skb, TCA_CHAIN, chain->index))
		goto nla_put_failure;

		if (nla_put_string(skb, TCA_KIND, ops->kind))
			goto nla_put_failure;
		if (ops->tmplt_dump(skb, net, priv) < 0)
			goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast)
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	struct tcf_block *block = chain->block;
	struct net *net = block->net;
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);

	if (tc_chain_fill_node(chain, net, skb, block, portid,
			       seq, flags, event) <= 0) {

		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);

static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
			      struct nlattr **tca,
			      struct netlink_ext_ack *extack)
	const struct tcf_proto_ops *ops;

	/* If kind is not set, user did not specify template. */

	ops = tcf_proto_lookup_ops(nla_data(tca[TCA_KIND]), extack);
		return PTR_ERR(ops);
	if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
		NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");

	tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
	if (IS_ERR(tmplt_priv)) {
		module_put(ops->owner);
		return PTR_ERR(tmplt_priv);
	chain->tmplt_ops = ops;
	chain->tmplt_priv = tmplt_priv;

static void tc_chain_tmplt_del(struct tcf_chain *chain)
	const struct tcf_proto_ops *ops = chain->tmplt_ops;

	/* If template ops are set, no work to do for us. */

	ops->tmplt_destroy(chain->tmplt_priv);
	module_put(ops->owner);

/* Add/delete/get a chain */
static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
			struct netlink_ext_ack *extack)
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct Qdisc *q = NULL;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;

	if (n->nlmsg_type != RTM_GETCHAIN &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))

	err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);

	parent = t->tcm_parent;

	block = tcf_block_find(net, &q, &parent, &cl,
			       t->tcm_ifindex, t->tcm_block_index, extack);
		return PTR_ERR(block);

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
	chain = tcf_chain_lookup(block, chain_index);
	if (n->nlmsg_type == RTM_NEWCHAIN) {
			if (tcf_chain_held_by_acts_only(chain)) {
				/* The chain exists only because there is
				 * some action referencing it.
				 */
				tcf_chain_hold(chain);
				NL_SET_ERR_MSG(extack, "Filter chain already exists");

			if (!(n->nlmsg_flags & NLM_F_CREATE)) {
				NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");

			chain = tcf_chain_create(block, chain_index);
				NL_SET_ERR_MSG(extack, "Failed to create filter chain");
		if (!chain || tcf_chain_held_by_acts_only(chain)) {
			NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
		tcf_chain_hold(chain);

	switch (n->nlmsg_type) {
		err = tc_chain_tmplt_add(chain, net, tca, extack);
		/* In case the chain was successfully added, take a reference
		 * to the chain. This ensures that an empty chain
		 * does not disappear at the end of this function.
		 */
		tcf_chain_hold(chain);
		chain->explicitly_created = true;
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);
		tfilter_notify_chain(net, skb, block, q, parent, n,
				     chain, RTM_DELTFILTER);
		/* Flush the chain first as the user requested chain removal. */
		tcf_chain_flush(chain);
		/* In case the chain was successfully deleted, put a reference
		 * to the chain previously taken during addition.
		 */
		tcf_chain_put_explicitly_created(chain);
		chain->explicitly_created = false;
		err = tc_chain_notify(chain, skb, n->nlmsg_seq,
				      n->nlmsg_seq, n->nlmsg_type, true);
			NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
		NL_SET_ERR_MSG(extack, "Unsupported message type");

	tcf_chain_put(chain);
	tcf_block_release(q, block);
		/* Replay the request. */

/* called with RTNL */
static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct Qdisc *q = NULL;
	struct tcf_block *block;
	struct tcf_chain *chain;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))

	err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
		/* If we work with block index, q is NULL and parent value
		 * will never be used in the following code. The check
		 * in tcf_fill_node prevents it. However, compiler does not
		 * see that far, so set parent to zero to silence the warning
		 * about parent being uninitialized.
		 */
		const struct Qdisc_class_ops *cops;
		struct net_device *dev;
		unsigned long cl = 0;

		dev = __dev_get_by_index(net, tcm->tcm_ifindex);

		parent = tcm->tcm_parent;
			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
		cops = q->ops->cl_ops;
		if (!cops->tcf_block)
		if (TC_H_MIN(tcm->tcm_parent)) {
			cl = cops->find(q, tcm->tcm_parent);
		block = cops->tcf_block(q, cl, NULL);
		if (tcf_block_shared(block))

	index_start = cb->args[0];

	list_for_each_entry(chain, &block->chain_list, list) {
		if ((tca[TCA_CHAIN] &&
		     nla_get_u32(tca[TCA_CHAIN]) != chain->index))
		if (index < index_start) {
		if (tcf_chain_held_by_acts_only(chain))
		err = tc_chain_fill_node(chain, net, skb, block,
					 NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq, NLM_F_MULTI,

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		tcf_block_refcnt_put(block);
	cb->args[0] = index;

	/* If we did no progress, the error (EMSGSIZE) is real */
	if (skb->len == 0 && err)

void tcf_exts_destroy(struct tcf_exts *exts)
#ifdef CONFIG_NET_CLS_ACT
	tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
	kfree(exts->actions);
	exts->nr_actions = 0;
EXPORT_SYMBOL(tcf_exts_destroy);

int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
		      struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr,
		      struct netlink_ext_ack *extack)
#ifdef CONFIG_NET_CLS_ACT
	struct tc_action *act;
	size_t attr_size = 0;

	if (exts->police && tb[exts->police]) {
		act = tcf_action_init_1(net, tp, tb[exts->police],
					rate_tlv, "police", ovr,
					TCA_ACT_BIND, true, extack);
			return PTR_ERR(act);

		act->type = exts->type = TCA_OLD_COMPAT;
		exts->actions[0] = act;
		exts->nr_actions = 1;
	} else if (exts->action && tb[exts->action]) {
		err = tcf_action_init(net, tp, tb[exts->action],
				      rate_tlv, NULL, ovr, TCA_ACT_BIND,
				      exts->actions, &attr_size, true,
		exts->nr_actions = err;
	if ((exts->action && tb[exts->action]) ||
	    (exts->police && tb[exts->police])) {
		NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
EXPORT_SYMBOL(tcf_exts_validate);
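
/* Usage sketch (illustrative only; foo_filter, TCA_FOO_ACT and
 * TCA_FOO_POLICE are hypothetical): a classifier's ->change() path parses
 * actions into a scratch tcf_exts with tcf_exts_validate() and commits the
 * result with tcf_exts_change(), which frees whatever was there before.
 */
#if 0
static int foo_set_parms(struct net *net, struct tcf_proto *tp,
			 struct foo_filter *f, struct nlattr **tb,
			 struct nlattr *est, bool ovr,
			 struct netlink_ext_ack *extack)
{
	struct tcf_exts e;
	int err;

	err = tcf_exts_init(&e, TCA_FOO_ACT, TCA_FOO_POLICE);
	if (err < 0)
		return err;

	err = tcf_exts_validate(net, tp, tb, est, &e, ovr, extack);
	if (err < 0) {
		tcf_exts_destroy(&e);
		return err;
	}

	tcf_exts_change(&f->exts, &e);
	return 0;
}
#endif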
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_exts old = *dst;

	tcf_exts_destroy(&old);
EXPORT_SYMBOL(tcf_exts_change);

#ifdef CONFIG_NET_CLS_ACT
static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
	if (exts->nr_actions == 0)
	return exts->actions[0];

int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
#ifdef CONFIG_NET_CLS_ACT
	struct nlattr *nest;

	if (exts->action && tcf_exts_has_actions(exts)) {
		/*
		 * again for backward compatible mode - we want
		 * to work with both old and new modes of entering
		 * tc data even if iproute2 was newer - jhs
		 */
		if (exts->type != TCA_OLD_COMPAT) {
			nest = nla_nest_start(skb, exts->action);
				goto nla_put_failure;
			if (tcf_action_dump(skb, exts->actions, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		} else if (exts->police) {
			struct tc_action *act = tcf_exts_first_act(exts);
			nest = nla_nest_start(skb, exts->police);
			if (nest == NULL || !act)
				goto nla_put_failure;
			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);

	nla_nest_cancel(skb, nest);
EXPORT_SYMBOL(tcf_exts_dump);

int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
#ifdef CONFIG_NET_CLS_ACT
	struct tc_action *a = tcf_exts_first_act(exts);

	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
EXPORT_SYMBOL(tcf_exts_dump_stats);

int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop)
	struct tcf_block_cb *block_cb;

	/* Make sure all netdevs sharing this block are offload-capable. */
	if (block->nooffloaddevcnt && err_stop)

	list_for_each_entry(block_cb, &block->cb_list, list) {
		err = block_cb->cb(type, type_data, block_cb->cb_priv);
EXPORT_SYMBOL(tc_setup_cb_call);
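
/* Usage sketch (illustrative only; TC_SETUP_CLSFOO and the offload struct
 * are hypothetical stand-ins for a real classifier offload type such as
 * TC_SETUP_CLSFLOWER): a classifier replays a rule to every callback bound
 * to the block, and treats "no callback took it" as an error when the
 * filter was requested as skip_sw.
 */
#if 0
static int foo_replace_hw_filter(struct tcf_block *block,
				 struct foo_filter *f, bool skip_sw)
{
	struct tc_cls_foo_offload cls_foo = {};
	int err;

	cls_foo.command = TC_CLSFOO_REPLACE;
	cls_foo.cookie = (unsigned long)f;

	err = tc_setup_cb_call(block, TC_SETUP_CLSFOO, &cls_foo, skip_sw);
	if (err < 0)
		return err;		/* a callback failed and skip_sw was set */
	if (skip_sw && err == 0)
		return -EINVAL;		/* nobody offloaded the rule */

	return 0;			/* err > 0: number of callbacks that took it */
}
#endif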
static __net_init int tcf_net_init(struct net *net)
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock_init(&tn->idr_lock);

static void __net_exit tcf_net_exit(struct net *net)
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	idr_destroy(&tn->idr);

static struct pernet_operations tcf_net_ops = {
	.init = tcf_net_init,
	.exit = tcf_net_exit,
	.size = sizeof(struct tcf_net),

static int __init tc_filter_init(void)
	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);

	err = register_pernet_subsys(&tcf_net_ops);
		goto err_register_pernet_subsys;

	err = rhashtable_init(&indr_setup_block_ht,
			      &tc_indr_setup_block_ht_params);
		goto err_rhash_setup_block_ht;

	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
		      tc_dump_tfilter, 0);
	rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,

err_rhash_setup_block_ht:
	unregister_pernet_subsys(&tcf_net_ops);
err_register_pernet_subsys:
	destroy_workqueue(tc_filter_wq);

subsys_initcall(tc_filter_init);