/*
 * net/sched/sch_api.c	Packet scheduler API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *
 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
#include <linux/slab.h>
#include <linux/hashtable.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
/*

   Short review.
   -------------

   This file consists of two interrelated parts:

   1. queueing disciplines manager frontend.
   2. traffic classes manager frontend.

   Generally, a queueing discipline ("qdisc") is a black box
   that is able to enqueue packets and to dequeue them (when
   the device is ready to send something) in an order and at times
   determined by the algorithm hidden inside it.

   Qdiscs are divided into two categories:
   - "queues", which have no internal structure visible from outside.
   - "schedulers", which split all the packets into "traffic classes",
     using "packet classifiers" (see cls_api.c)

   In turn, classes may have child qdiscs (as a rule, queues)
   attached to them, and so on recursively.

   The goal of the routines in this file is to translate
   the information supplied by the user in the form of handles
   into a form more intelligible to the kernel, to perform some
   sanity checks and the part of the work that is common to all
   qdiscs, and to provide rtnetlink notifications.

   All the real intelligent work is done inside the qdisc modules.



   Every discipline has two major routines: enqueue and dequeue.

   ---dequeue

   dequeue usually returns an skb to send. It is allowed to return NULL,
   but that does not mean the queue is empty; it just means that the
   discipline does not want to send anything at this time.
   The queue is really empty only if q->q.qlen == 0.
   For complicated disciplines with multiple queues, q->q is not
   the real packet queue, but q->q.qlen must still be valid.

   ---enqueue

   enqueue returns 0 if the packet was enqueued successfully.
   If a packet (this one or another one) was dropped, it returns
   a non-zero error code:
   NET_XMIT_DROP	- this packet was dropped
     Expected action: do not back off, but wait until the queue clears.
   NET_XMIT_CN		- this packet was probably enqueued, but another one was dropped.
     Expected action: back off or ignore.

   ---peek

   like dequeue, but without removing a packet from the queue

   ---reset

   returns the qdisc to its initial state: purge all buffers, clear all
   timers, counters (except statistics), etc.

   ---init

   initializes a newly created qdisc.

   ---destroy

   destroys resources allocated by init and during the lifetime of the qdisc.

   ---change

   changes qdisc parameters.
 */
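/* Illustrative only (not part of the original file): a minimal sketch of a
 * "queue"-type discipline wired into the hooks described above, using only
 * the generic helpers from <net/sch_generic.h>. The qdisc name "example"
 * and the fixed limit of 128 packets are assumptions made for this sketch.
 */
#if 0	/* example, not compiled */
static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			   struct sk_buff **to_free)
{
	if (likely(sch->q.qlen < 128))
		return qdisc_enqueue_tail(skb, sch);	/* NET_XMIT_SUCCESS */

	return qdisc_drop(skb, sch, to_free);		/* NET_XMIT_DROP */
}

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
	/* Returning NULL here would not mean "empty", only "nothing now" */
	return qdisc_dequeue_head(sch);
}

static struct Qdisc_ops example_qdisc_ops __read_mostly = {
	.id		= "example",
	.enqueue	= example_enqueue,
	.dequeue	= example_dequeue,
	.peek		= qdisc_peek_head,
	.reset		= qdisc_reset_queue,
	.owner		= THIS_MODULE,
};
#endif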
/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(qdisc_mod_lock);


/************************************************
 *	Queueing disciplines manipulation.	*
 ************************************************/


/* The list of all installed queueing disciplines. */

static struct Qdisc_ops *qdisc_base;

/* Register/unregister queueing discipline */
int register_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int rc = -EEXIST;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (!strcmp(qops->id, q->id))
			goto out;

	if (qops->enqueue == NULL)
		qops->enqueue = noop_qdisc_ops.enqueue;
	if (qops->peek == NULL) {
		if (qops->dequeue == NULL)
			qops->peek = noop_qdisc_ops.peek;
		else
			goto out_einval;
	}
	if (qops->dequeue == NULL)
		qops->dequeue = noop_qdisc_ops.dequeue;

	if (qops->cl_ops) {
		const struct Qdisc_class_ops *cops = qops->cl_ops;

		if (!(cops->find && cops->walk && cops->leaf))
			goto out_einval;

		if (cops->tcf_block && !(cops->bind_tcf && cops->unbind_tcf))
			goto out_einval;
	}

	qops->next = NULL;
	*qp = qops;
	rc = 0;
out:
	write_unlock(&qdisc_mod_lock);
	return rc;

out_einval:
	rc = -EINVAL;
	goto out;
}
EXPORT_SYMBOL(register_qdisc);
int unregister_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int err = -ENOENT;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (q == qops)
			break;
	if (q) {
		*qp = q->next;
		q->next = NULL;
		err = 0;
	}
	write_unlock(&qdisc_mod_lock);
	return err;
}
EXPORT_SYMBOL(unregister_qdisc);
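/* Illustrative only (not part of the original file): how a discipline module
 * would use the registration API above. example_qdisc_ops is the hypothetical
 * ops table sketched earlier, not something defined in this file.
 */
#if 0	/* example, not compiled */
static int __init example_module_init(void)
{
	return register_qdisc(&example_qdisc_ops);	/* -EEXIST if id taken */
}

static void __exit example_module_exit(void)
{
	unregister_qdisc(&example_qdisc_ops);
}

module_init(example_module_init);
module_exit(example_module_exit);
MODULE_LICENSE("GPL");
#endif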
/* Get default qdisc if not otherwise specified */
void qdisc_get_default(char *name, size_t len)
{
	read_lock(&qdisc_mod_lock);
	strlcpy(name, default_qdisc_ops->id, len);
	read_unlock(&qdisc_mod_lock);
}

static struct Qdisc_ops *qdisc_lookup_default(const char *name)
{
	struct Qdisc_ops *q = NULL;

	for (q = qdisc_base; q; q = q->next) {
		if (!strcmp(name, q->id)) {
			if (!try_module_get(q->owner))
				q = NULL;
			break;
		}
	}

	return q;
}
/* Set new default qdisc to use */
int qdisc_set_default(const char *name)
{
	const struct Qdisc_ops *ops;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	write_lock(&qdisc_mod_lock);
	ops = qdisc_lookup_default(name);
	if (!ops) {
		/* Not found, drop lock and try to load module */
		write_unlock(&qdisc_mod_lock);
		request_module("sch_%s", name);
		write_lock(&qdisc_mod_lock);

		ops = qdisc_lookup_default(name);
	}

	if (ops) {
		/* Set new default */
		module_put(default_qdisc_ops->owner);
		default_qdisc_ops = ops;
	}
	write_unlock(&qdisc_mod_lock);

	return ops ? 0 : -ENOENT;
}
#ifdef CONFIG_NET_SCH_DEFAULT
/* Set default value from kernel config */
static int __init sch_default_qdisc(void)
{
	return qdisc_set_default(CONFIG_DEFAULT_NET_SCH);
}
late_initcall(sch_default_qdisc);
#endif
/* We know handle. Find qdisc among all qdiscs attached to device
 * (root qdisc, all its children, children of children etc.)
 * Note: caller either uses rtnl or rcu_read_lock()
 */

static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
{
	struct Qdisc *q;

	if (!qdisc_dev(root))
		return (root->handle == handle ? root : NULL);

	if (!(root->flags & TCQ_F_BUILTIN) &&
	    root->handle == handle)
		return root;

	hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle) {
		if (q->handle == handle)
			return q;
	}
	return NULL;
}
void qdisc_hash_add(struct Qdisc *q, bool invisible)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
		ASSERT_RTNL();
		hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
		if (invisible)
			q->flags |= TCQ_F_INVISIBLE;
	}
}
EXPORT_SYMBOL(qdisc_hash_add);

void qdisc_hash_del(struct Qdisc *q)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
		ASSERT_RTNL();
		hash_del_rcu(&q->hash);
	}
}
EXPORT_SYMBOL(qdisc_hash_del);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
	struct Qdisc *q;

	if (!handle)
		return NULL;
	q = qdisc_match_from_root(dev->qdisc, handle);
	if (q)
		goto out;

	if (dev_ingress_queue(dev))
		q = qdisc_match_from_root(
			dev_ingress_queue(dev)->qdisc_sleeping,
			handle);
out:
	return q;
}

struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)
{
	struct netdev_queue *nq;
	struct Qdisc *q;

	if (!handle)
		return NULL;
	q = qdisc_match_from_root(dev->qdisc, handle);
	if (q)
		goto out;

	nq = dev_ingress_queue_rcu(dev);
	if (nq)
		q = qdisc_match_from_root(nq->qdisc_sleeping, handle);
out:
	return q;
}
static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{
	unsigned long cl;
	const struct Qdisc_class_ops *cops = p->ops->cl_ops;

	if (cops == NULL)
		return NULL;
	cl = cops->find(p, classid);

	if (cl == 0)
		return NULL;
	return cops->leaf(p, cl);
}
/* Find queueing discipline by name */

static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
{
	struct Qdisc_ops *q = NULL;

	if (kind) {
		read_lock(&qdisc_mod_lock);
		for (q = qdisc_base; q; q = q->next) {
			if (nla_strcmp(kind, q->id) == 0) {
				if (!try_module_get(q->owner))
					q = NULL;
				break;
			}
		}
		read_unlock(&qdisc_mod_lock);
	}
	return q;
}
/* In older iproute2 versions the linklayer setting was not transferred
 * to the kernel, and the kernel's rate table lookup system has since
 * been dropped. To stay backward compatible with older iproute2 tc
 * utilities, we detect the linklayer setting by checking whether the
 * rate table was modified.
 *
 * For linklayer ATM, the rate table entries are aligned to 48-byte
 * cells, so some table entries will contain the same value. The mpu
 * (min packet unit) is also encoded into the old rate table, so
 * starting from the mpu we find the low and high table entries that
 * map this cell. If these entries contain the same value, then the
 * rate table has been modified for linklayer ATM.
 *
 * This is done by rounding mpu up to the nearest 48-byte cell/entry,
 * then rounding up to the next cell, computing the table entry one
 * below, and comparing.
 */
static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
{
	int low       = roundup(r->mpu, 48);
	int high      = roundup(low+1, 48);
	int cell_low  = low >> r->cell_log;
	int cell_high = (high >> r->cell_log) - 1;

	/* rtab is too inaccurate at rates > 100Mbit/s */
	if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
		pr_debug("TC linklayer: Giving up ATM detection\n");
		return TC_LINKLAYER_ETHERNET;
	}

	if ((cell_high > cell_low) && (cell_high < 256)
	    && (rtab[cell_low] == rtab[cell_high])) {
		pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
			 cell_low, cell_high, rtab[cell_high]);
		return TC_LINKLAYER_ATM;
	}
	return TC_LINKLAYER_ETHERNET;
}
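/* Worked example (illustrative numbers, not from the original source): with
 * mpu = 64 and cell_log = 3, low = roundup(64, 48) = 96 and
 * high = roundup(97, 48) = 144, giving cell_low = 96 >> 3 = 12 and
 * cell_high = (144 >> 3) - 1 = 17. An ATM-aligned table charges slots 12
 * and 17 the same 48-byte-cell cost, so rtab[12] == rtab[17] and
 * TC_LINKLAYER_ATM is detected; a linear Ethernet table gives them
 * different values.
 */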
static struct qdisc_rate_table *qdisc_rtab_list;

struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
					struct nlattr *tab,
					struct netlink_ext_ack *extack)
{
	struct qdisc_rate_table *rtab;

	if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
	    nla_len(tab) != TC_RTAB_SIZE) {
		NL_SET_ERR_MSG(extack, "Invalid rate table parameters for searching");
		return NULL;
	}

	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
		if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
		    !memcmp(&rtab->data, nla_data(tab), 1024)) {
			rtab->refcnt++;
			return rtab;
		}
	}

	rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
	if (rtab) {
		rtab->rate = *r;
		rtab->refcnt = 1;
		memcpy(rtab->data, nla_data(tab), 1024);
		if (r->linklayer == TC_LINKLAYER_UNAWARE)
			r->linklayer = __detect_linklayer(r, rtab->data);
		rtab->next = qdisc_rtab_list;
		qdisc_rtab_list = rtab;
	} else {
		NL_SET_ERR_MSG(extack, "Failed to allocate new qdisc rate table");
	}
	return rtab;
}
EXPORT_SYMBOL(qdisc_get_rtab);
void qdisc_put_rtab(struct qdisc_rate_table *tab)
{
	struct qdisc_rate_table *rtab, **rtabp;

	if (!tab || --tab->refcnt)
		return;

	for (rtabp = &qdisc_rtab_list;
	     (rtab = *rtabp) != NULL;
	     rtabp = &rtab->next) {
		if (rtab == tab) {
			*rtabp = rtab->next;
			kfree(rtab);
			return;
		}
	}
}
EXPORT_SYMBOL(qdisc_put_rtab);
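/* Illustrative only (not part of the original file): the usual rate-table
 * life cycle in a qdisc. A hypothetical discipline looks up the shared,
 * refcounted table in ->init() and releases it in ->destroy(); the names
 * example_sched_data and the hard-coded ratespec are assumptions of this
 * sketch, and "opt" is assumed to carry a TC_RTAB_SIZE (1024 byte) blob.
 */
#if 0	/* example, not compiled */
struct example_sched_data {
	struct qdisc_rate_table *rtab;
};

static int example_init(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct example_sched_data *q = qdisc_priv(sch);
	struct tc_ratespec rate = { .rate = 125000, .cell_log = 3 };

	q->rtab = qdisc_get_rtab(&rate, opt, extack);
	return q->rtab ? 0 : -EINVAL;
}

static void example_destroy(struct Qdisc *sch)
{
	struct example_sched_data *q = qdisc_priv(sch);

	qdisc_put_rtab(q->rtab);	/* drops refcnt, frees on last put */
}
#endif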
static LIST_HEAD(qdisc_stab_list);

static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
	[TCA_STAB_BASE]	= { .len = sizeof(struct tc_sizespec) },
	[TCA_STAB_DATA] = { .type = NLA_BINARY },
};
static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt,
					       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_STAB_MAX + 1];
	struct qdisc_size_table *stab;
	struct tc_sizespec *s;
	unsigned int tsize = 0;
	u16 *tab = NULL;
	int err;

	err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy, extack);
	if (err < 0)
		return ERR_PTR(err);
	if (!tb[TCA_STAB_BASE]) {
		NL_SET_ERR_MSG(extack, "Size table base attribute is missing");
		return ERR_PTR(-EINVAL);
	}

	s = nla_data(tb[TCA_STAB_BASE]);

	if (s->tsize > 0) {
		if (!tb[TCA_STAB_DATA]) {
			NL_SET_ERR_MSG(extack, "Size table data attribute is missing");
			return ERR_PTR(-EINVAL);
		}
		tab = nla_data(tb[TCA_STAB_DATA]);
		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
	}

	if (tsize != s->tsize || (!tab && tsize > 0)) {
		NL_SET_ERR_MSG(extack, "Invalid size of size table");
		return ERR_PTR(-EINVAL);
	}

	list_for_each_entry(stab, &qdisc_stab_list, list) {
		if (memcmp(&stab->szopts, s, sizeof(*s)))
			continue;
		if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
			continue;
		stab->refcnt++;
		return stab;
	}

	stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	stab->refcnt = 1;
	stab->szopts = *s;
	if (tsize > 0)
		memcpy(stab->data, tab, tsize * sizeof(u16));

	list_add_tail(&stab->list, &qdisc_stab_list);

	return stab;
}
static void stab_kfree_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct qdisc_size_table, rcu));
}

void qdisc_put_stab(struct qdisc_size_table *tab)
{
	if (!tab)
		return;

	if (--tab->refcnt == 0) {
		list_del(&tab->list);
		call_rcu(&tab->rcu, stab_kfree_rcu);
	}
}
EXPORT_SYMBOL(qdisc_put_stab);
static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_STAB);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure:
	return -1;
}
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab)
{
	int pkt_len, slot;

	pkt_len = skb->len + stab->szopts.overhead;
	if (unlikely(!stab->szopts.tsize))
		goto out;

	slot = pkt_len + stab->szopts.cell_align;
	if (unlikely(slot < 0))
		slot = 0;

	slot >>= stab->szopts.cell_log;
	if (likely(slot < stab->szopts.tsize))
		pkt_len = stab->data[slot];
	else
		pkt_len = stab->data[stab->szopts.tsize - 1] *
				(slot / stab->szopts.tsize) +
				stab->data[slot % stab->szopts.tsize];

	pkt_len <<= stab->szopts.size_log;
out:
	if (unlikely(pkt_len < 1))
		pkt_len = 1;
	qdisc_skb_cb(skb)->pkt_len = pkt_len;
}
EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
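/* Worked example (illustrative numbers, not from the original source): with
 * overhead = 24, cell_align = 0, cell_log = 6, size_log = 0 and tsize = 512,
 * an skb of len 1000 gives pkt_len = 1024, slot = 1024 >> 6 = 16, and the
 * charged length becomes stab->data[16] (e.g. the size rounded up to the
 * next 64-byte cell by the table userspace supplied). Only when slot runs
 * past tsize does the multiply-and-remainder fallback apply.
 */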
void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
{
	if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
		pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
			txt, qdisc->ops->id, qdisc->handle >> 16);
		qdisc->flags |= TCQ_F_WARN_NONWC;
	}
}
EXPORT_SYMBOL(qdisc_warn_nonwc);
static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
{
	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
						 timer);

	rcu_read_lock();
	__netif_schedule(qdisc_root(wd->qdisc));
	rcu_read_unlock();

	return HRTIMER_NORESTART;
}

void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
				 clockid_t clockid)
{
	hrtimer_init(&wd->timer, clockid, HRTIMER_MODE_ABS_PINNED);
	wd->timer.function = qdisc_watchdog;
	wd->qdisc = qdisc;
}
EXPORT_SYMBOL(qdisc_watchdog_init_clockid);

void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
{
	qdisc_watchdog_init_clockid(wd, qdisc, CLOCK_MONOTONIC);
}
EXPORT_SYMBOL(qdisc_watchdog_init);
void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires)
{
	if (test_bit(__QDISC_STATE_DEACTIVATED,
		     &qdisc_root_sleeping(wd->qdisc)->state))
		return;

	if (wd->last_expires == expires)
		return;

	wd->last_expires = expires;
	hrtimer_start(&wd->timer,
		      ns_to_ktime(expires),
		      HRTIMER_MODE_ABS_PINNED);
}
EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
	hrtimer_cancel(&wd->timer);
}
EXPORT_SYMBOL(qdisc_watchdog_cancel);
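/* Illustrative only (not part of the original file): how a shaping qdisc
 * typically drives the watchdog. When ->dequeue() decides the next packet
 * is not yet eligible, it arms the timer for the eligible time so the
 * device is rescheduled then; the fields "wd" and "next_send_time" in the
 * private data are assumptions of this sketch.
 */
#if 0	/* example, not compiled */
static struct sk_buff *example_shaper_dequeue(struct Qdisc *sch)
{
	struct example_shaper_data *q = qdisc_priv(sch);
	u64 now = ktime_get_ns();

	if (now < q->next_send_time) {
		/* Nothing may be sent yet: wake up again at the right time. */
		qdisc_watchdog_schedule_ns(&q->wd, q->next_send_time);
		return NULL;	/* queue is not empty, just throttled */
	}
	return qdisc_dequeue_head(sch);
}
#endif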
static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
{
	struct hlist_head *h;
	unsigned int i;

	h = kvmalloc_array(n, sizeof(struct hlist_head), GFP_KERNEL);

	if (h != NULL) {
		for (i = 0; i < n; i++)
			INIT_HLIST_HEAD(&h[i]);
	}
	return h;
}
void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *next;
	struct hlist_head *nhash, *ohash;
	unsigned int nsize, nmask, osize;
	unsigned int i, h;

	/* Rehash when load factor exceeds 0.75 */
	if (clhash->hashelems * 4 <= clhash->hashsize * 3)
		return;
	nsize = clhash->hashsize * 2;
	nmask = nsize - 1;
	nhash = qdisc_class_hash_alloc(nsize);
	if (nhash == NULL)
		return;

	ohash = clhash->hash;
	osize = clhash->hashsize;

	sch_tree_lock(sch);
	for (i = 0; i < osize; i++) {
		hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
			h = qdisc_class_hash(cl->classid, nmask);
			hlist_add_head(&cl->hnode, &nhash[h]);
		}
	}
	clhash->hash = nhash;
	clhash->hashsize = nsize;
	clhash->hashmask = nmask;
	sch_tree_unlock(sch);

	kvfree(ohash);
}
EXPORT_SYMBOL(qdisc_class_hash_grow);
int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
{
	unsigned int size = 4;

	clhash->hash = qdisc_class_hash_alloc(size);
	if (!clhash->hash)
		return -ENOMEM;
	clhash->hashsize  = size;
	clhash->hashmask  = size - 1;
	clhash->hashelems = 0;
	return 0;
}
EXPORT_SYMBOL(qdisc_class_hash_init);

void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
{
	kvfree(clhash->hash);
}
EXPORT_SYMBOL(qdisc_class_hash_destroy);

void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	unsigned int h;

	INIT_HLIST_NODE(&cl->hnode);
	h = qdisc_class_hash(cl->classid, clhash->hashmask);
	hlist_add_head(&cl->hnode, &clhash->hash[h]);
	clhash->hashelems++;
}
EXPORT_SYMBOL(qdisc_class_hash_insert);

void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	hlist_del(&cl->hnode);
	clhash->hashelems--;
}
EXPORT_SYMBOL(qdisc_class_hash_remove);
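/* Illustrative only (not part of the original file): the class-hash idiom
 * used by classful qdiscs. A class embeds a struct Qdisc_class_common, is
 * looked up by classid via qdisc_class_find() from <net/sch_generic.h>, and
 * the table is grown opportunistically after inserts; struct example_class
 * is an assumption of this sketch.
 */
#if 0	/* example, not compiled */
struct example_class {
	struct Qdisc_class_common common;	/* classid + hash linkage */
	/* ... per-class scheduling state ... */
};

static struct example_class *example_find(struct Qdisc_class_hash *clhash,
					  u32 classid)
{
	struct Qdisc_class_common *cl = qdisc_class_find(clhash, classid);

	return cl ? container_of(cl, struct example_class, common) : NULL;
}

static void example_add_class(struct Qdisc *sch,
			      struct Qdisc_class_hash *clhash,
			      struct example_class *cl, u32 classid)
{
	cl->common.classid = classid;
	qdisc_class_hash_insert(clhash, &cl->common);
	qdisc_class_hash_grow(sch, clhash);	/* rehash if load > 0.75 */
}
#endif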
/* Allocate a unique handle from space managed by kernel
 * Possible range is [8000-FFFF]:0000 (0x8000 values)
 */
static u32 qdisc_alloc_handle(struct net_device *dev)
{
	int i = 0x8000;
	static u32 autohandle = TC_H_MAKE(0x80000000U, 0);

	do {
		autohandle += TC_H_MAKE(0x10000U, 0);
		if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
			autohandle = TC_H_MAKE(0x80000000U, 0);
		if (!qdisc_lookup(dev, autohandle))
			return autohandle;
		cond_resched();
	} while	(--i > 0);

	return 0;
}
void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
			       unsigned int len)
{
	bool qdisc_is_offloaded = sch->flags & TCQ_F_OFFLOADED;
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	u32 parentid;
	bool notify;
	int drops;

	if (n == 0 && len == 0)
		return;
	drops = max_t(int, n, 0);
	rcu_read_lock();
	while ((parentid = sch->parent)) {
		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
			break;

		if (sch->flags & TCQ_F_NOPARENT)
			break;
		/* Notify parent qdisc only if child qdisc becomes empty.
		 *
		 * If child was empty even before update then backlog
		 * counter is screwed and we skip notification because
		 * parent class is already passive.
		 *
		 * If the original child was offloaded then it is allowed
		 * to be seen as empty, so the parent is notified anyway.
		 */
		notify = !sch->q.qlen && !WARN_ON_ONCE(!n &&
						       !qdisc_is_offloaded);
		/* TODO: perform the search on a per txq basis */
		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
		if (sch == NULL) {
			WARN_ON_ONCE(parentid != TC_H_ROOT);
			break;
		}
		cops = sch->ops->cl_ops;
		if (notify && cops->qlen_notify) {
			cl = cops->find(sch, parentid);
			cops->qlen_notify(sch, cl);
		}
		sch->q.qlen -= n;
		sch->qstats.backlog -= len;
		__qdisc_qstats_drop(sch, drops);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
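/* Illustrative only (not part of the original file): a qdisc that drops
 * packets outside the normal enqueue/dequeue path (e.g. from a timer) must
 * propagate the change upwards so ancestor qlen/backlog counters stay
 * accurate; example_purge and its local counters are assumptions of this
 * sketch.
 */
#if 0	/* example, not compiled */
static void example_purge(struct Qdisc *child)
{
	unsigned int dropped_pkts = 0, dropped_bytes = 0;
	struct sk_buff *skb;

	while ((skb = qdisc_dequeue_head(child)) != NULL) {
		dropped_pkts++;
		dropped_bytes += qdisc_pkt_len(skb);
		kfree_skb(skb);
	}
	/* Adjust every ancestor; "child" itself was updated by dequeue. */
	qdisc_tree_reduce_backlog(child, dropped_pkts, dropped_bytes);
}
#endif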
int qdisc_offload_dump_helper(struct Qdisc *sch, enum tc_setup_type type,
			      void *type_data)
{
	struct net_device *dev = qdisc_dev(sch);
	int err;

	sch->flags &= ~TCQ_F_OFFLOADED;
	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return 0;

	err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);
	if (err == -EOPNOTSUPP)
		return 0;

	if (!err)
		sch->flags |= TCQ_F_OFFLOADED;

	return err;
}
EXPORT_SYMBOL(qdisc_offload_dump_helper);
void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
				struct Qdisc *new, struct Qdisc *old,
				enum tc_setup_type type, void *type_data,
				struct netlink_ext_ack *extack)
{
	bool any_qdisc_is_offloaded;
	int err;

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);

	/* Don't report error if the graft is part of destroy operation. */
	if (!err || !new || new == &noop_qdisc)
		return;

	/* Don't report error if the parent, the old child and the new
	 * one are not offloaded.
	 */
	any_qdisc_is_offloaded = new->flags & TCQ_F_OFFLOADED;
	any_qdisc_is_offloaded |= sch && sch->flags & TCQ_F_OFFLOADED;
	any_qdisc_is_offloaded |= old && old->flags & TCQ_F_OFFLOADED;

	if (any_qdisc_is_offloaded)
		NL_SET_ERR_MSG(extack, "Offloading graft operation failed.");
}
EXPORT_SYMBOL(qdisc_offload_graft_helper);
static void qdisc_offload_graft_root(struct net_device *dev,
				     struct Qdisc *new, struct Qdisc *old,
				     struct netlink_ext_ack *extack)
{
	struct tc_root_qopt_offload graft_offload = {
		.command	= TC_ROOT_GRAFT,
		.handle		= new ? new->handle : 0,
		.ingress	= (new && new->flags & TCQ_F_INGRESS) ||
				  (old && old->flags & TCQ_F_INGRESS),
	};

	qdisc_offload_graft_helper(dev, NULL, new, old,
				   TC_SETUP_ROOT_QDISC, &graft_offload, extack);
}
static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
			 u32 portid, u32 seq, u16 flags, int event)
{
	struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
	struct gnet_stats_queue __percpu *cpu_qstats = NULL;
	struct tcmsg *tcm;
	struct nlmsghdr  *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	struct qdisc_size_table *stab;
	u32 block_index;
	__u32 qlen;

	cond_resched();
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = clid;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = refcount_read(&q->refcnt);
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (q->ops->ingress_block_get) {
		block_index = q->ops->ingress_block_get(q);
		if (block_index &&
		    nla_put_u32(skb, TCA_INGRESS_BLOCK, block_index))
			goto nla_put_failure;
	}
	if (q->ops->egress_block_get) {
		block_index = q->ops->egress_block_get(q);
		if (block_index &&
		    nla_put_u32(skb, TCA_EGRESS_BLOCK, block_index))
			goto nla_put_failure;
	}
	if (q->ops->dump && q->ops->dump(q, skb) < 0)
		goto nla_put_failure;
	if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
		goto nla_put_failure;
	qlen = qdisc_qlen_sum(q);

	stab = rtnl_dereference(q->stab);
	if (stab && qdisc_dump_stab(skb, stab) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 NULL, &d, TCA_PAD) < 0)
		goto nla_put_failure;

	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
		goto nla_put_failure;

	if (qdisc_is_percpu_stats(q)) {
		cpu_bstats = q->cpu_bstats;
		cpu_qstats = q->cpu_qstats;
	}

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(q),
				  &d, cpu_bstats, &q->bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
{
	if (q->flags & TCQ_F_BUILTIN)
		return true;
	if ((q->flags & TCQ_F_INVISIBLE) && !dump_invisible)
		return true;

	return false;
}
static int qdisc_notify(struct net *net, struct sk_buff *oskb,
			struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (old && !tc_qdisc_dump_ignore(old, false)) {
		if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
				  0, RTM_DELQDISC) < 0)
			goto err_out;
	}
	if (new && !tc_qdisc_dump_ignore(new, false)) {
		if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
				  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
			goto err_out;
	}

	if (skb->len)
		return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				      n->nlmsg_flags & NLM_F_ECHO);

err_out:
	kfree_skb(skb);
	return -EINVAL;
}
static void notify_and_destroy(struct net *net, struct sk_buff *skb,
			       struct nlmsghdr *n, u32 clid,
			       struct Qdisc *old, struct Qdisc *new)
{
	if (new || old)
		qdisc_notify(net, skb, n, clid, old, new);

	if (old)
		qdisc_put(old);
}
/* Graft qdisc "new" to class "classid" of qdisc "parent" or
 * to device "dev".
 *
 * When appropriate send a netlink notification using 'skb'
 * and "n".
 *
 * On success, destroy old qdisc.
 */

static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
		       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
		       struct Qdisc *new, struct Qdisc *old,
		       struct netlink_ext_ack *extack)
{
	struct Qdisc *q = old;
	struct net *net = dev_net(dev);

	if (parent == NULL) {
		unsigned int i, num_q, ingress;

		ingress = 0;
		num_q = dev->num_tx_queues;
		if ((q && q->flags & TCQ_F_INGRESS) ||
		    (new && new->flags & TCQ_F_INGRESS)) {
			num_q = 1;
			ingress = 1;
			if (!dev_ingress_queue(dev)) {
				NL_SET_ERR_MSG(extack, "Device does not have an ingress queue");
				return -ENOENT;
			}
		}

		if (dev->flags & IFF_UP)
			dev_deactivate(dev);

		qdisc_offload_graft_root(dev, new, old, extack);

		if (new && new->ops->attach)
			goto skip;

		for (i = 0; i < num_q; i++) {
			struct netdev_queue *dev_queue = dev_ingress_queue(dev);

			if (!ingress)
				dev_queue = netdev_get_tx_queue(dev, i);

			old = dev_graft_qdisc(dev_queue, new);
			if (new && i > 0)
				qdisc_refcount_inc(new);

			if (!ingress)
				qdisc_put(old);
		}

skip:
		if (!ingress) {
			notify_and_destroy(net, skb, n, classid,
					   dev->qdisc, new);
			if (new && !new->ops->attach)
				qdisc_refcount_inc(new);
			dev->qdisc = new ? : &noop_qdisc;

			if (new && new->ops->attach)
				new->ops->attach(new);
		} else {
			notify_and_destroy(net, skb, n, classid, old, new);
		}

		if (dev->flags & IFF_UP)
			dev_activate(dev);
	} else {
		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
		unsigned long cl;
		int err;

		/* Only support running class lockless if parent is lockless */
		if (new && (new->flags & TCQ_F_NOLOCK) &&
		    parent && !(parent->flags & TCQ_F_NOLOCK))
			new->flags &= ~TCQ_F_NOLOCK;

		if (!cops || !cops->graft)
			return -EOPNOTSUPP;

		cl = cops->find(parent, classid);
		if (!cl) {
			NL_SET_ERR_MSG(extack, "Specified class not found");
			return -ENOENT;
		}

		err = cops->graft(parent, cl, new, &old, extack);
		if (err)
			return err;
		notify_and_destroy(net, skb, n, classid, old, new);
	}
	return 0;
}
static int qdisc_block_indexes_set(struct Qdisc *sch, struct nlattr **tca,
				   struct netlink_ext_ack *extack)
{
	u32 block_index;

	if (tca[TCA_INGRESS_BLOCK]) {
		block_index = nla_get_u32(tca[TCA_INGRESS_BLOCK]);

		if (!block_index) {
			NL_SET_ERR_MSG(extack, "Ingress block index cannot be 0");
			return -EINVAL;
		}
		if (!sch->ops->ingress_block_set) {
			NL_SET_ERR_MSG(extack, "Ingress block sharing is not supported");
			return -EOPNOTSUPP;
		}
		sch->ops->ingress_block_set(sch, block_index);
	}
	if (tca[TCA_EGRESS_BLOCK]) {
		block_index = nla_get_u32(tca[TCA_EGRESS_BLOCK]);

		if (!block_index) {
			NL_SET_ERR_MSG(extack, "Egress block index cannot be 0");
			return -EINVAL;
		}
		if (!sch->ops->egress_block_set) {
			NL_SET_ERR_MSG(extack, "Egress block sharing is not supported");
			return -EOPNOTSUPP;
		}
		sch->ops->egress_block_set(sch, block_index);
	}
	return 0;
}
/*
   Allocate and initialize new qdisc.

   Parameters are passed via opt.
 */

static struct Qdisc *qdisc_create(struct net_device *dev,
				  struct netdev_queue *dev_queue,
				  struct Qdisc *p, u32 parent, u32 handle,
				  struct nlattr **tca, int *errp,
				  struct netlink_ext_ack *extack)
{
	int err;
	struct nlattr *kind = tca[TCA_KIND];
	struct Qdisc *sch;
	struct Qdisc_ops *ops;
	struct qdisc_size_table *stab;

	ops = qdisc_lookup_ops(kind);
#ifdef CONFIG_MODULES
	if (ops == NULL && kind != NULL) {
		char name[IFNAMSIZ];
		if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
			/* We dropped the RTNL semaphore in order to
			 * perform the module load. So, even if we
			 * succeeded in loading the module we have to
			 * tell the caller to replay the request. We
			 * indicate this using -EAGAIN.
			 * We replay the request because the device may
			 * go away in the meantime.
			 */
			rtnl_unlock();
			request_module("sch_%s", name);
			rtnl_lock();
			ops = qdisc_lookup_ops(kind);
			if (ops != NULL) {
				/* We will try again with qdisc_lookup_ops,
				 * so don't keep a reference.
				 */
				module_put(ops->owner);
				err = -EAGAIN;
				goto err_out;
			}
		}
	}
#endif

	err = -ENOENT;
	if (!ops) {
		NL_SET_ERR_MSG(extack, "Specified qdisc not found");
		goto err_out;
	}

	sch = qdisc_alloc(dev_queue, ops, extack);
	if (IS_ERR(sch)) {
		err = PTR_ERR(sch);
		goto err_out2;
	}

	sch->parent = parent;

	if (handle == TC_H_INGRESS) {
		sch->flags |= TCQ_F_INGRESS;
		handle = TC_H_MAKE(TC_H_INGRESS, 0);
	} else {
		if (handle == 0) {
			handle = qdisc_alloc_handle(dev);
			if (handle == 0) {
				NL_SET_ERR_MSG(extack, "Maximum number of qdisc handles was exceeded");
				err = -ENOSPC;
				goto err_out3;
			}
		}
		if (!netif_is_multiqueue(dev))
			sch->flags |= TCQ_F_ONETXQUEUE;
	}

	sch->handle = handle;

	/* This exists to stay backward compatible with a userspace
	 * loophole that allowed userspace to get the IFF_NO_QUEUE
	 * facility on older kernels by setting tx_queue_len=0 (prior
	 * to qdisc init) and then forgetting to reinit tx_queue_len
	 * before attaching a qdisc again.
	 */
	if ((dev->priv_flags & IFF_NO_QUEUE) && (dev->tx_queue_len == 0)) {
		dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
		netdev_info(dev, "Caught tx_queue_len zero misconfig\n");
	}

	err = qdisc_block_indexes_set(sch, tca, extack);
	if (err)
		goto err_out3;

	if (ops->init) {
		err = ops->init(sch, tca[TCA_OPTIONS], extack);
		if (err != 0)
			goto err_out5;
	}

	if (tca[TCA_STAB]) {
		stab = qdisc_get_stab(tca[TCA_STAB], extack);
		if (IS_ERR(stab)) {
			err = PTR_ERR(stab);
			goto err_out4;
		}
		rcu_assign_pointer(sch->stab, stab);
	}
	if (tca[TCA_RATE]) {
		seqcount_t *running;

		err = -EOPNOTSUPP;
		if (sch->flags & TCQ_F_MQROOT) {
			NL_SET_ERR_MSG(extack, "Cannot attach rate estimator to a multi-queue root qdisc");
			goto err_out4;
		}

		if (sch->parent != TC_H_ROOT &&
		    !(sch->flags & TCQ_F_INGRESS) &&
		    (!p || !(p->flags & TCQ_F_MQROOT)))
			running = qdisc_root_sleeping_running(sch);
		else
			running = &sch->running;

		err = gen_new_estimator(&sch->bstats,
					sch->cpu_bstats,
					&sch->rate_est,
					NULL,
					running,
					tca[TCA_RATE]);
		if (err) {
			NL_SET_ERR_MSG(extack, "Failed to generate new estimator");
			goto err_out4;
		}
	}

	qdisc_hash_add(sch, false);

	return sch;

err_out5:
	/* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
	if (ops->destroy)
		ops->destroy(sch);
err_out3:
	dev_put(dev);
	qdisc_free(sch);
err_out2:
	module_put(ops->owner);
err_out:
	*errp = err;
	return NULL;

err_out4:
	/*
	 * Any broken qdiscs that would require a ops->reset() here?
	 * The qdisc was never in action so it shouldn't be necessary.
	 */
	qdisc_put_stab(rtnl_dereference(sch->stab));
	if (ops->destroy)
		ops->destroy(sch);
	goto err_out3;
}
static int qdisc_change(struct Qdisc *sch, struct nlattr **tca,
			struct netlink_ext_ack *extack)
{
	struct qdisc_size_table *ostab, *stab = NULL;
	int err = 0;

	if (tca[TCA_OPTIONS]) {
		if (!sch->ops->change) {
			NL_SET_ERR_MSG(extack, "Change operation not supported by specified qdisc");
			return -EINVAL;
		}
		if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
			NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
			return -EOPNOTSUPP;
		}
		err = sch->ops->change(sch, tca[TCA_OPTIONS], extack);
		if (err)
			return err;
	}

	if (tca[TCA_STAB]) {
		stab = qdisc_get_stab(tca[TCA_STAB], extack);
		if (IS_ERR(stab))
			return PTR_ERR(stab);
	}

	ostab = rtnl_dereference(sch->stab);
	rcu_assign_pointer(sch->stab, stab);
	qdisc_put_stab(ostab);

	if (tca[TCA_RATE]) {
		/* NB: ignores errors from replace_estimator
		   because change can't be undone. */
		if (sch->flags & TCQ_F_MQROOT)
			goto out;
		gen_replace_estimator(&sch->bstats,
				      sch->cpu_bstats,
				      &sch->rate_est,
				      NULL,
				      qdisc_root_sleeping_running(sch),
				      tca[TCA_RATE]);
	}
out:
	return 0;
}
struct check_loop_arg {
	struct qdisc_walker	w;
	struct Qdisc		*p;
	int			depth;
};

static int check_loop_fn(struct Qdisc *q, unsigned long cl,
			 struct qdisc_walker *w);

static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
{
	struct check_loop_arg	arg;

	if (q->ops->cl_ops == NULL)
		return 0;

	arg.w.stop = arg.w.skip = arg.w.count = 0;
	arg.w.fn = check_loop_fn;
	arg.depth = depth;
	arg.p = p;
	q->ops->cl_ops->walk(q, &arg.w);
	return arg.w.stop ? -ELOOP : 0;
}

static int
check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
{
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct check_loop_arg *arg = (struct check_loop_arg *)w;

	leaf = cops->leaf(q, cl);
	if (leaf) {
		if (leaf == arg->p || arg->depth > 7)
			return -ELOOP;
		return check_loop(leaf, arg->p, arg->depth + 1);
	}
	return 0;
}
const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
	[TCA_KIND]		= { .type = NLA_STRING },
	[TCA_RATE]		= { .type = NLA_BINARY,
				    .len = sizeof(struct tc_estimator) },
	[TCA_STAB]		= { .type = NLA_NESTED },
	[TCA_DUMP_INVISIBLE]	= { .type = NLA_FLAG },
	[TCA_CHAIN]		= { .type = NLA_U32 },
	[TCA_INGRESS_BLOCK]	= { .type = NLA_U32 },
	[TCA_EGRESS_BLOCK]	= { .type = NLA_U32 },
};
/*
 * Delete/get qdisc.
 */

static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q = NULL;
	struct Qdisc *p = NULL;
	int err;

	if ((n->nlmsg_type != RTM_GETQDISC) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
			  extack);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	clid = tcm->tcm_parent;
	if (clid) {
		if (clid != TC_H_ROOT) {
			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p) {
					NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified classid");
					return -ENOENT;
				}
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}
		if (!q) {
			NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device");
			return -ENOENT;
		}

		if (tcm->tcm_handle && q->handle != tcm->tcm_handle) {
			NL_SET_ERR_MSG(extack, "Invalid handle");
			return -EINVAL;
		}
	} else {
		q = qdisc_lookup(dev, tcm->tcm_handle);
		if (!q) {
			NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified handle");
			return -ENOENT;
		}
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
		NL_SET_ERR_MSG(extack, "Invalid qdisc name");
		return -EINVAL;
	}

	if (n->nlmsg_type == RTM_DELQDISC) {
		if (!clid) {
			NL_SET_ERR_MSG(extack, "Classid cannot be zero");
			return -EINVAL;
		}
		if (q->handle == 0) {
			NL_SET_ERR_MSG(extack, "Cannot delete qdisc with handle of zero");
			return -ENOENT;
		}
		err = qdisc_graft(dev, p, skb, n, clid, NULL, q, extack);
		if (err != 0)
			return err;
	} else {
		qdisc_notify(net, skb, n, clid, NULL, q);
	}
	return 0;
}
/*
 * Create/change qdisc.
 */

static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm;
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q, *p;
	int err;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	/* Reinit, just in case something touches this. */
	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
			  extack);
	if (err < 0)
		return err;

	tcm = nlmsg_data(n);
	clid = tcm->tcm_parent;
	q = p = NULL;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;


	if (clid) {
		if (clid != TC_H_ROOT) {
			if (clid != TC_H_INGRESS) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p) {
					NL_SET_ERR_MSG(extack, "Failed to find specified qdisc");
					return -ENOENT;
				}
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue_create(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}

		/* It may be the default qdisc; ignore it */
		if (q && q->handle == 0)
			q = NULL;

		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
			if (tcm->tcm_handle) {
				if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) {
					NL_SET_ERR_MSG(extack, "NLM_F_REPLACE needed to override");
					return -EEXIST;
				}
				if (TC_H_MIN(tcm->tcm_handle)) {
					NL_SET_ERR_MSG(extack, "Invalid minor handle");
					return -EINVAL;
				}
				q = qdisc_lookup(dev, tcm->tcm_handle);
				if (!q)
					goto create_n_graft;
				if (n->nlmsg_flags & NLM_F_EXCL) {
					NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot override");
					return -EEXIST;
				}
				if (tca[TCA_KIND] &&
				    nla_strcmp(tca[TCA_KIND], q->ops->id)) {
					NL_SET_ERR_MSG(extack, "Invalid qdisc name");
					return -EINVAL;
				}
				if (q == p ||
				    (p && check_loop(q, p, 0))) {
					NL_SET_ERR_MSG(extack, "Qdisc parent/child loop detected");
					return -ELOOP;
				}
				qdisc_refcount_inc(q);
				goto graft;
			} else {
				if (!q)
					goto create_n_graft;

				/* This magic test requires explanation.
				 *
				 *   We know that some child q is already
				 *   attached to this parent and have a choice:
				 *   either to change it or to create/graft a new one.
				 *
				 *   1. We are allowed to create/graft only
				 *   if both CREATE and REPLACE flags are set.
				 *
				 *   2. If EXCL is set, the requester wanted to say
				 *   that the qdisc tcm_handle is not expected
				 *   to exist, so we choose create/graft too.
				 *
				 *   3. The last case is when no flags are set.
				 *   Alas, it is a sort of hole in the API; we
				 *   cannot decide what to do unambiguously.
				 *   For now we select create/graft if the
				 *   user gave a KIND that does not match the
				 *   existing one.
				 */
				if ((n->nlmsg_flags & NLM_F_CREATE) &&
				    (n->nlmsg_flags & NLM_F_REPLACE) &&
				    ((n->nlmsg_flags & NLM_F_EXCL) ||
				     (tca[TCA_KIND] &&
				      nla_strcmp(tca[TCA_KIND], q->ops->id))))
					goto create_n_graft;
			}
		}
	} else {
		if (!tcm->tcm_handle) {
			NL_SET_ERR_MSG(extack, "Handle cannot be zero");
			return -EINVAL;
		}
		q = qdisc_lookup(dev, tcm->tcm_handle);
	}

	/* Change qdisc parameters */
	if (!q) {
		NL_SET_ERR_MSG(extack, "Specified qdisc not found");
		return -ENOENT;
	}
	if (n->nlmsg_flags & NLM_F_EXCL) {
		NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot modify");
		return -EEXIST;
	}
	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
		NL_SET_ERR_MSG(extack, "Invalid qdisc name");
		return -EINVAL;
	}
	err = qdisc_change(q, tca, extack);
	if (err == 0)
		qdisc_notify(net, skb, n, clid, NULL, q);
	return err;

create_n_graft:
	if (!(n->nlmsg_flags & NLM_F_CREATE)) {
		NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag");
		return -ENOENT;
	}
	if (clid == TC_H_INGRESS) {
		if (dev_ingress_queue(dev)) {
			q = qdisc_create(dev, dev_ingress_queue(dev), p,
					 tcm->tcm_parent, tcm->tcm_parent,
					 tca, &err, extack);
		} else {
			NL_SET_ERR_MSG(extack, "Cannot find ingress queue for specified device");
			err = -ENOENT;
		}
	} else {
		struct netdev_queue *dev_queue;

		if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
			dev_queue = p->ops->cl_ops->select_queue(p, tcm);
		else if (p)
			dev_queue = p->dev_queue;
		else
			dev_queue = netdev_get_tx_queue(dev, 0);

		q = qdisc_create(dev, dev_queue, p,
				 tcm->tcm_parent, tcm->tcm_handle,
				 tca, &err, extack);
	}
	if (q == NULL) {
		if (err == -EAGAIN)
			goto replay;
		return err;
	}

graft:
	err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);
	if (err) {
		if (q)
			qdisc_put(q);
		return err;
	}

	return 0;
}
static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
			      struct netlink_callback *cb,
			      int *q_idx_p, int s_q_idx, bool recur,
			      bool dump_invisible)
{
	int ret = 0, q_idx = *q_idx_p;
	struct Qdisc *q;
	int b;

	if (!root)
		return 0;

	q = root;
	if (q_idx < s_q_idx) {
		q_idx++;
	} else {
		if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
				  RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}

	/* If dumping singletons, there is no qdisc_dev(root) and the singleton
	 * itself has already been dumped.
	 *
	 * If we've already dumped the top-level (ingress) qdisc above and the global
	 * qdisc hashtable, we don't want to hit it again.
	 */
	if (!qdisc_dev(root) || !recur)
		goto out;

	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
		if (q_idx < s_q_idx) {
			q_idx++;
			continue;
		}
		if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
				  RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}

out:
	*q_idx_p = q_idx;
	return ret;
done:
	ret = -1;
	goto out;
}
static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int idx, q_idx;
	int s_idx, s_q_idx;
	struct net_device *dev;
	const struct nlmsghdr *nlh = cb->nlh;
	struct nlattr *tca[TCA_MAX + 1];
	int err;

	s_idx = cb->args[0];
	s_q_idx = q_idx = cb->args[1];

	idx = 0;
	ASSERT_RTNL();

	err = nlmsg_parse(nlh, sizeof(struct tcmsg), tca, TCA_MAX,
			  rtm_tca_policy, cb->extack);
	if (err < 0)
		return err;

	for_each_netdev(net, dev) {
		struct netdev_queue *dev_queue;

		if (idx < s_idx)
			goto cont;
		if (idx > s_idx)
			s_q_idx = 0;
		q_idx = 0;

		if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx,
				       true, tca[TCA_DUMP_INVISIBLE]) < 0)
			goto done;

		dev_queue = dev_ingress_queue(dev);
		if (dev_queue &&
		    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
				       &q_idx, s_q_idx, false,
				       tca[TCA_DUMP_INVISIBLE]) < 0)
			goto done;

cont:
		idx++;
	}

done:
	cb->args[0] = idx;
	cb->args[1] = q_idx;

	return skb->len;
}
/************************************************
 *	Traffic classes manipulation.		*
 ************************************************/
static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
			  unsigned long cl,
			  u32 portid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr  *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;

	cond_resched();
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = q->handle;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = 0;
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 NULL, &d, TCA_PAD) < 0)
		goto nla_put_failure;

	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static int tclass_notify(struct net *net, struct sk_buff *oskb,
			 struct nlmsghdr *n, struct Qdisc *q,
			 unsigned long cl, int event)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}
static int tclass_del_notify(struct net *net,
			     const struct Qdisc_class_ops *cops,
			     struct sk_buff *oskb, struct nlmsghdr *n,
			     struct Qdisc *q, unsigned long cl)
{
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	struct sk_buff *skb;
	int err = 0;

	if (!cops->delete)
		return -EOPNOTSUPP;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0,
			   RTM_DELTCLASS) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	err = cops->delete(q, cl);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}
#ifdef CONFIG_NET_CLS

struct tcf_bind_args {
	struct tcf_walker w;
	u32 classid;
	unsigned long cl;
};

static int tcf_node_bind(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
	struct tcf_bind_args *a = (void *)arg;

	if (tp->ops->bind_class) {
		struct Qdisc *q = tcf_block_q(tp->chain->block);

		sch_tree_lock(q);
		tp->ops->bind_class(n, a->classid, a->cl);
		sch_tree_unlock(q);
	}
	return 0;
}

static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
			   unsigned long new_cl)
{
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct tcf_block *block;
	struct tcf_chain *chain;
	unsigned long cl;

	cl = cops->find(q, portid);
	if (!cl)
		return;
	block = cops->tcf_block(q, cl, NULL);
	if (!block)
		return;
	list_for_each_entry(chain, &block->chain_list, list) {
		struct tcf_proto *tp;

		for (tp = rtnl_dereference(chain->filter_chain);
		     tp; tp = rtnl_dereference(tp->next)) {
			struct tcf_bind_args arg = {};

			arg.w.fn = tcf_node_bind;
			arg.classid = clid;
			arg.cl = new_cl;
			tp->ops->walk(tp, &arg.w);
		}
	}
}

#else

static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
			   unsigned long new_cl)
{
}

#endif
static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q = NULL;
	const struct Qdisc_class_ops *cops;
	unsigned long cl = 0;
	unsigned long new_cl;
	u32 portid;
	u32 clid;
	u32 qid;
	int err;

	if ((n->nlmsg_type != RTM_GETTCLASS) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
			  extack);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	/*
	   parent == TC_H_UNSPEC - unspecified parent.
	   parent == TC_H_ROOT   - class is root, which has no parent.
	   parent == X:0	 - parent is root class.
	   parent == X:Y	 - parent is a node in hierarchy.
	   parent == 0:Y	 - parent is X:Y, where X:0 is qdisc.

	   handle == 0:0	 - generate handle from kernel pool.
	   handle == 0:Y	 - class is X:Y, where X:0 is qdisc.
	   handle == X:Y	 - fully specified, unambiguous.
	   handle == X:0	 - root class.
	 */
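	/* Illustrative example (not from the original source): for
	 * "tc class add dev eth0 parent 1: classid 1:10", tcm_parent is
	 * 1:0 and tcm_handle is 1:10, so qid resolves to major 1:0 below
	 * and the class is then looked up as clid 1:10.
	 */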
	/* Step 1. Determine qdisc handle X:0 */

	portid = tcm->tcm_parent;
	clid = tcm->tcm_handle;
	qid = TC_H_MAJ(clid);

	if (portid != TC_H_ROOT) {
		u32 qid1 = TC_H_MAJ(portid);

		if (qid && qid1) {
			/* If both majors are known, they must be identical. */
			if (qid != qid1)
				return -EINVAL;
		} else if (qid1) {
			qid = qid1;
		} else if (qid == 0)
			qid = dev->qdisc->handle;

		/* Now qid is genuine qdisc handle consistent
		 * both with parent and child.
		 *
		 * TC_H_MAJ(portid) still may be unspecified, complete it now.
		 */
		if (portid)
			portid = TC_H_MAKE(qid, portid);
	} else {
		if (qid == 0)
			qid = dev->qdisc->handle;
	}

	/* OK. Locate qdisc */
	q = qdisc_lookup(dev, qid);
	if (!q)
		return -ENOENT;

	/* And check that it supports classes */
	cops = q->ops->cl_ops;
	if (cops == NULL)
		return -EINVAL;

	/* Now try to get class */
	if (clid == 0) {
		if (portid == TC_H_ROOT)
			clid = qid;
	} else
		clid = TC_H_MAKE(qid, clid);

	if (clid)
		cl = cops->find(q, clid);

	if (cl == 0) {
		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTCLASS ||
		    !(n->nlmsg_flags & NLM_F_CREATE))
			goto out;
	} else {
		switch (n->nlmsg_type) {
		case RTM_NEWTCLASS:
			err = -EEXIST;
			if (n->nlmsg_flags & NLM_F_EXCL)
				goto out;
			break;
		case RTM_DELTCLASS:
			err = tclass_del_notify(net, cops, skb, n, q, cl);
			/* Unbind the class from its filters by rebinding them to 0 */
			tc_bind_tclass(q, portid, clid, 0);
			goto out;
		case RTM_GETTCLASS:
			err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
			goto out;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	if (tca[TCA_INGRESS_BLOCK] || tca[TCA_EGRESS_BLOCK]) {
		NL_SET_ERR_MSG(extack, "Shared blocks are not supported for classes");
		return -EOPNOTSUPP;
	}

	new_cl = cl;
	err = -EOPNOTSUPP;
	if (cops->change)
		err = cops->change(q, clid, portid, tca, &new_cl, extack);
	if (err == 0) {
		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
		/* We just created a new class; need to do the reverse binding. */
		if (cl != new_cl)
			tc_bind_tclass(q, portid, clid, new_cl);
	}
out:
	return err;
}
struct qdisc_dump_args {
	struct qdisc_walker	w;
	struct sk_buff		*skb;
	struct netlink_callback	*cb;
};

static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
			    struct qdisc_walker *arg)
{
	struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;

	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			      RTM_NEWTCLASS);
}

static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
				struct tcmsg *tcm, struct netlink_callback *cb,
				int *t_p, int s_t)
{
	struct qdisc_dump_args arg;

	if (tc_qdisc_dump_ignore(q, false) ||
	    *t_p < s_t || !q->ops->cl_ops ||
	    (tcm->tcm_parent &&
	     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
		(*t_p)++;
		return 0;
	}
	if (*t_p > s_t)
		memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
	arg.w.fn = qdisc_class_dump;
	arg.skb = skb;
	arg.cb = cb;
	arg.w.stop  = 0;
	arg.w.skip = cb->args[1];
	arg.w.count = 0;
	q->ops->cl_ops->walk(q, &arg.w);
	cb->args[1] = arg.w.count;
	if (arg.w.stop)
		return -1;
	(*t_p)++;
	return 0;
}
static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
			       struct tcmsg *tcm, struct netlink_callback *cb,
			       int *t_p, int s_t)
{
	struct Qdisc *q;
	int b;

	if (!root)
		return 0;

	if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
		return -1;

	if (!qdisc_dev(root))
		return 0;

	if (tcm->tcm_parent) {
		q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
		if (q && q != root &&
		    tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
			return -1;
		return 0;
	}
	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
		if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
			return -1;
	}

	return 0;
}
static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct net *net = sock_net(skb->sk);
	struct netdev_queue *dev_queue;
	struct net_device *dev;
	int t, s_t;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return 0;
	dev = dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return 0;

	s_t = cb->args[0];
	t = 0;

	if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
		goto done;

	dev_queue = dev_ingress_queue(dev);
	if (dev_queue &&
	    tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
				&t, s_t) < 0)
		goto done;

done:
	cb->args[0] = t;

	dev_put(dev);
	return skb->len;
}
#ifdef CONFIG_PROC_FS
static int psched_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "%08x %08x %08x %08x\n",
		   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
		   1000000,
		   (u32)NSEC_PER_SEC / hrtimer_resolution);

	return 0;
}
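/* Illustrative only (not from the original source): on a typical system with
 * 1 ns hrtimer resolution, "cat /proc/net/psched" would print something like
 * "000003e8 00000040 000f4240 3b9aca00", i.e. 1000 ns per microsecond,
 * 64 ns per psched tick, a legacy 1 MHz timer frequency, and a 1 GHz clock
 * resolution. Exact values depend on the kernel configuration.
 */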
static int __net_init psched_net_init(struct net *net)
{
	struct proc_dir_entry *e;

	e = proc_create_single("psched", 0, net->proc_net, psched_show);
	if (e == NULL)
		return -ENOMEM;

	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
	remove_proc_entry("psched", net->proc_net);
}
#else
static int __net_init psched_net_init(struct net *net)
{
	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
}
#endif

static struct pernet_operations psched_net_ops = {
	.init = psched_net_init,
	.exit = psched_net_exit,
};
static int __init pktsched_init(void)
{
	int err;

	err = register_pernet_subsys(&psched_net_ops);
	if (err) {
		pr_err("pktsched_init: "
		       "cannot initialize per netns operations\n");
		return err;
	}

	register_qdisc(&pfifo_fast_ops);
	register_qdisc(&pfifo_qdisc_ops);
	register_qdisc(&bfifo_qdisc_ops);
	register_qdisc(&pfifo_head_drop_qdisc_ops);
	register_qdisc(&mq_qdisc_ops);
	register_qdisc(&noqueue_qdisc_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc,
		      0);
	rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass,
		      0);

	return 0;
}

subsys_initcall(pktsched_init);