// SPDX-License-Identifier: GPL-2.0

/* net/sched/sch_taprio.c	Time Aware Priority Scheduler
 *
 * Authors:	Vinicius Costa Gomes <vinicius.gomes@intel.com>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>

static LIST_HEAD(taprio_list);
static DEFINE_SPINLOCK(taprio_list_lock);

#define TAPRIO_ALL_GATES_OPEN -1

struct sched_entry {
	struct list_head list;

	/* The instant that this entry "closes" and the next one
	 * should open; the qdisc makes some effort so that no
	 * packet leaves after this time.
	 */
	ktime_t close_time;
	atomic_t budget;
	int index;
	u32 gate_mask;
	u32 interval;
	u8 command;
};

struct taprio_sched {
	struct Qdisc **qdiscs;
	struct Qdisc *root;
	s64 base_time;
	int clockid;
	atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
				    * speeds it's sub-nanoseconds per byte
				    */
	size_t num_entries;

	/* Protects the update side of the RCU protected current_entry */
	spinlock_t current_entry_lock;
	struct sched_entry __rcu *current_entry;
	struct list_head entries;
	ktime_t (*get_time)(void);
	struct hrtimer advance_timer;
	struct list_head taprio_list;
};

static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct Qdisc *child;
	int queue;

	queue = skb_get_queue_mapping(skb);

	child = q->qdiscs[queue];
	if (unlikely(!child))
		return qdisc_drop(skb, sch, to_free);

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;

	return qdisc_enqueue(skb, child, to_free);
}

static struct sk_buff *taprio_peek(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sched_entry *entry;
	struct sk_buff *skb;
	u32 gate_mask;
	int i;

	rcu_read_lock();
	entry = rcu_dereference(q->current_entry);
	gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
	rcu_read_unlock();

	if (!gate_mask)
		return NULL;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];
		int prio;
		u8 tc;

		if (unlikely(!child))
			continue;

		skb = child->ops->peek(child);
		if (!skb)
			continue;

		prio = skb->priority;
		tc = netdev_get_prio_tc_map(dev, prio);

		if (!(gate_mask & BIT(tc)))
			continue;

		return skb;
	}

	return NULL;
}

/* Returns how long, in nanoseconds, it takes to transmit 'len' bytes */
static inline int length_to_duration(struct taprio_sched *q, int len)
{
	return (len * atomic64_read(&q->picos_per_byte)) / 1000;
}

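/* Worked example (illustrative numbers): at 1Gbps picos_per_byte is
 * 8000 (see taprio_set_picos_per_byte() below), so a 1500 byte frame
 * takes (1500 * 8000) / 1000 == 12000ns on the wire.
 */
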
static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sched_entry *entry;
	struct sk_buff *skb;
	u32 gate_mask;
	int i;

	if (atomic64_read(&q->picos_per_byte) == -1) {
		WARN_ONCE(1, "taprio: dequeue() called with unknown picos per byte.");
		return NULL;
	}

	rcu_read_lock();
	entry = rcu_dereference(q->current_entry);
	/* if there's no entry, it means that the schedule didn't
	 * start yet, so force all gates to be open, this is in
	 * accordance with IEEE 802.1Qbv-2015 Section 8.6.9.4.5
	 * "AdminGateStates"
	 */
	gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
	rcu_read_unlock();

	if (!gate_mask)
		return NULL;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];
		ktime_t guard;
		int prio;
		int len;
		u8 tc;

		if (unlikely(!child))
			continue;

		skb = child->ops->peek(child);
		if (!skb)
			continue;

		prio = skb->priority;
		tc = netdev_get_prio_tc_map(dev, prio);

		if (!(gate_mask & BIT(tc)))
			continue;

		len = qdisc_pkt_len(skb);
		guard = ktime_add_ns(q->get_time(),
				     length_to_duration(q, len));

		/* In the case that there's no gate entry, there's no
		 * guard band ...
		 */
		if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
		    ktime_after(guard, entry->close_time))
			return NULL;

		/* ... and no budget. */
		if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
		    atomic_sub_return(len, &entry->budget) < 0)
			return NULL;

		skb = child->ops->dequeue(child);
		if (unlikely(!skb))
			return NULL;

		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;

		return skb;
	}

	return NULL;
}

static bool should_restart_cycle(const struct taprio_sched *q,
				 const struct sched_entry *entry)
{
	WARN_ON(!entry);

	return list_is_last(&entry->list, &q->entries);
}

static enum hrtimer_restart advance_sched(struct hrtimer *timer)
{
	struct taprio_sched *q = container_of(timer, struct taprio_sched,
					      advance_timer);
	struct sched_entry *entry, *next;
	struct Qdisc *sch = q->root;
	ktime_t close_time;

	spin_lock(&q->current_entry_lock);
	entry = rcu_dereference_protected(q->current_entry,
					  lockdep_is_held(&q->current_entry_lock));

	/* This happens only once per schedule: the first time it
	 * runs. The first entry is pre-calculated during the
	 * schedule initialization.
	 */
	if (unlikely(!entry)) {
		next = list_first_entry(&q->entries, struct sched_entry,
					list);
		close_time = next->close_time;
		goto first_run;
	}

	if (should_restart_cycle(q, entry))
		next = list_first_entry(&q->entries, struct sched_entry,
					list);
	else
		next = list_next_entry(entry, list);

	close_time = ktime_add_ns(entry->close_time, next->interval);

	next->close_time = close_time;
	atomic_set(&next->budget,
		   (next->interval * 1000) / atomic64_read(&q->picos_per_byte));

first_run:
	rcu_assign_pointer(q->current_entry, next);
	spin_unlock(&q->current_entry_lock);

	hrtimer_set_expires(&q->advance_timer, close_time);

	rcu_read_lock();
	__netif_schedule(sch);
	rcu_read_unlock();

	return HRTIMER_RESTART;
}

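/* Worked example of the budget above (illustrative numbers): a 300us
 * (300000ns) entry on a 1Gbps link (picos_per_byte == 8000) gets
 * (300000 * 1000) / 8000 == 37500 bytes of budget for its interval.
 */
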
static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
	[TCA_TAPRIO_SCHED_ENTRY_INDEX]	   = { .type = NLA_U32 },
	[TCA_TAPRIO_SCHED_ENTRY_CMD]	   = { .type = NLA_U8 },
	[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK] = { .type = NLA_U32 },
	[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]  = { .type = NLA_U32 },
};

static const struct nla_policy entry_list_policy[TCA_TAPRIO_SCHED_MAX + 1] = {
	[TCA_TAPRIO_SCHED_ENTRY] = { .type = NLA_NESTED },
};

static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
	[TCA_TAPRIO_ATTR_PRIOMAP] = {
		.len = sizeof(struct tc_mqprio_qopt)
	},
	[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST]   = { .type = NLA_NESTED },
	[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]    = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY] = { .type = NLA_NESTED },
	[TCA_TAPRIO_ATTR_SCHED_CLOCKID]      = { .type = NLA_S32 },
};

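/* Example of a configuration that exercises these policies (device
 * name and values are illustrative only):
 *
 *   tc qdisc replace dev eth0 parent root handle 100 taprio \
 *         num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *         queues 1@0 1@1 2@2 \
 *         base-time 1528743495910289987 \
 *         sched-entry S 01 300000 \
 *         sched-entry S 02 300000 \
 *         sched-entry S 04 400000 \
 *         clockid CLOCK_TAI
 *
 * "map" and "queues" fill TCA_TAPRIO_ATTR_PRIOMAP, "base-time" fills
 * TCA_TAPRIO_ATTR_SCHED_BASE_TIME, and each "sched-entry" becomes one
 * TCA_TAPRIO_SCHED_ENTRY carrying a command ('S', set gate states), a
 * gate mask and an interval in nanoseconds.
 */
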
static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry,
			    struct netlink_ext_ack *extack)
{
	u32 interval = 0;

	if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD])
		entry->command = nla_get_u8(
			tb[TCA_TAPRIO_SCHED_ENTRY_CMD]);

	if (tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK])
		entry->gate_mask = nla_get_u32(
			tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]);

	if (tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL])
		interval = nla_get_u32(
			tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]);

	if (interval == 0) {
		NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
		return -EINVAL;
	}

	entry->interval = interval;

	return 0;
}

static int parse_sched_entry(struct nlattr *n, struct sched_entry *entry,
			     int index, struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
	int err;

	err = nla_parse_nested(tb, TCA_TAPRIO_SCHED_ENTRY_MAX, n,
			       entry_policy, NULL);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Could not parse nested entry");
		return -EINVAL;
	}

	entry->index = index;

	return fill_sched_entry(tb, entry, extack);
}

/* Returns the number of entries in case of success */
static int parse_sched_single_entry(struct nlattr *n,
				    struct taprio_sched *q,
				    struct netlink_ext_ack *extack)
{
	struct nlattr *tb_entry[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
	struct nlattr *tb_list[TCA_TAPRIO_SCHED_MAX + 1] = { };
	struct sched_entry *entry;
	bool found = false;
	u32 index;
	int err;

	err = nla_parse_nested(tb_list, TCA_TAPRIO_SCHED_MAX,
			       n, entry_list_policy, NULL);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Could not parse nested entry");
		return -EINVAL;
	}

	if (!tb_list[TCA_TAPRIO_SCHED_ENTRY]) {
		NL_SET_ERR_MSG(extack, "Single-entry must include an entry");
		return -EINVAL;
	}

	err = nla_parse_nested(tb_entry, TCA_TAPRIO_SCHED_ENTRY_MAX,
			       tb_list[TCA_TAPRIO_SCHED_ENTRY],
			       entry_policy, NULL);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Could not parse nested entry");
		return -EINVAL;
	}

	if (!tb_entry[TCA_TAPRIO_SCHED_ENTRY_INDEX]) {
		NL_SET_ERR_MSG(extack, "Entry must specify an index");
		return -EINVAL;
	}

	index = nla_get_u32(tb_entry[TCA_TAPRIO_SCHED_ENTRY_INDEX]);
	if (index >= q->num_entries) {
		NL_SET_ERR_MSG(extack, "Index for single entry exceeds number of entries in schedule");
		return -EINVAL;
	}

	list_for_each_entry(entry, &q->entries, list) {
		if (entry->index == index) {
			found = true;
			break;
		}
	}

	if (!found) {
		NL_SET_ERR_MSG(extack, "Could not find entry");
		return -ENOENT;
	}

	err = fill_sched_entry(tb_entry, entry, extack);
	if (err < 0)
		return err;

	return q->num_entries;
}

static int parse_sched_list(struct nlattr *list,
			    struct taprio_sched *q,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *n;
	int err, rem;
	int i = 0;

	if (!list)
		return -EINVAL;

	nla_for_each_nested(n, list, rem) {
		struct sched_entry *entry;

		if (nla_type(n) != TCA_TAPRIO_SCHED_ENTRY) {
			NL_SET_ERR_MSG(extack, "Attribute is not of type 'entry'");
			continue;
		}

		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			NL_SET_ERR_MSG(extack, "Not enough memory for entry");
			return -ENOMEM;
		}

		err = parse_sched_entry(n, entry, i, extack);
		if (err < 0) {
			kfree(entry);
			return err;
		}

		list_add_tail(&entry->list, &q->entries);
		i++;
	}

	q->num_entries = i;

	return i;
}

/* Returns the number of entries in case of success */
static int parse_taprio_opt(struct nlattr **tb, struct taprio_sched *q,
			    struct netlink_ext_ack *extack)
{
	int err = 0;
	int clockid;

	if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST] &&
	    tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY])
		return -EINVAL;

	if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY] && q->num_entries == 0)
		return -EINVAL;

	if (q->clockid == -1 && !tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID])
		return -EINVAL;

	if (tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME])
		q->base_time = nla_get_s64(
			tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
		clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]);

		/* We only support static clockids and we don't allow
		 * for it to be modified after the first init.
		 */
		if (clockid < 0 || (q->clockid != -1 && q->clockid != clockid))
			return -EINVAL;

		q->clockid = clockid;
	}

	if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST])
		err = parse_sched_list(
			tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST], q, extack);
	else if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY])
		err = parse_sched_single_entry(
			tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY], q, extack);

	/* parse_sched_*() returns the number of entries in the schedule;
	 * a schedule with zero entries is an error.
	 */
	if (err == 0) {
		NL_SET_ERR_MSG(extack, "The schedule should contain at least one entry");
		return -EINVAL;
	}

	return err;
}

static int taprio_parse_mqprio_opt(struct net_device *dev,
				   struct tc_mqprio_qopt *qopt,
				   struct netlink_ext_ack *extack)
{
	int i, j;

	if (!qopt) {
		NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
		return -EINVAL;
	}

	/* Verify num_tc is not out of max range */
	if (qopt->num_tc > TC_MAX_QUEUE) {
		NL_SET_ERR_MSG(extack, "Number of traffic classes is outside valid range");
		return -EINVAL;
	}

	/* taprio imposes that traffic classes map 1:n to tx queues */
	if (qopt->num_tc > dev->num_tx_queues) {
		NL_SET_ERR_MSG(extack, "Number of traffic classes is greater than number of HW queues");
		return -EINVAL;
	}

	/* Verify priority mapping uses valid tcs */
	for (i = 0; i < TC_BITMASK + 1; i++) {
		if (qopt->prio_tc_map[i] >= qopt->num_tc) {
			NL_SET_ERR_MSG(extack, "Invalid traffic class in priority to traffic class mapping");
			return -EINVAL;
		}
	}

	for (i = 0; i < qopt->num_tc; i++) {
		unsigned int last = qopt->offset[i] + qopt->count[i];

		/* Verify the queue range is valid: the class must start
		 * inside the tx range and must not extend past the real
		 * number of tx queues in use.
		 */
		if (qopt->offset[i] >= dev->num_tx_queues ||
		    !qopt->count[i] ||
		    last > dev->real_num_tx_queues) {
			NL_SET_ERR_MSG(extack, "Invalid queue in traffic class to queue mapping");
			return -EINVAL;
		}

		/* Verify that the offset and counts do not overlap */
		for (j = i + 1; j < qopt->num_tc; j++) {
			if (last > qopt->offset[j]) {
				NL_SET_ERR_MSG(extack, "Detected overlap in the traffic class to queue mapping");
				return -EINVAL;
			}
		}
	}

	return 0;
}

static ktime_t taprio_get_start_time(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct sched_entry *entry;
	ktime_t now, base, cycle;
	s64 n;

	base = ns_to_ktime(q->base_time);
	cycle = 0;

	/* Calculate the cycle_time by summing all the intervals. */
	list_for_each_entry(entry, &q->entries, list)
		cycle = ktime_add_ns(cycle, entry->interval);

	if (!cycle)
		return base;

	now = q->get_time();

	if (ktime_after(base, now))
		return base;

	/* Schedule the start time for the beginning of the next
	 * cycle.
	 */
	n = div64_s64(ktime_sub_ns(now, base), cycle);

	return ktime_add_ns(base, (n + 1) * cycle);
}

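/* Worked example (illustrative numbers): base == 100us,
 * cycle == 30us, now == 170us. Then n = (170 - 100) / 30 == 2 and
 * the schedule starts at 100 + (2 + 1) * 30 == 190us, the first
 * cycle boundary after 'now'.
 */
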
static void taprio_start_sched(struct Qdisc *sch, ktime_t start)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct sched_entry *first;
	unsigned long flags;

	spin_lock_irqsave(&q->current_entry_lock, flags);

	first = list_first_entry(&q->entries, struct sched_entry,
				 list);

	first->close_time = ktime_add_ns(start, first->interval);
	atomic_set(&first->budget,
		   (first->interval * 1000) /
		   atomic64_read(&q->picos_per_byte));
	rcu_assign_pointer(q->current_entry, NULL);

	spin_unlock_irqrestore(&q->current_entry_lock, flags);

	hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS);
}

static void taprio_set_picos_per_byte(struct net_device *dev,
				      struct taprio_sched *q)
{
	struct ethtool_link_ksettings ecmd;
	int picos_per_byte = -1;

	if (!__ethtool_get_link_ksettings(dev, &ecmd) &&
	    ecmd.base.speed != SPEED_UNKNOWN)
		picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8,
					   ecmd.base.speed * 1000 * 1000);

	atomic64_set(&q->picos_per_byte, picos_per_byte);
	netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
		   dev->name, (long long)atomic64_read(&q->picos_per_byte),
		   ecmd.base.speed);
}

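/* Worked example (illustrative): on a 1000Mbps link the division
 * above is (NSEC_PER_SEC * 1000 * 8) / (1000 * 1000 * 1000), i.e.
 * 8 * 10^12 / 10^9 == 8000 picoseconds per byte: 8ns of wire time
 * per byte.
 */
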
static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
			       void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net_device *qdev;
	struct taprio_sched *q;
	bool found = false;

	ASSERT_RTNL();

	if (event != NETDEV_UP && event != NETDEV_CHANGE)
		return NOTIFY_DONE;

	spin_lock(&taprio_list_lock);
	list_for_each_entry(q, &taprio_list, taprio_list) {
		qdev = qdisc_dev(q->root);
		if (qdev == dev) {
			found = true;
			break;
		}
	}
	spin_unlock(&taprio_list_lock);

	if (found)
		taprio_set_picos_per_byte(dev, q);

	return NOTIFY_DONE;
}

static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
			 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TAPRIO_ATTR_MAX + 1] = { };
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_mqprio_qopt *mqprio = NULL;
	ktime_t start;
	int i, err, size;

	err = nla_parse_nested(tb, TCA_TAPRIO_ATTR_MAX, opt,
			       taprio_policy, extack);
	if (err < 0)
		return err;

	if (tb[TCA_TAPRIO_ATTR_PRIOMAP])
		mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]);

	err = taprio_parse_mqprio_opt(dev, mqprio, extack);
	if (err < 0)
		return err;

	/* A schedule with fewer than one entry is an error */
	size = parse_taprio_opt(tb, q, extack);
	if (size < 0)
		return size;

	hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS);
	q->advance_timer.function = advance_sched;

	switch (q->clockid) {
	case CLOCK_REALTIME:
		q->get_time = ktime_get_real;
		break;
	case CLOCK_MONOTONIC:
		q->get_time = ktime_get;
		break;
	case CLOCK_BOOTTIME:
		q->get_time = ktime_get_boottime;
		break;
	case CLOCK_TAI:
		q->get_time = ktime_get_clocktai;
		break;
	default:
		return -ENOTSUPP;
	}

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		struct Qdisc *qdisc;

		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue,
					  &pfifo_qdisc_ops,
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)),
					  extack);
		if (!qdisc)
			return -ENOMEM;

		if (i < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);

		q->qdiscs[i] = qdisc;
	}

	if (mqprio) {
		netdev_set_num_tc(dev, mqprio->num_tc);
		for (i = 0; i < mqprio->num_tc; i++)
			netdev_set_tc_queue(dev, i,
					    mqprio->count[i],
					    mqprio->offset[i]);

		/* Always use supplied priority mappings */
		for (i = 0; i < TC_BITMASK + 1; i++)
			netdev_set_prio_tc_map(dev, i,
					       mqprio->prio_tc_map[i]);
	}

	taprio_set_picos_per_byte(dev, q);
	start = taprio_get_start_time(sch);
	if (!start)
		return 0;

	taprio_start_sched(sch, start);

	return 0;
}

static void taprio_destroy(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sched_entry *entry, *n;
	unsigned int i;

	spin_lock(&taprio_list_lock);
	list_del(&q->taprio_list);
	spin_unlock(&taprio_list_lock);

	hrtimer_cancel(&q->advance_timer);

	if (q->qdiscs) {
		for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++)
			qdisc_put(q->qdiscs[i]);

		kfree(q->qdiscs);
	}
	q->qdiscs = NULL;

	netdev_set_num_tc(dev, 0);

	list_for_each_entry_safe(entry, n, &q->entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}
}

static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);

	INIT_LIST_HEAD(&q->entries);
	spin_lock_init(&q->current_entry_lock);

	/* We may overwrite the configuration later */
	hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS);

	q->root = sch;

	/* We only support static clockids. Use an invalid value as default
	 * and get the valid one on taprio_change().
	 */
	q->clockid = -1;

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* pre-allocate qdiscs; attachment can't fail */
	q->qdiscs = kcalloc(dev->num_tx_queues,
			    sizeof(q->qdiscs[0]),
			    GFP_KERNEL);

	if (!q->qdiscs)
		return -ENOMEM;

	if (!opt)
		return -EINVAL;

	spin_lock(&taprio_list_lock);
	list_add(&q->taprio_list, &taprio_list);
	spin_unlock(&taprio_list_lock);

	return taprio_change(sch, opt, extack);
}

static struct netdev_queue *taprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;

	return netdev_get_tx_queue(dev, ntx);
}

static int taprio_graft(struct Qdisc *sch, unsigned long cl,
			struct Qdisc *new, struct Qdisc **old,
			struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	if (!dev_queue)
		return -EINVAL;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = q->qdiscs[cl - 1];
	q->qdiscs[cl - 1] = new;

	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return 0;
}

static int dump_entry(struct sk_buff *msg,
		      const struct sched_entry *entry)
{
	struct nlattr *item;

	item = nla_nest_start(msg, TCA_TAPRIO_SCHED_ENTRY);
	if (!item)
		return -ENOSPC;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INDEX, entry->index))
		goto nla_put_failure;

	if (nla_put_u8(msg, TCA_TAPRIO_SCHED_ENTRY_CMD, entry->command))
		goto nla_put_failure;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK,
			entry->gate_mask))
		goto nla_put_failure;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INTERVAL,
			entry->interval))
		goto nla_put_failure;

	return nla_nest_end(msg, item);

nla_put_failure:
	nla_nest_cancel(msg, item);
	return -1;
}

static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_mqprio_qopt opt = { 0 };
	struct nlattr *nest, *entry_list;
	struct sched_entry *entry;
	unsigned int i;

	opt.num_tc = netdev_get_num_tc(dev);
	memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));

	for (i = 0; i < netdev_get_num_tc(dev); i++) {
		opt.count[i] = dev->tc_to_txq[i].count;
		opt.offset[i] = dev->tc_to_txq[i].offset;
	}

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (!nest)
		return -ENOSPC;

	if (nla_put(skb, TCA_TAPRIO_ATTR_PRIOMAP, sizeof(opt), &opt))
		goto options_error;

	if (nla_put_s64(skb, TCA_TAPRIO_ATTR_SCHED_BASE_TIME,
			q->base_time, TCA_TAPRIO_PAD))
		goto options_error;

	if (nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid))
		goto options_error;

	entry_list = nla_nest_start(skb, TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST);
	if (!entry_list)
		goto options_error;

	list_for_each_entry(entry, &q->entries, list) {
		if (dump_entry(skb, entry) < 0)
			goto options_error;
	}

	nla_nest_end(skb, entry_list);

	return nla_nest_end(skb, nest);

options_error:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	if (!dev_queue)
		return NULL;

	return dev_queue->qdisc_sleeping;
}

static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
{
	unsigned int ntx = TC_H_MIN(classid);

	if (!taprio_queue_get(sch, ntx))
		return 0;
	return ntx;
}

static int taprio_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = dev_queue->qdisc_sleeping->handle;

	return 0;
}

static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
	__releases(d->lock)
	__acquires(d->lock)
{
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	sch = dev_queue->qdisc_sleeping;
	if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
	    qdisc_qstats_copy(d, sch) < 0)
		return -1;
	return 0;
}

static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx;

	if (arg->stop)
		return;

	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static struct netdev_queue *taprio_select_queue(struct Qdisc *sch,
						struct tcmsg *tcm)
{
	return taprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}

static const struct Qdisc_class_ops taprio_class_ops = {
	.graft		= taprio_graft,
	.leaf		= taprio_leaf,
	.find		= taprio_find,
	.walk		= taprio_walk,
	.dump		= taprio_dump_class,
	.dump_stats	= taprio_dump_class_stats,
	.select_queue	= taprio_select_queue,
};

static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
	.cl_ops		= &taprio_class_ops,
	.id		= "taprio",
	.priv_size	= sizeof(struct taprio_sched),
	.init		= taprio_init,
	.destroy	= taprio_destroy,
	.peek		= taprio_peek,
	.dequeue	= taprio_dequeue,
	.enqueue	= taprio_enqueue,
	.dump		= taprio_dump,
	.owner		= THIS_MODULE,
};

static struct notifier_block taprio_device_notifier = {
	.notifier_call = taprio_dev_notifier,
};

static int __init taprio_module_init(void)
{
	int err = register_netdevice_notifier(&taprio_device_notifier);

	if (err)
		return err;

	return register_qdisc(&taprio_qdisc_ops);
}

static void __exit taprio_module_exit(void)
{
	unregister_qdisc(&taprio_qdisc_ops);
	unregister_netdevice_notifier(&taprio_device_notifier);
}

module_init(taprio_module_init);
module_exit(taprio_module_exit);
MODULE_LICENSE("GPL");