net/sched: fq_codel: Avoid set-but-unused variable
net/sched/sch_fq_codel.c
/*
 * Fair Queue CoDel discipline
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *  Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <net/codel_qdisc.h>

/*	Fair Queue CoDel.
 *
 * Principles :
 * Packets are classified (internal classifier or external) on flows.
 * This is a stochastic model (as we use a hash, several flows
 *			       might be hashed to the same slot)
 * Each flow has a CoDel managed queue.
 * Flows are linked onto two (Round Robin) lists,
 * so that new flows have priority over old ones.
 *
 * For a given flow, packets are not reordered (CoDel uses a FIFO);
 * head drops only.
 * ECN capability is on by default.
 * Low memory footprint (64 bytes per flow)
 */
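
/* Example userspace setup (iproute2 syntax, shown for illustration only;
 * each keyword maps onto one TCA_FQ_CODEL_* netlink attribute handled in
 * fq_codel_change() below):
 *
 *   tc qdisc add dev eth0 root fq_codel limit 10240 flows 1024 \
 *	quantum 1514 target 5ms interval 100ms ecn memory_limit 32mb
 */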

struct fq_codel_flow {
	struct sk_buff	  *head;
	struct sk_buff	  *tail;
	struct list_head  flowchain;
	int		  deficit;
	u32		  dropped; /* number of drops (or ECN marks) on this flow */
	struct codel_vars cvars;
}; /* please try to keep this structure <= 64 bytes */

struct fq_codel_sched_data {
	struct tcf_proto __rcu *filter_list; /* optional external classifier */
	struct fq_codel_flow *flows;	/* Flows table [flows_cnt] */
	u32		*backlogs;	/* backlog table [flows_cnt] */
	u32		flows_cnt;	/* number of flows */
	u32		quantum;	/* psched_mtu(qdisc_dev(sch)); */
	u32		drop_batch_size;
	u32		memory_limit;
	struct codel_params cparams;
	struct codel_stats cstats;
	u32		memory_usage;
	u32		drop_overmemory;
	u32		drop_overlimit;
	u32		new_flow_count;

	struct list_head new_flows;	/* list of new flows */
	struct list_head old_flows;	/* list of old flows */
};

static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
				  struct sk_buff *skb)
{
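	/* reciprocal_scale() maps the 32-bit flow hash into [0, flows_cnt)
	 * with a single multiply and shift, avoiding a division.
	 */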
	return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
}

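/* Returns a 1-based flow index, or 0 when the packet must be dropped
 * (out-of-range classification result, or TC_ACT_SHOT).
 * fq_codel_enqueue() converts the index back to 0-based.
 */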
static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tcf_proto *filter;
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->flows_cnt)
		return TC_H_MIN(skb->priority);

	filter = rcu_dereference_bh(q->filter_list);
	if (!filter)
		return fq_codel_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, filter, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->flows_cnt)
			return TC_H_MIN(res.classid);
	}
	return 0;
}

/* helper functions : might be changed when/if skbs use a standard list_head */

/* remove one skb from head of slot queue */
static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
{
	struct sk_buff *skb = flow->head;

	flow->head = skb->next;
	skb->next = NULL;
	return skb;
}

/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_codel_flow *flow,
				  struct sk_buff *skb)
{
	if (flow->head == NULL)
		flow->head = skb;
	else
		flow->tail->next = skb;
	flow->tail = skb;
	skb->next = NULL;
}

static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets,
				  struct sk_buff **to_free)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int maxbacklog = 0, idx = 0, i, len;
	struct fq_codel_flow *flow;
	unsigned int threshold;
	unsigned int mem = 0;

	/* Queue is full! Find the fat flow and drop packet(s) from it.
	 * This might sound expensive, but with 1024 flows, we scan
	 * 4KB of memory, and we don't need to handle a complex tree
	 * in fast path (packet queue/enqueue) with many cache misses.
	 * In stress mode, we'll try to drop 64 packets from the flow,
	 * amortizing this linear lookup to one cache line per drop.
	 */
	for (i = 0; i < q->flows_cnt; i++) {
		if (q->backlogs[i] > maxbacklog) {
			maxbacklog = q->backlogs[i];
			idx = i;
		}
	}

	/* Our goal is to drop half of this fat flow backlog */
	threshold = maxbacklog >> 1;

	flow = &q->flows[idx];
	len = 0;
	i = 0;
	do {
		skb = dequeue_head(flow);
		len += qdisc_pkt_len(skb);
		mem += get_codel_cb(skb)->mem_usage;
		__qdisc_drop(skb, to_free);
	} while (++i < max_packets && len < threshold);

	flow->dropped += i;
	q->backlogs[idx] -= len;
	q->memory_usage -= mem;
	sch->qstats.drops += i;
	sch->qstats.backlog -= len;
	sch->q.qlen -= i;
	return idx;
}

static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			    struct sk_buff **to_free)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int idx, prev_backlog, prev_qlen;
	struct fq_codel_flow *flow;
	int uninitialized_var(ret);
	unsigned int pkt_len;
	bool memory_limited;

	idx = fq_codel_classify(skb, sch, &ret);
	if (idx == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
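	/* fq_codel_classify() returned a 1-based index; make it 0-based */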
	idx--;

	codel_set_enqueue_time(skb);
	flow = &q->flows[idx];
	flow_queue_add(flow, skb);
	q->backlogs[idx] += qdisc_pkt_len(skb);
	qdisc_qstats_backlog_inc(sch, skb);

	if (list_empty(&flow->flowchain)) {
		list_add_tail(&flow->flowchain, &q->new_flows);
		q->new_flow_count++;
		flow->deficit = q->quantum;
		flow->dropped = 0;
	}
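	/* Charge skb->truesize against the qdisc-wide memory limit. The
	 * amount is stashed in the skb cb so that the same value can be
	 * uncharged at dequeue/drop time.
	 */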
	get_codel_cb(skb)->mem_usage = skb->truesize;
	q->memory_usage += get_codel_cb(skb)->mem_usage;
	memory_limited = q->memory_usage > q->memory_limit;
	if (++sch->q.qlen <= sch->limit && !memory_limited)
		return NET_XMIT_SUCCESS;

	prev_backlog = sch->qstats.backlog;
	prev_qlen = sch->q.qlen;

	/* save this packet length as it might be dropped by fq_codel_drop() */
	pkt_len = qdisc_pkt_len(skb);
	/* fq_codel_drop() is quite expensive, as it performs a linear search
	 * in q->backlogs[] to find a fat flow.
	 * So instead of dropping a single packet, drop half of its backlog
	 * with a 64 packets limit to avoid adding too big a cpu spike here.
	 */
	ret = fq_codel_drop(sch, q->drop_batch_size, to_free);

	prev_qlen -= sch->q.qlen;
	prev_backlog -= sch->qstats.backlog;
	q->drop_overlimit += prev_qlen;
	if (memory_limited)
		q->drop_overmemory += prev_qlen;

	/* As we dropped packet(s), better let upper stack know this.
	 * If we dropped a packet for this flow, return NET_XMIT_CN,
	 * but in this case, our parents won't increase their backlogs.
	 */
	if (ret == idx) {
		qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
					  prev_backlog - pkt_len);
		return NET_XMIT_CN;
	}
	qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
	return NET_XMIT_SUCCESS;
}

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
	struct Qdisc *sch = ctx;
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct fq_codel_flow *flow;
	struct sk_buff *skb = NULL;

	flow = container_of(vars, struct fq_codel_flow, cvars);
	if (flow->head) {
		skb = dequeue_head(flow);
		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
		q->memory_usage -= get_codel_cb(skb)->mem_usage;
		sch->q.qlen--;
		sch->qstats.backlog -= qdisc_pkt_len(skb);
	}
	return skb;
}

static void drop_func(struct sk_buff *skb, void *ctx)
{
	struct Qdisc *sch = ctx;

	kfree_skb(skb);
	qdisc_qstats_drop(sch);
}

static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct fq_codel_flow *flow;
	struct list_head *head;
	u32 prev_drop_count, prev_ecn_mark;

begin:
	head = &q->new_flows;
	if (list_empty(head)) {
		head = &q->old_flows;
		if (list_empty(head))
			return NULL;
	}
	flow = list_first_entry(head, struct fq_codel_flow, flowchain);

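	/* Deficit Round Robin: a flow that has exhausted its deficit is
	 * given one more quantum and rotated to the tail of old_flows
	 * before we retry.
	 */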
	if (flow->deficit <= 0) {
		flow->deficit += q->quantum;
		list_move_tail(&flow->flowchain, &q->old_flows);
		goto begin;
	}

	prev_drop_count = q->cstats.drop_count;
	prev_ecn_mark = q->cstats.ecn_mark;

	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
			    &flow->cvars, &q->cstats, qdisc_pkt_len,
			    codel_get_enqueue_time, drop_func, dequeue_func);

	flow->dropped += q->cstats.drop_count - prev_drop_count;
	flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;

	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if ((head == &q->new_flows) && !list_empty(&q->old_flows))
			list_move_tail(&flow->flowchain, &q->old_flows);
		else
			list_del_init(&flow->flowchain);
		goto begin;
	}
	qdisc_bstats_update(sch, skb);
	flow->deficit -= qdisc_pkt_len(skb);
	/* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
	 * or HTB crashes. Defer it for next round.
	 */
	if (q->cstats.drop_count && sch->q.qlen) {
		qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
					  q->cstats.drop_len);
		q->cstats.drop_count = 0;
		q->cstats.drop_len = 0;
	}
	return skb;
}

static void fq_codel_flow_purge(struct fq_codel_flow *flow)
{
	rtnl_kfree_skbs(flow->head, flow->tail);
	flow->head = NULL;
}

static void fq_codel_reset(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;

	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	for (i = 0; i < q->flows_cnt; i++) {
		struct fq_codel_flow *flow = q->flows + i;

		fq_codel_flow_purge(flow);
		INIT_LIST_HEAD(&flow->flowchain);
		codel_vars_init(&flow->cvars);
	}
	memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
	sch->q.qlen = 0;
	sch->qstats.backlog = 0;
	q->memory_usage = 0;
}

static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
	[TCA_FQ_CODEL_TARGET]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_LIMIT]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_INTERVAL]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_ECN]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_FLOWS]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_QUANTUM]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
	[TCA_FQ_CODEL_DROP_BATCH_SIZE] = { .type = NLA_U32 },
	[TCA_FQ_CODEL_MEMORY_LIMIT] = { .type = NLA_U32 },
};

static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy);
	if (err < 0)
		return err;
	if (tb[TCA_FQ_CODEL_FLOWS]) {
		if (q->flows)
			return -EINVAL;
		q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
		if (!q->flows_cnt ||
		    q->flows_cnt > 65536)
			return -EINVAL;
	}
	sch_tree_lock(sch);

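	/* Netlink attributes carry microseconds; codel_time_t uses
	 * (ns >> CODEL_SHIFT) units, hence the conversions below.
	 */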
	if (tb[TCA_FQ_CODEL_TARGET]) {
		u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);

		q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) {
		u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]);

		q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_INTERVAL]) {
		u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);

		q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);

	if (tb[TCA_FQ_CODEL_ECN])
		q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);

	if (tb[TCA_FQ_CODEL_QUANTUM])
		q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));

	if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
		q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));

	if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
		q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));

	while (sch->q.qlen > sch->limit ||
	       q->memory_usage > q->memory_limit) {
		struct sk_buff *skb = fq_codel_dequeue(sch);

		q->cstats.drop_len += qdisc_pkt_len(skb);
		rtnl_kfree_skbs(skb, skb);
		q->cstats.drop_count++;
	}
	qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
	q->cstats.drop_count = 0;
	q->cstats.drop_len = 0;

	sch_tree_unlock(sch);
	return 0;
}

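/* The flow table can be large (up to 65536 entries); try kzalloc() first,
 * without an allocation failure warning, and fall back to vzalloc().
 */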
static void *fq_codel_zalloc(size_t sz)
{
	void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);

	if (!ptr)
		ptr = vzalloc(sz);
	return ptr;
}

static void fq_codel_free(void *addr)
{
	kvfree(addr);
}

static void fq_codel_destroy(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	fq_codel_free(q->backlogs);
	fq_codel_free(q->flows);
}

static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;

	sch->limit = 10*1024;
	q->flows_cnt = 1024;
	q->memory_limit = 32 << 20; /* 32 MBytes */
	q->drop_batch_size = 64;
	q->quantum = psched_mtu(qdisc_dev(sch));
	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	codel_params_init(&q->cparams);
	codel_stats_init(&q->cstats);
	q->cparams.ecn = true;
	q->cparams.mtu = psched_mtu(qdisc_dev(sch));

	if (opt) {
		int err = fq_codel_change(sch, opt);

		if (err)
			return err;
	}

	if (!q->flows) {
		q->flows = fq_codel_zalloc(q->flows_cnt *
					   sizeof(struct fq_codel_flow));
		if (!q->flows)
			return -ENOMEM;
		q->backlogs = fq_codel_zalloc(q->flows_cnt * sizeof(u32));
		if (!q->backlogs) {
			fq_codel_free(q->flows);
			return -ENOMEM;
		}
		for (i = 0; i < q->flows_cnt; i++) {
			struct fq_codel_flow *flow = q->flows + i;

			INIT_LIST_HEAD(&flow->flowchain);
			codel_vars_init(&flow->cvars);
		}
	}
	if (sch->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
			codel_time_to_us(q->cparams.target)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
			sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
			codel_time_to_us(q->cparams.interval)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_ECN,
			q->cparams.ecn) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
			q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_DROP_BATCH_SIZE,
			q->drop_batch_size) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_MEMORY_LIMIT,
			q->memory_limit) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
			q->flows_cnt))
		goto nla_put_failure;

	if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD &&
	    nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
			codel_time_to_us(q->cparams.ce_threshold)))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}

static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tc_fq_codel_xstats st = {
		.type				= TCA_FQ_CODEL_XSTATS_QDISC,
	};
	struct list_head *pos;

	st.qdisc_stats.maxpacket = q->cstats.maxpacket;
	st.qdisc_stats.drop_overlimit = q->drop_overlimit;
	st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
	st.qdisc_stats.new_flow_count = q->new_flow_count;
	st.qdisc_stats.ce_mark = q->cstats.ce_mark;
	st.qdisc_stats.memory_usage = q->memory_usage;
	st.qdisc_stats.drop_overmemory = q->drop_overmemory;

	sch_tree_lock(sch);
	list_for_each(pos, &q->new_flows)
		st.qdisc_stats.new_flows_len++;

	list_for_each(pos, &q->old_flows)
		st.qdisc_stats.old_flows_len++;
	sch_tree_unlock(sch);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long fq_codel_get(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
				   u32 classid)
{
	/* we cannot bypass queue discipline anymore */
	sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static void fq_codel_put(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_proto __rcu **fq_codel_find_tcf(struct Qdisc *sch,
						  unsigned long cl)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
			       struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				     struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	u32 idx = cl - 1;
	struct gnet_stats_queue qs = { 0 };
	struct tc_fq_codel_xstats xstats;

	if (idx < q->flows_cnt) {
		const struct fq_codel_flow *flow = &q->flows[idx];
		const struct sk_buff *skb;

		memset(&xstats, 0, sizeof(xstats));
		xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
		xstats.class_stats.deficit = flow->deficit;
		xstats.class_stats.ldelay =
			codel_time_to_us(flow->cvars.ldelay);
		xstats.class_stats.count = flow->cvars.count;
		xstats.class_stats.lastcount = flow->cvars.lastcount;
		xstats.class_stats.dropping = flow->cvars.dropping;
		if (flow->cvars.dropping) {
			codel_tdiff_t delta = flow->cvars.drop_next -
					      codel_get_time();

			xstats.class_stats.drop_next = (delta >= 0) ?
				codel_time_to_us(delta) :
				-codel_time_to_us(-delta);
		}
		if (flow->head) {
			sch_tree_lock(sch);
			skb = flow->head;
			while (skb) {
				qs.qlen++;
				skb = skb->next;
			}
			sch_tree_unlock(sch);
		}
		qs.backlog = q->backlogs[idx];
		qs.drops = flow->dropped;
	}
	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
		return -1;
	if (idx < q->flows_cnt)
		return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
	return 0;
}

static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->flows_cnt; i++) {
		if (list_empty(&q->flows[i].flowchain) ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops fq_codel_class_ops = {
	.leaf		=	fq_codel_leaf,
	.get		=	fq_codel_get,
	.put		=	fq_codel_put,
	.tcf_chain	=	fq_codel_find_tcf,
	.bind_tcf	=	fq_codel_bind,
	.unbind_tcf	=	fq_codel_put,
	.dump		=	fq_codel_dump_class,
	.dump_stats	=	fq_codel_dump_class_stats,
	.walk		=	fq_codel_walk,
};

static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
	.cl_ops		=	&fq_codel_class_ops,
	.id		=	"fq_codel",
	.priv_size	=	sizeof(struct fq_codel_sched_data),
	.enqueue	=	fq_codel_enqueue,
	.dequeue	=	fq_codel_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	fq_codel_init,
	.reset		=	fq_codel_reset,
	.destroy	=	fq_codel_destroy,
	.change		=	fq_codel_change,
	.dump		=	fq_codel_dump,
	.dump_stats	=	fq_codel_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init fq_codel_module_init(void)
{
	return register_qdisc(&fq_codel_qdisc_ops);
}

static void __exit fq_codel_module_exit(void)
{
	unregister_qdisc(&fq_codel_qdisc_ops);
}

module_init(fq_codel_module_init)
module_exit(fq_codel_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");