// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause

/* COMMON Applications Kept Enhanced (CAKE) discipline
 *
 * Copyright (C) 2014-2018 Jonathan Morton <chromatix99@gmail.com>
 * Copyright (C) 2015-2018 Toke Høiland-Jørgensen <toke@toke.dk>
 * Copyright (C) 2014-2018 Dave Täht <dave.taht@gmail.com>
 * Copyright (C) 2015-2018 Sebastian Moeller <moeller0@gmx.de>
 * (C) 2015-2018 Kevin Darbyshire-Bryant <kevin@darbyshire-bryant.me.uk>
 * Copyright (C) 2017-2018 Ryan Mounce <ryan@mounce.com.au>
 *
 * The CAKE Principles:
 *                 (or, how to have your cake and eat it too)
 *
 * This is a combination of several shaping, AQM and FQ techniques into one
 * easy-to-use package:
 *
 * - An overall bandwidth shaper, to move the bottleneck away from dumb CPE
 *   equipment and bloated MACs.  This operates in deficit mode (as in sch_fq),
 *   eliminating the need for any sort of burst parameter (eg. token bucket
 *   depth).  Burst support is limited to that necessary to overcome scheduling
 *   latency.
 *
 * - A Diffserv-aware priority queue, giving more priority to certain classes,
 *   up to a specified fraction of bandwidth.  Above that bandwidth threshold,
 *   the priority is reduced to avoid starving other tins.
 *
 * - Each priority tin has a separate Flow Queue system, to isolate traffic
 *   flows from each other.  This prevents a burst on one flow from increasing
 *   the delay to another.  Flows are distributed to queues using a
 *   set-associative hash function.
 *
 * - Each queue is actively managed by Cobalt, which is a combination of the
 *   Codel and Blue AQM algorithms.  This serves flows fairly, and signals
 *   congestion early via ECN (if available) and/or packet drops, to keep
 *   latency low.  The codel parameters are auto-tuned based on the bandwidth
 *   setting, as is necessary at low bandwidths.
 *
 * The configuration parameters are kept deliberately simple for ease of use.
 * Everything has sane defaults.  Complete generality of configuration is *not*
 * a goal.
 *
 * The priority queue operates according to a weighted DRR scheme, combined with
 * a bandwidth tracker which reuses the shaper logic to detect which side of the
 * bandwidth sharing threshold the tin is operating on.  This determines whether
 * a priority-based weight (high) or a bandwidth-based weight (low) is used for
 * that tin in the current pass.
 *
 * This qdisc was inspired by Eric Dumazet's fq_codel code, which he kindly
 * granted us permission to leverage.
 */
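
/* Illustrative note (not part of the original file): from userspace this
 * qdisc is typically configured through the tc-cake(8) frontend, e.g.
 *
 *   tc qdisc replace dev eth0 root cake bandwidth 20Mbit diffserv3 nat
 *
 * which enables the deficit-mode shaper at 20 Mbit/s, the three-tin
 * Diffserv mapping and NAT-aware flow hashing described above.
 */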

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/reciprocal_div.h>
#include <net/netlink.h>
#include <linux/if_vlan.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tcp.h>
#include <net/flow_dissector.h>

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack_core.h>
#endif

#define CAKE_SET_WAYS (8)
#define CAKE_MAX_TINS (8)
#define CAKE_QUEUES (1024)
#define CAKE_FLOW_MASK 63
#define CAKE_FLOW_NAT_FLAG 64
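
/* Worked example (annotation, not in the original source): with 1024 queues
 * probed in 8 ways, the set-associative hash in cake_hash() below resolves
 * to 128 sets of 8 consecutive queues.  A reduced hash of 261 gives
 * inner_hash = 261 % 8 = 5 and outer_hash = 256, so the probed set is
 * queues 256..263.
 */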

/* struct cobalt_params - contains codel and blue parameters
 * @interval:   codel initial drop rate
 * @target:     maximum persistent sojourn time & blue update rate
 * @mtu_time:   serialisation delay of maximum-size packet
 * @p_inc:      increment of blue drop probability (0.32 fxp)
 * @p_dec:      decrement of blue drop probability (0.32 fxp)
 */
struct cobalt_params {
        u64     interval;
        u64     target;
        u64     mtu_time;
        u32     p_inc;
        u32     p_dec;
};

/* struct cobalt_vars - contains codel and blue variables
 * @count:              codel dropping frequency
 * @rec_inv_sqrt:       reciprocal value of sqrt(count) >> 1
 * @drop_next:          time to drop next packet, or when we dropped last
 * @blue_timer:         Blue time to next drop
 * @p_drop:             BLUE drop probability (0.32 fxp)
 * @dropping:           set if in dropping state
 * @ecn_marked:         set if marked
 */
struct cobalt_vars {
        u32     count;
        u32     rec_inv_sqrt;
        ktime_t drop_next;
        ktime_t blue_timer;
        u32     p_drop;
        bool    dropping;
        bool    ecn_marked;
};

enum {
        CAKE_SET_NONE = 0,
        CAKE_SET_SPARSE,
        CAKE_SET_SPARSE_WAIT, /* counted in SPARSE, actually in BULK */
        CAKE_SET_BULK,
        CAKE_SET_DECAYING
};

struct cake_flow {
        /* this stuff is all needed per-flow at dequeue time */
        struct sk_buff    *head;
        struct sk_buff    *tail;
        struct list_head  flowchain;
        s32               deficit;
        u32               dropped;
        struct cobalt_vars cvars;
        u16               srchost; /* index into cake_host table */
        u16               dsthost;
        u8                set;
}; /* please try to keep this structure <= 64 bytes */

struct cake_host {
        u32 srchost_tag;
        u32 dsthost_tag;
        u16 srchost_bulk_flow_count;
        u16 dsthost_bulk_flow_count;
};

struct cake_heap_entry {
        u16 t:3, b:10;
};

struct cake_tin_data {
        struct cake_flow flows[CAKE_QUEUES];
        u32     backlogs[CAKE_QUEUES];
        u32     tags[CAKE_QUEUES]; /* for set association */
        u16     overflow_idx[CAKE_QUEUES];
        struct cake_host hosts[CAKE_QUEUES]; /* for triple isolation */
        u16     flow_quantum;

        struct cobalt_params cparams;
        u32     drop_overlimit;
        u16     bulk_flow_count;
        u16     sparse_flow_count;
        u16     decaying_flow_count;
        u16     unresponsive_flow_count;

        u32     max_skblen;

        struct list_head new_flows;
        struct list_head old_flows;
        struct list_head decaying_flows;

        /* time_next = time_this + ((len * rate_ns) >> rate_shft) */
        ktime_t time_next_packet;
        u64     tin_rate_ns;
        u64     tin_rate_bps;
        u16     tin_rate_shft;

        u16     tin_quantum_prio;
        u16     tin_quantum_band;
        s32     tin_deficit;
        u32     tin_backlog;
        u32     tin_dropped;
        u32     tin_ecn_mark;

        u32     packets;
        u64     bytes;

        u32     ack_drops;

        /* moving averages */
        u64 avge_delay;
        u64 peak_delay;
        u64 base_delay;

        /* hash function stats */
        u32     way_directs;
        u32     way_hits;
        u32     way_misses;
        u32     way_collisions;
}; /* number of tins is small, so size of this struct doesn't matter much */

struct cake_sched_data {
        struct tcf_proto __rcu *filter_list; /* optional external classifier */
        struct tcf_block *block;
        struct cake_tin_data *tins;

        struct cake_heap_entry overflow_heap[CAKE_QUEUES * CAKE_MAX_TINS];
        u16             overflow_timeout;

        u16             tin_cnt;
        u8              tin_mode;
        u8              flow_mode;
        u8              ack_filter;
        u8              atm_mode;

        u32             fwmark_mask;
        u16             fwmark_shft;

        /* time_next = time_this + ((len * rate_ns) >> rate_shft) */
        u16             rate_shft;
        ktime_t         time_next_packet;
        ktime_t         failsafe_next_packet;
        u64             rate_ns;
        u64             rate_bps;
        u16             rate_flags;
        s16             rate_overhead;
        u16             rate_mpu;
        u64             interval;
        u64             target;

        /* resource tracking */
        u32             buffer_used;
        u32             buffer_max_used;
        u32             buffer_limit;
        u32             buffer_config_limit;

        /* indices for dequeue */
        u16             cur_tin;
        u16             cur_flow;

        struct qdisc_watchdog watchdog;
        const u8        *tin_index;
        const u8        *tin_order;

        /* bandwidth capacity estimate */
        ktime_t         last_packet_time;
        ktime_t         avg_window_begin;
        u64             avg_packet_interval;
        u64             avg_window_bytes;
        u64             avg_peak_bandwidth;
        ktime_t         last_reconfig_time;

        /* packet length stats */
        u32             avg_netoff;
        u16             max_netlen;
        u16             max_adjlen;
        u16             min_netlen;
        u16             min_adjlen;
};

enum {
        CAKE_FLAG_OVERHEAD         = BIT(0),
        CAKE_FLAG_AUTORATE_INGRESS = BIT(1),
        CAKE_FLAG_INGRESS          = BIT(2),
        CAKE_FLAG_WASH             = BIT(3),
        CAKE_FLAG_SPLIT_GSO        = BIT(4)
};

/* COBALT operates the Codel and BLUE algorithms in parallel, in order to
 * obtain the best features of each.  Codel is excellent on flows which
 * respond to congestion signals in a TCP-like way.  BLUE is more effective on
 * unresponsive flows.
 */

struct cobalt_skb_cb {
        ktime_t enqueue_time;
        u32     adjusted_len;
};

static u64 us_to_ns(u64 us)
{
        return us * NSEC_PER_USEC;
}

static struct cobalt_skb_cb *get_cobalt_cb(const struct sk_buff *skb)
{
        qdisc_cb_private_validate(skb, sizeof(struct cobalt_skb_cb));
        return (struct cobalt_skb_cb *)qdisc_skb_cb(skb)->data;
}

static ktime_t cobalt_get_enqueue_time(const struct sk_buff *skb)
{
        return get_cobalt_cb(skb)->enqueue_time;
}

static void cobalt_set_enqueue_time(struct sk_buff *skb,
                                    ktime_t now)
{
        get_cobalt_cb(skb)->enqueue_time = now;
}

static u16 quantum_div[CAKE_QUEUES + 1] = {0};

/* Diffserv lookup tables */

static const u8 precedence[] = {
        0, 0, 0, 0, 0, 0, 0, 0,
        1, 1, 1, 1, 1, 1, 1, 1,
        2, 2, 2, 2, 2, 2, 2, 2,
        3, 3, 3, 3, 3, 3, 3, 3,
        4, 4, 4, 4, 4, 4, 4, 4,
        5, 5, 5, 5, 5, 5, 5, 5,
        6, 6, 6, 6, 6, 6, 6, 6,
        7, 7, 7, 7, 7, 7, 7, 7,
};

static const u8 diffserv8[] = {
        2, 5, 1, 2, 4, 2, 2, 2,
        0, 2, 1, 2, 1, 2, 1, 2,
        5, 2, 4, 2, 4, 2, 4, 2,
        3, 2, 3, 2, 3, 2, 3, 2,
        6, 2, 3, 2, 3, 2, 3, 2,
        6, 2, 2, 2, 6, 2, 6, 2,
        7, 2, 2, 2, 2, 2, 2, 2,
        7, 2, 2, 2, 2, 2, 2, 2,
};

static const u8 diffserv4[] = {
        0, 2, 0, 0, 2, 0, 0, 0,
        1, 0, 0, 0, 0, 0, 0, 0,
        2, 0, 2, 0, 2, 0, 2, 0,
        2, 0, 2, 0, 2, 0, 2, 0,
        3, 0, 2, 0, 2, 0, 2, 0,
        3, 0, 0, 0, 3, 0, 3, 0,
        3, 0, 0, 0, 0, 0, 0, 0,
        3, 0, 0, 0, 0, 0, 0, 0,
};

static const u8 diffserv3[] = {
        0, 0, 0, 0, 2, 0, 0, 0,
        1, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 2, 0, 2, 0,
        2, 0, 0, 0, 0, 0, 0, 0,
        2, 0, 0, 0, 0, 0, 0, 0,
};
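
/* Annotation (not in the original source): the tables above are indexed by
 * DSCP.  In diffserv3, for example, CS1 (DSCP 8) maps to tin 1 (bulk);
 * VA (44), EF (46), CS6 (48) and CS7 (56) map to tin 2 (voice); and
 * everything else falls into tin 0 (best effort).
 */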

static const u8 besteffort[] = {
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
};

/* tin priority order for stats dumping */

static const u8 normal_order[] = {0, 1, 2, 3, 4, 5, 6, 7};
static const u8 bulk_order[] = {1, 0, 2, 3};

#define REC_INV_SQRT_CACHE (16)
static u32 cobalt_rec_inv_sqrt_cache[REC_INV_SQRT_CACHE] = {0};

/* http://en.wikipedia.org/wiki/Methods_of_computing_square_roots
 * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2)
 *
 * Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32
 */

static void cobalt_newton_step(struct cobalt_vars *vars)
{
        u32 invsqrt, invsqrt2;
        u64 val;

        invsqrt = vars->rec_inv_sqrt;
        invsqrt2 = ((u64)invsqrt * invsqrt) >> 32;
        val = (3LL << 32) - ((u64)vars->count * invsqrt2);

        val >>= 2; /* avoid overflow in following multiply */
        val = (val * invsqrt) >> (32 - 2 + 1);

        vars->rec_inv_sqrt = val;
}
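
/* Sanity check (annotation, not in the original source): for count = 4 the
 * true value is 1/sqrt(4) = 0.5, i.e. 0x80000000 in Q0.32.  One step from
 * there computes (0.5 / 2) * (3 - 4 * 0.25) = 0.25 * 2 = 0.5 again, so it
 * is a fixed point of the update rule quoted above.
 */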

static void cobalt_invsqrt(struct cobalt_vars *vars)
{
        if (vars->count < REC_INV_SQRT_CACHE)
                vars->rec_inv_sqrt = cobalt_rec_inv_sqrt_cache[vars->count];
        else
                cobalt_newton_step(vars);
}

/* There is a big difference in timing between the accurate values placed in
 * the cache and the approximations given by a single Newton step for small
 * count values, particularly when stepping from count 1 to 2 or vice versa.
 * Above 16, a single Newton step gives sufficient accuracy in either
 * direction, given the precision stored.
 *
 * The magnitude of the error when stepping up to count 2 is such as to give
 * the value that *should* have been produced at count 4.
 */

static void cobalt_cache_init(void)
{
        struct cobalt_vars v;

        memset(&v, 0, sizeof(v));
        v.rec_inv_sqrt = ~0U;
        cobalt_rec_inv_sqrt_cache[0] = v.rec_inv_sqrt;

        for (v.count = 1; v.count < REC_INV_SQRT_CACHE; v.count++) {
                cobalt_newton_step(&v);
                cobalt_newton_step(&v);
                cobalt_newton_step(&v);
                cobalt_newton_step(&v);

                cobalt_rec_inv_sqrt_cache[v.count] = v.rec_inv_sqrt;
        }
}

static void cobalt_vars_init(struct cobalt_vars *vars)
{
        memset(vars, 0, sizeof(*vars));

        if (!cobalt_rec_inv_sqrt_cache[0]) {
                cobalt_cache_init();
                cobalt_rec_inv_sqrt_cache[0] = ~0;
        }
}

/* CoDel control_law is t + interval/sqrt(count)
 * We maintain in rec_inv_sqrt the reciprocal value of sqrt(count) to avoid
 * both sqrt() and divide operation.
 */
static ktime_t cobalt_control(ktime_t t,
                              u64 interval,
                              u32 rec_inv_sqrt)
{
        return ktime_add_ns(t, reciprocal_scale(interval,
                                                rec_inv_sqrt));
}
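
/* Worked example (annotation, not in the original source): with count = 4,
 * rec_inv_sqrt is ~0x80000000 (0.5 in Q0.32), so reciprocal_scale(interval,
 * rec_inv_sqrt) = (interval * rec_inv_sqrt) >> 32 = interval / 2, exactly
 * interval / sqrt(4) as the control law requires.
 */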

/* Call this when a packet had to be dropped due to queue overflow.  Returns
 * true if the BLUE state was quiescent before but active after this call.
 */
static bool cobalt_queue_full(struct cobalt_vars *vars,
                              struct cobalt_params *p,
                              ktime_t now)
{
        bool up = false;

        if (ktime_to_ns(ktime_sub(now, vars->blue_timer)) > p->target) {
                up = !vars->p_drop;
                vars->p_drop += p->p_inc;
                if (vars->p_drop < p->p_inc)
                        vars->p_drop = ~0;
                vars->blue_timer = now;
        }
        vars->dropping = true;
        vars->drop_next = now;
        if (!vars->count)
                vars->count = 1;

        return up;
}

/* Call this when the queue was serviced but turned out to be empty.  Returns
 * true if the BLUE state was active before but quiescent after this call.
 */
static bool cobalt_queue_empty(struct cobalt_vars *vars,
                               struct cobalt_params *p,
                               ktime_t now)
{
        bool down = false;

        if (vars->p_drop &&
            ktime_to_ns(ktime_sub(now, vars->blue_timer)) > p->target) {
                if (vars->p_drop < p->p_dec)
                        vars->p_drop = 0;
                else
                        vars->p_drop -= p->p_dec;
                vars->blue_timer = now;
                down = !vars->p_drop;
        }
        vars->dropping = false;

        if (vars->count && ktime_to_ns(ktime_sub(now, vars->drop_next)) >= 0) {
                vars->count--;
                cobalt_invsqrt(vars);
                vars->drop_next = cobalt_control(vars->drop_next,
                                                 p->interval,
                                                 vars->rec_inv_sqrt);
        }

        return down;
}

/* Call this with a freshly dequeued packet for possible congestion marking.
 * Returns true as an instruction to drop the packet, false for delivery.
 */
static bool cobalt_should_drop(struct cobalt_vars *vars,
                               struct cobalt_params *p,
                               ktime_t now,
                               struct sk_buff *skb,
                               u32 bulk_flows)
{
        bool next_due, over_target, drop = false;
        ktime_t schedule;
        u64 sojourn;

/* The 'schedule' variable records, in its sign, whether 'now' is before or
 * after 'drop_next'.  This allows 'drop_next' to be updated before the next
 * scheduling decision is actually branched, without destroying that
 * information.  Similarly, the first 'schedule' value calculated is preserved
 * in the boolean 'next_due'.
 *
 * As for 'drop_next', we take advantage of the fact that 'interval' is both
 * the delay between first exceeding 'target' and the first signalling event,
 * *and* the scaling factor for the signalling frequency.  It's therefore very
 * natural to use a single mechanism for both purposes, and eliminates a
 * significant amount of reference Codel's spaghetti code.  To help with this,
 * both the '0' and '1' entries in the invsqrt cache are 0xFFFFFFFF, as close
 * as possible to 1.0 in fixed-point.
 */

        sojourn = ktime_to_ns(ktime_sub(now, cobalt_get_enqueue_time(skb)));
        schedule = ktime_sub(now, vars->drop_next);
        over_target = sojourn > p->target &&
                      sojourn > p->mtu_time * bulk_flows * 2 &&
                      sojourn > p->mtu_time * 4;
        next_due = vars->count && ktime_to_ns(schedule) >= 0;

        vars->ecn_marked = false;

        if (over_target) {
                if (!vars->dropping) {
                        vars->dropping = true;
                        vars->drop_next = cobalt_control(now,
                                                         p->interval,
                                                         vars->rec_inv_sqrt);
                }
                if (!vars->count)
                        vars->count = 1;
        } else if (vars->dropping) {
                vars->dropping = false;
        }

        if (next_due && vars->dropping) {
                /* Use ECN mark if possible, otherwise drop */
                drop = !(vars->ecn_marked = INET_ECN_set_ce(skb));

                vars->count++;
                if (!vars->count)
                        vars->count--;
                cobalt_invsqrt(vars);
                vars->drop_next = cobalt_control(vars->drop_next,
                                                 p->interval,
                                                 vars->rec_inv_sqrt);
                schedule = ktime_sub(now, vars->drop_next);
        } else {
                while (next_due) {
                        vars->count--;
                        cobalt_invsqrt(vars);
                        vars->drop_next = cobalt_control(vars->drop_next,
                                                         p->interval,
                                                         vars->rec_inv_sqrt);
                        schedule = ktime_sub(now, vars->drop_next);
                        next_due = vars->count && ktime_to_ns(schedule) >= 0;
                }
        }

        /* Simple BLUE implementation.  Lack of ECN is deliberate. */
        if (vars->p_drop)
                drop |= (prandom_u32() < vars->p_drop);

        /* Overload the drop_next field as an activity timeout */
        if (!vars->count)
                vars->drop_next = ktime_add_ns(now, p->interval);
        else if (ktime_to_ns(schedule) > 0 && !drop)
                vars->drop_next = now;

        return drop;
}

static void cake_update_flowkeys(struct flow_keys *keys,
                                 const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
        struct nf_conntrack_tuple tuple = {};
        bool rev = !skb->_nfct;

        if (tc_skb_protocol(skb) != htons(ETH_P_IP))
                return;

        if (!nf_ct_get_tuple_skb(&tuple, skb))
                return;

        keys->addrs.v4addrs.src = rev ? tuple.dst.u3.ip : tuple.src.u3.ip;
        keys->addrs.v4addrs.dst = rev ? tuple.src.u3.ip : tuple.dst.u3.ip;

        if (keys->ports.ports) {
                keys->ports.src = rev ? tuple.dst.u.all : tuple.src.u.all;
                keys->ports.dst = rev ? tuple.src.u.all : tuple.dst.u.all;
        }
#endif
}

/* Cake has several subtle multi-bit settings. In these cases you
 * would be matching triple isolate mode as well.
 */

static bool cake_dsrc(int flow_mode)
{
        return (flow_mode & CAKE_FLOW_DUAL_SRC) == CAKE_FLOW_DUAL_SRC;
}

static bool cake_ddst(int flow_mode)
{
        return (flow_mode & CAKE_FLOW_DUAL_DST) == CAKE_FLOW_DUAL_DST;
}

static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
                     int flow_mode, u16 flow_override, u16 host_override)
{
        u32 flow_hash = 0, srchost_hash = 0, dsthost_hash = 0;
        u16 reduced_hash, srchost_idx, dsthost_idx;
        struct flow_keys keys, host_keys;

        if (unlikely(flow_mode == CAKE_FLOW_NONE))
                return 0;

        /* If both overrides are set we can skip packet dissection entirely */
        if ((flow_override || !(flow_mode & CAKE_FLOW_FLOWS)) &&
            (host_override || !(flow_mode & CAKE_FLOW_HOSTS)))
                goto skip_hash;

        skb_flow_dissect_flow_keys(skb, &keys,
                                   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);

        if (flow_mode & CAKE_FLOW_NAT_FLAG)
                cake_update_flowkeys(&keys, skb);

        /* flow_hash_from_keys() sorts the addresses by value, so we have
         * to preserve their order in a separate data structure to treat
         * src and dst host addresses as independently selectable.
         */
        host_keys = keys;
        host_keys.ports.ports     = 0;
        host_keys.basic.ip_proto  = 0;
        host_keys.keyid.keyid     = 0;
        host_keys.tags.flow_label = 0;

        switch (host_keys.control.addr_type) {
        case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
                host_keys.addrs.v4addrs.src = 0;
                dsthost_hash = flow_hash_from_keys(&host_keys);
                host_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
                host_keys.addrs.v4addrs.dst = 0;
                srchost_hash = flow_hash_from_keys(&host_keys);
                break;

        case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
                memset(&host_keys.addrs.v6addrs.src, 0,
                       sizeof(host_keys.addrs.v6addrs.src));
                dsthost_hash = flow_hash_from_keys(&host_keys);
                host_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
                memset(&host_keys.addrs.v6addrs.dst, 0,
                       sizeof(host_keys.addrs.v6addrs.dst));
                srchost_hash = flow_hash_from_keys(&host_keys);
                break;

        default:
                dsthost_hash = 0;
                srchost_hash = 0;
        }

        /* This *must* be after the above switch, since as a
         * side-effect it sorts the src and dst addresses.
         */
        if (flow_mode & CAKE_FLOW_FLOWS)
                flow_hash = flow_hash_from_keys(&keys);

skip_hash:
        if (flow_override)
                flow_hash = flow_override - 1;
        if (host_override) {
                dsthost_hash = host_override - 1;
                srchost_hash = host_override - 1;
        }

        if (!(flow_mode & CAKE_FLOW_FLOWS)) {
                if (flow_mode & CAKE_FLOW_SRC_IP)
                        flow_hash ^= srchost_hash;

                if (flow_mode & CAKE_FLOW_DST_IP)
                        flow_hash ^= dsthost_hash;
        }

        reduced_hash = flow_hash % CAKE_QUEUES;

        /* set-associative hashing */
        /* fast path if no hash collision (direct lookup succeeds) */
        if (likely(q->tags[reduced_hash] == flow_hash &&
                   q->flows[reduced_hash].set)) {
                q->way_directs++;
        } else {
                u32 inner_hash = reduced_hash % CAKE_SET_WAYS;
                u32 outer_hash = reduced_hash - inner_hash;
                bool allocate_src = false;
                bool allocate_dst = false;
                u32 i, k;

                /* check if any active queue in the set is reserved for
                 * this flow.
                 */
                for (i = 0, k = inner_hash; i < CAKE_SET_WAYS;
                     i++, k = (k + 1) % CAKE_SET_WAYS) {
                        if (q->tags[outer_hash + k] == flow_hash) {
                                if (i)
                                        q->way_hits++;

                                if (!q->flows[outer_hash + k].set) {
                                        /* need to increment host refcnts */
                                        allocate_src = cake_dsrc(flow_mode);
                                        allocate_dst = cake_ddst(flow_mode);
                                }

                                goto found;
                        }
                }

                /* no queue is reserved for this flow, look for an
                 * empty one.
                 */
                for (i = 0; i < CAKE_SET_WAYS;
                         i++, k = (k + 1) % CAKE_SET_WAYS) {
                        if (!q->flows[outer_hash + k].set) {
                                q->way_misses++;
                                allocate_src = cake_dsrc(flow_mode);
                                allocate_dst = cake_ddst(flow_mode);
                                goto found;
                        }
                }

                /* With no empty queues, default to the original
                 * queue, accept the collision, update the host tags.
                 */
                q->way_collisions++;
                if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {
                        q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;
                        q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;
                }
                allocate_src = cake_dsrc(flow_mode);
                allocate_dst = cake_ddst(flow_mode);
found:
                /* reserve queue for future packets in same flow */
                reduced_hash = outer_hash + k;
                q->tags[reduced_hash] = flow_hash;

                if (allocate_src) {
                        srchost_idx = srchost_hash % CAKE_QUEUES;
                        inner_hash = srchost_idx % CAKE_SET_WAYS;
                        outer_hash = srchost_idx - inner_hash;
                        for (i = 0, k = inner_hash; i < CAKE_SET_WAYS;
                                i++, k = (k + 1) % CAKE_SET_WAYS) {
                                if (q->hosts[outer_hash + k].srchost_tag ==
                                    srchost_hash)
                                        goto found_src;
                        }
                        for (i = 0; i < CAKE_SET_WAYS;
                                i++, k = (k + 1) % CAKE_SET_WAYS) {
                                if (!q->hosts[outer_hash + k].srchost_bulk_flow_count)
                                        break;
                        }
                        q->hosts[outer_hash + k].srchost_tag = srchost_hash;
found_src:
                        srchost_idx = outer_hash + k;
                        if (q->flows[reduced_hash].set == CAKE_SET_BULK)
                                q->hosts[srchost_idx].srchost_bulk_flow_count++;
                        q->flows[reduced_hash].srchost = srchost_idx;
                }

                if (allocate_dst) {
                        dsthost_idx = dsthost_hash % CAKE_QUEUES;
                        inner_hash = dsthost_idx % CAKE_SET_WAYS;
                        outer_hash = dsthost_idx - inner_hash;
                        for (i = 0, k = inner_hash; i < CAKE_SET_WAYS;
                             i++, k = (k + 1) % CAKE_SET_WAYS) {
                                if (q->hosts[outer_hash + k].dsthost_tag ==
                                    dsthost_hash)
                                        goto found_dst;
                        }
                        for (i = 0; i < CAKE_SET_WAYS;
                             i++, k = (k + 1) % CAKE_SET_WAYS) {
                                if (!q->hosts[outer_hash + k].dsthost_bulk_flow_count)
                                        break;
                        }
                        q->hosts[outer_hash + k].dsthost_tag = dsthost_hash;
found_dst:
                        dsthost_idx = outer_hash + k;
                        if (q->flows[reduced_hash].set == CAKE_SET_BULK)
                                q->hosts[dsthost_idx].dsthost_bulk_flow_count++;
                        q->flows[reduced_hash].dsthost = dsthost_idx;
                }
        }

        return reduced_hash;
}

/* helper functions: might be changed when/if skb uses a standard list_head */
/* remove one skb from head of slot queue */

static struct sk_buff *dequeue_head(struct cake_flow *flow)
{
        struct sk_buff *skb = flow->head;

        if (skb) {
                flow->head = skb->next;
                skb_mark_not_on_list(skb);
        }

        return skb;
}

/* add skb to flow queue (tail add) */

static void flow_queue_add(struct cake_flow *flow, struct sk_buff *skb)
{
        if (!flow->head)
                flow->head = skb;
        else
                flow->tail->next = skb;
        flow->tail = skb;
        skb->next = NULL;
}

static struct iphdr *cake_get_iphdr(const struct sk_buff *skb,
                                    struct ipv6hdr *buf)
{
        unsigned int offset = skb_network_offset(skb);
        struct iphdr *iph;

        iph = skb_header_pointer(skb, offset, sizeof(struct iphdr), buf);

        if (!iph)
                return NULL;

        if (iph->version == 4 && iph->protocol == IPPROTO_IPV6)
                return skb_header_pointer(skb, offset + iph->ihl * 4,
                                          sizeof(struct ipv6hdr), buf);

        else if (iph->version == 4)
                return iph;

        else if (iph->version == 6)
                return skb_header_pointer(skb, offset, sizeof(struct ipv6hdr),
                                          buf);

        return NULL;
}

static struct tcphdr *cake_get_tcphdr(const struct sk_buff *skb,
                                      void *buf, unsigned int bufsize)
{
        unsigned int offset = skb_network_offset(skb);
        const struct ipv6hdr *ipv6h;
        const struct tcphdr *tcph;
        const struct iphdr *iph;
        struct ipv6hdr _ipv6h;
        struct tcphdr _tcph;

        ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);

        if (!ipv6h)
                return NULL;

        if (ipv6h->version == 4) {
                iph = (struct iphdr *)ipv6h;
                offset += iph->ihl * 4;

                /* special-case 6in4 tunnelling, as that is a common way to get
                 * v6 connectivity in the home
                 */
                if (iph->protocol == IPPROTO_IPV6) {
                        ipv6h = skb_header_pointer(skb, offset,
                                                   sizeof(_ipv6h), &_ipv6h);

                        if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP)
                                return NULL;

                        offset += sizeof(struct ipv6hdr);

                } else if (iph->protocol != IPPROTO_TCP) {
                        return NULL;
                }

        } else if (ipv6h->version == 6) {
                if (ipv6h->nexthdr != IPPROTO_TCP)
                        return NULL;

                offset += sizeof(struct ipv6hdr);
        } else {
                return NULL;
        }

        tcph = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
        if (!tcph)
                return NULL;

        return skb_header_pointer(skb, offset,
                                  min(__tcp_hdrlen(tcph), bufsize), buf);
}

static const void *cake_get_tcpopt(const struct tcphdr *tcph,
                                   int code, int *oplen)
{
        /* inspired by tcp_parse_options in tcp_input.c */
        int length = __tcp_hdrlen(tcph) - sizeof(struct tcphdr);
        const u8 *ptr = (const u8 *)(tcph + 1);

        while (length > 0) {
                int opcode = *ptr++;
                int opsize;

                if (opcode == TCPOPT_EOL)
                        break;
                if (opcode == TCPOPT_NOP) {
                        length--;
                        continue;
                }
                opsize = *ptr++;
                if (opsize < 2 || opsize > length)
                        break;

                if (opcode == code) {
                        *oplen = opsize;
                        return ptr;
                }

                ptr += opsize - 2;
                length -= opsize;
        }

        return NULL;
}

/* Compare two SACK sequences. A sequence is considered greater if it SACKs
 * more bytes than the other. In the case where both sequences ACK bytes that
 * the other doesn't, A is considered greater. DSACKs in A also make A be
 * considered greater.
 *
 * @return -1, 0 or 1 as normal compare functions
 */
static int cake_tcph_sack_compare(const struct tcphdr *tcph_a,
                                  const struct tcphdr *tcph_b)
{
        const struct tcp_sack_block_wire *sack_a, *sack_b;
        u32 ack_seq_a = ntohl(tcph_a->ack_seq);
        u32 bytes_a = 0, bytes_b = 0;
        int oplen_a, oplen_b;
        bool first = true;

        sack_a = cake_get_tcpopt(tcph_a, TCPOPT_SACK, &oplen_a);
        sack_b = cake_get_tcpopt(tcph_b, TCPOPT_SACK, &oplen_b);

        /* pointers point to option contents */
        oplen_a -= TCPOLEN_SACK_BASE;
        oplen_b -= TCPOLEN_SACK_BASE;

        if (sack_a && oplen_a >= sizeof(*sack_a) &&
            (!sack_b || oplen_b < sizeof(*sack_b)))
                return -1;
        else if (sack_b && oplen_b >= sizeof(*sack_b) &&
                 (!sack_a || oplen_a < sizeof(*sack_a)))
                return 1;
        else if ((!sack_a || oplen_a < sizeof(*sack_a)) &&
                 (!sack_b || oplen_b < sizeof(*sack_b)))
                return 0;

        while (oplen_a >= sizeof(*sack_a)) {
                const struct tcp_sack_block_wire *sack_tmp = sack_b;
                u32 start_a = get_unaligned_be32(&sack_a->start_seq);
                u32 end_a = get_unaligned_be32(&sack_a->end_seq);
                int oplen_tmp = oplen_b;
                bool found = false;

                /* DSACK; always considered greater to prevent dropping */
                if (before(start_a, ack_seq_a))
                        return -1;

                bytes_a += end_a - start_a;

                while (oplen_tmp >= sizeof(*sack_tmp)) {
                        u32 start_b = get_unaligned_be32(&sack_tmp->start_seq);
                        u32 end_b = get_unaligned_be32(&sack_tmp->end_seq);

                        /* first time through we count the total size */
                        if (first)
                                bytes_b += end_b - start_b;

                        if (!after(start_b, start_a) && !before(end_b, end_a)) {
                                found = true;
                                if (!first)
                                        break;
                        }
                        oplen_tmp -= sizeof(*sack_tmp);
                        sack_tmp++;
                }

                if (!found)
                        return -1;

                oplen_a -= sizeof(*sack_a);
                sack_a++;
                first = false;
        }

        /* If we made it this far, all ranges SACKed by A are covered by B, so
         * either the SACKs are equal, or B SACKs more bytes.
         */
        return bytes_b > bytes_a ? 1 : 0;
}

static void cake_tcph_get_tstamp(const struct tcphdr *tcph,
                                 u32 *tsval, u32 *tsecr)
{
        const u8 *ptr;
        int opsize;

        ptr = cake_get_tcpopt(tcph, TCPOPT_TIMESTAMP, &opsize);

        if (ptr && opsize == TCPOLEN_TIMESTAMP) {
                *tsval = get_unaligned_be32(ptr);
                *tsecr = get_unaligned_be32(ptr + 4);
        }
}

static bool cake_tcph_may_drop(const struct tcphdr *tcph,
                               u32 tstamp_new, u32 tsecr_new)
{
        /* inspired by tcp_parse_options in tcp_input.c */
        int length = __tcp_hdrlen(tcph) - sizeof(struct tcphdr);
        const u8 *ptr = (const u8 *)(tcph + 1);
        u32 tstamp, tsecr;

        /* 3 reserved flags must be unset to avoid future breakage
         * ACK must be set
         * ECE/CWR are handled separately
         * All other flags URG/PSH/RST/SYN/FIN must be unset
         * 0x0FFF0000 = all TCP flags (confirm ACK=1, others zero)
         * 0x00C00000 = CWR/ECE (handled separately)
         * 0x0F3F0000 = 0x0FFF0000 & ~0x00C00000
         */
        if (((tcp_flag_word(tcph) &
              cpu_to_be32(0x0F3F0000)) != TCP_FLAG_ACK))
                return false;

        while (length > 0) {
                int opcode = *ptr++;
                int opsize;

                if (opcode == TCPOPT_EOL)
                        break;
                if (opcode == TCPOPT_NOP) {
                        length--;
                        continue;
                }
                opsize = *ptr++;
                if (opsize < 2 || opsize > length)
                        break;

                switch (opcode) {
                case TCPOPT_MD5SIG: /* doesn't influence state */
                        break;

                case TCPOPT_SACK: /* stricter checking performed later */
                        if (opsize % 8 != 2)
                                return false;
                        break;

                case TCPOPT_TIMESTAMP:
                        /* only drop timestamps lower than new */
                        if (opsize != TCPOLEN_TIMESTAMP)
                                return false;
                        tstamp = get_unaligned_be32(ptr);
                        tsecr = get_unaligned_be32(ptr + 4);
                        if (after(tstamp, tstamp_new) ||
                            after(tsecr, tsecr_new))
                                return false;
                        break;

                case TCPOPT_MSS:  /* these should only be set on SYN */
                case TCPOPT_WINDOW:
                case TCPOPT_SACK_PERM:
                case TCPOPT_FASTOPEN:
                case TCPOPT_EXP:
                default: /* don't drop if any unknown options are present */
                        return false;
                }

                ptr += opsize - 2;
                length -= opsize;
        }

        return true;
}

static struct sk_buff *cake_ack_filter(struct cake_sched_data *q,
                                       struct cake_flow *flow)
{
        bool aggressive = q->ack_filter == CAKE_ACK_AGGRESSIVE;
        struct sk_buff *elig_ack = NULL, *elig_ack_prev = NULL;
        struct sk_buff *skb_check, *skb_prev = NULL;
        const struct ipv6hdr *ipv6h, *ipv6h_check;
        unsigned char _tcph[64], _tcph_check[64];
        const struct tcphdr *tcph, *tcph_check;
        const struct iphdr *iph, *iph_check;
        struct ipv6hdr _iph, _iph_check;
        const struct sk_buff *skb;
        int seglen, num_found = 0;
        u32 tstamp = 0, tsecr = 0;
        __be32 elig_flags = 0;
        int sack_comp;

        /* no other possible ACKs to filter */
        if (flow->head == flow->tail)
                return NULL;

        skb = flow->tail;
        tcph = cake_get_tcphdr(skb, _tcph, sizeof(_tcph));
        iph = cake_get_iphdr(skb, &_iph);
        if (!tcph)
                return NULL;

        cake_tcph_get_tstamp(tcph, &tstamp, &tsecr);

        /* the 'triggering' packet need only have the ACK flag set.
         * also check that SYN is not set, as there won't be any previous ACKs.
         */
        if ((tcp_flag_word(tcph) &
             (TCP_FLAG_ACK | TCP_FLAG_SYN)) != TCP_FLAG_ACK)
                return NULL;

        /* the 'triggering' ACK is at the tail of the queue, we have already
         * returned if it is the only packet in the flow. loop through the rest
         * of the queue looking for pure ACKs with the same 5-tuple as the
         * triggering one.
         */
        for (skb_check = flow->head;
             skb_check && skb_check != skb;
             skb_prev = skb_check, skb_check = skb_check->next) {
                iph_check = cake_get_iphdr(skb_check, &_iph_check);
                tcph_check = cake_get_tcphdr(skb_check, &_tcph_check,
                                             sizeof(_tcph_check));

                /* only TCP packets with matching 5-tuple are eligible, and only
                 * drop safe headers
                 */
                if (!tcph_check || iph->version != iph_check->version ||
                    tcph_check->source != tcph->source ||
                    tcph_check->dest != tcph->dest)
                        continue;

                if (iph_check->version == 4) {
                        if (iph_check->saddr != iph->saddr ||
                            iph_check->daddr != iph->daddr)
                                continue;

                        seglen = ntohs(iph_check->tot_len) -
                                       (4 * iph_check->ihl);
                } else if (iph_check->version == 6) {
                        ipv6h = (struct ipv6hdr *)iph;
                        ipv6h_check = (struct ipv6hdr *)iph_check;

                        if (ipv6_addr_cmp(&ipv6h_check->saddr, &ipv6h->saddr) ||
                            ipv6_addr_cmp(&ipv6h_check->daddr, &ipv6h->daddr))
                                continue;

                        seglen = ntohs(ipv6h_check->payload_len);
                } else {
                        WARN_ON(1);  /* shouldn't happen */
                        continue;
                }

                /* If the ECE/CWR flags changed from the previous eligible
                 * packet in the same flow, we should no longer be dropping that
                 * previous packet as this would lose information.
                 */
                if (elig_ack && (tcp_flag_word(tcph_check) &
                                 (TCP_FLAG_ECE | TCP_FLAG_CWR)) != elig_flags) {
                        elig_ack = NULL;
                        elig_ack_prev = NULL;
                        num_found--;
                }

                /* Check TCP options and flags, don't drop ACKs with segment
                 * data, and don't drop ACKs with a higher cumulative ACK
                 * counter than the triggering packet. Check ACK seqno here to
                 * avoid parsing SACK options of packets we are going to exclude
                 * anyway.
                 */
                if (!cake_tcph_may_drop(tcph_check, tstamp, tsecr) ||
                    (seglen - __tcp_hdrlen(tcph_check)) != 0 ||
                    after(ntohl(tcph_check->ack_seq), ntohl(tcph->ack_seq)))
                        continue;

                /* Check SACK options. The triggering packet must SACK more data
                 * than the ACK under consideration, or SACK the same range but
                 * have a larger cumulative ACK counter. The latter is a
                 * pathological case, but is contained in the following check
                 * anyway, just to be safe.
                 */
                sack_comp = cake_tcph_sack_compare(tcph_check, tcph);

                if (sack_comp < 0 ||
                    (ntohl(tcph_check->ack_seq) == ntohl(tcph->ack_seq) &&
                     sack_comp == 0))
                        continue;

                /* At this point we have found an eligible pure ACK to drop; if
                 * we are in aggressive mode, we are done. Otherwise, keep
                 * searching unless this is the second eligible ACK we
                 * found.
                 *
                 * Since we want to drop ACK closest to the head of the queue,
                 * save the first eligible ACK we find, even if we need to loop
                 * again.
                 */
                if (!elig_ack) {
                        elig_ack = skb_check;
                        elig_ack_prev = skb_prev;
                        elig_flags = (tcp_flag_word(tcph_check)
                                      & (TCP_FLAG_ECE | TCP_FLAG_CWR));
                }

                if (num_found++ > 0)
                        goto found;
        }
        /* We made it through the queue without finding two eligible ACKs. If
         * we found a single eligible ACK we can drop it in aggressive mode if
         * we can guarantee that this does not interfere with ECN flag
         * information. We ensure this by dropping it only if the enqueued
         * packet is consecutive with the eligible ACK, and their flags match.
         */
        if (elig_ack && aggressive && elig_ack->next == skb &&
            (elig_flags == (tcp_flag_word(tcph) &
                            (TCP_FLAG_ECE | TCP_FLAG_CWR))))
                goto found;

        return NULL;

found:
        if (elig_ack_prev)
                elig_ack_prev->next = elig_ack->next;
        else
                flow->head = elig_ack->next;

        skb_mark_not_on_list(elig_ack);

        return elig_ack;
}

static u64 cake_ewma(u64 avg, u64 sample, u32 shift)
{
        avg -= avg >> shift;
        avg += sample >> shift;
        return avg;
}
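
/* Annotation (not in the original source): this is the standard exponentially
 * weighted moving average avg += (sample - avg) / 2^shift, rearranged to stay
 * in unsigned arithmetic.  With shift = 8, each new sample contributes 1/256
 * of its value, so the average settles over a few hundred samples.
 */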

static u32 cake_calc_overhead(struct cake_sched_data *q, u32 len, u32 off)
{
        if (q->rate_flags & CAKE_FLAG_OVERHEAD)
                len -= off;

        if (q->max_netlen < len)
                q->max_netlen = len;
        if (q->min_netlen > len)
                q->min_netlen = len;

        len += q->rate_overhead;

        if (len < q->rate_mpu)
                len = q->rate_mpu;

        if (q->atm_mode == CAKE_ATM_ATM) {
                len += 47;
                len /= 48;
                len *= 53;
        } else if (q->atm_mode == CAKE_ATM_PTM) {
                /* Add one byte per 64 bytes or part thereof.
                 * This is conservative and easier to calculate than the
                 * precise value.
                 */
                len += (len + 63) / 64;
        }

        if (q->max_adjlen < len)
                q->max_adjlen = len;
        if (q->min_adjlen > len)
                q->min_adjlen = len;

        return len;
}
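
/* Worked example (annotation, not in the original source): in CAKE_ATM_ATM
 * mode a 100-byte frame needs ceil(100/48) = 3 ATM cells, and the code above
 * charges (100 + 47) / 48 * 53 = 159 bytes on the wire.  In CAKE_ATM_PTM
 * mode the same frame is charged 100 + ceil(100/64) = 102 bytes for the
 * 64b/65b encoding.
 */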

static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb)
{
        const struct skb_shared_info *shinfo = skb_shinfo(skb);
        unsigned int hdr_len, last_len = 0;
        u32 off = skb_network_offset(skb);
        u32 len = qdisc_pkt_len(skb);
        u16 segs = 1;

        q->avg_netoff = cake_ewma(q->avg_netoff, off << 16, 8);

        if (!shinfo->gso_size)
                return cake_calc_overhead(q, len, off);

        /* borrowed from qdisc_pkt_len_init() */
        hdr_len = skb_transport_header(skb) - skb_mac_header(skb);

        /* + transport layer */
        if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 |
                                                SKB_GSO_TCPV6))) {
                const struct tcphdr *th;
                struct tcphdr _tcphdr;

                th = skb_header_pointer(skb, skb_transport_offset(skb),
                                        sizeof(_tcphdr), &_tcphdr);
                if (likely(th))
                        hdr_len += __tcp_hdrlen(th);
        } else {
                struct udphdr _udphdr;

                if (skb_header_pointer(skb, skb_transport_offset(skb),
                                       sizeof(_udphdr), &_udphdr))
                        hdr_len += sizeof(struct udphdr);
        }

        if (unlikely(shinfo->gso_type & SKB_GSO_DODGY))
                segs = DIV_ROUND_UP(skb->len - hdr_len,
                                    shinfo->gso_size);
        else
                segs = shinfo->gso_segs;

        len = shinfo->gso_size + hdr_len;
        last_len = skb->len - shinfo->gso_size * (segs - 1);

        return (cake_calc_overhead(q, len, off) * (segs - 1) +
                cake_calc_overhead(q, last_len, off));
}
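
/* Worked example (annotation, not in the original source): for a TCP GSO
 * super-packet with hdr_len = 54 (Ethernet + IPv4 + TCP), gso_size = 1448
 * and 3000 bytes of payload, segs = 3 and last_len = 3054 - 2 * 1448 = 158,
 * so the shaper charges two segments of 1502 bytes plus one of 158, each
 * with per-packet overhead applied, rather than one 3054-byte packet.
 */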

static void cake_heap_swap(struct cake_sched_data *q, u16 i, u16 j)
{
        struct cake_heap_entry ii = q->overflow_heap[i];
        struct cake_heap_entry jj = q->overflow_heap[j];

        q->overflow_heap[i] = jj;
        q->overflow_heap[j] = ii;

        q->tins[ii.t].overflow_idx[ii.b] = j;
        q->tins[jj.t].overflow_idx[jj.b] = i;
}

static u32 cake_heap_get_backlog(const struct cake_sched_data *q, u16 i)
{
        struct cake_heap_entry ii = q->overflow_heap[i];

        return q->tins[ii.t].backlogs[ii.b];
}

static void cake_heapify(struct cake_sched_data *q, u16 i)
{
        static const u32 a = CAKE_MAX_TINS * CAKE_QUEUES;
        u32 mb = cake_heap_get_backlog(q, i);
        u32 m = i;

        while (m < a) {
                u32 l = m + m + 1;
                u32 r = l + 1;

                if (l < a) {
                        u32 lb = cake_heap_get_backlog(q, l);

                        if (lb > mb) {
                                m  = l;
                                mb = lb;
                        }
                }

                if (r < a) {
                        u32 rb = cake_heap_get_backlog(q, r);

                        if (rb > mb) {
                                m  = r;
                                mb = rb;
                        }
                }

                if (m != i) {
                        cake_heap_swap(q, i, m);
                        i = m;
                } else {
                        break;
                }
        }
}

static void cake_heapify_up(struct cake_sched_data *q, u16 i)
{
        while (i > 0 && i < CAKE_MAX_TINS * CAKE_QUEUES) {
                u16 p = (i - 1) >> 1;
                u32 ib = cake_heap_get_backlog(q, i);
                u32 pb = cake_heap_get_backlog(q, p);

                if (ib > pb) {
                        cake_heap_swap(q, i, p);
                        i = p;
                } else {
                        break;
                }
        }
}
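
/* Annotation (not in the original source): the two functions above maintain
 * an implicit binary max-heap over queue backlogs: node i has children
 * 2i + 1 and 2i + 2 and parent (i - 1) / 2, so e.g. node 5 sifts down
 * towards children 11 and 12 and up towards parent 2.  The root is always
 * the fattest queue, which cake_drop() below prunes first.
 */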
1427
1428 static int cake_advance_shaper(struct cake_sched_data *q,
1429                                struct cake_tin_data *b,
1430                                struct sk_buff *skb,
1431                                ktime_t now, bool drop)
1432 {
1433         u32 len = get_cobalt_cb(skb)->adjusted_len;
1434
1435         /* charge packet bandwidth to this tin
1436          * and to the global shaper.
1437          */
1438         if (q->rate_ns) {
1439                 u64 tin_dur = (len * b->tin_rate_ns) >> b->tin_rate_shft;
1440                 u64 global_dur = (len * q->rate_ns) >> q->rate_shft;
1441                 u64 failsafe_dur = global_dur + (global_dur >> 1);
1442
1443                 if (ktime_before(b->time_next_packet, now))
1444                         b->time_next_packet = ktime_add_ns(b->time_next_packet,
1445                                                            tin_dur);
1446
1447                 else if (ktime_before(b->time_next_packet,
1448                                       ktime_add_ns(now, tin_dur)))
1449                         b->time_next_packet = ktime_add_ns(now, tin_dur);
1450
1451                 q->time_next_packet = ktime_add_ns(q->time_next_packet,
1452                                                    global_dur);
1453                 if (!drop)
1454                         q->failsafe_next_packet =
1455                                 ktime_add_ns(q->failsafe_next_packet,
1456                                              failsafe_dur);
1457         }
1458         return len;
1459 }
1460
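/* Queue-overflow handling: drop one packet from the head of the longest
 * queue, found via the overflow heap.  The heap is rebuilt lazily once
 * overflow_timeout has expired.  Returns the victim's flow index in the
 * low 16 bits and its tin in the upper 16 bits.
 */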
1461 static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
1462 {
1463         struct cake_sched_data *q = qdisc_priv(sch);
1464         ktime_t now = ktime_get();
1465         u32 idx = 0, tin = 0, len;
1466         struct cake_heap_entry qq;
1467         struct cake_tin_data *b;
1468         struct cake_flow *flow;
1469         struct sk_buff *skb;
1470
1471         if (!q->overflow_timeout) {
1472                 int i;
1473                 /* Build fresh max-heap */
1474                 for (i = CAKE_MAX_TINS * CAKE_QUEUES / 2; i >= 0; i--)
1475                         cake_heapify(q, i);
1476         }
1477         q->overflow_timeout = 65535;
1478
1479         /* select longest queue for pruning */
1480         qq  = q->overflow_heap[0];
1481         tin = qq.t;
1482         idx = qq.b;
1483
1484         b = &q->tins[tin];
1485         flow = &b->flows[idx];
1486         skb = dequeue_head(flow);
1487         if (unlikely(!skb)) {
1488                 /* heap has gone wrong, rebuild it next time */
1489                 q->overflow_timeout = 0;
1490                 return idx + (tin << 16);
1491         }
1492
1493         if (cobalt_queue_full(&flow->cvars, &b->cparams, now))
1494                 b->unresponsive_flow_count++;
1495
1496         len = qdisc_pkt_len(skb);
1497         q->buffer_used      -= skb->truesize;
1498         b->backlogs[idx]    -= len;
1499         b->tin_backlog      -= len;
1500         sch->qstats.backlog -= len;
1501         qdisc_tree_reduce_backlog(sch, 1, len);
1502
1503         flow->dropped++;
1504         b->tin_dropped++;
1505         sch->qstats.drops++;
1506
1507         if (q->rate_flags & CAKE_FLAG_INGRESS)
1508                 cake_advance_shaper(q, b, skb, now, true);
1509
1510         __qdisc_drop(skb, to_free);
1511         sch->q.qlen--;
1512
1513         cake_heapify(q, 0);
1514
1515         return idx + (tin << 16);
1516 }
1517
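/* Extract the 6-bit DSCP from an IPv4 or IPv6 header.  When 'wash' is set,
 * also zero the DSCP bits on the wire; INET_ECN_MASK keeps the ECN field
 * intact.  ARP is treated as network control (CS7), anything unrecognised
 * as best effort.
 */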
1518 static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash)
1519 {
1520         u8 dscp;
1521
1522         switch (skb->protocol) {
1523         case htons(ETH_P_IP):
1524                 dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
1525                 if (wash && dscp)
1526                         ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
1527                 return dscp;
1528
1529         case htons(ETH_P_IPV6):
1530                 dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
1531                 if (wash && dscp)
1532                         ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
1533                 return dscp;
1534
1535         case htons(ETH_P_ARP):
1536                 return 0x38;  /* CS7 - Net Control */
1537
1538         default:
1539                 /* If there is no Diffserv field, treat as best-effort */
1540                 return 0;
1541         }
1542 }
1543
1544 static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
1545                                              struct sk_buff *skb)
1546 {
1547         struct cake_sched_data *q = qdisc_priv(sch);
1548         u32 tin, mark;
1549         u8 dscp;
1550
1551         /* Tin selection: Default to diffserv-based selection, allow overriding
1552          * using firewall marks or skb->priority.
1553          */
1554         dscp = cake_handle_diffserv(skb,
1555                                     q->rate_flags & CAKE_FLAG_WASH);
1556         mark = (skb->mark & q->fwmark_mask) >> q->fwmark_shft;
1557
1558         if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT)
1559                 tin = 0;
1560
1561         else if (mark && mark <= q->tin_cnt)
1562                 tin = q->tin_order[mark - 1];
1563
1564         else if (TC_H_MAJ(skb->priority) == sch->handle &&
1565                  TC_H_MIN(skb->priority) > 0 &&
1566                  TC_H_MIN(skb->priority) <= q->tin_cnt)
1567                 tin = q->tin_order[TC_H_MIN(skb->priority) - 1];
1568
1569         else {
1570                 tin = q->tin_index[dscp];
1571
1572                 if (unlikely(tin >= q->tin_cnt))
1573                         tin = 0;
1574         }
1575
1576         return &q->tins[tin];
1577 }
1578
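/* Run any attached tc filters, then pick a tin and hash the packet to a
 * queue.  A filter may override the flow and/or host via res.classid.
 * Returns a 1-based queue index; 0 means a tc action consumed or dropped
 * the packet and *qerr carries the verdict.
 */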
1579 static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t,
1580                          struct sk_buff *skb, int flow_mode, int *qerr)
1581 {
1582         struct cake_sched_data *q = qdisc_priv(sch);
1583         struct tcf_proto *filter;
1584         struct tcf_result res;
1585         u16 flow = 0, host = 0;
1586         int result;
1587
1588         filter = rcu_dereference_bh(q->filter_list);
1589         if (!filter)
1590                 goto hash;
1591
1592         *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
1593         result = tcf_classify(skb, filter, &res, false);
1594
1595         if (result >= 0) {
1596 #ifdef CONFIG_NET_CLS_ACT
1597                 switch (result) {
1598                 case TC_ACT_STOLEN:
1599                 case TC_ACT_QUEUED:
1600                 case TC_ACT_TRAP:
1601                         *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
1602                         /* fall through */
1603                 case TC_ACT_SHOT:
1604                         return 0;
1605                 }
1606 #endif
1607                 if (TC_H_MIN(res.classid) <= CAKE_QUEUES)
1608                         flow = TC_H_MIN(res.classid);
1609                 if (TC_H_MAJ(res.classid) <= (CAKE_QUEUES << 16))
1610                         host = TC_H_MAJ(res.classid) >> 16;
1611         }
1612 hash:
1613         *t = cake_select_tin(sch, skb);
1614         return cake_hash(*t, skb, flow_mode, flow, host) + 1;
1615 }
1616
1617 static void cake_reconfigure(struct Qdisc *sch);
1618
1619 static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
1620                         struct sk_buff **to_free)
1621 {
1622         struct cake_sched_data *q = qdisc_priv(sch);
1623         int len = qdisc_pkt_len(skb);
1624         int uninitialized_var(ret);
1625         struct sk_buff *ack = NULL;
1626         ktime_t now = ktime_get();
1627         struct cake_tin_data *b;
1628         struct cake_flow *flow;
1629         u32 idx;
1630
1631         /* choose flow to insert into */
1632         idx = cake_classify(sch, &b, skb, q->flow_mode, &ret);
1633         if (idx == 0) {
1634                 if (ret & __NET_XMIT_BYPASS)
1635                         qdisc_qstats_drop(sch);
1636                 __qdisc_drop(skb, to_free);
1637                 return ret;
1638         }
1639         idx--;
1640         flow = &b->flows[idx];
1641
1642         /* ensure shaper state isn't stale */
1643         if (!b->tin_backlog) {
1644                 if (ktime_before(b->time_next_packet, now))
1645                         b->time_next_packet = now;
1646
1647                 if (!sch->q.qlen) {
1648                         if (ktime_before(q->time_next_packet, now)) {
1649                                 q->failsafe_next_packet = now;
1650                                 q->time_next_packet = now;
1651                         } else if (ktime_after(q->time_next_packet, now) &&
1652                                    ktime_after(q->failsafe_next_packet, now)) {
1653                                 u64 next =
1654                                         min(ktime_to_ns(q->time_next_packet),
1655                                             ktime_to_ns(
1656                                                    q->failsafe_next_packet));
1657                                 sch->qstats.overlimits++;
1658                                 qdisc_watchdog_schedule_ns(&q->watchdog, next);
1659                         }
1660                 }
1661         }
1662
1663         if (unlikely(len > b->max_skblen))
1664                 b->max_skblen = len;
1665
1666         if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) {
1667                 struct sk_buff *segs, *nskb;
1668                 netdev_features_t features = netif_skb_features(skb);
1669                 unsigned int slen = 0, numsegs = 0;
1670
1671                 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
1672                 if (IS_ERR_OR_NULL(segs))
1673                         return qdisc_drop(skb, sch, to_free);
1674
1675                 while (segs) {
1676                         nskb = segs->next;
1677                         skb_mark_not_on_list(segs);
1678                         qdisc_skb_cb(segs)->pkt_len = segs->len;
1679                         cobalt_set_enqueue_time(segs, now);
1680                         get_cobalt_cb(segs)->adjusted_len = cake_overhead(q,
1681                                                                           segs);
1682                         flow_queue_add(flow, segs);
1683
1684                         sch->q.qlen++;
1685                         numsegs++;
1686                         slen += segs->len;
1687                         q->buffer_used += segs->truesize;
1688                         b->packets++;
1689                         segs = nskb;
1690                 }
1691
1692                 /* stats */
1693                 b->bytes            += slen;
1694                 b->backlogs[idx]    += slen;
1695                 b->tin_backlog      += slen;
1696                 sch->qstats.backlog += slen;
1697                 q->avg_window_bytes += slen;
1698
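                /* The parent qdisc saw one skb of 'len' bytes; this qdisc
                 * now holds 'numsegs' segments totalling 'slen' bytes, so
                 * push the (negative) difference up the tree.
                 */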
1699                 qdisc_tree_reduce_backlog(sch, 1 - numsegs, len - slen);
1700                 consume_skb(skb);
1701         } else {
1702                 /* not splitting */
1703                 cobalt_set_enqueue_time(skb, now);
1704                 get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
1705                 flow_queue_add(flow, skb);
1706
1707                 if (q->ack_filter)
1708                         ack = cake_ack_filter(q, flow);
1709
1710                 if (ack) {
1711                         b->ack_drops++;
1712                         sch->qstats.drops++;
1713                         b->bytes += qdisc_pkt_len(ack);
1714                         len -= qdisc_pkt_len(ack);
1715                         q->buffer_used += skb->truesize - ack->truesize;
1716                         if (q->rate_flags & CAKE_FLAG_INGRESS)
1717                                 cake_advance_shaper(q, b, ack, now, true);
1718
1719                         qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
1720                         consume_skb(ack);
1721                 } else {
1722                         sch->q.qlen++;
1723                         q->buffer_used      += skb->truesize;
1724                 }
1725
1726                 /* stats */
1727                 b->packets++;
1728                 b->bytes            += len;
1729                 b->backlogs[idx]    += len;
1730                 b->tin_backlog      += len;
1731                 sch->qstats.backlog += len;
1732                 q->avg_window_bytes += len;
1733         }
1734
1735         if (q->overflow_timeout)
1736                 cake_heapify_up(q, b->overflow_idx[idx]);
1737
1738         /* incoming bandwidth capacity estimate */
1739         if (q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS) {
1740                 u64 packet_interval =
1741                         ktime_to_ns(ktime_sub(now, q->last_packet_time));
1742
1743                 if (packet_interval > NSEC_PER_SEC)
1744                         packet_interval = NSEC_PER_SEC;
1745
1746                 /* filter out short-term bursts, eg. wifi aggregation */
1747                 q->avg_packet_interval =
1748                         cake_ewma(q->avg_packet_interval,
1749                                   packet_interval,
1750                                   (packet_interval > q->avg_packet_interval ?
1751                                           2 : 8));
1752
1753                 q->last_packet_time = now;
1754
1755                 if (packet_interval > q->avg_packet_interval) {
1756                         u64 window_interval =
1757                                 ktime_to_ns(ktime_sub(now,
1758                                                       q->avg_window_begin));
1759                         u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC;
1760
1761                         do_div(b, window_interval);
1762                         q->avg_peak_bandwidth =
1763                                 cake_ewma(q->avg_peak_bandwidth, b,
1764                                           b > q->avg_peak_bandwidth ? 2 : 8);
1765                         q->avg_window_bytes = 0;
1766                         q->avg_window_begin = now;
1767
1768                         if (ktime_after(now,
1769                                         ktime_add_ms(q->last_reconfig_time,
1770                                                      250))) {
1771                                 q->rate_bps = (q->avg_peak_bandwidth * 15) >> 4;
1772                                 cake_reconfigure(sch);
1773                         }
1774                 }
1775         } else {
1776                 q->avg_window_bytes = 0;
1777                 q->last_packet_time = now;
1778         }
1779
1780         /* flowchain */
1781         if (!flow->set || flow->set == CAKE_SET_DECAYING) {
1782                 struct cake_host *srchost = &b->hosts[flow->srchost];
1783                 struct cake_host *dsthost = &b->hosts[flow->dsthost];
1784                 u16 host_load = 1;
1785
1786                 if (!flow->set) {
1787                         list_add_tail(&flow->flowchain, &b->new_flows);
1788                 } else {
1789                         b->decaying_flow_count--;
1790                         list_move_tail(&flow->flowchain, &b->new_flows);
1791                 }
1792                 flow->set = CAKE_SET_SPARSE;
1793                 b->sparse_flow_count++;
1794
1795                 if (cake_dsrc(q->flow_mode))
1796                         host_load = max(host_load, srchost->srchost_bulk_flow_count);
1797
1798                 if (cake_ddst(q->flow_mode))
1799                         host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
1800
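                /* quantum_div[n] == 65535 / n (see cake_init()), so the
                 * initial deficit is roughly flow_quantum / host_load: a
                 * host with n bulk flows gets each new flow 1/n of a
                 * quantum, yielding per-host fairness.
                 */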
1801                 flow->deficit = (b->flow_quantum *
1802                                  quantum_div[host_load]) >> 16;
1803         } else if (flow->set == CAKE_SET_SPARSE_WAIT) {
1804                 struct cake_host *srchost = &b->hosts[flow->srchost];
1805                 struct cake_host *dsthost = &b->hosts[flow->dsthost];
1806
1807                 /* this flow was empty, accounted as a sparse flow, but actually
1808                  * in the bulk rotation.
1809                  */
1810                 flow->set = CAKE_SET_BULK;
1811                 b->sparse_flow_count--;
1812                 b->bulk_flow_count++;
1813
1814                 if (cake_dsrc(q->flow_mode))
1815                         srchost->srchost_bulk_flow_count++;
1816
1817                 if (cake_ddst(q->flow_mode))
1818                         dsthost->dsthost_bulk_flow_count++;
1819
1820         }
1821
1822         if (q->buffer_used > q->buffer_max_used)
1823                 q->buffer_max_used = q->buffer_used;
1824
1825         if (q->buffer_used > q->buffer_limit) {
1826                 u32 dropped = 0;
1827
1828                 while (q->buffer_used > q->buffer_limit) {
1829                         dropped++;
1830                         cake_drop(sch, to_free);
1831                 }
1832                 b->drop_overlimit += dropped;
1833         }
1834         return NET_XMIT_SUCCESS;
1835 }
1836
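/* Pop the head packet of the queue selected by (cur_tin, cur_flow),
 * updating backlog accounting and, while the overflow heap is live, that
 * queue's heap position.
 */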
1837 static struct sk_buff *cake_dequeue_one(struct Qdisc *sch)
1838 {
1839         struct cake_sched_data *q = qdisc_priv(sch);
1840         struct cake_tin_data *b = &q->tins[q->cur_tin];
1841         struct cake_flow *flow = &b->flows[q->cur_flow];
1842         struct sk_buff *skb = NULL;
1843         u32 len;
1844
1845         if (flow->head) {
1846                 skb = dequeue_head(flow);
1847                 len = qdisc_pkt_len(skb);
1848                 b->backlogs[q->cur_flow] -= len;
1849                 b->tin_backlog           -= len;
1850                 sch->qstats.backlog      -= len;
1851                 q->buffer_used           -= skb->truesize;
1852                 sch->q.qlen--;
1853
1854                 if (q->overflow_timeout)
1855                         cake_heapify(q, b->overflow_idx[q->cur_flow]);
1856         }
1857         return skb;
1858 }
1859
1860 /* Discard leftover packets from a tin no longer in use. */
1861 static void cake_clear_tin(struct Qdisc *sch, u16 tin)
1862 {
1863         struct cake_sched_data *q = qdisc_priv(sch);
1864         struct sk_buff *skb;
1865
1866         q->cur_tin = tin;
1867         for (q->cur_flow = 0; q->cur_flow < CAKE_QUEUES; q->cur_flow++)
1868                 while (!!(skb = cake_dequeue_one(sch)))
1869                         kfree_skb(skb);
1870 }
1871
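/* The dequeue pipeline: gate on the global shaper, choose a tin (plain DRR
 * when unshaped; the highest-priority eligible or earliest-scheduled tin
 * when shaped), choose a flow via DRR++ with host isolation, then let the
 * COBALT AQM accept or drop candidate packets.
 */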
1872 static struct sk_buff *cake_dequeue(struct Qdisc *sch)
1873 {
1874         struct cake_sched_data *q = qdisc_priv(sch);
1875         struct cake_tin_data *b = &q->tins[q->cur_tin];
1876         struct cake_host *srchost, *dsthost;
1877         ktime_t now = ktime_get();
1878         struct cake_flow *flow;
1879         struct list_head *head;
1880         bool first_flow = true;
1881         struct sk_buff *skb;
1882         u16 host_load;
1883         u64 delay;
1884         u32 len;
1885
1886 begin:
1887         if (!sch->q.qlen)
1888                 return NULL;
1889
1890         /* global hard shaper */
1891         if (ktime_after(q->time_next_packet, now) &&
1892             ktime_after(q->failsafe_next_packet, now)) {
1893                 u64 next = min(ktime_to_ns(q->time_next_packet),
1894                                ktime_to_ns(q->failsafe_next_packet));
1895
1896                 sch->qstats.overlimits++;
1897                 qdisc_watchdog_schedule_ns(&q->watchdog, next);
1898                 return NULL;
1899         }
1900
1901         /* Choose a class to work on. */
1902         if (!q->rate_ns) {
1903                 /* In unlimited mode, can't rely on shaper timings, just balance
1904                  * with DRR
1905                  */
1906                 bool wrapped = false, empty = true;
1907
1908                 while (b->tin_deficit < 0 ||
1909                        !(b->sparse_flow_count + b->bulk_flow_count)) {
1910                         if (b->tin_deficit <= 0)
1911                                 b->tin_deficit += b->tin_quantum_band;
1912                         if (b->sparse_flow_count + b->bulk_flow_count)
1913                                 empty = false;
1914
1915                         q->cur_tin++;
1916                         b++;
1917                         if (q->cur_tin >= q->tin_cnt) {
1918                                 q->cur_tin = 0;
1919                                 b = q->tins;
1920
1921                                 if (wrapped) {
1922                                         /* It's possible for q->qlen to be
1923                                          * nonzero when we actually have no
1924                                          * packets anywhere.
1925                                          */
1926                                         if (empty)
1927                                                 return NULL;
1928                                 } else {
1929                                         wrapped = true;
1930                                 }
1931                         }
1932                 }
1933         } else {
1934                 /* In shaped mode, choose:
1935                  * - Highest-priority tin with queue and meeting schedule, or
1936                  * - The earliest-scheduled tin with queue.
1937                  */
1938                 ktime_t best_time = KTIME_MAX;
1939                 int tin, best_tin = 0;
1940
1941                 for (tin = 0; tin < q->tin_cnt; tin++) {
1942                         b = q->tins + tin;
1943                         if ((b->sparse_flow_count + b->bulk_flow_count) > 0) {
1944                                 ktime_t time_to_pkt =
1945                                         ktime_sub(b->time_next_packet, now);
1946
1947                                 if (ktime_to_ns(time_to_pkt) <= 0 ||
1948                                     ktime_compare(time_to_pkt,
1949                                                   best_time) <= 0) {
1950                                         best_time = time_to_pkt;
1951                                         best_tin = tin;
1952                                 }
1953                         }
1954                 }
1955
1956                 q->cur_tin = best_tin;
1957                 b = q->tins + best_tin;
1958
1959                 /* No point in going further if no packets to deliver. */
1960                 if (unlikely(!(b->sparse_flow_count + b->bulk_flow_count)))
1961                         return NULL;
1962         }
1963
1964 retry:
1965         /* service this class */
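        /* Only the first candidate per dequeue may come from the decaying
         * rotation (empty flows running down their COBALT state); after
         * that, sparse (new) flows are served ahead of bulk (old) ones.
         */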
1966         head = &b->decaying_flows;
1967         if (!first_flow || list_empty(head)) {
1968                 head = &b->new_flows;
1969                 if (list_empty(head)) {
1970                         head = &b->old_flows;
1971                         if (unlikely(list_empty(head))) {
1972                                 head = &b->decaying_flows;
1973                                 if (unlikely(list_empty(head)))
1974                                         goto begin;
1975                         }
1976                 }
1977         }
1978         flow = list_first_entry(head, struct cake_flow, flowchain);
1979         q->cur_flow = flow - b->flows;
1980         first_flow = false;
1981
1982         /* triple isolation (modified DRR++) */
1983         srchost = &b->hosts[flow->srchost];
1984         dsthost = &b->hosts[flow->dsthost];
1985         host_load = 1;
1986
1987         /* flow isolation (DRR++) */
1988         if (flow->deficit <= 0) {
1989                 /* Keep all flows with deficits out of the sparse and decaying
1990                  * rotations.  No non-empty flow can go into the decaying
1991                  * rotation, so they can't get deficits
1992                  */
1993                 if (flow->set == CAKE_SET_SPARSE) {
1994                         if (flow->head) {
1995                                 b->sparse_flow_count--;
1996                                 b->bulk_flow_count++;
1997
1998                                 if (cake_dsrc(q->flow_mode))
1999                                         srchost->srchost_bulk_flow_count++;
2000
2001                                 if (cake_ddst(q->flow_mode))
2002                                         dsthost->dsthost_bulk_flow_count++;
2003
2004                                 flow->set = CAKE_SET_BULK;
2005                         } else {
2006                                 /* we've moved it to the bulk rotation for
2007                                  * correct deficit accounting but we still want
2008                                  * to count it as a sparse flow, not a bulk one.
2009                                  */
2010                                 flow->set = CAKE_SET_SPARSE_WAIT;
2011                         }
2012                 }
2013
2014                 if (cake_dsrc(q->flow_mode))
2015                         host_load = max(host_load, srchost->srchost_bulk_flow_count);
2016
2017                 if (cake_ddst(q->flow_mode))
2018                         host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
2019
2020                 WARN_ON(host_load > CAKE_QUEUES);
2021
2022                 /* The shifted prandom_u32() is a way to apply dithering to
2023                  * avoid accumulating roundoff errors
2024                  */
2025                 flow->deficit += (b->flow_quantum * quantum_div[host_load] +
2026                                   (prandom_u32() >> 16)) >> 16;
2027                 list_move_tail(&flow->flowchain, &b->old_flows);
2028
2029                 goto retry;
2030         }
2031
2032         /* Retrieve a packet via the AQM */
2033         while (1) {
2034                 skb = cake_dequeue_one(sch);
2035                 if (!skb) {
2036                         /* this queue was actually empty */
2037                         if (cobalt_queue_empty(&flow->cvars, &b->cparams, now))
2038                                 b->unresponsive_flow_count--;
2039
2040                         if (flow->cvars.p_drop || flow->cvars.count ||
2041                             ktime_before(now, flow->cvars.drop_next)) {
2042                                 /* keep in the flowchain until the state has
2043                                  * decayed to rest
2044                                  */
2045                                 list_move_tail(&flow->flowchain,
2046                                                &b->decaying_flows);
2047                                 if (flow->set == CAKE_SET_BULK) {
2048                                         b->bulk_flow_count--;
2049
2050                                         if (cake_dsrc(q->flow_mode))
2051                                                 srchost->srchost_bulk_flow_count--;
2052
2053                                         if (cake_ddst(q->flow_mode))
2054                                                 dsthost->dsthost_bulk_flow_count--;
2055
2056                                         b->decaying_flow_count++;
2057                                 } else if (flow->set == CAKE_SET_SPARSE ||
2058                                            flow->set == CAKE_SET_SPARSE_WAIT) {
2059                                         b->sparse_flow_count--;
2060                                         b->decaying_flow_count++;
2061                                 }
2062                                 flow->set = CAKE_SET_DECAYING;
2063                         } else {
2064                                 /* remove empty queue from the flowchain */
2065                                 list_del_init(&flow->flowchain);
2066                                 if (flow->set == CAKE_SET_SPARSE ||
2067                                     flow->set == CAKE_SET_SPARSE_WAIT)
2068                                         b->sparse_flow_count--;
2069                                 else if (flow->set == CAKE_SET_BULK) {
2070                                         b->bulk_flow_count--;
2071
2072                                         if (cake_dsrc(q->flow_mode))
2073                                                 srchost->srchost_bulk_flow_count--;
2074
2075                                         if (cake_ddst(q->flow_mode))
2076                                                 dsthost->dsthost_bulk_flow_count--;
2077
2078                                 } else
2079                                         b->decaying_flow_count--;
2080
2081                                 flow->set = CAKE_SET_NONE;
2082                         }
2083                         goto begin;
2084                 }
2085
2086                 /* Last packet in queue may be marked, shouldn't be dropped */
2087                 if (!cobalt_should_drop(&flow->cvars, &b->cparams, now, skb,
2088                                         (b->bulk_flow_count *
2089                                          !!(q->rate_flags &
2090                                             CAKE_FLAG_INGRESS))) ||
2091                     !flow->head)
2092                         break;
2093
2094                 /* drop this packet, get another one */
2095                 if (q->rate_flags & CAKE_FLAG_INGRESS) {
2096                         len = cake_advance_shaper(q, b, skb,
2097                                                   now, true);
2098                         flow->deficit -= len;
2099                         b->tin_deficit -= len;
2100                 }
2101                 flow->dropped++;
2102                 b->tin_dropped++;
2103                 qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
2104                 qdisc_qstats_drop(sch);
2105                 kfree_skb(skb);
2106                 if (q->rate_flags & CAKE_FLAG_INGRESS)
2107                         goto retry;
2108         }
2109
2110         b->tin_ecn_mark += !!flow->cvars.ecn_marked;
2111         qdisc_bstats_update(sch, skb);
2112
2113         /* collect delay stats */
2114         delay = ktime_to_ns(ktime_sub(now, cobalt_get_enqueue_time(skb)));
2115         b->avge_delay = cake_ewma(b->avge_delay, delay, 8);
2116         b->peak_delay = cake_ewma(b->peak_delay, delay,
2117                                   delay > b->peak_delay ? 2 : 8);
2118         b->base_delay = cake_ewma(b->base_delay, delay,
2119                                   delay < b->base_delay ? 2 : 8);
2120
2121         len = cake_advance_shaper(q, b, skb, now, false);
2122         flow->deficit -= len;
2123         b->tin_deficit -= len;
2124
2125         if (ktime_after(q->time_next_packet, now) && sch->q.qlen) {
2126                 u64 next = min(ktime_to_ns(q->time_next_packet),
2127                                ktime_to_ns(q->failsafe_next_packet));
2128
2129                 qdisc_watchdog_schedule_ns(&q->watchdog, next);
2130         } else if (!sch->q.qlen) {
2131                 int i;
2132
2133                 for (i = 0; i < q->tin_cnt; i++) {
2134                         if (q->tins[i].decaying_flow_count) {
2135                                 ktime_t next =
2136                                         ktime_add_ns(now,
2137                                                      q->tins[i].cparams.target);
2138
2139                                 qdisc_watchdog_schedule_ns(&q->watchdog,
2140                                                            ktime_to_ns(next));
2141                                 break;
2142                         }
2143                 }
2144         }
2145
2146         if (q->overflow_timeout)
2147                 q->overflow_timeout--;
2148
2149         return skb;
2150 }
2151
2152 static void cake_reset(struct Qdisc *sch)
2153 {
             struct cake_sched_data *q = qdisc_priv(sch);
2154         u32 c;
2155
             /* cake_init() can fail before q->tins has been allocated; a
              * reset arriving at that point must not walk the tins.
              */
             if (!q->tins)
                     return;

2156         for (c = 0; c < CAKE_MAX_TINS; c++)
2157                 cake_clear_tin(sch, c);
2158 }
2159
2160 static const struct nla_policy cake_policy[TCA_CAKE_MAX + 1] = {
2161         [TCA_CAKE_BASE_RATE64]   = { .type = NLA_U64 },
2162         [TCA_CAKE_DIFFSERV_MODE] = { .type = NLA_U32 },
2163         [TCA_CAKE_ATM]           = { .type = NLA_U32 },
2164         [TCA_CAKE_FLOW_MODE]     = { .type = NLA_U32 },
2165         [TCA_CAKE_OVERHEAD]      = { .type = NLA_S32 },
2166         [TCA_CAKE_RTT]           = { .type = NLA_U32 },
2167         [TCA_CAKE_TARGET]        = { .type = NLA_U32 },
2168         [TCA_CAKE_AUTORATE]      = { .type = NLA_U32 },
2169         [TCA_CAKE_MEMORY]        = { .type = NLA_U32 },
2170         [TCA_CAKE_NAT]           = { .type = NLA_U32 },
2171         [TCA_CAKE_RAW]           = { .type = NLA_U32 },
2172         [TCA_CAKE_WASH]          = { .type = NLA_U32 },
2173         [TCA_CAKE_MPU]           = { .type = NLA_U32 },
2174         [TCA_CAKE_INGRESS]       = { .type = NLA_U32 },
2175         [TCA_CAKE_ACK_FILTER]    = { .type = NLA_U32 },
             [TCA_CAKE_SPLIT_GSO]     = { .type = NLA_U32 },
2176         [TCA_CAKE_FWMARK]        = { .type = NLA_U32 },
2177 };
2178
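/* The shaper works in time-per-byte fixed point: the transmit time of len
 * bytes is (len * rate_ns) >> rate_shft, and cake_set_rate() scales rate_ns
 * down until it fits in 34 bits so the product cannot overflow 64 bits.
 * Worked example: at 125,000,000 bytes/s (a 1 Gbit/s line) this converges
 * to rate_ns = 1 << 33 with rate_shft = 30, i.e. exactly 8 ns per byte.
 */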
2179 static void cake_set_rate(struct cake_tin_data *b, u64 rate, u32 mtu,
2180                           u64 target_ns, u64 rtt_est_ns)
2181 {
2182         /* convert byte-rate into time-per-byte
2183          * so it will always unwedge in reasonable time.
2184          */
2185         static const u64 MIN_RATE = 64;
2186         u32 byte_target = mtu;
2187         u64 byte_target_ns;
2188         u8  rate_shft = 0;
2189         u64 rate_ns = 0;
2190
2191         b->flow_quantum = 1514;
2192         if (rate) {
2193                 b->flow_quantum = max(min(rate >> 12, 1514ULL), 300ULL);
2194                 rate_shft = 34;
2195                 rate_ns = ((u64)NSEC_PER_SEC) << rate_shft;
2196                 rate_ns = div64_u64(rate_ns, max(MIN_RATE, rate));
2197                 while (!!(rate_ns >> 34)) {
2198                         rate_ns >>= 1;
2199                         rate_shft--;
2200                 }
2201         } /* else unlimited, ie. zero delay */
2202
2203         b->tin_rate_bps  = rate;
2204         b->tin_rate_ns   = rate_ns;
2205         b->tin_rate_shft = rate_shft;
2206
2207         byte_target_ns = (byte_target * rate_ns) >> rate_shft;
2208
2209         b->cparams.target = max((byte_target_ns * 3) / 2, target_ns);
2210         b->cparams.interval = max(rtt_est_ns +
2211                                      b->cparams.target - target_ns,
2212                                      b->cparams.target * 2);
2213         b->cparams.mtu_time = byte_target_ns;
2214         b->cparams.p_inc = 1 << 24; /* 1/256 */
2215         b->cparams.p_dec = 1 << 20; /* 1/4096 */
2216 }
2217
2218 static int cake_config_besteffort(struct Qdisc *sch)
2219 {
2220         struct cake_sched_data *q = qdisc_priv(sch);
2221         struct cake_tin_data *b = &q->tins[0];
2222         u32 mtu = psched_mtu(qdisc_dev(sch));
2223         u64 rate = q->rate_bps;
2224
2225         q->tin_cnt = 1;
2226
2227         q->tin_index = besteffort;
2228         q->tin_order = normal_order;
2229
2230         cake_set_rate(b, rate, mtu,
2231                       us_to_ns(q->target), us_to_ns(q->interval));
2232         b->tin_quantum_band = 65535;
2233         b->tin_quantum_prio = 65535;
2234
2235         return 0;
2236 }
2237
2238 static int cake_config_precedence(struct Qdisc *sch)
2239 {
2240         /* convert high-level (user visible) parameters into internal format */
2241         struct cake_sched_data *q = qdisc_priv(sch);
2242         u32 mtu = psched_mtu(qdisc_dev(sch));
2243         u64 rate = q->rate_bps;
2244         u32 quantum1 = 256;
2245         u32 quantum2 = 256;
2246         u32 i;
2247
2248         q->tin_cnt = 8;
2249         q->tin_index = precedence;
2250         q->tin_order = normal_order;
2251
2252         for (i = 0; i < q->tin_cnt; i++) {
2253                 struct cake_tin_data *b = &q->tins[i];
2254
2255                 cake_set_rate(b, rate, mtu, us_to_ns(q->target),
2256                               us_to_ns(q->interval));
2257
2258                 b->tin_quantum_prio = max_t(u16, 1U, quantum1);
2259                 b->tin_quantum_band = max_t(u16, 1U, quantum2);
2260
2261                 /* calculate next class's parameters */
2262                 rate  *= 7;
2263                 rate >>= 3;
2264
2265                 quantum1  *= 3;
2266                 quantum1 >>= 1;
2267
2268                 quantum2  *= 7;
2269                 quantum2 >>= 3;
2270         }
2271
2272         return 0;
2273 }
2274
2275 /*      List of known Diffserv codepoints:
2276  *
2277  *      Least Effort (CS1)
2278  *      Best Effort (CS0)
2279  *      Max Reliability & LLT "Lo" (TOS1)
2280  *      Max Throughput (TOS2)
2281  *      Min Delay (TOS4)
2282  *      LLT "La" (TOS5)
2283  *      Assured Forwarding 1 (AF1x) - x3
2284  *      Assured Forwarding 2 (AF2x) - x3
2285  *      Assured Forwarding 3 (AF3x) - x3
2286  *      Assured Forwarding 4 (AF4x) - x3
2287  *      Precedence Class 2 (CS2)
2288  *      Precedence Class 3 (CS3)
2289  *      Precedence Class 4 (CS4)
2290  *      Precedence Class 5 (CS5)
2291  *      Precedence Class 6 (CS6)
2292  *      Precedence Class 7 (CS7)
2293  *      Voice Admit (VA)
2294  *      Expedited Forwarding (EF)
2295  *
2296  *      Total 25 codepoints.
2297  */
2298
2299 /*      List of traffic classes in RFC 4594:
2300  *              (roughly descending order of contended priority)
2301  *              (roughly ascending order of uncontended throughput)
2302  *
2303  *      Network Control (CS6,CS7)      - routing traffic
2304  *      Telephony (EF,VA)         - aka. VoIP streams
2305  *      Signalling (CS5)               - VoIP setup
2306  *      Multimedia Conferencing (AF4x) - aka. video calls
2307  *      Realtime Interactive (CS4)     - eg. games
2308  *      Multimedia Streaming (AF3x)    - eg. YouTube, NetFlix, Twitch
2309  *      Broadcast Video (CS3)
2310  *      Low Latency Data (AF2x,TOS4)      - eg. database
2311  *      Ops, Admin, Management (CS2,TOS1) - eg. ssh
2312  *      Standard Service (CS0 & unrecognised codepoints)
2313  *      High Throughput Data (AF1x,TOS2)  - eg. web traffic
2314  *      Low Priority Data (CS1)           - eg. BitTorrent
2315  *
2316  *      Total 12 traffic classes.
2317  */
2318
2319 static int cake_config_diffserv8(struct Qdisc *sch)
2320 {
2321 /*      Pruned list of traffic classes for typical applications:
2322  *
2323  *              Network Control          (CS6, CS7)
2324  *              Minimum Latency          (EF, VA, CS5, CS4)
2325  *              Interactive Shell        (CS2, TOS1)
2326  *              Low Latency Transactions (AF2x, TOS4)
2327  *              Video Streaming          (AF4x, AF3x, CS3)
2328  *              Bog Standard             (CS0 etc.)
2329  *              High Throughput          (AF1x, TOS2)
2330  *              Background Traffic       (CS1)
2331  *
2332  *              Total 8 traffic classes.
2333  */
2334
2335         struct cake_sched_data *q = qdisc_priv(sch);
2336         u32 mtu = psched_mtu(qdisc_dev(sch));
2337         u64 rate = q->rate_bps;
2338         u32 quantum1 = 256;
2339         u32 quantum2 = 256;
2340         u32 i;
2341
2342         q->tin_cnt = 8;
2343
2344         /* codepoint to class mapping */
2345         q->tin_index = diffserv8;
2346         q->tin_order = normal_order;
2347
2348         /* class characteristics */
2349         for (i = 0; i < q->tin_cnt; i++) {
2350                 struct cake_tin_data *b = &q->tins[i];
2351
2352                 cake_set_rate(b, rate, mtu, us_to_ns(q->target),
2353                               us_to_ns(q->interval));
2354
2355                 b->tin_quantum_prio = max_t(u16, 1U, quantum1);
2356                 b->tin_quantum_band = max_t(u16, 1U, quantum2);
2357
2358                 /* calculate next class's parameters */
2359                 rate  *= 7;
2360                 rate >>= 3;
2361
2362                 quantum1  *= 3;
2363                 quantum1 >>= 1;
2364
2365                 quantum2  *= 7;
2366                 quantum2 >>= 3;
2367         }
2368
2369         return 0;
2370 }
2371
2372 static int cake_config_diffserv4(struct Qdisc *sch)
2373 {
2374 /*  Further pruned list of traffic classes for four-class system:
2375  *
2376  *          Latency Sensitive  (CS7, CS6, EF, VA, CS5, CS4)
2377  *          Streaming Media    (AF4x, AF3x, CS3, AF2x, TOS4, CS2, TOS1)
2378  *          Best Effort        (CS0, AF1x, TOS2, and those not specified)
2379  *          Background Traffic (CS1)
2380  *
2381  *              Total 4 traffic classes.
2382  */
2383
2384         struct cake_sched_data *q = qdisc_priv(sch);
2385         u32 mtu = psched_mtu(qdisc_dev(sch));
2386         u64 rate = q->rate_bps;
2387         u32 quantum = 1024;
2388
2389         q->tin_cnt = 4;
2390
2391         /* codepoint to class mapping */
2392         q->tin_index = diffserv4;
2393         q->tin_order = bulk_order;
2394
2395         /* class characteristics */
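        /* Threshold rates as a fraction of the base rate: best effort
         * (tins[0]) 100%, background (tins[1]) 1/16, streaming (tins[2])
         * 1/2, latency-sensitive (tins[3]) 1/4.
         */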
2396         cake_set_rate(&q->tins[0], rate, mtu,
2397                       us_to_ns(q->target), us_to_ns(q->interval));
2398         cake_set_rate(&q->tins[1], rate >> 4, mtu,
2399                       us_to_ns(q->target), us_to_ns(q->interval));
2400         cake_set_rate(&q->tins[2], rate >> 1, mtu,
2401                       us_to_ns(q->target), us_to_ns(q->interval));
2402         cake_set_rate(&q->tins[3], rate >> 2, mtu,
2403                       us_to_ns(q->target), us_to_ns(q->interval));
2404
2405         /* priority weights */
2406         q->tins[0].tin_quantum_prio = quantum;
2407         q->tins[1].tin_quantum_prio = quantum >> 4;
2408         q->tins[2].tin_quantum_prio = quantum << 2;
2409         q->tins[3].tin_quantum_prio = quantum << 4;
2410
2411         /* bandwidth-sharing weights */
2412         q->tins[0].tin_quantum_band = quantum;
2413         q->tins[1].tin_quantum_band = quantum >> 4;
2414         q->tins[2].tin_quantum_band = quantum >> 1;
2415         q->tins[3].tin_quantum_band = quantum >> 2;
2416
2417         return 0;
2418 }
2419
2420 static int cake_config_diffserv3(struct Qdisc *sch)
2421 {
2422 /*  Simplified Diffserv structure with 3 tins.
2423  *              Low Priority            (CS1)
2424  *              Best Effort
2425  *              Latency Sensitive       (TOS4, VA, EF, CS6, CS7)
2426  */
2427         struct cake_sched_data *q = qdisc_priv(sch);
2428         u32 mtu = psched_mtu(qdisc_dev(sch));
2429         u64 rate = q->rate_bps;
2430         u32 quantum = 1024;
2431
2432         q->tin_cnt = 3;
2433
2434         /* codepoint to class mapping */
2435         q->tin_index = diffserv3;
2436         q->tin_order = bulk_order;
2437
2438         /* class characteristics */
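        /* Threshold rates: best effort (tins[0]) 100%, low priority
         * (tins[1]) 1/16, latency-sensitive (tins[2]) 1/4 of the base rate.
         */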
2439         cake_set_rate(&q->tins[0], rate, mtu,
2440                       us_to_ns(q->target), us_to_ns(q->interval));
2441         cake_set_rate(&q->tins[1], rate >> 4, mtu,
2442                       us_to_ns(q->target), us_to_ns(q->interval));
2443         cake_set_rate(&q->tins[2], rate >> 2, mtu,
2444                       us_to_ns(q->target), us_to_ns(q->interval));
2445
2446         /* priority weights */
2447         q->tins[0].tin_quantum_prio = quantum;
2448         q->tins[1].tin_quantum_prio = quantum >> 4;
2449         q->tins[2].tin_quantum_prio = quantum << 4;
2450
2451         /* bandwidth-sharing weights */
2452         q->tins[0].tin_quantum_band = quantum;
2453         q->tins[1].tin_quantum_band = quantum >> 4;
2454         q->tins[2].tin_quantum_band = quantum >> 2;
2455
2456         return 0;
2457 }
2458
2459 static void cake_reconfigure(struct Qdisc *sch)
2460 {
2461         struct cake_sched_data *q = qdisc_priv(sch);
2462         int c, ft;
2463
2464         switch (q->tin_mode) {
2465         case CAKE_DIFFSERV_BESTEFFORT:
2466                 ft = cake_config_besteffort(sch);
2467                 break;
2468
2469         case CAKE_DIFFSERV_PRECEDENCE:
2470                 ft = cake_config_precedence(sch);
2471                 break;
2472
2473         case CAKE_DIFFSERV_DIFFSERV8:
2474                 ft = cake_config_diffserv8(sch);
2475                 break;
2476
2477         case CAKE_DIFFSERV_DIFFSERV4:
2478                 ft = cake_config_diffserv4(sch);
2479                 break;
2480
2481         case CAKE_DIFFSERV_DIFFSERV3:
2482         default:
2483                 ft = cake_config_diffserv3(sch);
2484                 break;
2485         }
2486
2487         for (c = q->tin_cnt; c < CAKE_MAX_TINS; c++) {
2488                 cake_clear_tin(sch, c);
2489                 q->tins[c].cparams.mtu_time = q->tins[ft].cparams.mtu_time;
2490         }
2491
2492         q->rate_ns   = q->tins[ft].tin_rate_ns;
2493         q->rate_shft = q->tins[ft].tin_rate_shft;
2494
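        /* If no explicit limit is configured, size the buffer to hold four
         * intervals' worth of traffic at the shaped rate, with a 4 MiB
         * floor; unlimited mode gets an effectively infinite cap.
         */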
2495         if (q->buffer_config_limit) {
2496                 q->buffer_limit = q->buffer_config_limit;
2497         } else if (q->rate_bps) {
2498                 u64 t = q->rate_bps * q->interval;
2499
2500                 do_div(t, USEC_PER_SEC / 4);
2501                 q->buffer_limit = max_t(u32, t, 4U << 20);
2502         } else {
2503                 q->buffer_limit = ~0;
2504         }
2505
2506         sch->flags &= ~TCQ_F_CAN_BYPASS;
2507
2508         q->buffer_limit = min(q->buffer_limit,
2509                               max(sch->limit * psched_mtu(qdisc_dev(sch)),
2510                                   q->buffer_config_limit));
2511 }
2512
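/* Apply netlink configuration.  On a live qdisc the new parameters take
 * effect under the qdisc tree lock via cake_reconfigure().
 */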
2513 static int cake_change(struct Qdisc *sch, struct nlattr *opt,
2514                        struct netlink_ext_ack *extack)
2515 {
2516         struct cake_sched_data *q = qdisc_priv(sch);
2517         struct nlattr *tb[TCA_CAKE_MAX + 1];
2518         int err;
2519
2520         if (!opt)
2521                 return -EINVAL;
2522
2523         err = nla_parse_nested(tb, TCA_CAKE_MAX, opt, cake_policy, extack);
2524         if (err < 0)
2525                 return err;
2526
2527         if (tb[TCA_CAKE_NAT]) {
2528 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
2529                 q->flow_mode &= ~CAKE_FLOW_NAT_FLAG;
2530                 q->flow_mode |= CAKE_FLOW_NAT_FLAG *
2531                         !!nla_get_u32(tb[TCA_CAKE_NAT]);
2532 #else
2533                 NL_SET_ERR_MSG_ATTR(extack, tb[TCA_CAKE_NAT],
2534                                     "No conntrack support in kernel");
2535                 return -EOPNOTSUPP;
2536 #endif
2537         }
2538
2539         if (tb[TCA_CAKE_BASE_RATE64])
2540                 q->rate_bps = nla_get_u64(tb[TCA_CAKE_BASE_RATE64]);
2541
2542         if (tb[TCA_CAKE_DIFFSERV_MODE])
2543                 q->tin_mode = nla_get_u32(tb[TCA_CAKE_DIFFSERV_MODE]);
2544
2545         if (tb[TCA_CAKE_WASH]) {
2546                 if (!!nla_get_u32(tb[TCA_CAKE_WASH]))
2547                         q->rate_flags |= CAKE_FLAG_WASH;
2548                 else
2549                         q->rate_flags &= ~CAKE_FLAG_WASH;
2550         }
2551
2552         if (tb[TCA_CAKE_FLOW_MODE])
2553                 q->flow_mode = ((q->flow_mode & CAKE_FLOW_NAT_FLAG) |
2554                                 (nla_get_u32(tb[TCA_CAKE_FLOW_MODE]) &
2555                                         CAKE_FLOW_MASK));
2556
2557         if (tb[TCA_CAKE_ATM])
2558                 q->atm_mode = nla_get_u32(tb[TCA_CAKE_ATM]);
2559
2560         if (tb[TCA_CAKE_OVERHEAD]) {
2561                 q->rate_overhead = nla_get_s32(tb[TCA_CAKE_OVERHEAD]);
2562                 q->rate_flags |= CAKE_FLAG_OVERHEAD;
2563
2564                 q->max_netlen = 0;
2565                 q->max_adjlen = 0;
2566                 q->min_netlen = ~0;
2567                 q->min_adjlen = ~0;
2568         }
2569
2570         if (tb[TCA_CAKE_RAW]) {
2571                 q->rate_flags &= ~CAKE_FLAG_OVERHEAD;
2572
2573                 q->max_netlen = 0;
2574                 q->max_adjlen = 0;
2575                 q->min_netlen = ~0;
2576                 q->min_adjlen = ~0;
2577         }
2578
2579         if (tb[TCA_CAKE_MPU])
2580                 q->rate_mpu = nla_get_u32(tb[TCA_CAKE_MPU]);
2581
2582         if (tb[TCA_CAKE_RTT]) {
2583                 q->interval = nla_get_u32(tb[TCA_CAKE_RTT]);
2584
2585                 if (!q->interval)
2586                         q->interval = 1;
2587         }
2588
2589         if (tb[TCA_CAKE_TARGET]) {
2590                 q->target = nla_get_u32(tb[TCA_CAKE_TARGET]);
2591
2592                 if (!q->target)
2593                         q->target = 1;
2594         }
2595
2596         if (tb[TCA_CAKE_AUTORATE]) {
2597                 if (!!nla_get_u32(tb[TCA_CAKE_AUTORATE]))
2598                         q->rate_flags |= CAKE_FLAG_AUTORATE_INGRESS;
2599                 else
2600                         q->rate_flags &= ~CAKE_FLAG_AUTORATE_INGRESS;
2601         }
2602
2603         if (tb[TCA_CAKE_INGRESS]) {
2604                 if (!!nla_get_u32(tb[TCA_CAKE_INGRESS]))
2605                         q->rate_flags |= CAKE_FLAG_INGRESS;
2606                 else
2607                         q->rate_flags &= ~CAKE_FLAG_INGRESS;
2608         }
2609
2610         if (tb[TCA_CAKE_ACK_FILTER])
2611                 q->ack_filter = nla_get_u32(tb[TCA_CAKE_ACK_FILTER]);
2612
2613         if (tb[TCA_CAKE_MEMORY])
2614                 q->buffer_config_limit = nla_get_u32(tb[TCA_CAKE_MEMORY]);
2615
2616         if (tb[TCA_CAKE_SPLIT_GSO]) {
2617                 if (!!nla_get_u32(tb[TCA_CAKE_SPLIT_GSO]))
2618                         q->rate_flags |= CAKE_FLAG_SPLIT_GSO;
2619                 else
2620                         q->rate_flags &= ~CAKE_FLAG_SPLIT_GSO;
2621         }
2622
2623         if (tb[TCA_CAKE_FWMARK]) {
2624                 q->fwmark_mask = nla_get_u32(tb[TCA_CAKE_FWMARK]);
2625                 q->fwmark_shft = q->fwmark_mask ? __ffs(q->fwmark_mask) : 0;
2626         }
2627
2628         if (q->tins) {
2629                 sch_tree_lock(sch);
2630                 cake_reconfigure(sch);
2631                 sch_tree_unlock(sch);
2632         }
2633
2634         return 0;
2635 }
2636
2637 static void cake_destroy(struct Qdisc *sch)
2638 {
2639         struct cake_sched_data *q = qdisc_priv(sch);
2640
2641         qdisc_watchdog_cancel(&q->watchdog);
2642         tcf_block_put(q->block);
2643         kvfree(q->tins);
2644 }
2645
2646 static int cake_init(struct Qdisc *sch, struct nlattr *opt,
2647                      struct netlink_ext_ack *extack)
2648 {
2649         struct cake_sched_data *q = qdisc_priv(sch);
2650         int i, j, err;
2651
2652         sch->limit = 10240;
2653         q->tin_mode = CAKE_DIFFSERV_DIFFSERV3;
2654         q->flow_mode  = CAKE_FLOW_TRIPLE;
2655
2656         q->rate_bps = 0; /* unlimited by default */
2657
2658         q->interval = 100000; /* 100ms default */
2659         q->target   =   5000; /* 5ms: codel RFC argues
2660                                * for 5 to 10% of interval
2661                                */
2662         q->rate_flags |= CAKE_FLAG_SPLIT_GSO;
2663         q->cur_tin = 0;
2664         q->cur_flow  = 0;
2665
2666         qdisc_watchdog_init(&q->watchdog, sch);
2667
2668         if (opt) {
2669                 err = cake_change(sch, opt, extack);
2670
2671                 if (err)
2672                         return err;
2673         }
2674
2675         err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
2676         if (err)
2677                 return err;
2678
2679         quantum_div[0] = ~0;
2680         for (i = 1; i <= CAKE_QUEUES; i++)
2681                 quantum_div[i] = 65535 / i;
2682
2683         q->tins = kvcalloc(CAKE_MAX_TINS, sizeof(struct cake_tin_data),
2684                            GFP_KERNEL);
2685         if (!q->tins)
2686                 goto nomem;
2687
2688         for (i = 0; i < CAKE_MAX_TINS; i++) {
2689                 struct cake_tin_data *b = q->tins + i;
2690
2691                 INIT_LIST_HEAD(&b->new_flows);
2692                 INIT_LIST_HEAD(&b->old_flows);
2693                 INIT_LIST_HEAD(&b->decaying_flows);
2694                 b->sparse_flow_count = 0;
2695                 b->bulk_flow_count = 0;
2696                 b->decaying_flow_count = 0;
2697
2698                 for (j = 0; j < CAKE_QUEUES; j++) {
2699                         struct cake_flow *flow = b->flows + j;
2700                         u32 k = j * CAKE_MAX_TINS + i;
2701
2702                         INIT_LIST_HEAD(&flow->flowchain);
2703                         cobalt_vars_init(&flow->cvars);
2704
2705                         q->overflow_heap[k].t = i;
2706                         q->overflow_heap[k].b = j;
2707                         b->overflow_idx[j] = k;
2708                 }
2709         }
2710
2711         cake_reconfigure(sch);
2712         q->avg_peak_bandwidth = q->rate_bps;
2713         q->min_netlen = ~0;
2714         q->min_adjlen = ~0;
2715         return 0;
2716
2717 nomem:
2718         cake_destroy(sch);
2719         return -ENOMEM;
2720 }
2721
2722 static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
2723 {
2724         struct cake_sched_data *q = qdisc_priv(sch);
2725         struct nlattr *opts;
2726
2727         opts = nla_nest_start(skb, TCA_OPTIONS);
2728         if (!opts)
2729                 goto nla_put_failure;
2730
2731         if (nla_put_u64_64bit(skb, TCA_CAKE_BASE_RATE64, q->rate_bps,
2732                               TCA_CAKE_PAD))
2733                 goto nla_put_failure;
2734
2735         if (nla_put_u32(skb, TCA_CAKE_FLOW_MODE,
2736                         q->flow_mode & CAKE_FLOW_MASK))
2737                 goto nla_put_failure;
2738
2739         if (nla_put_u32(skb, TCA_CAKE_RTT, q->interval))
2740                 goto nla_put_failure;
2741
2742         if (nla_put_u32(skb, TCA_CAKE_TARGET, q->target))
2743                 goto nla_put_failure;
2744
2745         if (nla_put_u32(skb, TCA_CAKE_MEMORY, q->buffer_config_limit))
2746                 goto nla_put_failure;
2747
2748         if (nla_put_u32(skb, TCA_CAKE_AUTORATE,
2749                         !!(q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS)))
2750                 goto nla_put_failure;
2751
2752         if (nla_put_u32(skb, TCA_CAKE_INGRESS,
2753                         !!(q->rate_flags & CAKE_FLAG_INGRESS)))
2754                 goto nla_put_failure;
2755
2756         if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, q->ack_filter))
2757                 goto nla_put_failure;
2758
2759         if (nla_put_u32(skb, TCA_CAKE_NAT,
2760                         !!(q->flow_mode & CAKE_FLOW_NAT_FLAG)))
2761                 goto nla_put_failure;
2762
2763         if (nla_put_u32(skb, TCA_CAKE_DIFFSERV_MODE, q->tin_mode))
2764                 goto nla_put_failure;
2765
2766         if (nla_put_u32(skb, TCA_CAKE_WASH,
2767                         !!(q->rate_flags & CAKE_FLAG_WASH)))
2768                 goto nla_put_failure;
2769
2770         if (nla_put_u32(skb, TCA_CAKE_OVERHEAD, q->rate_overhead))
2771                 goto nla_put_failure;
2772
2773         if (!(q->rate_flags & CAKE_FLAG_OVERHEAD))
2774                 if (nla_put_u32(skb, TCA_CAKE_RAW, 0))
2775                         goto nla_put_failure;
2776
2777         if (nla_put_u32(skb, TCA_CAKE_ATM, q->atm_mode))
2778                 goto nla_put_failure;
2779
2780         if (nla_put_u32(skb, TCA_CAKE_MPU, q->rate_mpu))
2781                 goto nla_put_failure;
2782
2783         if (nla_put_u32(skb, TCA_CAKE_SPLIT_GSO,
2784                         !!(q->rate_flags & CAKE_FLAG_SPLIT_GSO)))
2785                 goto nla_put_failure;
2786
2787         if (nla_put_u32(skb, TCA_CAKE_FWMARK, q->fwmark_mask))
2788                 goto nla_put_failure;
2789
2790         return nla_nest_end(skb, opts);
2791
2792 nla_put_failure:
2793         return -1;
2794 }

static int cake_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct nlattr *stats = nla_nest_start(d->skb, TCA_STATS_APP);
	struct cake_sched_data *q = qdisc_priv(sch);
	struct nlattr *tstats, *ts;
	int i;

	if (!stats)
		return -1;

/* Local helpers: emit one global stats attribute, bailing out to the
 * cleanup path if the skb runs out of room.
 */
#define PUT_STAT_U32(attr, data) do {				       \
		if (nla_put_u32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \
			goto nla_put_failure;			       \
	} while (0)
#define PUT_STAT_U64(attr, data) do {				       \
		if (nla_put_u64_64bit(d->skb, TCA_CAKE_STATS_ ## attr, \
					data, TCA_CAKE_STATS_PAD)) \
			goto nla_put_failure;			       \
	} while (0)

	PUT_STAT_U64(CAPACITY_ESTIMATE64, q->avg_peak_bandwidth);
	PUT_STAT_U32(MEMORY_LIMIT, q->buffer_limit);
	PUT_STAT_U32(MEMORY_USED, q->buffer_max_used);
	/* avg_netoff is kept in 16-bit fixed point; +0x8000 rounds to
	 * the nearest integer byte count.
	 */
	PUT_STAT_U32(AVG_NETOFF, ((q->avg_netoff + 0x8000) >> 16));
	PUT_STAT_U32(MAX_NETLEN, q->max_netlen);
	PUT_STAT_U32(MAX_ADJLEN, q->max_adjlen);
	PUT_STAT_U32(MIN_NETLEN, q->min_netlen);
	PUT_STAT_U32(MIN_ADJLEN, q->min_adjlen);

#undef PUT_STAT_U32
#undef PUT_STAT_U64

	tstats = nla_nest_start(d->skb, TCA_CAKE_STATS_TIN_STATS);
	if (!tstats)
		goto nla_put_failure;

#define PUT_TSTAT_U32(attr, data) do {					\
		if (nla_put_u32(d->skb, TCA_CAKE_TIN_STATS_ ## attr, data)) \
			goto nla_put_failure;				\
	} while (0)
#define PUT_TSTAT_U64(attr, data) do {					\
		if (nla_put_u64_64bit(d->skb, TCA_CAKE_TIN_STATS_ ## attr, \
					data, TCA_CAKE_TIN_STATS_PAD))	\
			goto nla_put_failure;				\
	} while (0)

	/* one nested block of counters per tin, indexed via tin_order */
	for (i = 0; i < q->tin_cnt; i++) {
		struct cake_tin_data *b = &q->tins[q->tin_order[i]];

		ts = nla_nest_start(d->skb, i + 1);
		if (!ts)
			goto nla_put_failure;

		PUT_TSTAT_U64(THRESHOLD_RATE64, b->tin_rate_bps);
		PUT_TSTAT_U64(SENT_BYTES64, b->bytes);
		PUT_TSTAT_U32(BACKLOG_BYTES, b->tin_backlog);

		PUT_TSTAT_U32(TARGET_US,
			      ktime_to_us(ns_to_ktime(b->cparams.target)));
		PUT_TSTAT_U32(INTERVAL_US,
			      ktime_to_us(ns_to_ktime(b->cparams.interval)));

		PUT_TSTAT_U32(SENT_PACKETS, b->packets);
		PUT_TSTAT_U32(DROPPED_PACKETS, b->tin_dropped);
		PUT_TSTAT_U32(ECN_MARKED_PACKETS, b->tin_ecn_mark);
		PUT_TSTAT_U32(ACKS_DROPPED_PACKETS, b->ack_drops);

		PUT_TSTAT_U32(PEAK_DELAY_US,
			      ktime_to_us(ns_to_ktime(b->peak_delay)));
		PUT_TSTAT_U32(AVG_DELAY_US,
			      ktime_to_us(ns_to_ktime(b->avge_delay)));
		PUT_TSTAT_U32(BASE_DELAY_US,
			      ktime_to_us(ns_to_ktime(b->base_delay)));

		PUT_TSTAT_U32(WAY_INDIRECT_HITS, b->way_hits);
		PUT_TSTAT_U32(WAY_MISSES, b->way_misses);
		PUT_TSTAT_U32(WAY_COLLISIONS, b->way_collisions);

		PUT_TSTAT_U32(SPARSE_FLOWS, b->sparse_flow_count +
					    b->decaying_flow_count);
		PUT_TSTAT_U32(BULK_FLOWS, b->bulk_flow_count);
		PUT_TSTAT_U32(UNRESPONSIVE_FLOWS, b->unresponsive_flow_count);
		PUT_TSTAT_U32(MAX_SKBLEN, b->max_skblen);

		PUT_TSTAT_U32(FLOW_QUANTUM, b->flow_quantum);
		nla_nest_end(d->skb, ts);
	}

#undef PUT_TSTAT_U32
#undef PUT_TSTAT_U64

	nla_nest_end(d->skb, tstats);
	return nla_nest_end(d->skb, stats);

nla_put_failure:
	nla_nest_cancel(d->skb, stats);
	return -1;
}
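
/* The function above follows the usual netlink nesting idiom: start a nest,
 * emit attributes, and on any failure cancel the whole nest so no partial
 * block reaches userspace.  A minimal generic sketch of that pattern, using
 * placeholder attribute types (1 and 2) rather than anything defined by
 * this qdisc:
 *
 *	static int example_dump_nested(struct sk_buff *skb, u32 value)
 *	{
 *		struct nlattr *nest = nla_nest_start(skb, 1);
 *
 *		if (!nest)
 *			return -1;
 *		if (nla_put_u32(skb, 2, value))
 *			goto cancel;
 *		return nla_nest_end(skb, nest);
 *
 *	cancel:
 *		nla_nest_cancel(skb, nest);
 *		return -1;
 *	}
 */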

/* CAKE's "classes" are synthetic: one per (tin, flow queue) pair, exposed
 * for dump purposes only.  They cannot be configured, grafted or bound to,
 * so most of the class operations below are stubs.
 */
static struct Qdisc *cake_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long cake_find(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long cake_bind(struct Qdisc *sch, unsigned long parent,
			       u32 classid)
{
	return 0;
}

static void cake_unbind(struct Qdisc *q, unsigned long cl)
{
}

/* Expose the filter block so tc filters can be attached to the qdisc
 * itself (cl == 0); per-class filters are not supported.
 */
static struct tcf_block *cake_tcf_block(struct Qdisc *sch, unsigned long cl,
					struct netlink_ext_ack *extack)
{
	struct cake_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return q->block;
}

static int cake_dump_class(struct Qdisc *sch, unsigned long cl,
			   struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}
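
/* Class handles encode a (tin, flow-queue) pair, one-based:
 * cl = tin * CAKE_QUEUES + queue + 1, where "tin" is the position in
 * q->tin_order and CAKE_QUEUES is 1024.  For example, tin 1, queue 5
 * maps to cl = 1 * 1024 + 5 + 1 = 1030.  A hypothetical decode helper,
 * mirroring the arithmetic in cake_dump_class_stats() below:
 *
 *	static void example_decode_classid(u32 cl, u32 *tin, u32 *queue)
 *	{
 *		u32 idx = cl - 1;
 *
 *		*tin = idx / CAKE_QUEUES;
 *		*queue = idx % CAKE_QUEUES;
 *	}
 */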

static int cake_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				 struct gnet_dump *d)
{
	struct cake_sched_data *q = qdisc_priv(sch);
	const struct cake_flow *flow = NULL;
	struct gnet_stats_queue qs = { 0 };
	struct nlattr *stats;
	u32 idx = cl - 1;

	if (idx < CAKE_QUEUES * q->tin_cnt) {
		const struct cake_tin_data *b =
			&q->tins[q->tin_order[idx / CAKE_QUEUES]];
		const struct sk_buff *skb;

		flow = &b->flows[idx % CAKE_QUEUES];

		if (flow->head) {
			/* Walk the queue under the tree lock so the
			 * length snapshot is consistent.
			 */
			sch_tree_lock(sch);
			skb = flow->head;
			while (skb) {
				qs.qlen++;
				skb = skb->next;
			}
			sch_tree_unlock(sch);
		}
		qs.backlog = b->backlogs[idx % CAKE_QUEUES];
		qs.drops = flow->dropped;
	}
	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
		return -1;
	if (flow) {
		ktime_t now = ktime_get();

		stats = nla_nest_start(d->skb, TCA_STATS_APP);
		if (!stats)
			return -1;

#define PUT_STAT_U32(attr, data) do {				       \
		if (nla_put_u32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \
			goto nla_put_failure;			       \
	} while (0)
#define PUT_STAT_S32(attr, data) do {				       \
		if (nla_put_s32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \
			goto nla_put_failure;			       \
	} while (0)

		PUT_STAT_S32(DEFICIT, flow->deficit);
		PUT_STAT_U32(DROPPING, flow->cvars.dropping);
		PUT_STAT_U32(COBALT_COUNT, flow->cvars.count);
		PUT_STAT_U32(P_DROP, flow->cvars.p_drop);
		if (flow->cvars.p_drop) {
			PUT_STAT_S32(BLUE_TIMER_US,
				     ktime_to_us(
					     ktime_sub(now,
						     flow->cvars.blue_timer)));
		}
		if (flow->cvars.dropping) {
			PUT_STAT_S32(DROP_NEXT_US,
				     ktime_to_us(
					     ktime_sub(now,
						       flow->cvars.drop_next)));
		}

		if (nla_nest_end(d->skb, stats) < 0)
			return -1;
	}

	return 0;

nla_put_failure:
	nla_nest_cancel(d->skb, stats);
	return -1;
}
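
/* These per-flow counters surface through the classful dump interface.
 * Assuming a cake instance sits at the root of eth0 (the device name is
 * only an example), the standard iproute2 invocation below walks every
 * active flow via cake_walk() and dumps the statistics emitted above:
 *
 *	$ tc -s class show dev eth0
 */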

static void cake_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct cake_sched_data *q = qdisc_priv(sch);
	unsigned int i, j;

	if (arg->stop)
		return;

	for (i = 0; i < q->tin_cnt; i++) {
		struct cake_tin_data *b = &q->tins[q->tin_order[i]];

		for (j = 0; j < CAKE_QUEUES; j++) {
			if (list_empty(&b->flows[j].flowchain) ||
			    arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, i * CAKE_QUEUES + j + 1, arg) < 0) {
				arg->stop = 1;
				break;
			}
			arg->count++;
		}
	}
}
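
/* cake_walk() above follows the standard qdisc_walker protocol: entries are
 * counted until arg->count reaches arg->skip, arg->fn is then invoked for
 * every active (non-empty) flow, and the walk stops once the callback
 * returns a negative value.  A minimal hypothetical callback, purely to
 * illustrate the contract:
 *
 *	static int example_walker_fn(struct Qdisc *sch, unsigned long cl,
 *				     struct qdisc_walker *w)
 *	{
 *		pr_info("cake: active flow class %lu\n", cl);
 *		return 0;	// non-negative: continue the walk
 *	}
 */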

static const struct Qdisc_class_ops cake_class_ops = {
	.leaf		=	cake_leaf,
	.find		=	cake_find,
	.tcf_block	=	cake_tcf_block,
	.bind_tcf	=	cake_bind,
	.unbind_tcf	=	cake_unbind,
	.dump		=	cake_dump_class,
	.dump_stats	=	cake_dump_class_stats,
	.walk		=	cake_walk,
};

static struct Qdisc_ops cake_qdisc_ops __read_mostly = {
	.cl_ops		=	&cake_class_ops,
	.id		=	"cake",
	.priv_size	=	sizeof(struct cake_sched_data),
	.enqueue	=	cake_enqueue,
	.dequeue	=	cake_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	cake_init,
	.reset		=	cake_reset,
	.destroy	=	cake_destroy,
	.change		=	cake_change,
	.dump		=	cake_dump,
	.dump_stats	=	cake_dump_stats,
	.owner		=	THIS_MODULE,
};
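
/* Registering the ops table above (done in cake_module_init() below) ties
 * the scheduler to its .id string, so it can be selected by name from
 * userspace.  Once the module is loaded, a typical invocation (interface
 * and rate are examples, not defaults) is:
 *
 *	# tc qdisc add dev eth0 root cake bandwidth 100Mbit
 */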

static int __init cake_module_init(void)
{
	return register_qdisc(&cake_qdisc_ops);
}

static void __exit cake_module_exit(void)
{
	unregister_qdisc(&cake_qdisc_ops);
}

module_init(cake_module_init)
module_exit(cake_module_exit)
MODULE_AUTHOR("Jonathan Morton");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("The CAKE shaper.");