/*
 * Copyright (c) 2003 Patrick McHardy, <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * 2003-10-17 - Ported from altq
 */
/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * When a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve.  The link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */
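/*
 * Example userspace configuration (an illustrative sketch only; the
 * device name, handles and rates below are made-up placeholders for
 * the iproute2 tc syntax):
 *
 *   tc qdisc add dev eth0 root handle 1: hfsc default 10
 *   tc class add dev eth0 parent 1: classid 1:10 hfsc \
 *           rt m1 200kbit d 10ms m2 100kbit ls m2 200kbit
 *
 * "rt" installs a real-time service curve (HFSC_RSC), "ls" a
 * link-sharing curve (HFSC_FSC) and "ul" an upperlimit curve
 * (HFSC_USC); each curve has slope m1 for the first d time units,
 * then slope m2.
 */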

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <asm/system.h>
#include <asm/div64.h>

/*
 * kernel internal service curve representation:
 *   coordinates are given by 64 bit unsigned integers.
 *   x-axis: unit is clock count.
 *   y-axis: unit is byte.
 *
 *   The service curve parameters are converted to the internal
 *   representation. The slope values are scaled to avoid overflow.
 *   The inverse slope values, as well as the y-projection of the 1st
 *   segment, are kept in order to avoid 64-bit divide operations
 *   that are expensive on 32-bit architectures.
 */

struct internal_sc
{
        u64     sm1;    /* scaled slope of the 1st segment */
        u64     ism1;   /* scaled inverse-slope of the 1st segment */
        u64     dx;     /* the x-projection of the 1st segment */
        u64     dy;     /* the y-projection of the 1st segment */
        u64     sm2;    /* scaled slope of the 2nd segment */
        u64     ism2;   /* scaled inverse-slope of the 2nd segment */
};

/* runtime service curve */
struct runtime_sc
{
        u64     x;      /* current starting position on x-axis */
        u64     y;      /* current starting position on y-axis */
        u64     sm1;    /* scaled slope of the 1st segment */
        u64     ism1;   /* scaled inverse-slope of the 1st segment */
        u64     dx;     /* the x-projection of the 1st segment */
        u64     dy;     /* the y-projection of the 1st segment */
        u64     sm2;    /* scaled slope of the 2nd segment */
        u64     ism2;   /* scaled inverse-slope of the 2nd segment */
};

enum hfsc_class_flags
{
        HFSC_RSC = 0x1,
        HFSC_FSC = 0x2,
        HFSC_USC = 0x4
};

struct hfsc_class
{
        u32             classid;        /* class id */
        unsigned int    refcnt;         /* usage count */

        struct gnet_stats_basic bstats;
        struct gnet_stats_queue qstats;
        struct gnet_stats_rate_est rate_est;
        spinlock_t      *stats_lock;
        unsigned int    level;          /* class level in hierarchy */
        struct tcf_proto *filter_list;  /* filter list */
        unsigned int    filter_cnt;     /* filter count */

        struct hfsc_sched *sched;       /* scheduler data */
        struct hfsc_class *cl_parent;   /* parent class */
        struct list_head siblings;      /* sibling classes */
        struct list_head children;      /* child classes */
        struct Qdisc    *qdisc;         /* leaf qdisc */

        struct rb_node el_node;         /* qdisc's eligible tree member */
        struct rb_root vt_tree;         /* active children sorted by cl_vt */
        struct rb_node vt_node;         /* parent's vt_tree member */
        struct rb_root cf_tree;         /* active children sorted by cl_f */
        struct rb_node cf_node;         /* parent's cf_heap member */
        struct list_head hlist;         /* hash list member */
        struct list_head dlist;         /* drop list member */

        u64     cl_total;               /* total work in bytes */
        u64     cl_cumul;               /* cumulative work in bytes done by
                                           real-time criteria */

        u64     cl_d;                   /* deadline */
        u64     cl_e;                   /* eligible time */
        u64     cl_vt;                  /* virtual time */
        u64     cl_f;                   /* time when this class will fit for
                                           link-sharing, max(myf, cfmin) */
        u64     cl_myf;                 /* my fit-time (calculated from this
                                           class's own upperlimit curve) */
        u64     cl_myfadj;              /* my fit-time adjustment (to cancel
                                           history dependence) */
        u64     cl_cfmin;               /* earliest children's fit-time (used
                                           with cl_myf to obtain cl_f) */
        u64     cl_cvtmin;              /* minimal virtual time among the
                                           children fit for link-sharing
                                           (monotonic within a period) */
        u64     cl_vtadj;               /* intra-period cumulative vt
                                           adjustment */
        u64     cl_vtoff;               /* inter-period cumulative vt offset */
        u64     cl_cvtmax;              /* max child's vt in the last period */
        u64     cl_cvtoff;              /* cumulative cvtmax of all periods */
        u64     cl_pcvtoff;             /* parent's cvtoff at initialization
                                           time */

        struct internal_sc cl_rsc;      /* internal real-time service curve */
        struct internal_sc cl_fsc;      /* internal fair service curve */
        struct internal_sc cl_usc;      /* internal upperlimit service curve */
        struct runtime_sc cl_deadline;  /* deadline curve */
        struct runtime_sc cl_eligible;  /* eligible curve */
        struct runtime_sc cl_virtual;   /* virtual curve */
        struct runtime_sc cl_ulimit;    /* upperlimit curve */

        unsigned long   cl_flags;       /* which curves are valid */
        unsigned long   cl_vtperiod;    /* vt period sequence number */
        unsigned long   cl_parentperiod;/* parent's vt period sequence number */
        unsigned long   cl_nactive;     /* number of active children */
};

#define HFSC_HSIZE      16

struct hfsc_sched
{
        u16     defcls;                         /* default class id */
        struct hfsc_class root;                 /* root class */
        struct list_head clhash[HFSC_HSIZE];    /* class hash */
        struct rb_root eligible;                /* eligible tree */
        struct list_head droplist;              /* active leaf class list (for
                                                   dropping) */
        struct sk_buff_head requeue;            /* requeued packet */
        struct timer_list wd_timer;             /* watchdog timer */
};

/*
 * macros
 */
#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
#include <linux/time.h>
#undef PSCHED_GET_TIME
#define PSCHED_GET_TIME(stamp)                                          \
do {                                                                    \
        struct timeval tv;                                              \
        do_gettimeofday(&tv);                                           \
        (stamp) = 1ULL * USEC_PER_SEC * tv.tv_sec + tv.tv_usec;         \
} while (0)
#endif

#define HT_INFINITY     0xffffffffffffffffULL   /* infinite time value */


/*
 * the eligible tree holds backlogged classes sorted by their eligible times.
 * there is one eligible tree per hfsc instance.
 */

static void
eltree_insert(struct hfsc_class *cl)
{
        struct rb_node **p = &cl->sched->eligible.rb_node;
        struct rb_node *parent = NULL;
        struct hfsc_class *cl1;

        while (*p != NULL) {
                parent = *p;
                cl1 = rb_entry(parent, struct hfsc_class, el_node);
                if (cl->cl_e >= cl1->cl_e)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&cl->el_node, parent, p);
        rb_insert_color(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_remove(struct hfsc_class *cl)
{
        rb_erase(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_update(struct hfsc_class *cl)
{
        eltree_remove(cl);
        eltree_insert(cl);
}

/* find the class with the minimum deadline among the eligible classes */
static inline struct hfsc_class *
eltree_get_mindl(struct hfsc_sched *q, u64 cur_time)
{
        struct hfsc_class *p, *cl = NULL;
        struct rb_node *n;

        for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) {
                p = rb_entry(n, struct hfsc_class, el_node);
                if (p->cl_e > cur_time)
                        break;
                if (cl == NULL || p->cl_d < cl->cl_d)
                        cl = p;
        }
        return cl;
}

/* find the class with minimum eligible time among the eligible classes */
static inline struct hfsc_class *
eltree_get_minel(struct hfsc_sched *q)
{
        struct rb_node *n;

        n = rb_first(&q->eligible);
        if (n == NULL)
                return NULL;
        return rb_entry(n, struct hfsc_class, el_node);
}

/*
 * the vttree holds backlogged child classes sorted by their virtual time.
 * each intermediate class has one vttree.
 */
static void
vttree_insert(struct hfsc_class *cl)
{
        struct rb_node **p = &cl->cl_parent->vt_tree.rb_node;
        struct rb_node *parent = NULL;
        struct hfsc_class *cl1;

        while (*p != NULL) {
                parent = *p;
                cl1 = rb_entry(parent, struct hfsc_class, vt_node);
                if (cl->cl_vt >= cl1->cl_vt)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&cl->vt_node, parent, p);
        rb_insert_color(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_remove(struct hfsc_class *cl)
{
        rb_erase(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_update(struct hfsc_class *cl)
{
        vttree_remove(cl);
        vttree_insert(cl);
}

static inline struct hfsc_class *
vttree_firstfit(struct hfsc_class *cl, u64 cur_time)
{
        struct hfsc_class *p;
        struct rb_node *n;

        for (n = rb_first(&cl->vt_tree); n != NULL; n = rb_next(n)) {
                p = rb_entry(n, struct hfsc_class, vt_node);
                if (p->cl_f <= cur_time)
                        return p;
        }
        return NULL;
}

/*
 * get the leaf class with the minimum vt in the hierarchy
 */
static struct hfsc_class *
vttree_get_minvt(struct hfsc_class *cl, u64 cur_time)
{
        /* if the root class's cfmin is bigger than cur_time, nothing to do */
        if (cl->cl_cfmin > cur_time)
                return NULL;

        while (cl->level > 0) {
                cl = vttree_firstfit(cl, cur_time);
                if (cl == NULL)
                        return NULL;
                /*
                 * update parent's cl_cvtmin.
                 */
                if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
                        cl->cl_parent->cl_cvtmin = cl->cl_vt;
        }
        return cl;
}

static void
cftree_insert(struct hfsc_class *cl)
{
        struct rb_node **p = &cl->cl_parent->cf_tree.rb_node;
        struct rb_node *parent = NULL;
        struct hfsc_class *cl1;

        while (*p != NULL) {
                parent = *p;
                cl1 = rb_entry(parent, struct hfsc_class, cf_node);
                if (cl->cl_f >= cl1->cl_f)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&cl->cf_node, parent, p);
        rb_insert_color(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_remove(struct hfsc_class *cl)
{
        rb_erase(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_update(struct hfsc_class *cl)
{
        cftree_remove(cl);
        cftree_insert(cl);
}

/*
 * service curve support functions
 *
 *  external service curve parameters
 *      m: bps
 *      d: us
 *  internal service curve parameters
 *      sm: (bytes/psched_us) << SM_SHIFT
 *      ism: (psched_us/byte) << ISM_SHIFT
 *      dx: psched_us
 *
 * Clock source resolution (CONFIG_NET_SCH_CLK_*)
 *  JIFFIES: for 48<=HZ<=1534 resolution is between 0.63us and 1.27us.
 *  CPU: resolution is between 0.5us and 1us.
 *  GETTIMEOFDAY: resolution is exactly 1us.
 *
 * sm and ism are scaled in order to keep effective digits.
 * SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective
 * digits in decimal using the following table.
 *
 * Note: We can afford the additional accuracy (altq hfsc keeps at most
 * 3 effective digits) thanks to the fact that the Linux clock is bounded
 * much more tightly.
 *
 *  bits/sec      100Kbps     1Mbps     10Mbps     100Mbps    1Gbps
 *  ------------+-------------------------------------------------------
 *  bytes/0.5us   6.25e-3    62.5e-3    625e-3     6250e-3    62500e-3
 *  bytes/us      12.5e-3    125e-3     1250e-3    12500e-3   125000e-3
 *  bytes/1.27us  15.875e-3  158.75e-3  1587.5e-3  15875e-3   158750e-3
 *
 *  0.5us/byte    160        16         1.6        0.16       0.016
 *  us/byte       80         8          0.8        0.08       0.008
 *  1.27us/byte   63         6.3        0.63       0.063      0.0063
 */
#define SM_SHIFT        20
#define ISM_SHIFT       18

#define SM_MASK         ((1ULL << SM_SHIFT) - 1)
#define ISM_MASK        ((1ULL << ISM_SHIFT) - 1)
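
/*
 * Worked example (a sketch assuming the GETTIMEOFDAY clock, i.e.
 * PSCHED_JIFFIE2US(HZ) == USEC_PER_SEC): for m = 125000 bytes/sec
 * (1 Mbit/s), m2sm() below computes
 *
 *      sm = (125000 << SM_SHIFT) / 1000000 = 131072
 *
 * which is 0.125 bytes/us scaled by 2^SM_SHIFT, matching the
 * "bytes/us" row of the table above.
 */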

static inline u64
seg_x2y(u64 x, u64 sm)
{
        u64 y;

        /*
         * compute
         *      y = x * sm >> SM_SHIFT
         * but divide it for the upper and lower bits to avoid overflow
         */
        y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
        return y;
}

static inline u64
seg_y2x(u64 y, u64 ism)
{
        u64 x;

        if (y == 0)
                x = 0;
        else if (ism == HT_INFINITY)
                x = HT_INFINITY;
        else {
                x = (y >> ISM_SHIFT) * ism
                    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
        }
        return x;
}

/* convert m (bps) into sm (bytes/psched us) */
static u64
m2sm(u32 m)
{
        u64 sm;

        sm = ((u64)m << SM_SHIFT);
        sm += PSCHED_JIFFIE2US(HZ) - 1;
        do_div(sm, PSCHED_JIFFIE2US(HZ));
        return sm;
}

/* convert m (bps) into ism (psched us/byte) */
static u64
m2ism(u32 m)
{
        u64 ism;

        if (m == 0)
                ism = HT_INFINITY;
        else {
                ism = ((u64)PSCHED_JIFFIE2US(HZ) << ISM_SHIFT);
                ism += m - 1;
                do_div(ism, m);
        }
        return ism;
}

/* convert d (us) into dx (psched us) */
static u64
d2dx(u32 d)
{
        u64 dx;

        dx = ((u64)d * PSCHED_JIFFIE2US(HZ));
        dx += USEC_PER_SEC - 1;
        do_div(dx, USEC_PER_SEC);
        return dx;
}

/* convert sm (bytes/psched us) into m (bps) */
static u32
sm2m(u64 sm)
{
        u64 m;

        m = (sm * PSCHED_JIFFIE2US(HZ)) >> SM_SHIFT;
        return (u32)m;
}

/* convert dx (psched us) into d (us) */
static u32
dx2d(u64 dx)
{
        u64 d;

        d = dx * USEC_PER_SEC;
        do_div(d, PSCHED_JIFFIE2US(HZ));
        return (u32)d;
}

static void
sc2isc(struct tc_service_curve *sc, struct internal_sc *isc)
{
        isc->sm1  = m2sm(sc->m1);
        isc->ism1 = m2ism(sc->m1);
        isc->dx   = d2dx(sc->d);
        isc->dy   = seg_x2y(isc->dx, isc->sm1);
        isc->sm2  = m2sm(sc->m2);
        isc->ism2 = m2ism(sc->m2);
}

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
        rtsc->x    = x;
        rtsc->y    = y;
        rtsc->sm1  = isc->sm1;
        rtsc->ism1 = isc->ism1;
        rtsc->dx   = isc->dx;
        rtsc->dy   = isc->dy;
        rtsc->sm2  = isc->sm2;
        rtsc->ism2 = isc->ism2;
}

/*
 * calculate the x-projection of the runtime service curve by the
 * given y-projection value
 */
static u64
rtsc_y2x(struct runtime_sc *rtsc, u64 y)
{
        u64 x;

        if (y < rtsc->y)
                x = rtsc->x;
        else if (y <= rtsc->y + rtsc->dy) {
                /* x belongs to the 1st segment */
                if (rtsc->dy == 0)
                        x = rtsc->x + rtsc->dx;
                else
                        x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
        } else {
                /* x belongs to the 2nd segment */
                x = rtsc->x + rtsc->dx
                    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
        }
        return x;
}

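/*
 * calculate the y-projection of the runtime service curve by the
 * given x-projection value
 */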
static u64
rtsc_x2y(struct runtime_sc *rtsc, u64 x)
{
        u64 y;

        if (x <= rtsc->x)
                y = rtsc->y;
        else if (x <= rtsc->x + rtsc->dx)
                /* y belongs to the 1st segment */
                y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
        else
                /* y belongs to the 2nd segment */
                y = rtsc->y + rtsc->dy
                    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
        return y;
}

/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
        u64 y1, y2, dx, dy;
        u32 dsm;

        if (isc->sm1 <= isc->sm2) {
                /* service curve is convex */
                y1 = rtsc_x2y(rtsc, x);
                if (y1 < y)
                        /* the current rtsc is smaller */
                        return;
                rtsc->x = x;
                rtsc->y = y;
                return;
        }

        /*
         * service curve is concave
         * compute the two y values of the current rtsc
         *      y1: at x
         *      y2: at (x + dx)
         */
        y1 = rtsc_x2y(rtsc, x);
        if (y1 <= y) {
                /* rtsc is below isc, no change to rtsc */
                return;
        }

        y2 = rtsc_x2y(rtsc, x + isc->dx);
        if (y2 >= y + isc->dy) {
                /* rtsc is above isc, replace rtsc by isc */
                rtsc->x = x;
                rtsc->y = y;
                rtsc->dx = isc->dx;
                rtsc->dy = isc->dy;
                return;
        }

        /*
         * the two curves intersect
         * compute the offsets (dx, dy) using the reverse
         * function of seg_x2y()
         *      seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
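         *      i.e. dx = ((y1 - y) << SM_SHIFT) / (sm1 - sm2),
         *      which is what the do_div() below computes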
         */
        dx = (y1 - y) << SM_SHIFT;
        dsm = isc->sm1 - isc->sm2;
        do_div(dx, dsm);
        /*
         * check if (x, y1) belongs to the 1st segment of rtsc.
         * if so, add the offset.
         */
        if (rtsc->x + rtsc->dx > x)
                dx += rtsc->x + rtsc->dx - x;
        dy = seg_x2y(dx, isc->sm1);

        rtsc->x = x;
        rtsc->y = y;
        rtsc->dx = dx;
        rtsc->dy = dy;
        return;
}

static void
init_ed(struct hfsc_class *cl, unsigned int next_len)
{
        u64 cur_time;

        PSCHED_GET_TIME(cur_time);

        /* update the deadline curve */
        rtsc_min(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);

        /*
         * update the eligible curve.
         * for concave, it is equal to the deadline curve.
         * for convex, it is a linear curve with slope m2.
         */
        cl->cl_eligible = cl->cl_deadline;
        if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
                cl->cl_eligible.dx = 0;
                cl->cl_eligible.dy = 0;
        }

        /* compute e and d */
        cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
        cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

        eltree_insert(cl);
}

static void
update_ed(struct hfsc_class *cl, unsigned int next_len)
{
        cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
        cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

        eltree_update(cl);
}

static inline void
update_d(struct hfsc_class *cl, unsigned int next_len)
{
        cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

static inline void
update_cfmin(struct hfsc_class *cl)
{
        struct rb_node *n = rb_first(&cl->cf_tree);
        struct hfsc_class *p;

        if (n == NULL) {
                cl->cl_cfmin = 0;
                return;
        }
        p = rb_entry(n, struct hfsc_class, cf_node);
        cl->cl_cfmin = p->cl_f;
}

static void
init_vf(struct hfsc_class *cl, unsigned int len)
{
        struct hfsc_class *max_cl;
        struct rb_node *n;
        u64 vt, f, cur_time;
        int go_active;

        cur_time = 0;
        go_active = 1;
        for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
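                /*
                 * cl_nactive++ is evaluated only while go_active is set,
                 * so activation propagates upward only as long as each
                 * ancestor previously had no active children.
                 */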
                if (go_active && cl->cl_nactive++ == 0)
                        go_active = 1;
                else
                        go_active = 0;

                if (go_active) {
                        n = rb_last(&cl->cl_parent->vt_tree);
                        if (n != NULL) {
                                max_cl = rb_entry(n, struct hfsc_class,vt_node);
                                /*
                                 * set vt to the average of the min and max
                                 * classes.  if the parent's period didn't
                                 * change, don't decrease vt of the class.
                                 */
                                vt = max_cl->cl_vt;
                                if (cl->cl_parent->cl_cvtmin != 0)
                                        vt = (cl->cl_parent->cl_cvtmin + vt)/2;

                                if (cl->cl_parent->cl_vtperiod !=
                                    cl->cl_parentperiod || vt > cl->cl_vt)
                                        cl->cl_vt = vt;
                        } else {
                                /*
                                 * first child for a new parent backlog period.
                                 * add parent's cvtmax to cvtoff to make a new
                                 * vt (vtoff + vt) larger than the vt in the
                                 * last period for all children.
                                 */
                                vt = cl->cl_parent->cl_cvtmax;
                                cl->cl_parent->cl_cvtoff += vt;
                                cl->cl_parent->cl_cvtmax = 0;
                                cl->cl_parent->cl_cvtmin = 0;
                                cl->cl_vt = 0;
                        }

                        cl->cl_vtoff = cl->cl_parent->cl_cvtoff -
                                                        cl->cl_pcvtoff;

                        /* update the virtual curve */
                        vt = cl->cl_vt + cl->cl_vtoff;
                        rtsc_min(&cl->cl_virtual, &cl->cl_fsc, vt,
                                                      cl->cl_total);
                        if (cl->cl_virtual.x == vt) {
                                cl->cl_virtual.x -= cl->cl_vtoff;
                                cl->cl_vtoff = 0;
                        }
                        cl->cl_vtadj = 0;

                        cl->cl_vtperiod++;  /* increment vt period */
                        cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
                        if (cl->cl_parent->cl_nactive == 0)
                                cl->cl_parentperiod++;
                        cl->cl_f = 0;

                        vttree_insert(cl);
                        cftree_insert(cl);

                        if (cl->cl_flags & HFSC_USC) {
                                /* class has upper limit curve */
                                if (cur_time == 0)
                                        PSCHED_GET_TIME(cur_time);

                                /* update the ulimit curve */
                                rtsc_min(&cl->cl_ulimit, &cl->cl_usc, cur_time,
                                         cl->cl_total);
                                /* compute myf */
                                cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
                                                      cl->cl_total);
                                cl->cl_myfadj = 0;
                        }
                }

                f = max(cl->cl_myf, cl->cl_cfmin);
                if (f != cl->cl_f) {
                        cl->cl_f = f;
                        cftree_update(cl);
                        update_cfmin(cl->cl_parent);
                }
        }
}

static void
update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
{
        u64 f; /* , myf_bound, delta; */
        int go_passive = 0;

        if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC)
                go_passive = 1;

        for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
                cl->cl_total += len;

                if (!(cl->cl_flags & HFSC_FSC) || cl->cl_nactive == 0)
                        continue;

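                /*
                 * mirroring go_active in init_vf(), going passive
                 * propagates upward only while each ancestor is left
                 * with no remaining active children.
                 */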
                if (go_passive && --cl->cl_nactive == 0)
                        go_passive = 1;
                else
                        go_passive = 0;

                if (go_passive) {
                        /* no more active child, going passive */

                        /* update cvtmax of the parent class */
                        if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
                                cl->cl_parent->cl_cvtmax = cl->cl_vt;

                        /* remove this class from the vt tree */
                        vttree_remove(cl);

                        cftree_remove(cl);
                        update_cfmin(cl->cl_parent);

                        continue;
                }

                /*
                 * update vt and f
                 */
                cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
                            - cl->cl_vtoff + cl->cl_vtadj;

                /*
                 * if vt of the class is smaller than cvtmin,
                 * the class was skipped in the past due to non-fit.
                 * if so, we need to adjust vtadj.
                 */
                if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
                        cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
                        cl->cl_vt = cl->cl_parent->cl_cvtmin;
                }

                /* update the vt tree */
                vttree_update(cl);

                if (cl->cl_flags & HFSC_USC) {
                        cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit,
                                                              cl->cl_total);
#if 0
                        /*
                         * This code causes classes to stay way under their
                         * limit when multiple classes are used at gigabit
                         * speed. needs investigation. -kaber
                         */
                        /*
                         * if myf lags behind by more than one clock tick
                         * from the current time, adjust myfadj to prevent
                         * a rate-limited class from going greedy.
                         * in a steady state under rate-limiting, myf
                         * fluctuates within one clock tick.
                         */
                        myf_bound = cur_time - PSCHED_JIFFIE2US(1);
                        if (cl->cl_myf < myf_bound) {
                                delta = cur_time - cl->cl_myf;
                                cl->cl_myfadj += delta;
                                cl->cl_myf += delta;
                        }
#endif
                }

                f = max(cl->cl_myf, cl->cl_cfmin);
                if (f != cl->cl_f) {
                        cl->cl_f = f;
                        cftree_update(cl);
                        update_cfmin(cl->cl_parent);
                }
        }
}

static void
set_active(struct hfsc_class *cl, unsigned int len)
{
        if (cl->cl_flags & HFSC_RSC)
                init_ed(cl, len);
        if (cl->cl_flags & HFSC_FSC)
                init_vf(cl, len);

        list_add_tail(&cl->dlist, &cl->sched->droplist);
}

static void
set_passive(struct hfsc_class *cl)
{
        if (cl->cl_flags & HFSC_RSC)
                eltree_remove(cl);

        list_del(&cl->dlist);

        /*
         * vttree removal is now handled in update_vf(), so update_vf(cl, 0, 0)
         * must be called explicitly to remove a class from the vttree.
         */
}

/*
 * hack to get length of first packet in queue.
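 * this qdisc API has no peek operation, so the head packet is dequeued
 * and immediately requeued to obtain its length.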
 */
static unsigned int
qdisc_peek_len(struct Qdisc *sch)
{
        struct sk_buff *skb;
        unsigned int len;

        skb = sch->dequeue(sch);
        if (skb == NULL) {
                if (net_ratelimit())
                        printk("qdisc_peek_len: non work-conserving qdisc ?\n");
                return 0;
        }
        len = skb->len;
        if (unlikely(sch->ops->requeue(skb, sch) != NET_XMIT_SUCCESS)) {
                if (net_ratelimit())
                        printk("qdisc_peek_len: failed to requeue\n");
                qdisc_tree_decrease_qlen(sch, 1);
                return 0;
        }
        return len;
}

static void
hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
{
        unsigned int len = cl->qdisc->q.qlen;

        qdisc_reset(cl->qdisc);
        qdisc_tree_decrease_qlen(cl->qdisc, len);
}

static void
hfsc_adjust_levels(struct hfsc_class *cl)
{
        struct hfsc_class *p;
        unsigned int level;

        do {
                level = 0;
                list_for_each_entry(p, &cl->children, siblings) {
                        if (p->level >= level)
                                level = p->level + 1;
                }
                cl->level = level;
        } while ((cl = cl->cl_parent) != NULL);
}

static inline unsigned int
hfsc_hash(u32 h)
{
        h ^= h >> 8;
        h ^= h >> 4;

        return h & (HFSC_HSIZE - 1);
}

977 hfsc_find_class(u32 classid, struct Qdisc *sch)
978 {
979         struct hfsc_sched *q = qdisc_priv(sch);
980         struct hfsc_class *cl;
981
982         list_for_each_entry(cl, &q->clhash[hfsc_hash(classid)], hlist) {
983                 if (cl->classid == classid)
984                         return cl;
985         }
986         return NULL;
987 }
988
989 static void
990 hfsc_change_rsc(struct hfsc_class *cl, struct tc_service_curve *rsc,
991                 u64 cur_time)
992 {
993         sc2isc(rsc, &cl->cl_rsc);
994         rtsc_init(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);
995         cl->cl_eligible = cl->cl_deadline;
996         if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
997                 cl->cl_eligible.dx = 0;
998                 cl->cl_eligible.dy = 0;
999         }
1000         cl->cl_flags |= HFSC_RSC;
1001 }
1002
1003 static void
1004 hfsc_change_fsc(struct hfsc_class *cl, struct tc_service_curve *fsc)
1005 {
1006         sc2isc(fsc, &cl->cl_fsc);
1007         rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
1008         cl->cl_flags |= HFSC_FSC;
1009 }
1010
1011 static void
1012 hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
1013                 u64 cur_time)
1014 {
1015         sc2isc(usc, &cl->cl_usc);
1016         rtsc_init(&cl->cl_ulimit, &cl->cl_usc, cur_time, cl->cl_total);
1017         cl->cl_flags |= HFSC_USC;
1018 }
1019
1020 static int
1021 hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
1022                   struct rtattr **tca, unsigned long *arg)
1023 {
1024         struct hfsc_sched *q = qdisc_priv(sch);
1025         struct hfsc_class *cl = (struct hfsc_class *)*arg;
1026         struct hfsc_class *parent = NULL;
1027         struct rtattr *opt = tca[TCA_OPTIONS-1];
1028         struct rtattr *tb[TCA_HFSC_MAX];
1029         struct tc_service_curve *rsc = NULL, *fsc = NULL, *usc = NULL;
1030         u64 cur_time;
1031
1032         if (opt == NULL || rtattr_parse_nested(tb, TCA_HFSC_MAX, opt))
1033                 return -EINVAL;
1034
1035         if (tb[TCA_HFSC_RSC-1]) {
1036                 if (RTA_PAYLOAD(tb[TCA_HFSC_RSC-1]) < sizeof(*rsc))
1037                         return -EINVAL;
1038                 rsc = RTA_DATA(tb[TCA_HFSC_RSC-1]);
1039                 if (rsc->m1 == 0 && rsc->m2 == 0)
1040                         rsc = NULL;
1041         }
1042
1043         if (tb[TCA_HFSC_FSC-1]) {
1044                 if (RTA_PAYLOAD(tb[TCA_HFSC_FSC-1]) < sizeof(*fsc))
1045                         return -EINVAL;
1046                 fsc = RTA_DATA(tb[TCA_HFSC_FSC-1]);
1047                 if (fsc->m1 == 0 && fsc->m2 == 0)
1048                         fsc = NULL;
1049         }
1050
1051         if (tb[TCA_HFSC_USC-1]) {
1052                 if (RTA_PAYLOAD(tb[TCA_HFSC_USC-1]) < sizeof(*usc))
1053                         return -EINVAL;
1054                 usc = RTA_DATA(tb[TCA_HFSC_USC-1]);
1055                 if (usc->m1 == 0 && usc->m2 == 0)
1056                         usc = NULL;
1057         }
1058
1059         if (cl != NULL) {
1060                 if (parentid) {
1061                         if (cl->cl_parent && cl->cl_parent->classid != parentid)
1062                                 return -EINVAL;
1063                         if (cl->cl_parent == NULL && parentid != TC_H_ROOT)
1064                                 return -EINVAL;
1065                 }
1066                 PSCHED_GET_TIME(cur_time);
1067
1068                 sch_tree_lock(sch);
1069                 if (rsc != NULL)
1070                         hfsc_change_rsc(cl, rsc, cur_time);
1071                 if (fsc != NULL)
1072                         hfsc_change_fsc(cl, fsc);
1073                 if (usc != NULL)
1074                         hfsc_change_usc(cl, usc, cur_time);
1075
1076                 if (cl->qdisc->q.qlen != 0) {
1077                         if (cl->cl_flags & HFSC_RSC)
1078                                 update_ed(cl, qdisc_peek_len(cl->qdisc));
1079                         if (cl->cl_flags & HFSC_FSC)
1080                                 update_vf(cl, 0, cur_time);
1081                 }
1082                 sch_tree_unlock(sch);
1083
1084 #ifdef CONFIG_NET_ESTIMATOR
1085                 if (tca[TCA_RATE-1])
1086                         gen_replace_estimator(&cl->bstats, &cl->rate_est,
1087                                 cl->stats_lock, tca[TCA_RATE-1]);
1088 #endif
1089                 return 0;
1090         }
1091
1092         if (parentid == TC_H_ROOT)
1093                 return -EEXIST;
1094
1095         parent = &q->root;
1096         if (parentid) {
1097                 parent = hfsc_find_class(parentid, sch);
1098                 if (parent == NULL)
1099                         return -ENOENT;
1100         }
1101
1102         if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
1103                 return -EINVAL;
1104         if (hfsc_find_class(classid, sch))
1105                 return -EEXIST;
1106
1107         if (rsc == NULL && fsc == NULL)
1108                 return -EINVAL;
1109
1110         cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL);
1111         if (cl == NULL)
1112                 return -ENOBUFS;
1113
1114         if (rsc != NULL)
1115                 hfsc_change_rsc(cl, rsc, 0);
1116         if (fsc != NULL)
1117                 hfsc_change_fsc(cl, fsc);
1118         if (usc != NULL)
1119                 hfsc_change_usc(cl, usc, 0);
1120
1121         cl->refcnt    = 1;
1122         cl->classid   = classid;
1123         cl->sched     = q;
1124         cl->cl_parent = parent;
1125         cl->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid);
1126         if (cl->qdisc == NULL)
1127                 cl->qdisc = &noop_qdisc;
1128         cl->stats_lock = &sch->dev->queue_lock;
1129         INIT_LIST_HEAD(&cl->children);
1130         cl->vt_tree = RB_ROOT;
1131         cl->cf_tree = RB_ROOT;
1132
1133         sch_tree_lock(sch);
1134         list_add_tail(&cl->hlist, &q->clhash[hfsc_hash(classid)]);
1135         list_add_tail(&cl->siblings, &parent->children);
1136         if (parent->level == 0)
1137                 hfsc_purge_queue(sch, parent);
1138         hfsc_adjust_levels(parent);
1139         cl->cl_pcvtoff = parent->cl_cvtoff;
1140         sch_tree_unlock(sch);
1141
1142 #ifdef CONFIG_NET_ESTIMATOR
1143         if (tca[TCA_RATE-1])
1144                 gen_new_estimator(&cl->bstats, &cl->rate_est,
1145                         cl->stats_lock, tca[TCA_RATE-1]);
1146 #endif
1147         *arg = (unsigned long)cl;
1148         return 0;
1149 }

static void
hfsc_destroy_filters(struct tcf_proto **fl)
{
        struct tcf_proto *tp;

        while ((tp = *fl) != NULL) {
                *fl = tp->next;
                tcf_destroy(tp);
        }
}

static void
hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
{
        struct hfsc_sched *q = qdisc_priv(sch);

        hfsc_destroy_filters(&cl->filter_list);
        qdisc_destroy(cl->qdisc);
#ifdef CONFIG_NET_ESTIMATOR
        gen_kill_estimator(&cl->bstats, &cl->rate_est);
#endif
        if (cl != &q->root)
                kfree(cl);
}

static int
hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl = (struct hfsc_class *)arg;

        if (cl->level > 0 || cl->filter_cnt > 0 || cl == &q->root)
                return -EBUSY;

        sch_tree_lock(sch);

        list_del(&cl->hlist);
        list_del(&cl->siblings);
        hfsc_adjust_levels(cl->cl_parent);
        hfsc_purge_queue(sch, cl);
        if (--cl->refcnt == 0)
                hfsc_destroy_class(sch, cl);

        sch_tree_unlock(sch);
        return 0;
}

static struct hfsc_class *
hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl;
        struct tcf_result res;
        struct tcf_proto *tcf;
        int result;

        if (TC_H_MAJ(skb->priority ^ sch->handle) == 0 &&
            (cl = hfsc_find_class(skb->priority, sch)) != NULL)
                if (cl->level == 0)
                        return cl;

        *qerr = NET_XMIT_BYPASS;
        tcf = q->root.filter_list;
        while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_QUEUED:
                case TC_ACT_STOLEN:
                        *qerr = NET_XMIT_SUCCESS;
                case TC_ACT_SHOT:
                        return NULL;
                }
#elif defined(CONFIG_NET_CLS_POLICE)
                if (result == TC_POLICE_SHOT)
                        return NULL;
#endif
                if ((cl = (struct hfsc_class *)res.class) == NULL) {
                        if ((cl = hfsc_find_class(res.classid, sch)) == NULL)
                                break; /* filter selected invalid classid */
                }

                if (cl->level == 0)
                        return cl; /* hit leaf class */

                /* apply inner filter chain */
                tcf = cl->filter_list;
        }

        /* classification failed, try default class */
        cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
        if (cl == NULL || cl->level > 0)
                return NULL;

        return cl;
}

static int
hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
                 struct Qdisc **old)
{
        struct hfsc_class *cl = (struct hfsc_class *)arg;

        if (cl == NULL)
                return -ENOENT;
        if (cl->level > 0)
                return -EINVAL;
        if (new == NULL) {
                new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
                                        cl->classid);
                if (new == NULL)
                        new = &noop_qdisc;
        }

        sch_tree_lock(sch);
        hfsc_purge_queue(sch, cl);
        *old = xchg(&cl->qdisc, new);
        sch_tree_unlock(sch);
        return 0;
}

static struct Qdisc *
hfsc_class_leaf(struct Qdisc *sch, unsigned long arg)
{
        struct hfsc_class *cl = (struct hfsc_class *)arg;

        if (cl != NULL && cl->level == 0)
                return cl->qdisc;

        return NULL;
}

static void
hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
        struct hfsc_class *cl = (struct hfsc_class *)arg;

        if (cl->qdisc->q.qlen == 0) {
                update_vf(cl, 0, 0);
                set_passive(cl);
        }
}

static unsigned long
hfsc_get_class(struct Qdisc *sch, u32 classid)
{
        struct hfsc_class *cl = hfsc_find_class(classid, sch);

        if (cl != NULL)
                cl->refcnt++;

        return (unsigned long)cl;
}

static void
hfsc_put_class(struct Qdisc *sch, unsigned long arg)
{
        struct hfsc_class *cl = (struct hfsc_class *)arg;

        if (--cl->refcnt == 0)
                hfsc_destroy_class(sch, cl);
}

static unsigned long
hfsc_bind_tcf(struct Qdisc *sch, unsigned long parent, u32 classid)
{
        struct hfsc_class *p = (struct hfsc_class *)parent;
        struct hfsc_class *cl = hfsc_find_class(classid, sch);

        if (cl != NULL) {
                if (p != NULL && p->level <= cl->level)
                        return 0;
                cl->filter_cnt++;
        }

        return (unsigned long)cl;
}

static void
hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
        struct hfsc_class *cl = (struct hfsc_class *)arg;

        cl->filter_cnt--;
}

static struct tcf_proto **
hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl = (struct hfsc_class *)arg;

        if (cl == NULL)
                cl = &q->root;

        return &cl->filter_list;
}

static int
hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
{
        struct tc_service_curve tsc;

        tsc.m1 = sm2m(sc->sm1);
        tsc.d  = dx2d(sc->dx);
        tsc.m2 = sm2m(sc->sm2);
        RTA_PUT(skb, attr, sizeof(tsc), &tsc);

        return skb->len;

 rtattr_failure:
        return -1;
}

static inline int
hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
{
        if ((cl->cl_flags & HFSC_RSC) &&
            (hfsc_dump_sc(skb, TCA_HFSC_RSC, &cl->cl_rsc) < 0))
                goto rtattr_failure;

        if ((cl->cl_flags & HFSC_FSC) &&
            (hfsc_dump_sc(skb, TCA_HFSC_FSC, &cl->cl_fsc) < 0))
                goto rtattr_failure;

        if ((cl->cl_flags & HFSC_USC) &&
            (hfsc_dump_sc(skb, TCA_HFSC_USC, &cl->cl_usc) < 0))
                goto rtattr_failure;

        return skb->len;

 rtattr_failure:
        return -1;
}

static int
hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
                struct tcmsg *tcm)
{
        struct hfsc_class *cl = (struct hfsc_class *)arg;
        unsigned char *b = skb->tail;
        struct rtattr *rta = (struct rtattr *)b;

        tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->classid : TC_H_ROOT;
        tcm->tcm_handle = cl->classid;
        if (cl->level == 0)
                tcm->tcm_info = cl->qdisc->handle;

        RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
        if (hfsc_dump_curves(skb, cl) < 0)
                goto rtattr_failure;
        rta->rta_len = skb->tail - b;
        return skb->len;

 rtattr_failure:
        skb_trim(skb, b - skb->data);
        return -1;
}

static int
hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
        struct gnet_dump *d)
{
        struct hfsc_class *cl = (struct hfsc_class *)arg;
        struct tc_hfsc_stats xstats;

        cl->qstats.qlen = cl->qdisc->q.qlen;
        xstats.level   = cl->level;
        xstats.period  = cl->cl_vtperiod;
        xstats.work    = cl->cl_total;
        xstats.rtwork  = cl->cl_cumul;

        if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
#ifdef CONFIG_NET_ESTIMATOR
            gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
#endif
            gnet_stats_copy_queue(d, &cl->qstats) < 0)
                return -1;

        return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void
hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl;
        unsigned int i;

        if (arg->stop)
                return;

        for (i = 0; i < HFSC_HSIZE; i++) {
                list_for_each_entry(cl, &q->clhash[i], hlist) {
                        if (arg->count < arg->skip) {
                                arg->count++;
                                continue;
                        }
                        if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
                                arg->stop = 1;
                                return;
                        }
                        arg->count++;
                }
        }
}

static void
hfsc_watchdog(unsigned long arg)
{
        struct Qdisc *sch = (struct Qdisc *)arg;

        sch->flags &= ~TCQ_F_THROTTLED;
        netif_schedule(sch->dev);
}

static void
hfsc_schedule_watchdog(struct Qdisc *sch, u64 cur_time)
{
        struct hfsc_sched *q = qdisc_priv(sch);
        struct hfsc_class *cl;
        u64 next_time = 0;
        long delay;

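        /*
         * wake up at the earlier of the minimum eligible time and the
         * root class's earliest fit-time (cl_cfmin), when one is set.
         */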
1476         if ((cl = eltree_get_minel(q)) != NULL)
1477                 next_time = cl->cl_e;
1478         if (q->root.cl_cfmin != 0) {
1479                 if (next_time == 0 || next_time > q->root.cl_cfmin)
1480                         next_time = q->root.cl_cfmin;
1481         }
1482         WARN_ON(next_time == 0);
1483         delay = next_time - cur_time;
1484         delay = PSCHED_US2JIFFIE(delay);
1485
1486         sch->flags |= TCQ_F_THROTTLED;
1487         mod_timer(&q->wd_timer, jiffies + delay);
1488 }
1489
1490 static int
1491 hfsc_init_qdisc(struct Qdisc *sch, struct rtattr *opt)
1492 {
1493         struct hfsc_sched *q = qdisc_priv(sch);
1494         struct tc_hfsc_qopt *qopt;
1495         unsigned int i;
1496
1497         if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
1498                 return -EINVAL;
1499         qopt = RTA_DATA(opt);
1500
1501         sch->stats_lock = &sch->dev->queue_lock;
1502
1503         q->defcls = qopt->defcls;
1504         for (i = 0; i < HFSC_HSIZE; i++)
1505                 INIT_LIST_HEAD(&q->clhash[i]);
1506         q->eligible = RB_ROOT;
1507         INIT_LIST_HEAD(&q->droplist);
1508         skb_queue_head_init(&q->requeue);
1509
1510         q->root.refcnt  = 1;
1511         q->root.classid = sch->handle;
1512         q->root.sched   = q;
1513         q->root.qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
1514                                           sch->handle);
1515         if (q->root.qdisc == NULL)
1516                 q->root.qdisc = &noop_qdisc;
1517         q->root.stats_lock = &sch->dev->queue_lock;
1518         INIT_LIST_HEAD(&q->root.children);
1519         q->root.vt_tree = RB_ROOT;
1520         q->root.cf_tree = RB_ROOT;
1521
1522         list_add(&q->root.hlist, &q->clhash[hfsc_hash(q->root.classid)]);
1523
1524         init_timer(&q->wd_timer);
1525         q->wd_timer.function = hfsc_watchdog;
1526         q->wd_timer.data = (unsigned long)sch;
1527
1528         return 0;
1529 }
1530
1531 static int
1532 hfsc_change_qdisc(struct Qdisc *sch, struct rtattr *opt)
1533 {
1534         struct hfsc_sched *q = qdisc_priv(sch);
1535         struct tc_hfsc_qopt *qopt;
1536
1537         if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
1538                 return -EINVAL;
1539         qopt = RTA_DATA(opt);
1540
1541         sch_tree_lock(sch);
1542         q->defcls = qopt->defcls;
1543         sch_tree_unlock(sch);
1544
1545         return 0;
1546 }
1547
1548 static void
1549 hfsc_reset_class(struct hfsc_class *cl)
1550 {
1551         cl->cl_total        = 0;
1552         cl->cl_cumul        = 0;
1553         cl->cl_d            = 0;
1554         cl->cl_e            = 0;
1555         cl->cl_vt           = 0;
1556         cl->cl_vtadj        = 0;
1557         cl->cl_vtoff        = 0;
1558         cl->cl_cvtmin       = 0;
1559         cl->cl_cvtmax       = 0;
1560         cl->cl_cvtoff       = 0;
1561         cl->cl_pcvtoff      = 0;
1562         cl->cl_vtperiod     = 0;
1563         cl->cl_parentperiod = 0;
1564         cl->cl_f            = 0;
1565         cl->cl_myf          = 0;
1566         cl->cl_myfadj       = 0;
1567         cl->cl_cfmin        = 0;
1568         cl->cl_nactive      = 0;
1569
1570         cl->vt_tree = RB_ROOT;
1571         cl->cf_tree = RB_ROOT;
1572         qdisc_reset(cl->qdisc);
1573
1574         if (cl->cl_flags & HFSC_RSC)
1575                 rtsc_init(&cl->cl_deadline, &cl->cl_rsc, 0, 0);
1576         if (cl->cl_flags & HFSC_FSC)
1577                 rtsc_init(&cl->cl_virtual, &cl->cl_fsc, 0, 0);
1578         if (cl->cl_flags & HFSC_USC)
1579                 rtsc_init(&cl->cl_ulimit, &cl->cl_usc, 0, 0);
1580 }
1581
static void
hfsc_reset_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int i;

	for (i = 0; i < HFSC_HSIZE; i++) {
		list_for_each_entry(cl, &q->clhash[i], hlist)
			hfsc_reset_class(cl);
	}
	__skb_queue_purge(&q->requeue);
	q->eligible = RB_ROOT;
	INIT_LIST_HEAD(&q->droplist);
	del_timer(&q->wd_timer);
	sch->flags &= ~TCQ_F_THROTTLED;
	sch->q.qlen = 0;
}

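/*
 * hfsc_destroy_qdisc - tear down all classes (list_for_each_entry_safe
 * because hfsc_destroy_class unlinks entries as it goes) and release
 * any buffered skbs.
 */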
static void
hfsc_destroy_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl, *next;
	unsigned int i;

	for (i = 0; i < HFSC_HSIZE; i++) {
		list_for_each_entry_safe(cl, next, &q->clhash[i], hlist)
			hfsc_destroy_class(sch, cl);
	}
	__skb_queue_purge(&q->requeue);
	del_timer(&q->wd_timer);
}

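/*
 * hfsc_dump_qdisc - dump the qdisc-level options (currently just the
 * default classid) as a TCA_OPTIONS attribute.
 */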
static int
hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	unsigned char *b = skb->tail;
	struct tc_hfsc_qopt qopt;

	qopt.defcls = q->defcls;
	RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
	return skb->len;

 rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

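/*
 * hfsc_enqueue - classify the packet and hand it to the chosen class's
 * child qdisc.  A class whose queue goes from empty to non-empty is
 * made active, which inserts it into the eligible and vt trees.
 */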
static int
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct hfsc_class *cl;
	unsigned int len;
	int err;

	cl = hfsc_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err == NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return err;
	}

	len = skb->len;
	err = cl->qdisc->enqueue(skb, cl->qdisc);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		cl->qstats.drops++;
		sch->qstats.drops++;
		return err;
	}

	if (cl->qdisc->q.qlen == 1)
		set_active(cl, len);

	cl->bstats.packets++;
	cl->bstats.bytes += len;
	sch->bstats.packets++;
	sch->bstats.bytes += len;
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}

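/*
 * hfsc_dequeue - pick the next class by the two HFSC criteria: if any
 * class is eligible (its deadline curve says it is owed service now),
 * real-time criteria apply and the minimum-deadline class wins;
 * otherwise the link-sharing criteria pick the minimum-vt class in
 * the hierarchy.  If neither yields a class, the qdisc is throttled
 * and the watchdog is scheduled to retry at the next eligible time.
 */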
static struct sk_buff *
hfsc_dequeue(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct sk_buff *skb;
	u64 cur_time;
	unsigned int next_len;
	int realtime = 0;

	if (sch->q.qlen == 0)
		return NULL;
	if ((skb = __skb_dequeue(&q->requeue)))
		goto out;

	PSCHED_GET_TIME(cur_time);

	/*
	 * if there are eligible classes, use real-time criteria.
	 * find the class with the minimum deadline among
	 * the eligible classes.
	 */
	if ((cl = eltree_get_mindl(q, cur_time)) != NULL) {
		realtime = 1;
	} else {
		/*
		 * use link-sharing criteria
		 * get the class with the minimum vt in the hierarchy
		 */
		cl = vttree_get_minvt(&q->root, cur_time);
		if (cl == NULL) {
			sch->qstats.overlimits++;
			hfsc_schedule_watchdog(sch, cur_time);
			return NULL;
		}
	}

	skb = cl->qdisc->dequeue(cl->qdisc);
	if (skb == NULL) {
		if (net_ratelimit())
			printk(KERN_ERR "HFSC: non-work-conserving qdisc?\n");
		return NULL;
	}

	update_vf(cl, skb->len, cur_time);
	if (realtime)
		cl->cl_cumul += skb->len;

	if (cl->qdisc->q.qlen != 0) {
		if (cl->cl_flags & HFSC_RSC) {
			/* update ed */
			next_len = qdisc_peek_len(cl->qdisc);
			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}

 out:
	sch->flags &= ~TCQ_F_THROTTLED;
	sch->q.qlen--;

	return skb;
}

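/*
 * hfsc_requeue - stash a packet the driver could not send; it is
 * re-dequeued ahead of everything else on the next hfsc_dequeue call.
 */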
static int
hfsc_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);

	__skb_queue_head(&q->requeue, skb);
	sch->q.qlen++;
	sch->qstats.requeues++;
	return NET_XMIT_SUCCESS;
}

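/*
 * hfsc_drop - drop one packet from the first class on the drop list
 * that can shed load; a class emptied by the drop becomes passive,
 * otherwise it is rotated to the tail to spread drops across classes.
 */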
static unsigned int
hfsc_drop(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int len;

	list_for_each_entry(cl, &q->droplist, dlist) {
		if (cl->qdisc->ops->drop != NULL &&
		    (len = cl->qdisc->ops->drop(cl->qdisc)) > 0) {
			if (cl->qdisc->q.qlen == 0) {
				update_vf(cl, 0, 0);
				set_passive(cl);
			} else {
				list_move_tail(&cl->dlist, &q->droplist);
			}
			cl->qstats.drops++;
			sch->qstats.drops++;
			sch->q.qlen--;
			return len;
		}
	}
	return 0;
}

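/*
 * Operation tables wiring the functions above into the generic qdisc
 * and class APIs.
 */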
static struct Qdisc_class_ops hfsc_class_ops = {
	.change		= hfsc_change_class,
	.delete		= hfsc_delete_class,
	.graft		= hfsc_graft_class,
	.leaf		= hfsc_class_leaf,
	.qlen_notify	= hfsc_qlen_notify,
	.get		= hfsc_get_class,
	.put		= hfsc_put_class,
	.bind_tcf	= hfsc_bind_tcf,
	.unbind_tcf	= hfsc_unbind_tcf,
	.tcf_chain	= hfsc_tcf_chain,
	.dump		= hfsc_dump_class,
	.dump_stats	= hfsc_dump_class_stats,
	.walk		= hfsc_walk
};

static struct Qdisc_ops hfsc_qdisc_ops = {
	.id		= "hfsc",
	.init		= hfsc_init_qdisc,
	.change		= hfsc_change_qdisc,
	.reset		= hfsc_reset_qdisc,
	.destroy	= hfsc_destroy_qdisc,
	.dump		= hfsc_dump_qdisc,
	.enqueue	= hfsc_enqueue,
	.dequeue	= hfsc_dequeue,
	.requeue	= hfsc_requeue,
	.drop		= hfsc_drop,
	.cl_ops		= &hfsc_class_ops,
	.priv_size	= sizeof(struct hfsc_sched),
	.owner		= THIS_MODULE
};

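/*
 * Illustrative configuration only, not part of the original source: a
 * minimal tc setup exercising this qdisc, assuming a device eth0 and
 * an iproute2 tc built with hfsc support.
 *
 *   # attach hfsc as root; unclassified traffic maps to minor id 2
 *   tc qdisc add dev eth0 root handle 1: hfsc default 2
 *
 *   # a class with a 1mbit real-time guarantee and a 5mbit
 *   # link-sharing curve
 *   tc class add dev eth0 parent 1: classid 1:1 hfsc \
 *           rt m2 1mbit ls m2 5mbit
 */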
static int __init
hfsc_init(void)
{
	return register_qdisc(&hfsc_qdisc_ops);
}

static void __exit
hfsc_cleanup(void)
{
	unregister_qdisc(&hfsc_qdisc_ops);
}

MODULE_LICENSE("GPL");
module_init(hfsc_init);
module_exit(hfsc_cleanup);