/*
 * net/sched/sch_prio.c Simple 3-band priority "scheduler".
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 * Fixes:       19990609: J Hadi Salim <hadi@nortelnetworks.com>:
 *              Init --  EINVAL when opt undefined
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

struct prio_sched_data {
        int bands;
        struct tcf_proto __rcu *filter_list;
        struct tcf_block *block;
        u8  prio2band[TC_PRIO_MAX+1];
        struct Qdisc *queues[TCQ_PRIO_BANDS];
};

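/*
 * Pick the band queue for an skb: a priority addressed to this qdisc selects
 * a band directly, otherwise any attached classifier is consulted, with the
 * prio2band table as the fallback.
 */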
static struct Qdisc *
prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
        struct prio_sched_data *q = qdisc_priv(sch);
        u32 band = skb->priority;
        struct tcf_result res;
        struct tcf_proto *fl;
        int err;

        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        if (TC_H_MAJ(skb->priority) != sch->handle) {
                fl = rcu_dereference_bh(q->filter_list);
                err = tcf_classify(skb, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
                switch (err) {
                case TC_ACT_STOLEN:
                case TC_ACT_QUEUED:
                case TC_ACT_TRAP:
                        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                        /* fall through */
                case TC_ACT_SHOT:
                        return NULL;
                }
#endif
                if (!fl || err < 0) {
                        if (TC_H_MAJ(band))
                                band = 0;
                        return q->queues[q->prio2band[band & TC_PRIO_MAX]];
                }
                band = res.classid;
        }
        band = TC_H_MIN(band) - 1;
        if (band >= q->bands)
                return q->queues[q->prio2band[0]];

        return q->queues[band];
}

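/* Enqueue on the band selected by prio_classify() and update qlen/backlog. */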
static int
prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
{
        unsigned int len = qdisc_pkt_len(skb);
        struct Qdisc *qdisc;
        int ret;

        qdisc = prio_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT
        if (qdisc == NULL) {

                if (ret & __NET_XMIT_BYPASS)
                        qdisc_qstats_drop(sch);
                __qdisc_drop(skb, to_free);
                return ret;
        }
#endif

        ret = qdisc_enqueue(skb, qdisc, to_free);
        if (ret == NET_XMIT_SUCCESS) {
                sch->qstats.backlog += len;
                sch->q.qlen++;
                return NET_XMIT_SUCCESS;
        }
        if (net_xmit_drop_count(ret))
                qdisc_qstats_drop(sch);
        return ret;
}

static struct sk_buff *prio_peek(struct Qdisc *sch)
{
        struct prio_sched_data *q = qdisc_priv(sch);
        int prio;

        for (prio = 0; prio < q->bands; prio++) {
                struct Qdisc *qdisc = q->queues[prio];
                struct sk_buff *skb = qdisc->ops->peek(qdisc);
                if (skb)
                        return skb;
        }
        return NULL;
}

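/* Dequeue in strict priority order: lower-numbered bands are served first. */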
static struct sk_buff *prio_dequeue(struct Qdisc *sch)
{
        struct prio_sched_data *q = qdisc_priv(sch);
        int prio;

        for (prio = 0; prio < q->bands; prio++) {
                struct Qdisc *qdisc = q->queues[prio];
                struct sk_buff *skb = qdisc_dequeue_peeked(qdisc);
                if (skb) {
                        qdisc_bstats_update(sch, skb);
                        qdisc_qstats_backlog_dec(sch, skb);
                        sch->q.qlen--;
                        return skb;
                }
        }
        return NULL;

}

static void
prio_reset(struct Qdisc *sch)
{
        int prio;
        struct prio_sched_data *q = qdisc_priv(sch);

        for (prio = 0; prio < q->bands; prio++)
                qdisc_reset(q->queues[prio]);
        sch->qstats.backlog = 0;
        sch->q.qlen = 0;
}

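/*
 * Mirror the band configuration to hardware through ndo_setup_tc();
 * a NULL qopt asks the driver to destroy the offloaded qdisc.
 */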
static int prio_offload(struct Qdisc *sch, struct tc_prio_qopt *qopt)
{
        struct net_device *dev = qdisc_dev(sch);
        struct tc_prio_qopt_offload opt = {
                .handle = sch->handle,
                .parent = sch->parent,
        };

        if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
                return -EOPNOTSUPP;

        if (qopt) {
                opt.command = TC_PRIO_REPLACE;
                opt.replace_params.bands = qopt->bands;
                memcpy(&opt.replace_params.priomap, qopt->priomap,
                       TC_PRIO_MAX + 1);
                opt.replace_params.qstats = &sch->qstats;
        } else {
                opt.command = TC_PRIO_DESTROY;
        }

        return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_PRIO, &opt);
}

static void
prio_destroy(struct Qdisc *sch)
{
        int prio;
        struct prio_sched_data *q = qdisc_priv(sch);

        tcf_block_put(q->block);
        prio_offload(sch, NULL);
        for (prio = 0; prio < q->bands; prio++)
                qdisc_put(q->queues[prio]);
}

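/*
 * Validate the requested band count and priomap, pre-allocate any new
 * per-band pfifo child qdiscs, then commit the change under the tree lock.
 */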
static int prio_tune(struct Qdisc *sch, struct nlattr *opt,
                     struct netlink_ext_ack *extack)
{
        struct prio_sched_data *q = qdisc_priv(sch);
        struct Qdisc *queues[TCQ_PRIO_BANDS];
        int oldbands = q->bands, i;
        struct tc_prio_qopt *qopt;

        if (nla_len(opt) < sizeof(*qopt))
                return -EINVAL;
        qopt = nla_data(opt);

        if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2)
                return -EINVAL;

        for (i = 0; i <= TC_PRIO_MAX; i++) {
                if (qopt->priomap[i] >= qopt->bands)
                        return -EINVAL;
        }

        /* Before commit, make sure we can allocate all new qdiscs */
        for (i = oldbands; i < qopt->bands; i++) {
                queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
                                              TC_H_MAKE(sch->handle, i + 1),
                                              extack);
                if (!queues[i]) {
                        while (i > oldbands)
                                qdisc_put(queues[--i]);
                        return -ENOMEM;
                }
        }

        prio_offload(sch, qopt);
        sch_tree_lock(sch);
        q->bands = qopt->bands;
        memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);

        for (i = q->bands; i < oldbands; i++) {
                struct Qdisc *child = q->queues[i];

                qdisc_tree_reduce_backlog(child, child->q.qlen,
                                          child->qstats.backlog);
        }

        for (i = oldbands; i < q->bands; i++) {
                q->queues[i] = queues[i];
                if (q->queues[i] != &noop_qdisc)
                        qdisc_hash_add(q->queues[i], true);
        }

        sch_tree_unlock(sch);

        for (i = q->bands; i < oldbands; i++)
                qdisc_put(q->queues[i]);
        return 0;
}

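/* Acquire a tcf block for classifiers and apply the initial configuration. */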
static int prio_init(struct Qdisc *sch, struct nlattr *opt,
                     struct netlink_ext_ack *extack)
{
        struct prio_sched_data *q = qdisc_priv(sch);
        int err;

        if (!opt)
                return -EINVAL;

        err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
        if (err)
                return err;

        return prio_tune(sch, opt, extack);
}

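/* Ask the offload driver to fold hardware counters into sch->bstats/qstats. */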
static int prio_dump_offload(struct Qdisc *sch)
{
        struct tc_prio_qopt_offload hw_stats = {
                .command = TC_PRIO_STATS,
                .handle = sch->handle,
                .parent = sch->parent,
                {
                        .stats = {
                                .bstats = &sch->bstats,
                                .qstats = &sch->qstats,
                        },
                },
        };

        return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_PRIO, &hw_stats);
}

static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct prio_sched_data *q = qdisc_priv(sch);
        unsigned char *b = skb_tail_pointer(skb);
        struct tc_prio_qopt opt;
        int err;

        opt.bands = q->bands;
        memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1);

        err = prio_dump_offload(sch);
        if (err)
                goto nla_put_failure;

        if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
                goto nla_put_failure;

        return skb->len;

nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}

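/* Replace the child qdisc of one band and notify the offload driver of the graft. */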
static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
                      struct Qdisc **old, struct netlink_ext_ack *extack)
{
        struct prio_sched_data *q = qdisc_priv(sch);
        struct tc_prio_qopt_offload graft_offload;
        unsigned long band = arg - 1;

        if (new == NULL)
                new = &noop_qdisc;

        *old = qdisc_replace(sch, new, &q->queues[band]);

        graft_offload.handle = sch->handle;
        graft_offload.parent = sch->parent;
        graft_offload.graft_params.band = band;
        graft_offload.graft_params.child_handle = new->handle;
        graft_offload.command = TC_PRIO_GRAFT;

        qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, *old,
                                   TC_SETUP_QDISC_PRIO, &graft_offload,
                                   extack);
        return 0;
}

static struct Qdisc *
prio_leaf(struct Qdisc *sch, unsigned long arg)
{
        struct prio_sched_data *q = qdisc_priv(sch);
        unsigned long band = arg - 1;

        return q->queues[band];
}

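/* Class handles use minor numbers 1..bands; return 0 for anything else. */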
static unsigned long prio_find(struct Qdisc *sch, u32 classid)
{
        struct prio_sched_data *q = qdisc_priv(sch);
        unsigned long band = TC_H_MIN(classid);

        if (band - 1 >= q->bands)
                return 0;
        return band;
}

static unsigned long prio_bind(struct Qdisc *sch, unsigned long parent, u32 classid)
{
        return prio_find(sch, classid);
}


static void prio_unbind(struct Qdisc *q, unsigned long cl)
{
}

static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb,
                           struct tcmsg *tcm)
{
        struct prio_sched_data *q = qdisc_priv(sch);

        tcm->tcm_handle |= TC_H_MIN(cl);
        tcm->tcm_info = q->queues[cl-1]->handle;
        return 0;
}

static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                                 struct gnet_dump *d)
{
        struct prio_sched_data *q = qdisc_priv(sch);
        struct Qdisc *cl_q;

        cl_q = q->queues[cl - 1];
        if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
                                  d, NULL, &cl_q->bstats) < 0 ||
            gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
                return -1;

        return 0;
}

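/* Walk all bands, reporting each as class handle band + 1. */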
static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        struct prio_sched_data *q = qdisc_priv(sch);
        int prio;

        if (arg->stop)
                return;

        for (prio = 0; prio < q->bands; prio++) {
                if (arg->count < arg->skip) {
                        arg->count++;
                        continue;
                }
                if (arg->fn(sch, prio + 1, arg) < 0) {
                        arg->stop = 1;
                        break;
                }
                arg->count++;
        }
}

static struct tcf_block *prio_tcf_block(struct Qdisc *sch, unsigned long cl,
                                        struct netlink_ext_ack *extack)
{
        struct prio_sched_data *q = qdisc_priv(sch);

        if (cl)
                return NULL;
        return q->block;
}

static const struct Qdisc_class_ops prio_class_ops = {
        .graft          =       prio_graft,
        .leaf           =       prio_leaf,
        .find           =       prio_find,
        .walk           =       prio_walk,
        .tcf_block      =       prio_tcf_block,
        .bind_tcf       =       prio_bind,
        .unbind_tcf     =       prio_unbind,
        .dump           =       prio_dump_class,
        .dump_stats     =       prio_dump_class_stats,
};

static struct Qdisc_ops prio_qdisc_ops __read_mostly = {
        .next           =       NULL,
        .cl_ops         =       &prio_class_ops,
        .id             =       "prio",
        .priv_size      =       sizeof(struct prio_sched_data),
        .enqueue        =       prio_enqueue,
        .dequeue        =       prio_dequeue,
        .peek           =       prio_peek,
        .init           =       prio_init,
        .reset          =       prio_reset,
        .destroy        =       prio_destroy,
        .change         =       prio_tune,
        .dump           =       prio_dump,
        .owner          =       THIS_MODULE,
};

static int __init prio_module_init(void)
{
        return register_qdisc(&prio_qdisc_ops);
}

static void __exit prio_module_exit(void)
{
        unregister_qdisc(&prio_qdisc_ops);
}

module_init(prio_module_init)
module_exit(prio_module_exit)

MODULE_LICENSE("GPL");