/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net, which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>

#define VERSION "1.2"
/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	----------------------------------------------------------------

	This started out as a simple way to delay outgoing packets to
	test TCP but has grown to include most of the functionality
	of a full blown network emulator like NISTnet. It can delay
	packets and add random jitter (and correlation). The random
	distribution can be loaded from a table as well to provide
	normal, Pareto, or experimental curves. Packet loss,
	duplication, corruption, and reordering can also be emulated.

	This qdisc does not do classification; that can be handled by
	layering other disciplines. It does not need to do bandwidth
	control either, since that can be handled by using a token
	bucket or other rate control.

	The simulator is limited by the Linux timer resolution
	and will create packet bursts on the HZ boundary (1ms).
*/
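
/* Illustrative usage (an assumption added for clarity, not part of the
 * original source): netem is normally configured from userspace with
 * the tc utility, e.g.
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *	tc qdisc change dev eth0 root netem loss 0.3% 25%
 *
 * The first command adds 100ms of delay with +/-10ms of jitter, each
 * delay value 25% correlated with the previous one; the second adds
 * correlated random loss. These map onto the latency, jitter, loss,
 * and correlation fields parsed by netem_change() below.
 */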
struct netem_sched_data {
	struct Qdisc	*qdisc;
	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;

	/* correlated random number state, one per emulated impairment */
	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	/* optional user-supplied delay distribution table */
	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;
};
/* Time stamp put into socket buffer control block */
struct netem_skb_cb {
	psched_time_t	time_to_send;
};
/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = net_random();
}
/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return net_random();

	value = net_random();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
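
/* Worked example (added for clarity, not in the original source): the
 * blend above is a fixed-point convex combination
 *
 *	answer = value * (1 - rho/2^32) + last * rho/2^32
 *
 * so rho acts as a correlation coefficient scaled to the full u32
 * range. With rho = 0x40000000 (about 25% of 2^32), each new value
 * keeps roughly a quarter of the previous sample and draws the rest
 * from net_random(), giving a correlated but still random sequence.
 */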
/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
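
/* Worked arithmetic (added for clarity, not in the original source):
 * a table entry t is a sample of the target distribution, pre-scaled
 * by NETEM_DIST_SCALE, so the intended result is
 *
 *	mu + t * sigma / NETEM_DIST_SCALE
 *
 * The code splits sigma into quotient and remainder modulo
 * NETEM_DIST_SCALE so the multiplication cannot overflow, and the
 * +/- NETEM_DIST_SCALE/2 adjustment rounds the remainder term to the
 * nearest integer instead of truncating toward zero.
 */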
/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int ret;
	int count = 1;

	pr_debug("netem_enqueue skb=%p\n", skb);

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Random packet drop 0 => none, ~0 => all */
	if (q->loss && q->loss >= get_crandom(&q->loss_cor))
		--count;

	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_BYPASS;
	}

	skb_orphan(skb);

	/*
	 * If we need to duplicate the packet, then re-insert it at the top
	 * of the qdisc tree, since the parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = sch->dev->qdisc;
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		rootq->enqueue(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying the data.
	 * If the packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC))
		    || (skb->ip_summed == CHECKSUM_PARTIAL
			&& skb_checksum_help(skb))) {
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}

		/* flip one random bit in one random header/payload byte */
		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

	cb = (struct netem_skb_cb *)skb->cb;
	if (q->gap == 0			/* not doing reordering */
	    || q->counter < q->gap	/* inside last reordering gap */
	    || q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		PSCHED_GET_TIME(now);
		PSCHED_TADD2(now, delay, cb->time_to_send);
		++q->counter;
		ret = q->qdisc->enqueue(skb, q->qdisc);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		PSCHED_GET_TIME(cb->time_to_send);
		q->counter = 0;
		ret = q->qdisc->ops->requeue(skb, q->qdisc);
	}

	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
		sch->bstats.bytes += skb->len;
		sch->bstats.packets++;
	} else
		sch->qstats.drops++;

	pr_debug("netem: enqueue ret %d\n", ret);
	return ret;
}
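
/* Worked example (added for clarity, not in the original source):
 * impairment probabilities such as q->loss and q->duplicate are u32
 * fixed-point fractions of 2^32, as the "0 => none, ~0 => all" comment
 * above indicates. A 1% loss rate is therefore stored as roughly
 * 0.01 * 2^32 = 42949673, and a packet is dropped whenever
 * get_crandom() returns a value at or below that threshold.
 */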
/* Requeue packets but don't change time stamp */
static int netem_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
		sch->q.qlen++;
		sch->qstats.requeues++;
	}

	return ret;
}
static unsigned int netem_drop(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->dequeue(q->qdisc);
	if (skb) {
		const struct netem_skb_cb *cb
			= (const struct netem_skb_cb *)skb->cb;
		psched_time_t now;

		/* has the packet's send time been reached? */
		PSCHED_GET_TIME(now);

		if (PSCHED_TLESS(cb->time_to_send, now)) {
			pr_debug("netem_dequeue: return skb=%p\n", skb);
			sch->q.qlen--;
			sch->flags &= ~TCQ_F_THROTTLED;
			return skb;
		}

		/* not yet due: put it back and arm the watchdog timer */
		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);

		if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
			qdisc_tree_decrease_qlen(q->qdisc, 1);
			sch->qstats.drops++;
			printk(KERN_ERR "netem: queue discipline %s could not requeue\n",
			       q->qdisc->ops->id);
		}
	}

	return NULL;
}
static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	qdisc_watchdog_cancel(&q->watchdog);
}
/* Pass size change message down to embedded FIFO */
static int set_fifo_limit(struct Qdisc *q, int limit)
{
	struct rtattr *rta;
	int ret = -ENOMEM;

	/* Hack to avoid sending change message to non-FIFO:
	 * skipping the first letter matches "pfifo", "bfifo", "tfifo", ...
	 */
	if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
		return 0;

	rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
	if (rta) {
		rta->rta_type = RTM_NEWQDISC;
		rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt));
		((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit;

		ret = q->ops->change(q, rta);
		kfree(rta);
	}
	return ret;
}
/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned long n = RTA_PAYLOAD(attr)/sizeof(__s16);
	const __s16 *data = RTA_DATA(attr);
	struct disttable *d;
	int i;

	if (n > 65536)
		return -EINVAL;

	d = kmalloc(sizeof(*d) + n*sizeof(d->table[0]), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	/* swap in the new table under the queue lock; free the old one */
	spin_lock_bh(&sch->dev->queue_lock);
	d = xchg(&q->delay_dist, d);
	spin_unlock_bh(&sch->dev->queue_lock);

	kfree(d);
	return 0;
}
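
/* Note (added for clarity, not in the original source): the table is
 * expected to hold distribution samples pre-scaled by NETEM_DIST_SCALE,
 * as consumed by tabledist() above. In practice these tables come from
 * userspace; the iproute2 package ships pre-generated distributions
 * (normal, pareto, paretonormal) that tc passes down in this attribute.
 */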
static int get_correlation(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corr *c = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*c))
		return -EINVAL;

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
	return 0;
}
static int get_reorder(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_reorder *r = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*r))
		return -EINVAL;

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
	return 0;
}
static int get_corrupt(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corrupt *r = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*r))
		return -EINVAL;

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
	return 0;
}
/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct tc_netem_qopt *qopt;
	int ret;

	if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = RTA_DATA(opt);
	ret = set_fifo_limit(q->qdisc, qopt->limit);
	if (ret) {
		pr_debug("netem: can't set fifo limit\n");
		return ret;
	}

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* For compatibility with earlier versions:
	 * if gap is set, need to assume 100% probability.
	 */
	if (q->gap)
		q->reorder = ~0;

	/* Handle nested options after initial queue options.
	 * Should have put all options in nested format but too late now.
	 */
	if (RTA_PAYLOAD(opt) > sizeof(*qopt)) {
		struct rtattr *tb[TCA_NETEM_MAX];
		if (rtattr_parse(tb, TCA_NETEM_MAX,
				 RTA_DATA(opt) + sizeof(*qopt),
				 RTA_PAYLOAD(opt) - sizeof(*qopt)))
			return -EINVAL;

		if (tb[TCA_NETEM_CORR-1]) {
			ret = get_correlation(sch, tb[TCA_NETEM_CORR-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_DELAY_DIST-1]) {
			ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_REORDER-1]) {
			ret = get_reorder(sch, tb[TCA_NETEM_REORDER-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_CORRUPT-1]) {
			ret = get_corrupt(sch, tb[TCA_NETEM_CORRUPT-1]);
			if (ret)
				return ret;
		}
	}

	return 0;
}
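
/* Layout sketch (added for clarity, not in the original source): the
 * message parsed above is a struct tc_netem_qopt followed by optional
 * rtattr-framed extensions, i.e.
 *
 *	[tc_netem_qopt][rtattr TCA_NETEM_CORR][rtattr TCA_NETEM_REORDER]...
 *
 * which is why the extensions are parsed starting at
 * RTA_DATA(opt) + sizeof(*qopt) rather than from a properly nested
 * attribute, as the comment in netem_change() concedes.
 */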
/*
 * Special case version of FIFO queue for use by netem.
 * It queues packets in order based on timestamps in the skb's
 * control block.
 */
struct fifo_sched_data {
	u32 limit;
	psched_time_t oldest;
};

static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct sk_buff_head *list = &sch->q;
	psched_time_t tnext = ((struct netem_skb_cb *)nskb->cb)->time_to_send;
	struct sk_buff *skb;

	if (likely(skb_queue_len(list) < q->limit)) {
		/* Optimize for add at tail */
		if (likely(skb_queue_empty(list) || !PSCHED_TLESS(tnext, q->oldest))) {
			q->oldest = tnext;
			return qdisc_enqueue_tail(nskb, sch);
		}

		skb_queue_reverse_walk(list, skb) {
			const struct netem_skb_cb *cb
				= (const struct netem_skb_cb *)skb->cb;

			if (!PSCHED_TLESS(tnext, cb->time_to_send))
				break;
		}

		__skb_queue_after(list, skb, nskb);

		sch->qstats.backlog += nskb->len;
		sch->bstats.bytes += nskb->len;
		sch->bstats.packets++;

		return NET_XMIT_SUCCESS;
	}

	return qdisc_reshape_fail(nskb, sch);
}
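
/* Design note (added for clarity, not in the original source): the
 * reverse walk searches from the tail because most packets carry the
 * newest time_to_send, so insertion is O(1) in the common case, while
 * the list stays sorted even when jitter or reordering produces an
 * earlier timestamp than packets already queued.
 */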
static int tfifo_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct fifo_sched_data *q = qdisc_priv(sch);

	if (opt) {
		struct tc_fifo_qopt *ctl = RTA_DATA(opt);
		if (RTA_PAYLOAD(opt) < sizeof(*ctl))
			return -EINVAL;

		q->limit = ctl->limit;
	} else
		q->limit = max_t(u32, sch->dev->tx_queue_len, 1);

	PSCHED_SET_PASTPERFECT(q->oldest);
	return 0;
}
static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct tc_fifo_qopt opt = { .limit = q->limit };

	RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

rtattr_failure:
	return -1;
}
static struct Qdisc_ops tfifo_qdisc_ops = {
	.id		=	"tfifo",
	.priv_size	=	sizeof(struct fifo_sched_data),
	.enqueue	=	tfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.requeue	=	qdisc_requeue,
	.drop		=	qdisc_queue_drop,
	.init		=	tfifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	tfifo_init,
	.dump		=	tfifo_dump,
};
static int netem_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->qdisc = qdisc_create_dflt(sch->dev, &tfifo_qdisc_ops,
				     TC_H_MAKE(sch->handle, 1));
	if (!q->qdisc) {
		pr_debug("netem: qdisc create failed\n");
		return -ENOMEM;
	}

	ret = netem_change(sch, opt);
	if (ret) {
		pr_debug("netem: change failed\n");
		qdisc_destroy(q->qdisc);
	}
	return ret;
}
static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
	kfree(q->delay_dist);
}
static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct rtattr *rta = (struct rtattr *) b;
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	RTA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	RTA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	RTA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);

	rta->rta_len = skb_tail_pointer(skb) - b;

	return skb->len;

rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}
static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = xchg(&q->qdisc, new);
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}
static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}
static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static int netem_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			      struct rtattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int netem_delete(struct Qdisc *sch, unsigned long arg)
{
	return -ENOSYS;
}
static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto **netem_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}
static struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.get		=	netem_get,
	.put		=	netem_put,
	.change		=	netem_change_class,
	.delete		=	netem_delete,
	.walk		=	netem_walk,
	.tcf_chain	=	netem_find_tcf,
	.dump		=	netem_dump_class,
};
static struct Qdisc_ops netem_qdisc_ops = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.requeue	=	netem_requeue,
	.drop		=	netem_drop,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};
static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)

MODULE_LICENSE("GPL");