/*
 * Copyright (c) 2016 Qualcomm Atheros, Inc
 *
 * GPL v2
 *
 * Based on net/sched/sch_fq_codel.c
 */
#ifndef __NET_SCHED_FQ_IMPL_H
#define __NET_SCHED_FQ_IMPL_H

#include <net/fq.h>

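/*
 * A minimal usage sketch of the helpers below, assuming the structures
 * and callback typedefs from net/fq.h (fq_tin_dequeue_t, fq_skb_free_t,
 * fq_flow_get_default_t).  "local" and the my_* callbacks are
 * hypothetical names, not part of this API.  Enqueue, dequeue and reset
 * must run with fq->lock held, as the lockdep assertions below enforce:
 *
 *	fq_init(&local->fq, 1024);
 *	fq_tin_init(&local->tin);
 *
 *	spin_lock_bh(&local->fq.lock);
 *	fq_tin_enqueue(&local->fq, &local->tin, skb,
 *		       my_free_func, my_get_default_func);
 *	skb = fq_tin_dequeue(&local->fq, &local->tin, my_dequeue_func);
 *	spin_unlock_bh(&local->fq.lock);
 *
 *	spin_lock_bh(&local->fq.lock);
 *	fq_reset(&local->fq, my_free_func);
 *	spin_unlock_bh(&local->fq.lock);
 */
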
/* Functions below are embedded into the file that includes this header. */

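/*
 * Dequeue the head skb of @flow, updating the byte and packet backlog of
 * the flow, its tin and the fq, plus the memory accounting.  A drained
 * flow is unlinked from fq->backlogs; otherwise the flow is moved towards
 * the tail so that list stays sorted by descending backlog.
 */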
static struct sk_buff *fq_flow_dequeue(struct fq *fq,
                                       struct fq_flow *flow)
{
        struct fq_tin *tin = flow->tin;
        struct fq_flow *i;
        struct sk_buff *skb;

        lockdep_assert_held(&fq->lock);

        skb = __skb_dequeue(&flow->queue);
        if (!skb)
                return NULL;

        tin->backlog_bytes -= skb->len;
        tin->backlog_packets--;
        flow->backlog -= skb->len;
        fq->backlog--;
        fq->memory_usage -= skb->truesize;

        if (flow->backlog == 0) {
                list_del_init(&flow->backlogchain);
        } else {
                i = flow;

                list_for_each_entry_continue(i, &fq->backlogs, backlogchain)
                        if (i->backlog < flow->backlog)
                                break;

                list_move_tail(&flow->backlogchain,
                               &i->backlogchain);
        }

        return skb;
}

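/*
 * Deficit round-robin scheduler over the tin's flow lists, as in
 * sch_fq_codel: new flows are serviced before old ones; a flow whose
 * deficit is exhausted gets one quantum added and is rotated onto
 * old_flows; a flow that turns out empty gets one pass through old_flows
 * before being unlinked, to prevent starvation.  The skb extraction
 * itself is delegated to @dequeue_func so the includer can layer extra
 * logic (e.g. codel) on top of fq_flow_dequeue().
 */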
static struct sk_buff *fq_tin_dequeue(struct fq *fq,
                                      struct fq_tin *tin,
                                      fq_tin_dequeue_t dequeue_func)
{
        struct fq_flow *flow;
        struct list_head *head;
        struct sk_buff *skb;

        lockdep_assert_held(&fq->lock);

begin:
        head = &tin->new_flows;
        if (list_empty(head)) {
                head = &tin->old_flows;
                if (list_empty(head))
                        return NULL;
        }

        flow = list_first_entry(head, struct fq_flow, flowchain);

        if (flow->deficit <= 0) {
                flow->deficit += fq->quantum;
                list_move_tail(&flow->flowchain,
                               &tin->old_flows);
                goto begin;
        }

        skb = dequeue_func(fq, tin, flow);
        if (!skb) {
                /* force a pass through old_flows to prevent starvation */
                if ((head == &tin->new_flows) &&
                    !list_empty(&tin->old_flows)) {
                        list_move_tail(&flow->flowchain, &tin->old_flows);
                } else {
                        list_del_init(&flow->flowchain);
                        flow->tin = NULL;
                }
                goto begin;
        }

        flow->deficit -= skb->len;
        tin->tx_bytes += skb->len;
        tin->tx_packets++;

        return skb;
}

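/*
 * Map @skb to one of fq->flows_cnt buckets using its perturbed flow hash.
 * If the bucket is already owned by a different tin, fall back to
 * @get_default_func and count the collision; claiming a previously
 * unowned bucket counts as a new flow for the tin.
 */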
static struct fq_flow *fq_flow_classify(struct fq *fq,
                                        struct fq_tin *tin,
                                        struct sk_buff *skb,
                                        fq_flow_get_default_t get_default_func)
{
        struct fq_flow *flow;
        u32 hash;
        u32 idx;

        lockdep_assert_held(&fq->lock);

        hash = skb_get_hash_perturb(skb, fq->perturbation);
        idx = reciprocal_scale(hash, fq->flows_cnt);
        flow = &fq->flows[idx];

        if (flow->tin && flow->tin != tin) {
                flow = get_default_func(fq, tin, idx, skb);
                tin->collisions++;
                fq->collisions++;
        }

        if (!flow->tin)
                tin->flows++;

        return flow;
}

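/*
 * Re-position @flow within fq->backlogs after its backlog grew, walking
 * towards the head until a fatter flow is found, so the list stays
 * sorted by descending backlog and the head is always the fattest flow.
 */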
static void fq_recalc_backlog(struct fq *fq,
                              struct fq_tin *tin,
                              struct fq_flow *flow)
{
        struct fq_flow *i;

        if (list_empty(&flow->backlogchain))
                list_add_tail(&flow->backlogchain, &fq->backlogs);

        i = flow;
        list_for_each_entry_continue_reverse(i, &fq->backlogs,
                                             backlogchain)
                if (i->backlog > flow->backlog)
                        break;

        list_move(&flow->backlogchain, &i->backlogchain);
}

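/*
 * Classify @skb onto a flow, charge it to the backlog and memory
 * accounting, schedule the flow on new_flows with a fresh quantum if it
 * is not already scheduled, and queue the skb.  While the packet or
 * memory limit is exceeded, drop from the head of fq->backlogs (the flow
 * with the largest backlog), handing each dropped skb to @free_func.
 */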
static void fq_tin_enqueue(struct fq *fq,
                           struct fq_tin *tin,
                           struct sk_buff *skb,
                           fq_skb_free_t free_func,
                           fq_flow_get_default_t get_default_func)
{
        struct fq_flow *flow;
        bool oom;

        lockdep_assert_held(&fq->lock);

        flow = fq_flow_classify(fq, tin, skb, get_default_func);

        flow->tin = tin;
        flow->backlog += skb->len;
        tin->backlog_bytes += skb->len;
        tin->backlog_packets++;
        fq->memory_usage += skb->truesize;
        fq->backlog++;

        fq_recalc_backlog(fq, tin, flow);

        if (list_empty(&flow->flowchain)) {
                flow->deficit = fq->quantum;
                list_add_tail(&flow->flowchain,
                              &tin->new_flows);
        }

        __skb_queue_tail(&flow->queue, skb);
        oom = (fq->memory_usage > fq->memory_limit);
        while (fq->backlog > fq->limit || oom) {
                flow = list_first_entry_or_null(&fq->backlogs,
                                                struct fq_flow,
                                                backlogchain);
                if (!flow)
                        return;

                skb = fq_flow_dequeue(fq, flow);
                if (!skb)
                        return;

                free_func(fq, flow->tin, flow, skb);

                flow->tin->overlimit++;
                fq->overlimit++;
                if (oom) {
                        fq->overmemory++;
                        oom = (fq->memory_usage > fq->memory_limit);
                }
        }
}

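/*
 * Drop every skb still queued on @flow via @free_func and unlink the
 * flow from the scheduling and backlog lists.
 */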
static void fq_flow_reset(struct fq *fq,
                          struct fq_flow *flow,
                          fq_skb_free_t free_func)
{
        struct sk_buff *skb;

        while ((skb = fq_flow_dequeue(fq, flow)))
                free_func(fq, flow->tin, flow, skb);

        if (!list_empty(&flow->flowchain))
                list_del_init(&flow->flowchain);

        if (!list_empty(&flow->backlogchain))
                list_del_init(&flow->backlogchain);

        flow->tin = NULL;

        WARN_ON_ONCE(flow->backlog);
}

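/*
 * Reset every flow still scheduled on @tin; the tin must end up with no
 * backlog, which the WARN_ON_ONCE()s below check.
 */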
static void fq_tin_reset(struct fq *fq,
                         struct fq_tin *tin,
                         fq_skb_free_t free_func)
{
        struct list_head *head;
        struct fq_flow *flow;

        for (;;) {
                head = &tin->new_flows;
                if (list_empty(head)) {
                        head = &tin->old_flows;
                        if (list_empty(head))
                                break;
                }

                flow = list_first_entry(head, struct fq_flow, flowchain);
                fq_flow_reset(fq, flow, free_func);
        }

        WARN_ON_ONCE(tin->backlog_bytes);
        WARN_ON_ONCE(tin->backlog_packets);
}

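/* Prepare a flow's list heads and queue before first use. */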
static void fq_flow_init(struct fq_flow *flow)
{
        INIT_LIST_HEAD(&flow->flowchain);
        INIT_LIST_HEAD(&flow->backlogchain);
        __skb_queue_head_init(&flow->queue);
}

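/* Prepare a tin's scheduling lists before first use. */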
static void fq_tin_init(struct fq_tin *tin)
{
        INIT_LIST_HEAD(&tin->new_flows);
        INIT_LIST_HEAD(&tin->old_flows);
}

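/*
 * Initialize @fq and allocate its flow buckets.  The defaults (300 byte
 * quantum, 8192 packet limit, 16 MByte memory limit) are plain struct
 * fields the caller can adjust afterwards.  Returns -ENOMEM if the flow
 * array cannot be allocated.
 */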
static int fq_init(struct fq *fq, int flows_cnt)
{
        int i;

        memset(fq, 0, sizeof(fq[0]));
        INIT_LIST_HEAD(&fq->backlogs);
        spin_lock_init(&fq->lock);
        fq->flows_cnt = max_t(u32, flows_cnt, 1);
        fq->perturbation = prandom_u32();
        fq->quantum = 300;
        fq->limit = 8192;
        fq->memory_limit = 16 << 20; /* 16 MBytes */

        fq->flows = kcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);
        if (!fq->flows)
                return -ENOMEM;

        for (i = 0; i < fq->flows_cnt; i++)
                fq_flow_init(&fq->flows[i]);

        return 0;
}

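/*
 * Counterpart of fq_init(): drop all queued skbs via @free_func and free
 * the flow array.
 */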
static void fq_reset(struct fq *fq,
                     fq_skb_free_t free_func)
{
        int i;

        for (i = 0; i < fq->flows_cnt; i++)
                fq_flow_reset(fq, &fq->flows[i], free_func);

        kfree(fq->flows);
        fq->flows = NULL;
}

#endif /* __NET_SCHED_FQ_IMPL_H */