/*
 * inet fragments management
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 *              Authors:        Pavel Emelyanov <xemul@openvz.org>
 *                              Started as consolidation of ipv4/ip_fragment.c,
 *                              ipv6/reassembly.c and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>

#define INETFRAGS_EVICT_BUCKETS   128
#define INETFRAGS_EVICT_MAX       512

/* don't rebuild inetfrag table with new secret more often than this */
#define INETFRAGS_MIN_REBUILD_INTERVAL (5 * HZ)

/* Given the OR of the ECN values of all fragments, apply the RFC 3168
 * section 5.3 requirements.
 * Value: 0xff if the frame should be dropped.
 *        0 or INET_ECN_CE, to be ORed into the final iph->tos field.
 */
const u8 ip_frag_ecn_table[16] = {
        /* at least one fragment had CE, and others ECT_0 or ECT_1 */
        [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]                      = INET_ECN_CE,
        [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]                      = INET_ECN_CE,
        [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]   = INET_ECN_CE,

        /* invalid combinations: drop frame */
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);

static unsigned int
inet_frag_hashfn(const struct inet_frags *f, const struct inet_frag_queue *q)
{
        return f->hashfn(q) & (INETFRAGS_HASHSZ - 1);
}

static bool inet_frag_may_rebuild(struct inet_frags *f)
{
        return time_after(jiffies,
               f->last_rebuild_jiffies + INETFRAGS_MIN_REBUILD_INTERVAL);
}

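/* Pick a new hash secret and move every queue to the bucket it now
 * hashes to.  Runs under the write side of f->rnd_seqlock, so lookups
 * that race with the rekey notice the sequence change and retry.
 */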
static void inet_frag_secret_rebuild(struct inet_frags *f)
{
        int i;

        write_seqlock_bh(&f->rnd_seqlock);

        if (!inet_frag_may_rebuild(f))
                goto out;

        get_random_bytes(&f->rnd, sizeof(u32));

        for (i = 0; i < INETFRAGS_HASHSZ; i++) {
                struct inet_frag_bucket *hb;
                struct inet_frag_queue *q;
                struct hlist_node *n;

                hb = &f->hash[i];
                spin_lock(&hb->chain_lock);

                hlist_for_each_entry_safe(q, n, &hb->chain, list) {
                        unsigned int hval = inet_frag_hashfn(f, q);

                        if (hval != i) {
                                struct inet_frag_bucket *hb_dest;

                                hlist_del(&q->list);

                                /* Relink to new hash chain. */
                                hb_dest = &f->hash[hval];

                                /* This is the only place where we take
                                 * another chain_lock while already holding
                                 * one.  As this will not run concurrently,
                                 * we cannot deadlock on the hb_dest lock below:
                                 * if it is already locked, it will be released
                                 * soon, since the other holder cannot be waiting
                                 * for the hb lock that we have taken above.
                                 */
                                spin_lock_nested(&hb_dest->chain_lock,
                                                 SINGLE_DEPTH_NESTING);
                                hlist_add_head(&q->list, &hb_dest->chain);
                                spin_unlock(&hb_dest->chain_lock);
                        }
                }
                spin_unlock(&hb->chain_lock);
        }

        f->rebuild = false;
        f->last_rebuild_jiffies = jiffies;
out:
        write_sequnlock_bh(&f->rnd_seqlock);
}

static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
{
        return q->net->low_thresh == 0 ||
               frag_mem_limit(q->net) >= q->net->low_thresh;
}

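/* Evict the over-limit queues of one hash bucket: unlink them onto a
 * private list under chain_lock, then run the protocol's frag_expire
 * handler on each of them with the lock dropped.  Returns the number
 * of queues evicted.
 */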
static unsigned int
inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
{
        struct inet_frag_queue *fq;
        struct hlist_node *n;
        unsigned int evicted = 0;
        HLIST_HEAD(expired);

evict_again:
        spin_lock(&hb->chain_lock);

        hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
                if (!inet_fragq_should_evict(fq))
                        continue;

                if (!del_timer(&fq->timer)) {
                        /* fq is expiring right now: take a reference so it
                         * won't be freed under us, wait until the timer has
                         * finished running, then drop the reference.
                         */
                        atomic_inc(&fq->refcnt);
                        spin_unlock(&hb->chain_lock);
                        del_timer_sync(&fq->timer);
                        inet_frag_put(fq, f);
                        goto evict_again;
                }

                fq->flags |= INET_FRAG_EVICTED;
                hlist_del(&fq->list);
                hlist_add_head(&fq->list, &expired);
                ++evicted;
        }

        spin_unlock(&hb->chain_lock);

        hlist_for_each_entry_safe(fq, n, &expired, list)
                f->frag_expire((unsigned long) fq);

        return evicted;
}

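/* Deferred eviction work: scan up to INETFRAGS_EVICT_BUCKETS buckets
 * per run (stopping early once more than INETFRAGS_EVICT_MAX queues
 * have been evicted), remember where to resume in f->next_bucket, and
 * rebuild the hash secret if a rebuild was requested and enough time
 * has passed since the last one.
 */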
static void inet_frag_worker(struct work_struct *work)
{
        unsigned int budget = INETFRAGS_EVICT_BUCKETS;
        unsigned int i, evicted = 0;
        struct inet_frags *f;

        f = container_of(work, struct inet_frags, frags_work);

        BUILD_BUG_ON(INETFRAGS_EVICT_BUCKETS >= INETFRAGS_HASHSZ);

        local_bh_disable();

        for (i = ACCESS_ONCE(f->next_bucket); budget; --budget) {
                evicted += inet_evict_bucket(f, &f->hash[i]);
                i = (i + 1) & (INETFRAGS_HASHSZ - 1);
                if (evicted > INETFRAGS_EVICT_MAX)
                        break;
        }

        f->next_bucket = i;

        local_bh_enable();

        if (f->rebuild && inet_frag_may_rebuild(f))
                inet_frag_secret_rebuild(f);
}

static void inet_frag_schedule_worker(struct inet_frags *f)
{
        if (unlikely(!work_pending(&f->frags_work)))
                schedule_work(&f->frags_work);
}

int inet_frags_init(struct inet_frags *f)
{
        int i;

        INIT_WORK(&f->frags_work, inet_frag_worker);

        for (i = 0; i < INETFRAGS_HASHSZ; i++) {
                struct inet_frag_bucket *hb = &f->hash[i];

                spin_lock_init(&hb->chain_lock);
                INIT_HLIST_HEAD(&hb->chain);
        }

        seqlock_init(&f->rnd_seqlock);
        f->last_rebuild_jiffies = 0;
        f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
                                            NULL);
        if (!f->frags_cachep)
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL(inet_frags_init);

void inet_frags_init_net(struct netns_frags *nf)
{
        init_frag_mem_limit(nf);
}
EXPORT_SYMBOL(inet_frags_init_net);

void inet_frags_fini(struct inet_frags *f)
{
        cancel_work_sync(&f->frags_work);
        kmem_cache_destroy(f->frags_cachep);
}
EXPORT_SYMBOL(inet_frags_fini);

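/* Tear down the per-netns state: drop low_thresh to zero so every
 * queue becomes eligible for eviction, evict all buckets (retrying if
 * a secret rebuild moved queues underneath us), then release the
 * per-netns memory counter.
 */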
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
{
        unsigned int seq;
        int i;

        nf->low_thresh = 0;
        local_bh_disable();

evict_again:
        seq = read_seqbegin(&f->rnd_seqlock);

        for (i = 0; i < INETFRAGS_HASHSZ; i++)
                inet_evict_bucket(f, &f->hash[i]);

        if (read_seqretry(&f->rnd_seqlock, seq))
                goto evict_again;

        local_bh_enable();

        percpu_counter_destroy(&nf->mem);
}
EXPORT_SYMBOL(inet_frags_exit_net);

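/* Hash the queue and lock its bucket.  The read-side seqlock check
 * catches a concurrent secret rebuild: if the hash may have changed
 * while we were taking the lock, drop it and start over.
 */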
static struct inet_frag_bucket *
get_frag_bucket_locked(struct inet_frag_queue *fq, struct inet_frags *f)
__acquires(hb->chain_lock)
{
        struct inet_frag_bucket *hb;
        unsigned int seq, hash;

 restart:
        seq = read_seqbegin(&f->rnd_seqlock);

        hash = inet_frag_hashfn(f, fq);
        hb = &f->hash[hash];

        spin_lock(&hb->chain_lock);
        if (read_seqretry(&f->rnd_seqlock, seq)) {
                spin_unlock(&hb->chain_lock);
                goto restart;
        }

        return hb;
}

static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
{
        struct inet_frag_bucket *hb;

        hb = get_frag_bucket_locked(fq, f);
        if (!(fq->flags & INET_FRAG_EVICTED))
                hlist_del(&fq->list);
        spin_unlock(&hb->chain_lock);
}

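/* Take a queue out of service: stop its timer (dropping the timer's
 * reference), unlink it from the hash unless the evictor has already
 * done so, and mark it complete so it is only torn down once.
 */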
void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
        if (del_timer(&fq->timer))
                atomic_dec(&fq->refcnt);

        if (!(fq->flags & INET_FRAG_COMPLETE)) {
                fq_unlink(fq, f);
                atomic_dec(&fq->refcnt);
                fq->flags |= INET_FRAG_COMPLETE;
        }
}
EXPORT_SYMBOL(inet_frag_kill);

static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
                                  struct sk_buff *skb)
{
        if (f->skb_free)
                f->skb_free(skb);
        kfree_skb(skb);
}

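/* Final teardown once the last reference is gone: free every fragment
 * skb, subtract their truesize plus the queue size from the per-netns
 * memory accounting, run the protocol destructor and free the queue.
 */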
void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
{
        struct sk_buff *fp;
        struct netns_frags *nf;
        unsigned int sum, sum_truesize = 0;

        WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
        WARN_ON(del_timer(&q->timer) != 0);

        /* Release all fragment data. */
        fp = q->fragments;
        nf = q->net;
        while (fp) {
                struct sk_buff *xp = fp->next;

                sum_truesize += fp->truesize;
                frag_kfree_skb(nf, f, fp);
                fp = xp;
        }
        sum = sum_truesize + f->qsize;
        sub_frag_mem_limit(q, sum);

        if (f->destructor)
                f->destructor(q);
        kmem_cache_free(f->frags_cachep, q);
}
EXPORT_SYMBOL(inet_frag_destroy);

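/* Insert a freshly allocated queue into the hash.  With the bucket
 * locked, recheck for an identical queue that another CPU may have
 * inserted first; if one exists, the new queue is completed and
 * dropped and the existing one is returned.  Otherwise the expire
 * timer is armed and references are taken for both the timer and the
 * hash chain before the queue is linked in.
 */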
static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
                                                struct inet_frag_queue *qp_in,
                                                struct inet_frags *f,
                                                void *arg)
{
        struct inet_frag_bucket *hb = get_frag_bucket_locked(qp_in, f);
        struct inet_frag_queue *qp;

#ifdef CONFIG_SMP
        /* On SMP we must recheck the hash chain, because an equivalent
         * entry could have been created on another CPU before we
         * acquired the hash bucket lock.
         */
        hlist_for_each_entry(qp, &hb->chain, list) {
                if (qp->net == nf && f->match(qp, arg)) {
                        atomic_inc(&qp->refcnt);
                        spin_unlock(&hb->chain_lock);
                        qp_in->flags |= INET_FRAG_COMPLETE;
                        inet_frag_put(qp_in, f);
                        return qp;
                }
        }
#endif
        qp = qp_in;
        if (!mod_timer(&qp->timer, jiffies + nf->timeout))
                atomic_inc(&qp->refcnt);

        atomic_inc(&qp->refcnt);
        hlist_add_head(&qp->list, &hb->chain);

        spin_unlock(&hb->chain_lock);

        return qp;
}

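/* Allocate and initialise a new queue.  Refuse and kick the evictor
 * if we are already above the high threshold; otherwise charge
 * f->qsize to the memory limit, run the protocol constructor and set
 * up (but do not yet start) the expire timer.  The caller holds the
 * only reference.
 */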
static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
                                               struct inet_frags *f,
                                               void *arg)
{
        struct inet_frag_queue *q;

        if (frag_mem_limit(nf) > nf->high_thresh) {
                inet_frag_schedule_worker(f);
                return NULL;
        }

        q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
        if (q == NULL)
                return NULL;

        q->net = nf;
        f->constructor(q, arg);
        add_frag_mem_limit(q, f->qsize);

        setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
        spin_lock_init(&q->lock);
        atomic_set(&q->refcnt, 1);

        return q;
}

static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
                                                struct inet_frags *f,
                                                void *arg)
{
        struct inet_frag_queue *q;

        q = inet_frag_alloc(nf, f, arg);
        if (q == NULL)
                return NULL;

        return inet_frag_intern(nf, q, f, arg);
}

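/* Look up a reassembly queue, creating one if it does not exist.
 * Lookups walk at most INETFRAGS_MAXDEPTH chain entries; a longer
 * chain indicates a degenerate hash, so a secret rebuild is requested
 * and the lookup fails with -ENOBUFS instead of adding another entry.
 */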
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
                                       struct inet_frags *f, void *key,
                                       unsigned int hash)
{
        struct inet_frag_bucket *hb;
        struct inet_frag_queue *q;
        int depth = 0;

        if (frag_mem_limit(nf) > nf->low_thresh)
                inet_frag_schedule_worker(f);

        hash &= (INETFRAGS_HASHSZ - 1);
        hb = &f->hash[hash];

        spin_lock(&hb->chain_lock);
        hlist_for_each_entry(q, &hb->chain, list) {
                if (q->net == nf && f->match(q, key)) {
                        atomic_inc(&q->refcnt);
                        spin_unlock(&hb->chain_lock);
                        return q;
                }
                depth++;
        }
        spin_unlock(&hb->chain_lock);

        if (depth <= INETFRAGS_MAXDEPTH)
                return inet_frag_create(nf, f, key);

        if (inet_frag_may_rebuild(f)) {
                if (!f->rebuild)
                        f->rebuild = true;
                inet_frag_schedule_worker(f);
        }

        return ERR_PTR(-ENOBUFS);
}
EXPORT_SYMBOL(inet_frag_find);

void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
                                   const char *prefix)
{
        static const char msg[] = "inet_frag_find: Fragment hash bucket"
                " list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
                ". Dropping fragment.\n";

        if (PTR_ERR(q) == -ENOBUFS)
                LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg);
}
EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);