[INET]: Consolidate inet(6)_hash_connect.
net/ipv4/inet_hashtables.c
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Generic INET transport hashtables
 *
 * Authors:     Lotsa people, from code originally in tcp
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/ip.h>

/*
 * Allocate and initialize a new local port bind bucket.
 * The bindhash mutex for snum's hash chain must be held here.
 */
struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
                                                 struct inet_bind_hashbucket *head,
                                                 const unsigned short snum)
{
        struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);

        if (tb != NULL) {
                tb->port      = snum;
                tb->fastreuse = 0;
                INIT_HLIST_HEAD(&tb->owners);
                hlist_add_head(&tb->node, &head->chain);
        }
        return tb;
}

/*
 * Caller must hold hashbucket lock for this tb with local BH disabled
 */
void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
{
        if (hlist_empty(&tb->owners)) {
                __hlist_del(&tb->node);
                kmem_cache_free(cachep, tb);
        }
}

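/*
 * Tie @sk to the bind bucket @tb for local port @snum: record the port,
 * add the socket to the bucket's owner list and remember the bucket in
 * icsk_bind_hash.  The caller is expected to hold the bucket's chain lock.
 */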
void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
                    const unsigned short snum)
{
        inet_sk(sk)->num = snum;
        sk_add_bind_node(sk, &tb->owners);
        inet_csk(sk)->icsk_bind_hash = tb;
}

/*
 * Get rid of any references to a local port held by the given sock.
 */
static void __inet_put_port(struct inet_hashinfo *hashinfo, struct sock *sk)
{
        const int bhash = inet_bhashfn(inet_sk(sk)->num, hashinfo->bhash_size);
        struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
        struct inet_bind_bucket *tb;

        spin_lock(&head->lock);
        tb = inet_csk(sk)->icsk_bind_hash;
        __sk_del_bind_node(sk);
        inet_csk(sk)->icsk_bind_hash = NULL;
        inet_sk(sk)->num = 0;
        inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
        spin_unlock(&head->lock);
}

void inet_put_port(struct inet_hashinfo *hashinfo, struct sock *sk)
{
        local_bh_disable();
        __inet_put_port(hashinfo, sk);
        local_bh_enable();
}

EXPORT_SYMBOL(inet_put_port);

/*
 * Without WQ_FLAG_EXCLUSIVE this lock is fine on UP but can be very bad on
 * SMP: when several writers sleep and a reader wakes them up, all but one
 * immediately hit the write lock and grab all the CPUs.  Exclusive sleep
 * solves this, _but_ remember that it adds useless work on UP machines
 * (a wakeup on each exclusive lock release).  It should really be ifdefed.
 */
void inet_listen_wlock(struct inet_hashinfo *hashinfo)
        __acquires(hashinfo->lhash_lock)
{
        write_lock(&hashinfo->lhash_lock);

        if (atomic_read(&hashinfo->lhash_users)) {
                DEFINE_WAIT(wait);

                for (;;) {
                        prepare_to_wait_exclusive(&hashinfo->lhash_wait,
                                                  &wait, TASK_UNINTERRUPTIBLE);
                        if (!atomic_read(&hashinfo->lhash_users))
                                break;
                        write_unlock_bh(&hashinfo->lhash_lock);
                        schedule();
                        write_lock_bh(&hashinfo->lhash_lock);
                }

                finish_wait(&hashinfo->lhash_wait, &wait);
        }
}

EXPORT_SYMBOL(inet_listen_wlock);

/*
 * Don't inline this cruft.  There are some nice properties to exploit here.
 * The BSD API does not allow a listening sock to specify the remote port nor
 * the remote address for the connection, so always assume those are both
 * wildcarded during the search since they can never be otherwise.
 */
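/*
 * Scoring (derived from the checks below): an AF_INET socket starts at 1,
 * an exact rcv_saddr match adds 2 and an exact bound-device match adds 2,
 * so a fully specified match scores 5 and ends the walk early; otherwise
 * the best-scoring listener seen so far wins.
 */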
static struct sock *inet_lookup_listener_slow(const struct hlist_head *head,
                                              const __be32 daddr,
                                              const unsigned short hnum,
                                              const int dif)
{
        struct sock *result = NULL, *sk;
        const struct hlist_node *node;
        int hiscore = -1;

        sk_for_each(sk, node, head) {
                const struct inet_sock *inet = inet_sk(sk);

                if (inet->num == hnum && !ipv6_only_sock(sk)) {
                        const __be32 rcv_saddr = inet->rcv_saddr;
                        int score = sk->sk_family == PF_INET ? 1 : 0;

                        if (rcv_saddr) {
                                if (rcv_saddr != daddr)
                                        continue;
                                score += 2;
                        }
                        if (sk->sk_bound_dev_if) {
                                if (sk->sk_bound_dev_if != dif)
                                        continue;
                                score += 2;
                        }
                        if (score == 5)
                                return sk;
                        if (score > hiscore) {
                                hiscore = score;
                                result  = sk;
                        }
                }
        }
        return result;
}

/* Optimize the common listener case. */
struct sock *__inet_lookup_listener(struct inet_hashinfo *hashinfo,
                                    const __be32 daddr, const unsigned short hnum,
                                    const int dif)
{
        struct sock *sk = NULL;
        const struct hlist_head *head;

        read_lock(&hashinfo->lhash_lock);
        head = &hashinfo->listening_hash[inet_lhashfn(hnum)];
        if (!hlist_empty(head)) {
                const struct inet_sock *inet = inet_sk((sk = __sk_head(head)));

                if (inet->num == hnum && !sk->sk_node.next &&
                    (!inet->rcv_saddr || inet->rcv_saddr == daddr) &&
                    (sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
                    !sk->sk_bound_dev_if)
                        goto sherry_cache;
                sk = inet_lookup_listener_slow(head, daddr, hnum, dif);
        }
        if (sk) {
sherry_cache:
                sock_hold(sk);
        }
        read_unlock(&hashinfo->lhash_lock);
        return sk;
}
EXPORT_SYMBOL_GPL(__inet_lookup_listener);

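/*
 * Look up a fully specified (non-wildcard) connection: search the
 * established chain of this hash bucket first, then the TIME_WAIT chain
 * that shares it.  On a hit the returned sock carries a reference taken
 * via sock_hold().
 */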
struct sock *__inet_lookup_established(struct inet_hashinfo *hashinfo,
                                       const __be32 saddr, const __be16 sport,
                                       const __be32 daddr, const u16 hnum,
                                       const int dif)
{
        INET_ADDR_COOKIE(acookie, saddr, daddr)
        const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
        struct sock *sk;
        const struct hlist_node *node;
        /* Optimize here for direct hit, only listening connections can
         * have wildcards anyway.
         */
        unsigned int hash = inet_ehashfn(daddr, hnum, saddr, sport);
        struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);
        rwlock_t *lock = inet_ehash_lockp(hashinfo, hash);

        prefetch(head->chain.first);
        read_lock(lock);
        sk_for_each(sk, node, &head->chain) {
                if (INET_MATCH(sk, hash, acookie, saddr, daddr, ports, dif))
                        goto hit; /* You sunk my battleship! */
        }

        /* Must check for a TIME_WAIT'er before going to listener hash. */
        sk_for_each(sk, node, &head->twchain) {
                if (INET_TW_MATCH(sk, hash, acookie, saddr, daddr, ports, dif))
                        goto hit;
        }
        sk = NULL;
out:
        read_unlock(lock);
        return sk;
hit:
        sock_hold(sk);
        goto out;
}
EXPORT_SYMBOL_GPL(__inet_lookup_established);

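/*
 * Verify that the four-tuple picked for an outgoing connection (local port
 * @lport plus the socket's addresses and destination port) is not already
 * in use, and hash the socket into the established table if it is unique.
 * A conflicting TIME_WAIT sock may be handed back via @twp, or recycled
 * here, when twsk_unique() allows the reuse.
 */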
/* called with local bh disabled */
static int __inet_check_established(struct inet_timewait_death_row *death_row,
                                    struct sock *sk, __u16 lport,
                                    struct inet_timewait_sock **twp)
{
        struct inet_hashinfo *hinfo = death_row->hashinfo;
        struct inet_sock *inet = inet_sk(sk);
        __be32 daddr = inet->rcv_saddr;
        __be32 saddr = inet->daddr;
        int dif = sk->sk_bound_dev_if;
        INET_ADDR_COOKIE(acookie, saddr, daddr)
        const __portpair ports = INET_COMBINED_PORTS(inet->dport, lport);
        unsigned int hash = inet_ehashfn(daddr, lport, saddr, inet->dport);
        struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
        rwlock_t *lock = inet_ehash_lockp(hinfo, hash);
        struct sock *sk2;
        const struct hlist_node *node;
        struct inet_timewait_sock *tw;

        prefetch(head->chain.first);
        write_lock(lock);

        /* Check TIME-WAIT sockets first. */
        sk_for_each(sk2, node, &head->twchain) {
                tw = inet_twsk(sk2);

                if (INET_TW_MATCH(sk2, hash, acookie, saddr, daddr, ports, dif)) {
                        if (twsk_unique(sk, sk2, twp))
                                goto unique;
                        else
                                goto not_unique;
                }
        }
        tw = NULL;

        /* And established part... */
        sk_for_each(sk2, node, &head->chain) {
                if (INET_MATCH(sk2, hash, acookie, saddr, daddr, ports, dif))
                        goto not_unique;
        }

unique:
        /* Must record num and sport now.  Otherwise we will see a socket
         * with a funny identity in the hash table. */
        inet->num = lport;
        inet->sport = htons(lport);
        sk->sk_hash = hash;
        BUG_TRAP(sk_unhashed(sk));
        __sk_add_node(sk, &head->chain);
        sock_prot_inuse_add(sk->sk_prot, 1);
        write_unlock(lock);

        if (twp) {
                *twp = tw;
                NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
        } else if (tw) {
                /* Silly.  Should hash-dance instead... */
                inet_twsk_deschedule(tw, death_row);
                NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);

                inet_twsk_put(tw);
        }

        return 0;

not_unique:
        write_unlock(lock);
        return -EADDRNOTAVAIL;
}

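/*
 * Per-socket starting offset for the ephemeral port search, derived from
 * the connection's addresses and destination port so that different
 * sockets begin probing at different points in the local port range.
 */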
static inline u32 inet_sk_port_offset(const struct sock *sk)
{
        const struct inet_sock *inet = inet_sk(sk);
        return secure_ipv4_port_ephemeral(inet->rcv_saddr, inet->daddr,
                                          inet->dport);
}

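/*
 * Insert a non-listening socket straight into the established hash table
 * under the per-bucket lock; listening sockets go through __inet_hash()
 * and the listening hash instead.
 */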
void __inet_hash_nolisten(struct inet_hashinfo *hashinfo, struct sock *sk)
{
        struct hlist_head *list;
        rwlock_t *lock;
        struct inet_ehash_bucket *head;

        BUG_TRAP(sk_unhashed(sk));

        sk->sk_hash = inet_sk_ehashfn(sk);
        head = inet_ehash_bucket(hashinfo, sk->sk_hash);
        list = &head->chain;
        lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

        write_lock(lock);
        __sk_add_node(sk, list);
        sock_prot_inuse_add(sk->sk_prot, 1);
        write_unlock(lock);
}
EXPORT_SYMBOL_GPL(__inet_hash_nolisten);

void __inet_hash(struct inet_hashinfo *hashinfo, struct sock *sk)
{
        struct hlist_head *list;
        rwlock_t *lock;

        if (sk->sk_state != TCP_LISTEN) {
                __inet_hash_nolisten(hashinfo, sk);
                return;
        }

        BUG_TRAP(sk_unhashed(sk));
        list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
        lock = &hashinfo->lhash_lock;

        inet_listen_wlock(hashinfo);
        __sk_add_node(sk, list);
        sock_prot_inuse_add(sk->sk_prot, 1);
        write_unlock(lock);
        wake_up(&hashinfo->lhash_wait);
}
EXPORT_SYMBOL_GPL(__inet_hash);

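/*
 * Bind a socket to a local port for an outgoing connection and hash it,
 * using the protocol-supplied @check_established and @hash callbacks so
 * that IPv4 and IPv6 can share this code.  When no port is preselected,
 * the search walks the local port range starting at a per-socket offset
 * (port = low + (i + offset) % remaining), skips bind buckets created by
 * bind() (tb->fastreuse >= 0), and marks buckets created here with
 * fastreuse = -1 so a later bind() cannot take the fast-reuse shortcut.
 * The static hint remembers how far the search got so successive
 * connects keep rotating through the range.
 */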
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
                struct sock *sk,
                int (*check_established)(struct inet_timewait_death_row *,
                        struct sock *, __u16, struct inet_timewait_sock **),
                void (*hash)(struct inet_hashinfo *, struct sock *))
{
        struct inet_hashinfo *hinfo = death_row->hashinfo;
        const unsigned short snum = inet_sk(sk)->num;
        struct inet_bind_hashbucket *head;
        struct inet_bind_bucket *tb;
        int ret;

        if (!snum) {
                int i, remaining, low, high, port;
                static u32 hint;
                u32 offset = hint + inet_sk_port_offset(sk);
                struct hlist_node *node;
                struct inet_timewait_sock *tw = NULL;

                inet_get_local_port_range(&low, &high);
                remaining = (high - low) + 1;

                local_bh_disable();
                for (i = 1; i <= remaining; i++) {
                        port = low + (i + offset) % remaining;
                        head = &hinfo->bhash[inet_bhashfn(port, hinfo->bhash_size)];
                        spin_lock(&head->lock);

                        /* Does not bother with rcv_saddr checks,
                         * because the established check is already
                         * unique enough.
                         */
                        inet_bind_bucket_for_each(tb, node, &head->chain) {
                                if (tb->port == port) {
                                        BUG_TRAP(!hlist_empty(&tb->owners));
                                        if (tb->fastreuse >= 0)
                                                goto next_port;
                                        if (!check_established(death_row, sk,
                                                               port, &tw))
                                                goto ok;
                                        goto next_port;
                                }
                        }

                        tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, head, port);
                        if (!tb) {
                                spin_unlock(&head->lock);
                                break;
                        }
                        tb->fastreuse = -1;
                        goto ok;

                next_port:
                        spin_unlock(&head->lock);
                }
                local_bh_enable();

                return -EADDRNOTAVAIL;

ok:
                hint += i;

                /* Head lock still held and bh's disabled */
                inet_bind_hash(sk, tb, port);
                if (sk_unhashed(sk)) {
                        inet_sk(sk)->sport = htons(port);
                        hash(hinfo, sk);
                }
                spin_unlock(&head->lock);

                if (tw) {
                        inet_twsk_deschedule(tw, death_row);
                        inet_twsk_put(tw);
                }

                ret = 0;
                goto out;
        }

        head = &hinfo->bhash[inet_bhashfn(snum, hinfo->bhash_size)];
        tb  = inet_csk(sk)->icsk_bind_hash;
        spin_lock_bh(&head->lock);
        if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
                hash(hinfo, sk);
                spin_unlock_bh(&head->lock);
                return 0;
        } else {
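                /*
                 * Plain spin_unlock() here on purpose: BHs stay disabled
                 * because check_established() must run with them off, and
                 * local_bh_enable() at "out:" pairs with the
                 * spin_lock_bh() above.
                 */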
                spin_unlock(&head->lock);
                /* No definite answer... Walk to established hash table */
                ret = check_established(death_row, sk, snum, NULL);
out:
                local_bh_enable();
                return ret;
        }
}
EXPORT_SYMBOL_GPL(__inet_hash_connect);

/*
 * Bind a port for a connect operation and hash it.
 */
int inet_hash_connect(struct inet_timewait_death_row *death_row,
                      struct sock *sk)
{
        return __inet_hash_connect(death_row, sk,
                        __inet_check_established, __inet_hash_nolisten);
}

EXPORT_SYMBOL_GPL(inet_hash_connect);
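
/*
 * Usage sketch (illustrative only): a TCP-style connect path is assumed
 * to pick its source port and hash the socket roughly like this, where
 * "tcp_death_row" stands for the protocol's timewait death row:
 *
 *	err = inet_hash_connect(&tcp_death_row, sk);
 *	if (err)
 *		goto failure;
 *	// local port and established-hash entry are now set up;
 *	// the caller can go on to build and send the SYN.
 *
 * IPv6 is expected to reuse __inet_hash_connect() with its own
 * check_established and hash callbacks, which is the point of the
 * consolidation.
 */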